blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 264 | content_id stringlengths 40 40 | detected_licenses listlengths 0 85 | license_type stringclasses 2
values | repo_name stringlengths 5 140 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 905
values | visit_date timestamp[us]date 2015-08-09 11:21:18 2023-09-06 10:45:07 | revision_date timestamp[us]date 1997-09-14 05:04:47 2023-09-17 19:19:19 | committer_date timestamp[us]date 1997-09-14 05:04:47 2023-09-06 06:22:19 | github_id int64 3.89k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22
values | gha_event_created_at timestamp[us]date 2012-06-07 00:51:45 2023-09-14 21:58:39 ⌀ | gha_created_at timestamp[us]date 2008-03-27 23:40:48 2023-08-21 23:17:38 ⌀ | gha_language stringclasses 141
values | src_encoding stringclasses 34
values | language stringclasses 1
value | is_vendor bool 1
class | is_generated bool 2
classes | length_bytes int64 3 10.4M | extension stringclasses 115
values | content stringlengths 3 10.4M | authors listlengths 1 1 | author_id stringlengths 0 158 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f44ca22c4f15cecfa7ca9af7931ee8685718ba5 | d1d31c9bb9bb1347d0c31ff39ce781236cd1c89a | /test/ComponentTypeInformationTest.cpp | 5b929f036d0d16a1610d870d08181b32319b924a | [] | no_license | defacto2k15/pwAsteroids | bf8cb516f788565c11601095348581d74f87897b | 546678ce169a5a1855f2c5c2752f5e00c0c77d0f | refs/heads/master | 2021-01-21T14:04:28.377090 | 2016-05-25T12:58:52 | 2016-05-25T12:58:52 | 44,272,235 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 745 | cpp | //
// Created by defacto on 26.10.15.
//
#include <lib/gmock-1.7.0/gtest/include/gtest/gtest.h>
#include <Model/components/Component.h>
// Minimal fixture hierarchy for the type-checker tests: C1 and C2 derive
// directly from Component, while C3 derives from C2 (so a C3 is-a C2 is-a
// Component). All three are empty — only their static types matter here.
class C1 : public Component{};
class C2 : public Component{};
class C3 : public C2{};
TEST(ComponentTypeInformationTest, WorksForTwoComponentsOfTheSameTypeWhenSecondIsCastedToBase){
    // A null C3* is sufficient here: the original already passed nullptr, so
    // the checker's constructor evidently only uses the pointer's static type.
    C3 *ptrToC3 = nullptr;
    ComponentTypeChecker checker(ptrToC3);
    // Stack-allocated instance: the original did `new C3` and never deleted
    // it, leaking memory on every test run.
    C3 anotherC3;
    Component *ptrToC3AsComponent = &anotherC3;
    // A C3 seen through a Component* must still be recognized as a C3.
    ASSERT_TRUE(checker.wasCastSuccesfull(ptrToC3AsComponent));
}
TEST(ComponentTypeInformationTest, falseIfComponentsAreOfDiffrentType){
    // Stack-allocated instance: the original leaked a `new C1`.
    C1 c1;
    // A null C2* is enough to describe the type being checked for.
    C2 *ptrToC2 = nullptr;
    ComponentTypeChecker checker(ptrToC2);
    // C1 is not a C2 (they are sibling subclasses of Component), so the
    // cast must be reported as unsuccessful.
    ASSERT_FALSE(checker.wasCastSuccesfull(&c1));
}
| [
"defacto2k15@gmail.com"
] | defacto2k15@gmail.com |
936c6b1f0f486a1ee84416fc0d8852225ea51cad | 1097ed333a4000634e68a590ee6ffc6129ae61e3 | /Offer/CPP/旋转数组的最小数字.cpp | ee33cc555c7f33c09616b0d831e08e40937f496d | [
"MIT"
] | permissive | AutuanLiu/Code-Storm2019 | 1bbe890c7ca0d033c32348173bfebba612623a90 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | refs/heads/master | 2020-04-23T07:03:08.975232 | 2019-10-24T08:56:26 | 2019-10-24T08:56:26 | 170,995,032 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,299 | cpp | // 旋转数组的最小数字.cpp
// Binary-search solution: find the minimum element of a rotated
// non-decreasing array ("旋转数组的最小数字").
class Solution {
public:
    // Returns the smallest element of `rotateArray`, assumed to be a
    // non-decreasing array rotated at some pivot. Returns -1 when the
    // input is empty (error sentinel).
    int minNumberInRotateArray(vector<int> rotateArray)
    {
        if (rotateArray.empty())
            return -1; // error
        int ptr1 = 0;
        int ptr2 = rotateArray.size() - 1;
        // If the first element is already smaller than the last, the array
        // was not actually rotated, so index 0 (== ptr1) holds the minimum.
        int mid = ptr1;
        while (rotateArray[ptr1] >= rotateArray[ptr2]) {
            // The two pointers are adjacent: ptr2 is the rotation point,
            // i.e. the minimum.
            if (ptr2 - ptr1 == 1) {
                mid = ptr2;
                break;
            }
            mid = (ptr1 + ptr2) / 2;
            // Combined with the loop condition, this can only hold when all
            // three probed values are equal; binary search then cannot decide
            // which half holds the minimum, so fall back to a linear scan.
            if (rotateArray[mid] >= rotateArray[ptr1] && rotateArray[mid] <= rotateArray[ptr2])
                return inorderSearch(rotateArray);
            if (rotateArray[mid] >= rotateArray[ptr1])
                ptr1 = mid; // minimum lies in the right half
            else if (rotateArray[mid] <= rotateArray[ptr2])
                ptr2 = mid; // minimum lies in the left half
        }
        return rotateArray[mid];
    }

    // Linear fallback: plain minimum scan. Takes the vector by const
    // reference instead of by value (the original copied the whole array
    // on every call). Precondition: rotateArray is non-empty.
    int inorderSearch(const vector<int>& rotateArray)
    {
        int min_num = rotateArray[0];
        for (auto i = rotateArray.begin(); i != rotateArray.end(); i++) {
            if (*i < min_num)
                min_num = *i;
        }
        return min_num;
    }
};
// Sequential scan: the minimum sits immediately after the single "drop"
// between the two sorted halves of the rotated array.
class Solution {
public:
    // Returns the minimum of the rotated non-decreasing array, or -1 when
    // the input is empty.
    int minNumberInRotateArray(vector<int> rotateArray)
    {
        if (rotateArray.empty())
            return -1;
        // The first position whose value drops below its predecessor is
        // the rotation point; the value right after it is the minimum.
        const int count = static_cast<int>(rotateArray.size());
        for (int pos = 0; pos + 1 < count; ++pos) {
            if (rotateArray[pos] > rotateArray[pos + 1])
                return rotateArray[pos + 1];
        }
        // No drop found: the array is fully sorted, so the head is smallest.
        return rotateArray[0];
    }
};
// Simplest linear version: return the first element that is smaller than
// its predecessor, otherwise the head of the (already sorted) array.
class Solution {
public:
    // Returns the minimum of the rotated non-decreasing array, or -1 when
    // the input is empty. The original indexed rotateArray[0] on an empty
    // vector, which is undefined behavior; the guard below fixes that and
    // matches the -1 sentinel used by the other versions in this file.
    int minNumberInRotateArray(vector<int> rotateArray)
    {
        if (rotateArray.empty())
            return -1; // error sentinel
        int n = rotateArray.size();
        for (int pos = 1; pos < n; pos++) {
            if (rotateArray[pos] < rotateArray[pos - 1]) {
                return rotateArray[pos];
            }
        }
        return rotateArray[0];
    }
};
| [
"autuanliu@163.com"
] | autuanliu@163.com |
7837ee3dad481b3f279d17fa1497053a19968ef5 | 07fbf3ab32180d94afb1eadd1a8f8ddd657e8656 | /NWNXLib/API/Linux/API/CExoArrayListTemplatedCFileInfo.hpp | 69f8472d3d01916bd71abe4ba9b74906d63bd0b8 | [
"MIT"
] | permissive | ELadner/nwnx-unified | 3b322e8722eab70c9c72a45e71013b89dbbf1e89 | 09f8e8a0c1474e8b16d4746f9cb57ca870a17ce5 | refs/heads/master | 2021-09-04T08:52:06.521478 | 2018-01-17T13:04:29 | 2018-01-17T13:04:29 | 117,771,934 | 0 | 0 | null | 2018-01-17T02:29:14 | 2018-01-17T02:29:14 | null | UTF-8 | C++ | false | false | 931 | hpp | #pragma once
#include <cstdint>
#include "CFileInfo.hpp"
namespace NWNXLib {
namespace API {
// NOTE(review): this appears to mirror the game engine's in-memory layout
// of CExoArrayList<CFileInfo> (buffer pointer + count + capacity) — confirm
// against the engine before reordering or resizing any field.
struct CExoArrayListTemplatedCFileInfo
{
CFileInfo* element;   // backing buffer
int32_t num;          // presumably the current element count — confirm
int32_t array_size;   // presumably the allocated capacity of `element` — confirm
// The below are auto generated stubs.
CExoArrayListTemplatedCFileInfo() = default;
CExoArrayListTemplatedCFileInfo(const CExoArrayListTemplatedCFileInfo&) = default;
CExoArrayListTemplatedCFileInfo& operator=(const CExoArrayListTemplatedCFileInfo&) = default;
~CExoArrayListTemplatedCFileInfo();
void Add(CFileInfo);
void Allocate(int32_t);
};
// Free-function wrappers taking an explicit `thisPtr`. NOTE(review): these
// look like bindings to the engine's own implementations — verify in the
// corresponding .cpp before relying on their semantics.
void CExoArrayListTemplatedCFileInfo__CExoArrayListTemplatedCFileInfoDtor(CExoArrayListTemplatedCFileInfo* thisPtr);
void CExoArrayListTemplatedCFileInfo__Add(CExoArrayListTemplatedCFileInfo* thisPtr, CFileInfo);
void CExoArrayListTemplatedCFileInfo__Allocate(CExoArrayListTemplatedCFileInfo* thisPtr, int32_t);
}
}
| [
"liarethnwn@gmail.com"
] | liarethnwn@gmail.com |
cc0b80bbfaf9d74f44dfe7bcd86ae46a8320ea63 | 102eae403665433dc48f48196a7e9210ca344678 | /MultiThreads/Generated Files/winrt/Windows.Devices.I2c.h | 76c119e200ab095dd01dd690ce0ec172f3c02273 | [] | no_license | AIchemists/multiThreads | 5fd583c46314296cc745d3afa23dbe1994dff9f6 | df57a18dff84cd51eda31184a9ba1c811d30e2c0 | refs/heads/master | 2022-12-18T16:09:57.390044 | 2020-09-22T01:57:58 | 2020-09-22T01:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 21,568 | h | // WARNING: Please don't edit this file. It was generated by C++/WinRT v2.0.200917.4
#ifndef WINRT_Windows_Devices_I2c_H
#define WINRT_Windows_Devices_I2c_H
#include "winrt/base.h"
static_assert(winrt::check_version(CPPWINRT_VERSION, "2.0.200917.4"), "Mismatched C++/WinRT headers.");
#define CPPWINRT_VERSION "2.0.200917.4"
#include "winrt/Windows.Devices.h"
#include "winrt/impl/Windows.Devices.I2c.Provider.2.h"
#include "winrt/impl/Windows.Foundation.2.h"
#include "winrt/impl/Windows.Foundation.Collections.2.h"
#include "winrt/impl/Windows.Devices.I2c.2.h"
namespace winrt::impl
{
template <typename D> WINRT_IMPL_AUTO(int32_t) consume_Windows_Devices_I2c_II2cConnectionSettings<D>::SlaveAddress() const
{
int32_t value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettings)->get_SlaveAddress(&value));
return value;
}
template <typename D> WINRT_IMPL_AUTO(void) consume_Windows_Devices_I2c_II2cConnectionSettings<D>::SlaveAddress(int32_t value) const
{
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettings)->put_SlaveAddress(value));
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cBusSpeed) consume_Windows_Devices_I2c_II2cConnectionSettings<D>::BusSpeed() const
{
Windows::Devices::I2c::I2cBusSpeed value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettings)->get_BusSpeed(reinterpret_cast<int32_t*>(&value)));
return value;
}
template <typename D> WINRT_IMPL_AUTO(void) consume_Windows_Devices_I2c_II2cConnectionSettings<D>::BusSpeed(Windows::Devices::I2c::I2cBusSpeed const& value) const
{
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettings)->put_BusSpeed(static_cast<int32_t>(value)));
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cSharingMode) consume_Windows_Devices_I2c_II2cConnectionSettings<D>::SharingMode() const
{
Windows::Devices::I2c::I2cSharingMode value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettings)->get_SharingMode(reinterpret_cast<int32_t*>(&value)));
return value;
}
template <typename D> WINRT_IMPL_AUTO(void) consume_Windows_Devices_I2c_II2cConnectionSettings<D>::SharingMode(Windows::Devices::I2c::I2cSharingMode const& value) const
{
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettings)->put_SharingMode(static_cast<int32_t>(value)));
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cConnectionSettings) consume_Windows_Devices_I2c_II2cConnectionSettingsFactory<D>::Create(int32_t slaveAddress) const
{
void* value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cConnectionSettingsFactory)->Create(slaveAddress, &value));
return Windows::Devices::I2c::I2cConnectionSettings{ value, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cDevice) consume_Windows_Devices_I2c_II2cController<D>::GetDevice(Windows::Devices::I2c::I2cConnectionSettings const& settings) const
{
void* device{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cController)->GetDevice(*(void**)(&settings), &device));
return Windows::Devices::I2c::I2cDevice{ device, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(Windows::Foundation::IAsyncOperation<Windows::Foundation::Collections::IVectorView<Windows::Devices::I2c::I2cController>>) consume_Windows_Devices_I2c_II2cControllerStatics<D>::GetControllersAsync(Windows::Devices::I2c::Provider::II2cProvider const& provider) const
{
void* operation{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cControllerStatics)->GetControllersAsync(*(void**)(&provider), &operation));
return Windows::Foundation::IAsyncOperation<Windows::Foundation::Collections::IVectorView<Windows::Devices::I2c::I2cController>>{ operation, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cController>) consume_Windows_Devices_I2c_II2cControllerStatics<D>::GetDefaultAsync() const
{
void* operation{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cControllerStatics)->GetDefaultAsync(&operation));
return Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cController>{ operation, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(hstring) consume_Windows_Devices_I2c_II2cDevice<D>::DeviceId() const
{
void* value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->get_DeviceId(&value));
return hstring{ value, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cConnectionSettings) consume_Windows_Devices_I2c_II2cDevice<D>::ConnectionSettings() const
{
void* value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->get_ConnectionSettings(&value));
return Windows::Devices::I2c::I2cConnectionSettings{ value, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(void) consume_Windows_Devices_I2c_II2cDevice<D>::Write(array_view<uint8_t const> buffer) const
{
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->Write(buffer.size(), get_abi(buffer)));
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cTransferResult) consume_Windows_Devices_I2c_II2cDevice<D>::WritePartial(array_view<uint8_t const> buffer) const
{
Windows::Devices::I2c::I2cTransferResult result{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->WritePartial(buffer.size(), get_abi(buffer), put_abi(result)));
return result;
}
template <typename D> WINRT_IMPL_AUTO(void) consume_Windows_Devices_I2c_II2cDevice<D>::Read(array_view<uint8_t> buffer) const
{
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->Read(buffer.size(), put_abi(buffer)));
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cTransferResult) consume_Windows_Devices_I2c_II2cDevice<D>::ReadPartial(array_view<uint8_t> buffer) const
{
Windows::Devices::I2c::I2cTransferResult result{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->ReadPartial(buffer.size(), put_abi(buffer), put_abi(result)));
return result;
}
template <typename D> WINRT_IMPL_AUTO(void) consume_Windows_Devices_I2c_II2cDevice<D>::WriteRead(array_view<uint8_t const> writeBuffer, array_view<uint8_t> readBuffer) const
{
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->WriteRead(writeBuffer.size(), get_abi(writeBuffer), readBuffer.size(), put_abi(readBuffer)));
}
template <typename D> WINRT_IMPL_AUTO(Windows::Devices::I2c::I2cTransferResult) consume_Windows_Devices_I2c_II2cDevice<D>::WriteReadPartial(array_view<uint8_t const> writeBuffer, array_view<uint8_t> readBuffer) const
{
Windows::Devices::I2c::I2cTransferResult result{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDevice)->WriteReadPartial(writeBuffer.size(), get_abi(writeBuffer), readBuffer.size(), put_abi(readBuffer), put_abi(result)));
return result;
}
template <typename D> WINRT_IMPL_AUTO(hstring) consume_Windows_Devices_I2c_II2cDeviceStatics<D>::GetDeviceSelector() const
{
void* value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDeviceStatics)->GetDeviceSelector(&value));
return hstring{ value, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(hstring) consume_Windows_Devices_I2c_II2cDeviceStatics<D>::GetDeviceSelector(param::hstring const& friendlyName) const
{
void* value{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDeviceStatics)->GetDeviceSelectorFromFriendlyName(*(void**)(&friendlyName), &value));
return hstring{ value, take_ownership_from_abi };
}
template <typename D> WINRT_IMPL_AUTO(Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cDevice>) consume_Windows_Devices_I2c_II2cDeviceStatics<D>::FromIdAsync(param::hstring const& deviceId, Windows::Devices::I2c::I2cConnectionSettings const& settings) const
{
void* operation{};
check_hresult(WINRT_IMPL_SHIM(Windows::Devices::I2c::II2cDeviceStatics)->FromIdAsync(*(void**)(&deviceId), *(void**)(&settings), &operation));
return Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cDevice>{ operation, take_ownership_from_abi };
}
#ifndef WINRT_LEAN_AND_MEAN
template <typename D>
struct produce<D, Windows::Devices::I2c::II2cConnectionSettings> : produce_base<D, Windows::Devices::I2c::II2cConnectionSettings>
{
int32_t __stdcall get_SlaveAddress(int32_t* value) noexcept final try
{
typename D::abi_guard guard(this->shim());
*value = detach_from<int32_t>(this->shim().SlaveAddress());
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall put_SlaveAddress(int32_t value) noexcept final try
{
typename D::abi_guard guard(this->shim());
this->shim().SlaveAddress(value);
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall get_BusSpeed(int32_t* value) noexcept final try
{
typename D::abi_guard guard(this->shim());
*value = detach_from<Windows::Devices::I2c::I2cBusSpeed>(this->shim().BusSpeed());
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall put_BusSpeed(int32_t value) noexcept final try
{
typename D::abi_guard guard(this->shim());
this->shim().BusSpeed(*reinterpret_cast<Windows::Devices::I2c::I2cBusSpeed const*>(&value));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall get_SharingMode(int32_t* value) noexcept final try
{
typename D::abi_guard guard(this->shim());
*value = detach_from<Windows::Devices::I2c::I2cSharingMode>(this->shim().SharingMode());
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall put_SharingMode(int32_t value) noexcept final try
{
typename D::abi_guard guard(this->shim());
this->shim().SharingMode(*reinterpret_cast<Windows::Devices::I2c::I2cSharingMode const*>(&value));
return 0;
}
catch (...) { return to_hresult(); }
};
#endif
#ifndef WINRT_LEAN_AND_MEAN
template <typename D>
struct produce<D, Windows::Devices::I2c::II2cConnectionSettingsFactory> : produce_base<D, Windows::Devices::I2c::II2cConnectionSettingsFactory>
{
int32_t __stdcall Create(int32_t slaveAddress, void** value) noexcept final try
{
clear_abi(value);
typename D::abi_guard guard(this->shim());
*value = detach_from<Windows::Devices::I2c::I2cConnectionSettings>(this->shim().Create(slaveAddress));
return 0;
}
catch (...) { return to_hresult(); }
};
#endif
#ifndef WINRT_LEAN_AND_MEAN
template <typename D>
struct produce<D, Windows::Devices::I2c::II2cController> : produce_base<D, Windows::Devices::I2c::II2cController>
{
int32_t __stdcall GetDevice(void* settings, void** device) noexcept final try
{
clear_abi(device);
typename D::abi_guard guard(this->shim());
*device = detach_from<Windows::Devices::I2c::I2cDevice>(this->shim().GetDevice(*reinterpret_cast<Windows::Devices::I2c::I2cConnectionSettings const*>(&settings)));
return 0;
}
catch (...) { return to_hresult(); }
};
#endif
#ifndef WINRT_LEAN_AND_MEAN
template <typename D>
struct produce<D, Windows::Devices::I2c::II2cControllerStatics> : produce_base<D, Windows::Devices::I2c::II2cControllerStatics>
{
int32_t __stdcall GetControllersAsync(void* provider, void** operation) noexcept final try
{
clear_abi(operation);
typename D::abi_guard guard(this->shim());
*operation = detach_from<Windows::Foundation::IAsyncOperation<Windows::Foundation::Collections::IVectorView<Windows::Devices::I2c::I2cController>>>(this->shim().GetControllersAsync(*reinterpret_cast<Windows::Devices::I2c::Provider::II2cProvider const*>(&provider)));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall GetDefaultAsync(void** operation) noexcept final try
{
clear_abi(operation);
typename D::abi_guard guard(this->shim());
*operation = detach_from<Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cController>>(this->shim().GetDefaultAsync());
return 0;
}
catch (...) { return to_hresult(); }
};
#endif
#ifndef WINRT_LEAN_AND_MEAN
template <typename D>
struct produce<D, Windows::Devices::I2c::II2cDevice> : produce_base<D, Windows::Devices::I2c::II2cDevice>
{
int32_t __stdcall get_DeviceId(void** value) noexcept final try
{
clear_abi(value);
typename D::abi_guard guard(this->shim());
*value = detach_from<hstring>(this->shim().DeviceId());
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall get_ConnectionSettings(void** value) noexcept final try
{
clear_abi(value);
typename D::abi_guard guard(this->shim());
*value = detach_from<Windows::Devices::I2c::I2cConnectionSettings>(this->shim().ConnectionSettings());
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall Write(uint32_t __bufferSize, uint8_t* buffer) noexcept final try
{
typename D::abi_guard guard(this->shim());
this->shim().Write(array_view<uint8_t const>(reinterpret_cast<uint8_t const *>(buffer), reinterpret_cast<uint8_t const *>(buffer) + __bufferSize));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall WritePartial(uint32_t __bufferSize, uint8_t* buffer, struct struct_Windows_Devices_I2c_I2cTransferResult* result) noexcept final try
{
zero_abi<Windows::Devices::I2c::I2cTransferResult>(result);
typename D::abi_guard guard(this->shim());
*result = detach_from<Windows::Devices::I2c::I2cTransferResult>(this->shim().WritePartial(array_view<uint8_t const>(reinterpret_cast<uint8_t const *>(buffer), reinterpret_cast<uint8_t const *>(buffer) + __bufferSize)));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall Read(uint32_t __bufferSize, uint8_t* buffer) noexcept final try
{
typename D::abi_guard guard(this->shim());
this->shim().Read(array_view<uint8_t>(reinterpret_cast<uint8_t*>(buffer), reinterpret_cast<uint8_t*>(buffer) + __bufferSize));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall ReadPartial(uint32_t __bufferSize, uint8_t* buffer, struct struct_Windows_Devices_I2c_I2cTransferResult* result) noexcept final try
{
zero_abi<Windows::Devices::I2c::I2cTransferResult>(result);
typename D::abi_guard guard(this->shim());
*result = detach_from<Windows::Devices::I2c::I2cTransferResult>(this->shim().ReadPartial(array_view<uint8_t>(reinterpret_cast<uint8_t*>(buffer), reinterpret_cast<uint8_t*>(buffer) + __bufferSize)));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall WriteRead(uint32_t __writeBufferSize, uint8_t* writeBuffer, uint32_t __readBufferSize, uint8_t* readBuffer) noexcept final try
{
typename D::abi_guard guard(this->shim());
this->shim().WriteRead(array_view<uint8_t const>(reinterpret_cast<uint8_t const *>(writeBuffer), reinterpret_cast<uint8_t const *>(writeBuffer) + __writeBufferSize), array_view<uint8_t>(reinterpret_cast<uint8_t*>(readBuffer), reinterpret_cast<uint8_t*>(readBuffer) + __readBufferSize));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall WriteReadPartial(uint32_t __writeBufferSize, uint8_t* writeBuffer, uint32_t __readBufferSize, uint8_t* readBuffer, struct struct_Windows_Devices_I2c_I2cTransferResult* result) noexcept final try
{
zero_abi<Windows::Devices::I2c::I2cTransferResult>(result);
typename D::abi_guard guard(this->shim());
*result = detach_from<Windows::Devices::I2c::I2cTransferResult>(this->shim().WriteReadPartial(array_view<uint8_t const>(reinterpret_cast<uint8_t const *>(writeBuffer), reinterpret_cast<uint8_t const *>(writeBuffer) + __writeBufferSize), array_view<uint8_t>(reinterpret_cast<uint8_t*>(readBuffer), reinterpret_cast<uint8_t*>(readBuffer) + __readBufferSize)));
return 0;
}
catch (...) { return to_hresult(); }
};
#endif
template <typename D>
struct produce<D, Windows::Devices::I2c::II2cDeviceStatics> : produce_base<D, Windows::Devices::I2c::II2cDeviceStatics>
{
int32_t __stdcall GetDeviceSelector(void** value) noexcept final try
{
clear_abi(value);
typename D::abi_guard guard(this->shim());
*value = detach_from<hstring>(this->shim().GetDeviceSelector());
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall GetDeviceSelectorFromFriendlyName(void* friendlyName, void** value) noexcept final try
{
clear_abi(value);
typename D::abi_guard guard(this->shim());
*value = detach_from<hstring>(this->shim().GetDeviceSelector(*reinterpret_cast<hstring const*>(&friendlyName)));
return 0;
}
catch (...) { return to_hresult(); }
int32_t __stdcall FromIdAsync(void* deviceId, void* settings, void** operation) noexcept final try
{
clear_abi(operation);
typename D::abi_guard guard(this->shim());
*operation = detach_from<Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cDevice>>(this->shim().FromIdAsync(*reinterpret_cast<hstring const*>(&deviceId), *reinterpret_cast<Windows::Devices::I2c::I2cConnectionSettings const*>(&settings)));
return 0;
}
catch (...) { return to_hresult(); }
};
}
WINRT_EXPORT namespace winrt::Windows::Devices::I2c
{
inline I2cConnectionSettings::I2cConnectionSettings(int32_t slaveAddress) :
I2cConnectionSettings(impl::call_factory<I2cConnectionSettings, II2cConnectionSettingsFactory>([&](II2cConnectionSettingsFactory const& f) { return f.Create(slaveAddress); }))
{
}
inline auto I2cController::GetControllersAsync(Windows::Devices::I2c::Provider::II2cProvider const& provider)
{
return impl::call_factory<I2cController, II2cControllerStatics>([&](II2cControllerStatics const& f) { return f.GetControllersAsync(provider); });
}
inline auto I2cController::GetDefaultAsync()
{
return impl::call_factory_cast<Windows::Foundation::IAsyncOperation<Windows::Devices::I2c::I2cController>(*)(II2cControllerStatics const&), I2cController, II2cControllerStatics>([](II2cControllerStatics const& f) { return f.GetDefaultAsync(); });
}
inline auto I2cDevice::GetDeviceSelector()
{
return impl::call_factory_cast<hstring(*)(II2cDeviceStatics const&), I2cDevice, II2cDeviceStatics>([](II2cDeviceStatics const& f) { return f.GetDeviceSelector(); });
}
inline auto I2cDevice::GetDeviceSelector(param::hstring const& friendlyName)
{
return impl::call_factory<I2cDevice, II2cDeviceStatics>([&](II2cDeviceStatics const& f) { return f.GetDeviceSelector(friendlyName); });
}
inline auto I2cDevice::FromIdAsync(param::hstring const& deviceId, Windows::Devices::I2c::I2cConnectionSettings const& settings)
{
return impl::call_factory<I2cDevice, II2cDeviceStatics>([&](II2cDeviceStatics const& f) { return f.FromIdAsync(deviceId, settings); });
}
}
namespace std
{
#ifndef WINRT_LEAN_AND_MEAN
template<> struct hash<winrt::Windows::Devices::I2c::II2cConnectionSettings> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::II2cConnectionSettingsFactory> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::II2cController> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::II2cControllerStatics> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::II2cDevice> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::II2cDeviceStatics> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::I2cConnectionSettings> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::I2cController> : winrt::impl::hash_base {};
template<> struct hash<winrt::Windows::Devices::I2c::I2cDevice> : winrt::impl::hash_base {};
#endif
}
#endif
| [
"jicheng@yahoo.com"
] | jicheng@yahoo.com |
c8af3c3dc9c493a90ba672bab02585a0dd79e042 | 564a1c1bd548a080a07ca16a96ff2e0521de46c0 | /tests/src/pressed_keys_manager/src/pressed_keys_manager_test.cpp | cd119a65cdd6eff698306bf851fc99f9ff82ecb3 | [
"Unlicense"
] | permissive | lianyu125/Karabiner-Elements | 645aa142aeab2927180ff4101df9d4c83e0ba944 | e65ccbe26b4bc847d61888ff6aa33bcc78ac8e89 | refs/heads/master | 2023-01-19T06:36:54.849723 | 2020-11-26T23:49:16 | 2020-11-26T23:49:16 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,922 | cpp | #include <catch2/catch.hpp>
#include "pressed_keys_manager.hpp"
TEST_CASE("pressed_keys_manager") {
  // Events used by the scenarios below, each identical to the inline
  // temporaries the scenarios would otherwise construct repeatedly.
  const krbn::momentary_switch_event key_a(pqrs::hid::usage_pair(
      pqrs::hid::usage_page::keyboard_or_keypad,
      pqrs::hid::usage::keyboard_or_keypad::keyboard_a));
  const krbn::momentary_switch_event consumer_mute(pqrs::hid::usage_pair(
      pqrs::hid::usage_page::consumer,
      pqrs::hid::usage::consumer::mute));
  const krbn::momentary_switch_event pointing_button_1(pqrs::hid::usage_pair(
      pqrs::hid::usage_page::button,
      pqrs::hid::usage::button::button_1));
  const krbn::momentary_switch_event pointing_button_10(pqrs::hid::usage_pair(
      pqrs::hid::usage_page::button,
      pqrs::hid::usage::button::button_10));

  // A freshly constructed manager reports empty.
  {
    krbn::pressed_keys_manager manager;
    REQUIRE(manager.empty());
  }

  // A single key: insert makes the manager non-empty, erase empties it.
  {
    krbn::pressed_keys_manager manager;
    manager.insert(key_a);
    REQUIRE(!manager.empty());
    manager.erase(key_a);
    REQUIRE(manager.empty());
  }

  // Inserting the same key twice still needs only one erase (the manager
  // deduplicates, as this scenario's final REQUIRE demonstrates).
  {
    krbn::pressed_keys_manager manager;
    manager.insert(key_a);
    manager.insert(key_a);
    REQUIRE(!manager.empty());
    manager.erase(key_a);
    REQUIRE(manager.empty());
  }

  // Consumer usage page (mute).
  {
    krbn::pressed_keys_manager manager;
    manager.insert(consumer_mute);
    REQUIRE(!manager.empty());
    manager.erase(consumer_mute);
    REQUIRE(manager.empty());
  }

  // Pointing button.
  {
    krbn::pressed_keys_manager manager;
    manager.insert(pointing_button_1);
    REQUIRE(!manager.empty());
    manager.erase(pointing_button_1);
    REQUIRE(manager.empty());
  }

  // Mixed usage pages: the manager only becomes empty once every inserted
  // event has been erased; erasing an event that was never inserted
  // (button_10) leaves it unchanged.
  {
    krbn::pressed_keys_manager manager;
    manager.insert(key_a);
    REQUIRE(!manager.empty());
    manager.insert(key_a);
    REQUIRE(!manager.empty());
    manager.insert(consumer_mute);
    REQUIRE(!manager.empty());
    manager.insert(pointing_button_1);
    REQUIRE(!manager.empty());
    manager.erase(key_a);
    REQUIRE(!manager.empty());
    manager.erase(consumer_mute);
    REQUIRE(!manager.empty());
    manager.erase(pointing_button_10);
    REQUIRE(!manager.empty());
    manager.erase(pointing_button_1);
    REQUIRE(manager.empty());
  }
}
| [
"tekezo@pqrs.org"
] | tekezo@pqrs.org |
322564a6bc4f1db4100e2299b1f95e471a8ccf41 | 6fe04bf9ced0dade346a4665f791ac64ee8dcb9e | /dbnamelist.h | 7e99875d5dc7195042ba7af7ea5987225d789130 | [] | no_license | zizle/dbAssistant | 29514276c037540080476518944080e4b3fd6527 | 4ca579d10f1a82c342c3f3c7ba04b438312b0e3f | refs/heads/master | 2021-05-23T16:24:35.391962 | 2020-04-14T15:03:27 | 2020-04-14T15:03:27 | 253,380,126 | 0 | 0 | null | 2020-04-14T15:03:28 | 2020-04-06T02:52:11 | null | UTF-8 | C++ | false | false | 327 | h | #ifndef DBNAMELIST_H
#define DBNAMELIST_H
#include <QListWidget>
// QListWidget subclass that holds the database-name entries of the
// assistant UI and exposes a single signal for name-related actions.
class DBNameList : public QListWidget
{
Q_OBJECT
public:
explicit DBNameList(QWidget *parent = nullptr);
// Public connection flag, false by default. NOTE(review): presumably set
// once a database connection is established — confirm in dbnamelist.cpp.
bool m_IsConnected = false;
signals:
// Carries an action keyword plus the affected database name; the valid
// `action` values are defined by the emitting code — see dbnamelist.cpp.
void nameListActionSignal(QString action, QString dbName);
public slots:
};
#endif // DBNAMELIST_H
| [
"zizle_lin@163.com"
] | zizle_lin@163.com |
e335cda7e142ddf3a27c1373562afb9606cb75ca | d78b48d71abc96fbd45b51103ecf3e5c36486402 | /practicaFinalRedone/TouchGFX/generated/gui_generated/include/gui_generated/common/FrontendApplicationBase.hpp | cf63ad0e2dcd92688856a6ebcea9282f4a004ec4 | [] | no_license | Adrian-Rod-Mol/sistemasEmpotrados | c445c80d9490382149cde22dd80ded00faed5b53 | 9c3475e6a8e99a1186c87020318f9f43fd3adce5 | refs/heads/master | 2023-05-13T06:36:16.926220 | 2021-06-09T17:01:28 | 2021-06-09T17:01:28 | 374,237,389 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,184 | hpp | /*********************************************************************************/
/********** THIS FILE IS GENERATED BY TOUCHGFX DESIGNER, DO NOT MODIFY ***********/
/*********************************************************************************/
#ifndef FRONTENDAPPLICATIONBASE_HPP
#define FRONTENDAPPLICATIONBASE_HPP
#include <mvp/MVPApplication.hpp>
#include <gui/model/Model.hpp>
class FrontendHeap;
// TouchGFX-Designer-generated MVP application base: holds references to the
// shared Model and FrontendHeap and declares one public goto* entry point
// per configured screen transition, each backed by a protected *Impl method.
// Do not hand-edit behavior here — the file is regenerated by the designer.
class FrontendApplicationBase : public touchgfx::MVPApplication
{
public:
FrontendApplicationBase(Model& m, FrontendHeap& heap);
virtual ~FrontendApplicationBase() { }
// MainScreen
void gotoMainScreenScreenNoTransition();
void gotoMainScreenScreenWipeTransitionEast();
// ConfScreen
void gotoConfScreenScreenWipeTransitionWest();
protected:
// NOTE(review): presumably invoked while/after a screen transition runs —
// confirm in the generated FrontendApplicationBase.cpp.
touchgfx::Callback<FrontendApplicationBase> transitionCallback;
FrontendHeap& frontendHeap;
Model& model;
// MainScreen
void gotoMainScreenScreenNoTransitionImpl();
void gotoMainScreenScreenWipeTransitionEastImpl();
// ConfScreen
void gotoConfScreenScreenWipeTransitionWestImpl();
};
#endif // FRONTENDAPPLICATIONBASE_HPP
| [
"adrian.rod.mol@gmail.com"
] | adrian.rod.mol@gmail.com |
9ee48f6304aaf1e28305bae593445a2126289807 | aa925e9df0eada356de766d656ad7ba95d818b7b | /algorithm/tree/KthSmallestInBST.cpp | 53d7745f17d4402aa52503a71163645b4505892d | [] | no_license | starboy520/starboy | 38bf51be81b5cc526a77e1ea16b1dab0b3e9f058 | ce386c0f5f4f8625e7030305f32194b75ca5ab1b | refs/heads/master | 2021-01-17T12:10:57.480551 | 2014-08-06T07:09:49 | 2014-08-06T07:09:49 | 554,437 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 885 | cpp | struct TreeNode {
TreeNode* left;
TreeNode* right;
int val;
};
TreeNode* findKthSmallest(TreeNode* root, int& val, int k) {
TreeNode* tmp = NULL;
if (root->left && val < k) {
tmp = findKthSmallest(root->left, val, k)
}
val++;
if (val == k) return tmp;
if (root->right && *n < k)
tmp = findKthSmallest(root->right, val, k);
return tmp;
}
// find a node's successor
TreeNode* inOrderSucessor(TreeNode* root, TreeNode* p) {
if (p->right) {
TreeNode* q = p->right;
while (q->left) q = q->left;
return q;
}
TreeNode* succ = NULL;
while (root != NULL) {
if (p->val < root->val) {
succ = root;
root = root->left;
} else if (n->val > root->val) root = root->right;
else break;
}
return succ;
}
| [
"starboy.qi@gmail.com"
] | starboy.qi@gmail.com |
adf1e8d9dc4bf42d38dee8fb8cf1e37ec0f3332b | 8c48a060ec96f5d4857d4e08c08f5a44d4163e6e | /tester/OverallTest/cpp/main.cpp | 5b09ad811e82a2c8f4ce0019a418838fe4ce21a6 | [] | no_license | ccf19881030/JQHttpServer | cd6e50eb74a361241d87e0dc33857ca580a51268 | 49508b53dea2c65808b2e82cef549441d268484c | refs/heads/master | 2022-12-08T09:37:10.363944 | 2020-09-05T03:53:14 | 2020-09-05T03:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 272 | cpp | // Qt lib import
#include <QCoreApplication>
#include <QtTest>
// Project lib import
#include "OverallTest.h"
int main(int argc, char *argv[])
{
QCoreApplication app( argc, argv );
OverallTest benchMark;
return QTest::qExec( &benchMark, argc, argv );
}
| [
"188080501@qq.com"
] | 188080501@qq.com |
faf80e17980949abd2b6d7cd85c2439262df41f6 | 3f96f4c7d8c32b662a4b913f5596c2c33953ab65 | /RealTimeClock.h | 2992a9bc69242cee8e24e062299519aa02a9c22c | [] | no_license | hannahellis4242/clock | 397dad52941e6d51747c47cab737a8a7e55447f2 | d8f450f246c7178812b7282ca308a8f48a261d18 | refs/heads/master | 2022-08-29T09:19:00.157778 | 2020-05-28T16:04:52 | 2020-05-28T16:04:52 | 255,748,417 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 763 | h | #ifndef REALTIMECLOCK_H
#define REALTIMECLOCK_H
#include "Array.h"
class RealTimeClock
{
private:
volatile uint8_t * out_port_ ;
const volatile uint8_t * in_port_ ;
uint8_t ce_mask_high_ ;
uint8_t ce_mask_low_ ;
public:
RealTimeClock(volatile uint8_t * out_port , const volatile uint8_t * in_port , const uint8_t ce_pin ) ;
Array< uint8_t , 7 > read() const;
void write( const Array< uint8_t , 7 > & data ) const;
uint8_t getControlRegister()const ;
uint8_t getSecondsByte() const;
uint8_t getMinutesByte() const;
uint8_t getHoursByte() const;
uint8_t getDayByte() const;
uint8_t getDateByte() const;
uint8_t getMonthByte() const;
uint8_t getYearByte() const;
void setControlRegister( const uint8_t value )const;
};
#endif
| [
"hannah.ellis@pulsic.com"
] | hannah.ellis@pulsic.com |
cfe6ea69b76a06517c71b274721dafb4b8459b43 | c776476e9d06b3779d744641e758ac3a2c15cddc | /examples/litmus/c/run-scripts/tmp_1/ISA2+dmb.sy+po+ctrl.c.cbmc_out.cpp | 1b8de5e51e7f2305d9a294d4e8bd3b587c0671d9 | [] | no_license | ashutosh0gupta/llvm_bmc | aaac7961c723ba6f7ffd77a39559e0e52432eade | 0287c4fb180244e6b3c599a9902507f05c8a7234 | refs/heads/master | 2023-08-02T17:14:06.178723 | 2023-07-31T10:46:53 | 2023-07-31T10:46:53 | 143,100,825 | 3 | 4 | null | 2023-05-25T05:50:55 | 2018-08-01T03:47:00 | C++ | UTF-8 | C++ | false | false | 43,657 | cpp | // 0:vars:3
// 8:thr2:1
// 3:atom_1_X0_1:1
// 4:atom_2_X0_1:1
// 5:atom_2_X2_0:1
// 6:thr0:1
// 7:thr1:1
#define ADDRSIZE 9
#define NPROC 4
#define NCONTEXT 1
#define ASSUME(stmt) __CPROVER_assume(stmt)
#define ASSERT(stmt) __CPROVER_assert(stmt, "error")
#define max(a,b) (a>b?a:b)
char __get_rng();
char get_rng( char from, char to ) {
char ret = __get_rng();
ASSUME(ret >= from && ret <= to);
return ret;
}
char get_rng_th( char from, char to ) {
char ret = __get_rng();
ASSUME(ret >= from && ret <= to);
return ret;
}
int main(int argc, char **argv) {
// declare arrays for intial value version in contexts
int meminit_[ADDRSIZE*NCONTEXT];
#define meminit(x,k) meminit_[(x)*NCONTEXT+k]
int coinit_[ADDRSIZE*NCONTEXT];
#define coinit(x,k) coinit_[(x)*NCONTEXT+k]
int deltainit_[ADDRSIZE*NCONTEXT];
#define deltainit(x,k) deltainit_[(x)*NCONTEXT+k]
// declare arrays for running value version in contexts
int mem_[ADDRSIZE*NCONTEXT];
#define mem(x,k) mem_[(x)*NCONTEXT+k]
int co_[ADDRSIZE*NCONTEXT];
#define co(x,k) co_[(x)*NCONTEXT+k]
int delta_[ADDRSIZE*NCONTEXT];
#define delta(x,k) delta_[(x)*NCONTEXT+k]
// declare arrays for local buffer and observed writes
int buff_[NPROC*ADDRSIZE];
#define buff(x,k) buff_[(x)*ADDRSIZE+k]
int pw_[NPROC*ADDRSIZE];
#define pw(x,k) pw_[(x)*ADDRSIZE+k]
// declare arrays for context stamps
char cr_[NPROC*ADDRSIZE];
#define cr(x,k) cr_[(x)*ADDRSIZE+k]
char iw_[NPROC*ADDRSIZE];
#define iw(x,k) iw_[(x)*ADDRSIZE+k]
char cw_[NPROC*ADDRSIZE];
#define cw(x,k) cw_[(x)*ADDRSIZE+k]
char cx_[NPROC*ADDRSIZE];
#define cx(x,k) cx_[(x)*ADDRSIZE+k]
char is_[NPROC*ADDRSIZE];
#define is(x,k) is_[(x)*ADDRSIZE+k]
char cs_[NPROC*ADDRSIZE];
#define cs(x,k) cs_[(x)*ADDRSIZE+k]
char crmax_[NPROC*ADDRSIZE];
#define crmax(x,k) crmax_[(x)*ADDRSIZE+k]
char sforbid_[ADDRSIZE*NCONTEXT];
#define sforbid(x,k) sforbid_[(x)*NCONTEXT+k]
// declare arrays for synchronizations
int cl[NPROC];
int cdy[NPROC];
int cds[NPROC];
int cdl[NPROC];
int cisb[NPROC];
int caddr[NPROC];
int cctrl[NPROC];
int cstart[NPROC];
int creturn[NPROC];
// declare arrays for contexts activity
int active[NCONTEXT];
int ctx_used[NCONTEXT];
int r0= 0;
char creg_r0;
int r1= 0;
char creg_r1;
int r2= 0;
char creg_r2;
int r3= 0;
char creg_r3;
int r4= 0;
char creg_r4;
int r5= 0;
char creg_r5;
int r6= 0;
char creg_r6;
int r7= 0;
char creg_r7;
int r8= 0;
char creg_r8;
int r9= 0;
char creg_r9;
int r10= 0;
char creg_r10;
int r11= 0;
char creg_r11;
int r12= 0;
char creg_r12;
char old_cctrl= 0;
char old_cr= 0;
char old_cdy= 0;
char old_cw= 0;
char new_creg= 0;
buff(0,0) = 0;
pw(0,0) = 0;
cr(0,0) = 0;
iw(0,0) = 0;
cw(0,0) = 0;
cx(0,0) = 0;
is(0,0) = 0;
cs(0,0) = 0;
crmax(0,0) = 0;
buff(0,1) = 0;
pw(0,1) = 0;
cr(0,1) = 0;
iw(0,1) = 0;
cw(0,1) = 0;
cx(0,1) = 0;
is(0,1) = 0;
cs(0,1) = 0;
crmax(0,1) = 0;
buff(0,2) = 0;
pw(0,2) = 0;
cr(0,2) = 0;
iw(0,2) = 0;
cw(0,2) = 0;
cx(0,2) = 0;
is(0,2) = 0;
cs(0,2) = 0;
crmax(0,2) = 0;
buff(0,3) = 0;
pw(0,3) = 0;
cr(0,3) = 0;
iw(0,3) = 0;
cw(0,3) = 0;
cx(0,3) = 0;
is(0,3) = 0;
cs(0,3) = 0;
crmax(0,3) = 0;
buff(0,4) = 0;
pw(0,4) = 0;
cr(0,4) = 0;
iw(0,4) = 0;
cw(0,4) = 0;
cx(0,4) = 0;
is(0,4) = 0;
cs(0,4) = 0;
crmax(0,4) = 0;
buff(0,5) = 0;
pw(0,5) = 0;
cr(0,5) = 0;
iw(0,5) = 0;
cw(0,5) = 0;
cx(0,5) = 0;
is(0,5) = 0;
cs(0,5) = 0;
crmax(0,5) = 0;
buff(0,6) = 0;
pw(0,6) = 0;
cr(0,6) = 0;
iw(0,6) = 0;
cw(0,6) = 0;
cx(0,6) = 0;
is(0,6) = 0;
cs(0,6) = 0;
crmax(0,6) = 0;
buff(0,7) = 0;
pw(0,7) = 0;
cr(0,7) = 0;
iw(0,7) = 0;
cw(0,7) = 0;
cx(0,7) = 0;
is(0,7) = 0;
cs(0,7) = 0;
crmax(0,7) = 0;
buff(0,8) = 0;
pw(0,8) = 0;
cr(0,8) = 0;
iw(0,8) = 0;
cw(0,8) = 0;
cx(0,8) = 0;
is(0,8) = 0;
cs(0,8) = 0;
crmax(0,8) = 0;
cl[0] = 0;
cdy[0] = 0;
cds[0] = 0;
cdl[0] = 0;
cisb[0] = 0;
caddr[0] = 0;
cctrl[0] = 0;
cstart[0] = get_rng(0,NCONTEXT-1);
creturn[0] = get_rng(0,NCONTEXT-1);
buff(1,0) = 0;
pw(1,0) = 0;
cr(1,0) = 0;
iw(1,0) = 0;
cw(1,0) = 0;
cx(1,0) = 0;
is(1,0) = 0;
cs(1,0) = 0;
crmax(1,0) = 0;
buff(1,1) = 0;
pw(1,1) = 0;
cr(1,1) = 0;
iw(1,1) = 0;
cw(1,1) = 0;
cx(1,1) = 0;
is(1,1) = 0;
cs(1,1) = 0;
crmax(1,1) = 0;
buff(1,2) = 0;
pw(1,2) = 0;
cr(1,2) = 0;
iw(1,2) = 0;
cw(1,2) = 0;
cx(1,2) = 0;
is(1,2) = 0;
cs(1,2) = 0;
crmax(1,2) = 0;
buff(1,3) = 0;
pw(1,3) = 0;
cr(1,3) = 0;
iw(1,3) = 0;
cw(1,3) = 0;
cx(1,3) = 0;
is(1,3) = 0;
cs(1,3) = 0;
crmax(1,3) = 0;
buff(1,4) = 0;
pw(1,4) = 0;
cr(1,4) = 0;
iw(1,4) = 0;
cw(1,4) = 0;
cx(1,4) = 0;
is(1,4) = 0;
cs(1,4) = 0;
crmax(1,4) = 0;
buff(1,5) = 0;
pw(1,5) = 0;
cr(1,5) = 0;
iw(1,5) = 0;
cw(1,5) = 0;
cx(1,5) = 0;
is(1,5) = 0;
cs(1,5) = 0;
crmax(1,5) = 0;
buff(1,6) = 0;
pw(1,6) = 0;
cr(1,6) = 0;
iw(1,6) = 0;
cw(1,6) = 0;
cx(1,6) = 0;
is(1,6) = 0;
cs(1,6) = 0;
crmax(1,6) = 0;
buff(1,7) = 0;
pw(1,7) = 0;
cr(1,7) = 0;
iw(1,7) = 0;
cw(1,7) = 0;
cx(1,7) = 0;
is(1,7) = 0;
cs(1,7) = 0;
crmax(1,7) = 0;
buff(1,8) = 0;
pw(1,8) = 0;
cr(1,8) = 0;
iw(1,8) = 0;
cw(1,8) = 0;
cx(1,8) = 0;
is(1,8) = 0;
cs(1,8) = 0;
crmax(1,8) = 0;
cl[1] = 0;
cdy[1] = 0;
cds[1] = 0;
cdl[1] = 0;
cisb[1] = 0;
caddr[1] = 0;
cctrl[1] = 0;
cstart[1] = get_rng(0,NCONTEXT-1);
creturn[1] = get_rng(0,NCONTEXT-1);
buff(2,0) = 0;
pw(2,0) = 0;
cr(2,0) = 0;
iw(2,0) = 0;
cw(2,0) = 0;
cx(2,0) = 0;
is(2,0) = 0;
cs(2,0) = 0;
crmax(2,0) = 0;
buff(2,1) = 0;
pw(2,1) = 0;
cr(2,1) = 0;
iw(2,1) = 0;
cw(2,1) = 0;
cx(2,1) = 0;
is(2,1) = 0;
cs(2,1) = 0;
crmax(2,1) = 0;
buff(2,2) = 0;
pw(2,2) = 0;
cr(2,2) = 0;
iw(2,2) = 0;
cw(2,2) = 0;
cx(2,2) = 0;
is(2,2) = 0;
cs(2,2) = 0;
crmax(2,2) = 0;
buff(2,3) = 0;
pw(2,3) = 0;
cr(2,3) = 0;
iw(2,3) = 0;
cw(2,3) = 0;
cx(2,3) = 0;
is(2,3) = 0;
cs(2,3) = 0;
crmax(2,3) = 0;
buff(2,4) = 0;
pw(2,4) = 0;
cr(2,4) = 0;
iw(2,4) = 0;
cw(2,4) = 0;
cx(2,4) = 0;
is(2,4) = 0;
cs(2,4) = 0;
crmax(2,4) = 0;
buff(2,5) = 0;
pw(2,5) = 0;
cr(2,5) = 0;
iw(2,5) = 0;
cw(2,5) = 0;
cx(2,5) = 0;
is(2,5) = 0;
cs(2,5) = 0;
crmax(2,5) = 0;
buff(2,6) = 0;
pw(2,6) = 0;
cr(2,6) = 0;
iw(2,6) = 0;
cw(2,6) = 0;
cx(2,6) = 0;
is(2,6) = 0;
cs(2,6) = 0;
crmax(2,6) = 0;
buff(2,7) = 0;
pw(2,7) = 0;
cr(2,7) = 0;
iw(2,7) = 0;
cw(2,7) = 0;
cx(2,7) = 0;
is(2,7) = 0;
cs(2,7) = 0;
crmax(2,7) = 0;
buff(2,8) = 0;
pw(2,8) = 0;
cr(2,8) = 0;
iw(2,8) = 0;
cw(2,8) = 0;
cx(2,8) = 0;
is(2,8) = 0;
cs(2,8) = 0;
crmax(2,8) = 0;
cl[2] = 0;
cdy[2] = 0;
cds[2] = 0;
cdl[2] = 0;
cisb[2] = 0;
caddr[2] = 0;
cctrl[2] = 0;
cstart[2] = get_rng(0,NCONTEXT-1);
creturn[2] = get_rng(0,NCONTEXT-1);
buff(3,0) = 0;
pw(3,0) = 0;
cr(3,0) = 0;
iw(3,0) = 0;
cw(3,0) = 0;
cx(3,0) = 0;
is(3,0) = 0;
cs(3,0) = 0;
crmax(3,0) = 0;
buff(3,1) = 0;
pw(3,1) = 0;
cr(3,1) = 0;
iw(3,1) = 0;
cw(3,1) = 0;
cx(3,1) = 0;
is(3,1) = 0;
cs(3,1) = 0;
crmax(3,1) = 0;
buff(3,2) = 0;
pw(3,2) = 0;
cr(3,2) = 0;
iw(3,2) = 0;
cw(3,2) = 0;
cx(3,2) = 0;
is(3,2) = 0;
cs(3,2) = 0;
crmax(3,2) = 0;
buff(3,3) = 0;
pw(3,3) = 0;
cr(3,3) = 0;
iw(3,3) = 0;
cw(3,3) = 0;
cx(3,3) = 0;
is(3,3) = 0;
cs(3,3) = 0;
crmax(3,3) = 0;
buff(3,4) = 0;
pw(3,4) = 0;
cr(3,4) = 0;
iw(3,4) = 0;
cw(3,4) = 0;
cx(3,4) = 0;
is(3,4) = 0;
cs(3,4) = 0;
crmax(3,4) = 0;
buff(3,5) = 0;
pw(3,5) = 0;
cr(3,5) = 0;
iw(3,5) = 0;
cw(3,5) = 0;
cx(3,5) = 0;
is(3,5) = 0;
cs(3,5) = 0;
crmax(3,5) = 0;
buff(3,6) = 0;
pw(3,6) = 0;
cr(3,6) = 0;
iw(3,6) = 0;
cw(3,6) = 0;
cx(3,6) = 0;
is(3,6) = 0;
cs(3,6) = 0;
crmax(3,6) = 0;
buff(3,7) = 0;
pw(3,7) = 0;
cr(3,7) = 0;
iw(3,7) = 0;
cw(3,7) = 0;
cx(3,7) = 0;
is(3,7) = 0;
cs(3,7) = 0;
crmax(3,7) = 0;
buff(3,8) = 0;
pw(3,8) = 0;
cr(3,8) = 0;
iw(3,8) = 0;
cw(3,8) = 0;
cx(3,8) = 0;
is(3,8) = 0;
cs(3,8) = 0;
crmax(3,8) = 0;
cl[3] = 0;
cdy[3] = 0;
cds[3] = 0;
cdl[3] = 0;
cisb[3] = 0;
caddr[3] = 0;
cctrl[3] = 0;
cstart[3] = get_rng(0,NCONTEXT-1);
creturn[3] = get_rng(0,NCONTEXT-1);
// Dumping initializations
mem(0+0,0) = 0;
mem(0+1,0) = 0;
mem(0+2,0) = 0;
mem(8+0,0) = 0;
mem(3+0,0) = 0;
mem(4+0,0) = 0;
mem(5+0,0) = 0;
mem(6+0,0) = 0;
mem(7+0,0) = 0;
// Dumping context matching equalities
co(0,0) = 0;
delta(0,0) = -1;
co(1,0) = 0;
delta(1,0) = -1;
co(2,0) = 0;
delta(2,0) = -1;
co(3,0) = 0;
delta(3,0) = -1;
co(4,0) = 0;
delta(4,0) = -1;
co(5,0) = 0;
delta(5,0) = -1;
co(6,0) = 0;
delta(6,0) = -1;
co(7,0) = 0;
delta(7,0) = -1;
co(8,0) = 0;
delta(8,0) = -1;
// Dumping thread 1
int ret_thread_1 = 0;
cdy[1] = get_rng(0,NCONTEXT-1);
ASSUME(cdy[1] >= cstart[1]);
T1BLOCK0:
// call void @llvm.dbg.value(metadata i8* %arg, metadata !37, metadata !DIExpression()), !dbg !46
// br label %label_1, !dbg !47
goto T1BLOCK1;
T1BLOCK1:
// call void @llvm.dbg.label(metadata !45), !dbg !48
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 0), metadata !38, metadata !DIExpression()), !dbg !49
// call void @llvm.dbg.value(metadata i64 1, metadata !41, metadata !DIExpression()), !dbg !49
// store atomic i64 1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 0) monotonic, align 8, !dbg !50
// ST: Guess
iw(1,0) = get_rng(0,NCONTEXT-1);// 1 ASSIGN STIW
old_cw = cw(1,0);
cw(1,0) = get_rng(0,NCONTEXT-1);// 1 ASSIGN STCOM
// Check
ASSUME(active[iw(1,0)] == 1);
ASSUME(active[cw(1,0)] == 1);
ASSUME(sforbid(0,cw(1,0))== 0);
ASSUME(iw(1,0) >= 0);
ASSUME(iw(1,0) >= 0);
ASSUME(cw(1,0) >= iw(1,0));
ASSUME(cw(1,0) >= old_cw);
ASSUME(cw(1,0) >= cr(1,0));
ASSUME(cw(1,0) >= cl[1]);
ASSUME(cw(1,0) >= cisb[1]);
ASSUME(cw(1,0) >= cdy[1]);
ASSUME(cw(1,0) >= cdl[1]);
ASSUME(cw(1,0) >= cds[1]);
ASSUME(cw(1,0) >= cctrl[1]);
ASSUME(cw(1,0) >= caddr[1]);
// Update
caddr[1] = max(caddr[1],0);
buff(1,0) = 1;
mem(0,cw(1,0)) = 1;
co(0,cw(1,0))+=1;
delta(0,cw(1,0)) = -1;
ASSUME(creturn[1] >= cw(1,0));
// call void (...) @dmbsy(), !dbg !51
// dumbsy: Guess
old_cdy = cdy[1];
cdy[1] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[1] >= old_cdy);
ASSUME(cdy[1] >= cisb[1]);
ASSUME(cdy[1] >= cdl[1]);
ASSUME(cdy[1] >= cds[1]);
ASSUME(cdy[1] >= cctrl[1]);
ASSUME(cdy[1] >= cw(1,0+0));
ASSUME(cdy[1] >= cw(1,0+1));
ASSUME(cdy[1] >= cw(1,0+2));
ASSUME(cdy[1] >= cw(1,8+0));
ASSUME(cdy[1] >= cw(1,3+0));
ASSUME(cdy[1] >= cw(1,4+0));
ASSUME(cdy[1] >= cw(1,5+0));
ASSUME(cdy[1] >= cw(1,6+0));
ASSUME(cdy[1] >= cw(1,7+0));
ASSUME(cdy[1] >= cr(1,0+0));
ASSUME(cdy[1] >= cr(1,0+1));
ASSUME(cdy[1] >= cr(1,0+2));
ASSUME(cdy[1] >= cr(1,8+0));
ASSUME(cdy[1] >= cr(1,3+0));
ASSUME(cdy[1] >= cr(1,4+0));
ASSUME(cdy[1] >= cr(1,5+0));
ASSUME(cdy[1] >= cr(1,6+0));
ASSUME(cdy[1] >= cr(1,7+0));
ASSUME(creturn[1] >= cdy[1]);
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 1), metadata !42, metadata !DIExpression()), !dbg !52
// call void @llvm.dbg.value(metadata i64 1, metadata !44, metadata !DIExpression()), !dbg !52
// store atomic i64 1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 1) monotonic, align 8, !dbg !53
// ST: Guess
iw(1,0+1*1) = get_rng(0,NCONTEXT-1);// 1 ASSIGN STIW
old_cw = cw(1,0+1*1);
cw(1,0+1*1) = get_rng(0,NCONTEXT-1);// 1 ASSIGN STCOM
// Check
ASSUME(active[iw(1,0+1*1)] == 1);
ASSUME(active[cw(1,0+1*1)] == 1);
ASSUME(sforbid(0+1*1,cw(1,0+1*1))== 0);
ASSUME(iw(1,0+1*1) >= 0);
ASSUME(iw(1,0+1*1) >= 0);
ASSUME(cw(1,0+1*1) >= iw(1,0+1*1));
ASSUME(cw(1,0+1*1) >= old_cw);
ASSUME(cw(1,0+1*1) >= cr(1,0+1*1));
ASSUME(cw(1,0+1*1) >= cl[1]);
ASSUME(cw(1,0+1*1) >= cisb[1]);
ASSUME(cw(1,0+1*1) >= cdy[1]);
ASSUME(cw(1,0+1*1) >= cdl[1]);
ASSUME(cw(1,0+1*1) >= cds[1]);
ASSUME(cw(1,0+1*1) >= cctrl[1]);
ASSUME(cw(1,0+1*1) >= caddr[1]);
// Update
caddr[1] = max(caddr[1],0);
buff(1,0+1*1) = 1;
mem(0+1*1,cw(1,0+1*1)) = 1;
co(0+1*1,cw(1,0+1*1))+=1;
delta(0+1*1,cw(1,0+1*1)) = -1;
ASSUME(creturn[1] >= cw(1,0+1*1));
// ret i8* null, !dbg !54
ret_thread_1 = (- 1);
// Dumping thread 2
int ret_thread_2 = 0;
cdy[2] = get_rng(0,NCONTEXT-1);
ASSUME(cdy[2] >= cstart[2]);
T2BLOCK0:
// call void @llvm.dbg.value(metadata i8* %arg, metadata !57, metadata !DIExpression()), !dbg !71
// br label %label_2, !dbg !53
goto T2BLOCK1;
T2BLOCK1:
// call void @llvm.dbg.label(metadata !70), !dbg !73
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 1), metadata !60, metadata !DIExpression()), !dbg !74
// %0 = load atomic i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 1) monotonic, align 8, !dbg !56
// LD: Guess
old_cr = cr(2,0+1*1);
cr(2,0+1*1) = get_rng(0,NCONTEXT-1);// 2 ASSIGN LDCOM
// Check
ASSUME(active[cr(2,0+1*1)] == 2);
ASSUME(cr(2,0+1*1) >= iw(2,0+1*1));
ASSUME(cr(2,0+1*1) >= 0);
ASSUME(cr(2,0+1*1) >= cdy[2]);
ASSUME(cr(2,0+1*1) >= cisb[2]);
ASSUME(cr(2,0+1*1) >= cdl[2]);
ASSUME(cr(2,0+1*1) >= cl[2]);
// Update
creg_r0 = cr(2,0+1*1);
crmax(2,0+1*1) = max(crmax(2,0+1*1),cr(2,0+1*1));
caddr[2] = max(caddr[2],0);
if(cr(2,0+1*1) < cw(2,0+1*1)) {
r0 = buff(2,0+1*1);
} else {
if(pw(2,0+1*1) != co(0+1*1,cr(2,0+1*1))) {
ASSUME(cr(2,0+1*1) >= old_cr);
}
pw(2,0+1*1) = co(0+1*1,cr(2,0+1*1));
r0 = mem(0+1*1,cr(2,0+1*1));
}
ASSUME(creturn[2] >= cr(2,0+1*1));
// call void @llvm.dbg.value(metadata i64 %0, metadata !62, metadata !DIExpression()), !dbg !74
// %conv = trunc i64 %0 to i32, !dbg !57
// call void @llvm.dbg.value(metadata i32 %conv, metadata !58, metadata !DIExpression()), !dbg !71
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 2), metadata !63, metadata !DIExpression()), !dbg !77
// call void @llvm.dbg.value(metadata i64 1, metadata !65, metadata !DIExpression()), !dbg !77
// store atomic i64 1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 2) monotonic, align 8, !dbg !59
// ST: Guess
iw(2,0+2*1) = get_rng(0,NCONTEXT-1);// 2 ASSIGN STIW
old_cw = cw(2,0+2*1);
cw(2,0+2*1) = get_rng(0,NCONTEXT-1);// 2 ASSIGN STCOM
// Check
ASSUME(active[iw(2,0+2*1)] == 2);
ASSUME(active[cw(2,0+2*1)] == 2);
ASSUME(sforbid(0+2*1,cw(2,0+2*1))== 0);
ASSUME(iw(2,0+2*1) >= 0);
ASSUME(iw(2,0+2*1) >= 0);
ASSUME(cw(2,0+2*1) >= iw(2,0+2*1));
ASSUME(cw(2,0+2*1) >= old_cw);
ASSUME(cw(2,0+2*1) >= cr(2,0+2*1));
ASSUME(cw(2,0+2*1) >= cl[2]);
ASSUME(cw(2,0+2*1) >= cisb[2]);
ASSUME(cw(2,0+2*1) >= cdy[2]);
ASSUME(cw(2,0+2*1) >= cdl[2]);
ASSUME(cw(2,0+2*1) >= cds[2]);
ASSUME(cw(2,0+2*1) >= cctrl[2]);
ASSUME(cw(2,0+2*1) >= caddr[2]);
// Update
caddr[2] = max(caddr[2],0);
buff(2,0+2*1) = 1;
mem(0+2*1,cw(2,0+2*1)) = 1;
co(0+2*1,cw(2,0+2*1))+=1;
delta(0+2*1,cw(2,0+2*1)) = -1;
ASSUME(creturn[2] >= cw(2,0+2*1));
// %cmp = icmp eq i32 %conv, 1, !dbg !60
// %conv1 = zext i1 %cmp to i32, !dbg !60
// call void @llvm.dbg.value(metadata i32 %conv1, metadata !66, metadata !DIExpression()), !dbg !71
// call void @llvm.dbg.value(metadata i64* @atom_1_X0_1, metadata !67, metadata !DIExpression()), !dbg !80
// %1 = zext i32 %conv1 to i64
// call void @llvm.dbg.value(metadata i64 %1, metadata !69, metadata !DIExpression()), !dbg !80
// store atomic i64 %1, i64* @atom_1_X0_1 seq_cst, align 8, !dbg !62
// ST: Guess
iw(2,3) = get_rng(0,NCONTEXT-1);// 2 ASSIGN STIW
old_cw = cw(2,3);
cw(2,3) = get_rng(0,NCONTEXT-1);// 2 ASSIGN STCOM
// Check
ASSUME(active[iw(2,3)] == 2);
ASSUME(active[cw(2,3)] == 2);
ASSUME(sforbid(3,cw(2,3))== 0);
ASSUME(iw(2,3) >= max(creg_r0,0));
ASSUME(iw(2,3) >= 0);
ASSUME(cw(2,3) >= iw(2,3));
ASSUME(cw(2,3) >= old_cw);
ASSUME(cw(2,3) >= cr(2,3));
ASSUME(cw(2,3) >= cl[2]);
ASSUME(cw(2,3) >= cisb[2]);
ASSUME(cw(2,3) >= cdy[2]);
ASSUME(cw(2,3) >= cdl[2]);
ASSUME(cw(2,3) >= cds[2]);
ASSUME(cw(2,3) >= cctrl[2]);
ASSUME(cw(2,3) >= caddr[2]);
// Update
caddr[2] = max(caddr[2],0);
buff(2,3) = (r0==1);
mem(3,cw(2,3)) = (r0==1);
co(3,cw(2,3))+=1;
delta(3,cw(2,3)) = -1;
ASSUME(creturn[2] >= cw(2,3));
// ret i8* null, !dbg !63
ret_thread_2 = (- 1);
// Dumping thread 3
int ret_thread_3 = 0;
cdy[3] = get_rng(0,NCONTEXT-1);
ASSUME(cdy[3] >= cstart[3]);
T3BLOCK0:
// call void @llvm.dbg.value(metadata i8* %arg, metadata !85, metadata !DIExpression()), !dbg !104
// br label %label_3, !dbg !59
goto T3BLOCK1;
T3BLOCK1:
// call void @llvm.dbg.label(metadata !102), !dbg !106
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 2), metadata !87, metadata !DIExpression()), !dbg !107
// %0 = load atomic i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 2) monotonic, align 8, !dbg !62
// LD: Guess
old_cr = cr(3,0+2*1);
cr(3,0+2*1) = get_rng(0,NCONTEXT-1);// 3 ASSIGN LDCOM
// Check
ASSUME(active[cr(3,0+2*1)] == 3);
ASSUME(cr(3,0+2*1) >= iw(3,0+2*1));
ASSUME(cr(3,0+2*1) >= 0);
ASSUME(cr(3,0+2*1) >= cdy[3]);
ASSUME(cr(3,0+2*1) >= cisb[3]);
ASSUME(cr(3,0+2*1) >= cdl[3]);
ASSUME(cr(3,0+2*1) >= cl[3]);
// Update
creg_r1 = cr(3,0+2*1);
crmax(3,0+2*1) = max(crmax(3,0+2*1),cr(3,0+2*1));
caddr[3] = max(caddr[3],0);
if(cr(3,0+2*1) < cw(3,0+2*1)) {
r1 = buff(3,0+2*1);
} else {
if(pw(3,0+2*1) != co(0+2*1,cr(3,0+2*1))) {
ASSUME(cr(3,0+2*1) >= old_cr);
}
pw(3,0+2*1) = co(0+2*1,cr(3,0+2*1));
r1 = mem(0+2*1,cr(3,0+2*1));
}
ASSUME(creturn[3] >= cr(3,0+2*1));
// call void @llvm.dbg.value(metadata i64 %0, metadata !89, metadata !DIExpression()), !dbg !107
// %conv = trunc i64 %0 to i32, !dbg !63
// call void @llvm.dbg.value(metadata i32 %conv, metadata !86, metadata !DIExpression()), !dbg !104
// %tobool = icmp ne i32 %conv, 0, !dbg !64
// br i1 %tobool, label %if.then, label %if.else, !dbg !66
old_cctrl = cctrl[3];
cctrl[3] = get_rng(0,NCONTEXT-1);
ASSUME(cctrl[3] >= old_cctrl);
ASSUME(cctrl[3] >= creg_r1);
ASSUME(cctrl[3] >= 0);
if((r1!=0)) {
goto T3BLOCK2;
} else {
goto T3BLOCK3;
}
T3BLOCK2:
// br label %lbl_LC00, !dbg !67
goto T3BLOCK4;
T3BLOCK3:
// br label %lbl_LC00, !dbg !68
goto T3BLOCK4;
T3BLOCK4:
// call void @llvm.dbg.label(metadata !103), !dbg !115
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 0), metadata !91, metadata !DIExpression()), !dbg !116
// %1 = load atomic i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 0) monotonic, align 8, !dbg !71
// LD: Guess
old_cr = cr(3,0);
cr(3,0) = get_rng(0,NCONTEXT-1);// 3 ASSIGN LDCOM
// Check
ASSUME(active[cr(3,0)] == 3);
ASSUME(cr(3,0) >= iw(3,0));
ASSUME(cr(3,0) >= 0);
ASSUME(cr(3,0) >= cdy[3]);
ASSUME(cr(3,0) >= cisb[3]);
ASSUME(cr(3,0) >= cdl[3]);
ASSUME(cr(3,0) >= cl[3]);
// Update
creg_r2 = cr(3,0);
crmax(3,0) = max(crmax(3,0),cr(3,0));
caddr[3] = max(caddr[3],0);
if(cr(3,0) < cw(3,0)) {
r2 = buff(3,0);
} else {
if(pw(3,0) != co(0,cr(3,0))) {
ASSUME(cr(3,0) >= old_cr);
}
pw(3,0) = co(0,cr(3,0));
r2 = mem(0,cr(3,0));
}
ASSUME(creturn[3] >= cr(3,0));
// call void @llvm.dbg.value(metadata i64 %1, metadata !93, metadata !DIExpression()), !dbg !116
// %conv4 = trunc i64 %1 to i32, !dbg !72
// call void @llvm.dbg.value(metadata i32 %conv4, metadata !90, metadata !DIExpression()), !dbg !104
// %cmp = icmp eq i32 %conv, 1, !dbg !73
// %conv5 = zext i1 %cmp to i32, !dbg !73
// call void @llvm.dbg.value(metadata i32 %conv5, metadata !94, metadata !DIExpression()), !dbg !104
// call void @llvm.dbg.value(metadata i64* @atom_2_X0_1, metadata !95, metadata !DIExpression()), !dbg !120
// %2 = zext i32 %conv5 to i64
// call void @llvm.dbg.value(metadata i64 %2, metadata !97, metadata !DIExpression()), !dbg !120
// store atomic i64 %2, i64* @atom_2_X0_1 seq_cst, align 8, !dbg !75
// ST: Guess
iw(3,4) = get_rng(0,NCONTEXT-1);// 3 ASSIGN STIW
old_cw = cw(3,4);
cw(3,4) = get_rng(0,NCONTEXT-1);// 3 ASSIGN STCOM
// Check
ASSUME(active[iw(3,4)] == 3);
ASSUME(active[cw(3,4)] == 3);
ASSUME(sforbid(4,cw(3,4))== 0);
ASSUME(iw(3,4) >= max(creg_r1,0));
ASSUME(iw(3,4) >= 0);
ASSUME(cw(3,4) >= iw(3,4));
ASSUME(cw(3,4) >= old_cw);
ASSUME(cw(3,4) >= cr(3,4));
ASSUME(cw(3,4) >= cl[3]);
ASSUME(cw(3,4) >= cisb[3]);
ASSUME(cw(3,4) >= cdy[3]);
ASSUME(cw(3,4) >= cdl[3]);
ASSUME(cw(3,4) >= cds[3]);
ASSUME(cw(3,4) >= cctrl[3]);
ASSUME(cw(3,4) >= caddr[3]);
// Update
caddr[3] = max(caddr[3],0);
buff(3,4) = (r1==1);
mem(4,cw(3,4)) = (r1==1);
co(4,cw(3,4))+=1;
delta(4,cw(3,4)) = -1;
ASSUME(creturn[3] >= cw(3,4));
// %cmp7 = icmp eq i32 %conv4, 0, !dbg !76
// %conv8 = zext i1 %cmp7 to i32, !dbg !76
// call void @llvm.dbg.value(metadata i32 %conv8, metadata !98, metadata !DIExpression()), !dbg !104
// call void @llvm.dbg.value(metadata i64* @atom_2_X2_0, metadata !99, metadata !DIExpression()), !dbg !123
// %3 = zext i32 %conv8 to i64
// call void @llvm.dbg.value(metadata i64 %3, metadata !101, metadata !DIExpression()), !dbg !123
// store atomic i64 %3, i64* @atom_2_X2_0 seq_cst, align 8, !dbg !78
// ST: Guess
iw(3,5) = get_rng(0,NCONTEXT-1);// 3 ASSIGN STIW
old_cw = cw(3,5);
cw(3,5) = get_rng(0,NCONTEXT-1);// 3 ASSIGN STCOM
// Check
ASSUME(active[iw(3,5)] == 3);
ASSUME(active[cw(3,5)] == 3);
ASSUME(sforbid(5,cw(3,5))== 0);
ASSUME(iw(3,5) >= max(creg_r2,0));
ASSUME(iw(3,5) >= 0);
ASSUME(cw(3,5) >= iw(3,5));
ASSUME(cw(3,5) >= old_cw);
ASSUME(cw(3,5) >= cr(3,5));
ASSUME(cw(3,5) >= cl[3]);
ASSUME(cw(3,5) >= cisb[3]);
ASSUME(cw(3,5) >= cdy[3]);
ASSUME(cw(3,5) >= cdl[3]);
ASSUME(cw(3,5) >= cds[3]);
ASSUME(cw(3,5) >= cctrl[3]);
ASSUME(cw(3,5) >= caddr[3]);
// Update
caddr[3] = max(caddr[3],0);
buff(3,5) = (r2==0);
mem(5,cw(3,5)) = (r2==0);
co(5,cw(3,5))+=1;
delta(5,cw(3,5)) = -1;
ASSUME(creturn[3] >= cw(3,5));
// ret i8* null, !dbg !79
ret_thread_3 = (- 1);
// Dumping thread 0
int ret_thread_0 = 0;
cdy[0] = get_rng(0,NCONTEXT-1);
ASSUME(cdy[0] >= cstart[0]);
T0BLOCK0:
// %thr0 = alloca i64, align 8
// %thr1 = alloca i64, align 8
// %thr2 = alloca i64, align 8
// call void @llvm.dbg.value(metadata i32 %argc, metadata !133, metadata !DIExpression()), !dbg !173
// call void @llvm.dbg.value(metadata i8** %argv, metadata !134, metadata !DIExpression()), !dbg !173
// %0 = bitcast i64* %thr0 to i8*, !dbg !83
// call void @llvm.lifetime.start.p0i8(i64 8, i8* %0) #7, !dbg !83
// call void @llvm.dbg.declare(metadata i64* %thr0, metadata !135, metadata !DIExpression()), !dbg !175
// %1 = bitcast i64* %thr1 to i8*, !dbg !85
// call void @llvm.lifetime.start.p0i8(i64 8, i8* %1) #7, !dbg !85
// call void @llvm.dbg.declare(metadata i64* %thr1, metadata !139, metadata !DIExpression()), !dbg !177
// %2 = bitcast i64* %thr2 to i8*, !dbg !87
// call void @llvm.lifetime.start.p0i8(i64 8, i8* %2) #7, !dbg !87
// call void @llvm.dbg.declare(metadata i64* %thr2, metadata !140, metadata !DIExpression()), !dbg !179
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 2), metadata !141, metadata !DIExpression()), !dbg !180
// call void @llvm.dbg.value(metadata i64 0, metadata !143, metadata !DIExpression()), !dbg !180
// store atomic i64 0, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 2) monotonic, align 8, !dbg !90
// ST: Guess
iw(0,0+2*1) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STIW
old_cw = cw(0,0+2*1);
cw(0,0+2*1) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STCOM
// Check
ASSUME(active[iw(0,0+2*1)] == 0);
ASSUME(active[cw(0,0+2*1)] == 0);
ASSUME(sforbid(0+2*1,cw(0,0+2*1))== 0);
ASSUME(iw(0,0+2*1) >= 0);
ASSUME(iw(0,0+2*1) >= 0);
ASSUME(cw(0,0+2*1) >= iw(0,0+2*1));
ASSUME(cw(0,0+2*1) >= old_cw);
ASSUME(cw(0,0+2*1) >= cr(0,0+2*1));
ASSUME(cw(0,0+2*1) >= cl[0]);
ASSUME(cw(0,0+2*1) >= cisb[0]);
ASSUME(cw(0,0+2*1) >= cdy[0]);
ASSUME(cw(0,0+2*1) >= cdl[0]);
ASSUME(cw(0,0+2*1) >= cds[0]);
ASSUME(cw(0,0+2*1) >= cctrl[0]);
ASSUME(cw(0,0+2*1) >= caddr[0]);
// Update
caddr[0] = max(caddr[0],0);
buff(0,0+2*1) = 0;
mem(0+2*1,cw(0,0+2*1)) = 0;
co(0+2*1,cw(0,0+2*1))+=1;
delta(0+2*1,cw(0,0+2*1)) = -1;
ASSUME(creturn[0] >= cw(0,0+2*1));
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 1), metadata !144, metadata !DIExpression()), !dbg !182
// call void @llvm.dbg.value(metadata i64 0, metadata !146, metadata !DIExpression()), !dbg !182
// store atomic i64 0, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 1) monotonic, align 8, !dbg !92
// ST: Guess
iw(0,0+1*1) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STIW
old_cw = cw(0,0+1*1);
cw(0,0+1*1) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STCOM
// Check
ASSUME(active[iw(0,0+1*1)] == 0);
ASSUME(active[cw(0,0+1*1)] == 0);
ASSUME(sforbid(0+1*1,cw(0,0+1*1))== 0);
ASSUME(iw(0,0+1*1) >= 0);
ASSUME(iw(0,0+1*1) >= 0);
ASSUME(cw(0,0+1*1) >= iw(0,0+1*1));
ASSUME(cw(0,0+1*1) >= old_cw);
ASSUME(cw(0,0+1*1) >= cr(0,0+1*1));
ASSUME(cw(0,0+1*1) >= cl[0]);
ASSUME(cw(0,0+1*1) >= cisb[0]);
ASSUME(cw(0,0+1*1) >= cdy[0]);
ASSUME(cw(0,0+1*1) >= cdl[0]);
ASSUME(cw(0,0+1*1) >= cds[0]);
ASSUME(cw(0,0+1*1) >= cctrl[0]);
ASSUME(cw(0,0+1*1) >= caddr[0]);
// Update
caddr[0] = max(caddr[0],0);
buff(0,0+1*1) = 0;
mem(0+1*1,cw(0,0+1*1)) = 0;
co(0+1*1,cw(0,0+1*1))+=1;
delta(0+1*1,cw(0,0+1*1)) = -1;
ASSUME(creturn[0] >= cw(0,0+1*1));
// call void @llvm.dbg.value(metadata i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 0), metadata !147, metadata !DIExpression()), !dbg !184
// call void @llvm.dbg.value(metadata i64 0, metadata !149, metadata !DIExpression()), !dbg !184
// store atomic i64 0, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @vars, i64 0, i64 0) monotonic, align 8, !dbg !94
// ST: Guess
iw(0,0) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STIW
old_cw = cw(0,0);
cw(0,0) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STCOM
// Check
ASSUME(active[iw(0,0)] == 0);
ASSUME(active[cw(0,0)] == 0);
ASSUME(sforbid(0,cw(0,0))== 0);
ASSUME(iw(0,0) >= 0);
ASSUME(iw(0,0) >= 0);
ASSUME(cw(0,0) >= iw(0,0));
ASSUME(cw(0,0) >= old_cw);
ASSUME(cw(0,0) >= cr(0,0));
ASSUME(cw(0,0) >= cl[0]);
ASSUME(cw(0,0) >= cisb[0]);
ASSUME(cw(0,0) >= cdy[0]);
ASSUME(cw(0,0) >= cdl[0]);
ASSUME(cw(0,0) >= cds[0]);
ASSUME(cw(0,0) >= cctrl[0]);
ASSUME(cw(0,0) >= caddr[0]);
// Update
caddr[0] = max(caddr[0],0);
buff(0,0) = 0;
mem(0,cw(0,0)) = 0;
co(0,cw(0,0))+=1;
delta(0,cw(0,0)) = -1;
ASSUME(creturn[0] >= cw(0,0));
// call void @llvm.dbg.value(metadata i64* @atom_1_X0_1, metadata !150, metadata !DIExpression()), !dbg !186
// call void @llvm.dbg.value(metadata i64 0, metadata !152, metadata !DIExpression()), !dbg !186
// store atomic i64 0, i64* @atom_1_X0_1 monotonic, align 8, !dbg !96
// ST: Guess
iw(0,3) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STIW
old_cw = cw(0,3);
cw(0,3) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STCOM
// Check
ASSUME(active[iw(0,3)] == 0);
ASSUME(active[cw(0,3)] == 0);
ASSUME(sforbid(3,cw(0,3))== 0);
ASSUME(iw(0,3) >= 0);
ASSUME(iw(0,3) >= 0);
ASSUME(cw(0,3) >= iw(0,3));
ASSUME(cw(0,3) >= old_cw);
ASSUME(cw(0,3) >= cr(0,3));
ASSUME(cw(0,3) >= cl[0]);
ASSUME(cw(0,3) >= cisb[0]);
ASSUME(cw(0,3) >= cdy[0]);
ASSUME(cw(0,3) >= cdl[0]);
ASSUME(cw(0,3) >= cds[0]);
ASSUME(cw(0,3) >= cctrl[0]);
ASSUME(cw(0,3) >= caddr[0]);
// Update
caddr[0] = max(caddr[0],0);
buff(0,3) = 0;
mem(3,cw(0,3)) = 0;
co(3,cw(0,3))+=1;
delta(3,cw(0,3)) = -1;
ASSUME(creturn[0] >= cw(0,3));
// call void @llvm.dbg.value(metadata i64* @atom_2_X0_1, metadata !153, metadata !DIExpression()), !dbg !188
// call void @llvm.dbg.value(metadata i64 0, metadata !155, metadata !DIExpression()), !dbg !188
// store atomic i64 0, i64* @atom_2_X0_1 monotonic, align 8, !dbg !98
// ST: Guess
iw(0,4) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STIW
old_cw = cw(0,4);
cw(0,4) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STCOM
// Check
ASSUME(active[iw(0,4)] == 0);
ASSUME(active[cw(0,4)] == 0);
ASSUME(sforbid(4,cw(0,4))== 0);
ASSUME(iw(0,4) >= 0);
ASSUME(iw(0,4) >= 0);
ASSUME(cw(0,4) >= iw(0,4));
ASSUME(cw(0,4) >= old_cw);
ASSUME(cw(0,4) >= cr(0,4));
ASSUME(cw(0,4) >= cl[0]);
ASSUME(cw(0,4) >= cisb[0]);
ASSUME(cw(0,4) >= cdy[0]);
ASSUME(cw(0,4) >= cdl[0]);
ASSUME(cw(0,4) >= cds[0]);
ASSUME(cw(0,4) >= cctrl[0]);
ASSUME(cw(0,4) >= caddr[0]);
// Update
caddr[0] = max(caddr[0],0);
buff(0,4) = 0;
mem(4,cw(0,4)) = 0;
co(4,cw(0,4))+=1;
delta(4,cw(0,4)) = -1;
ASSUME(creturn[0] >= cw(0,4));
// call void @llvm.dbg.value(metadata i64* @atom_2_X2_0, metadata !156, metadata !DIExpression()), !dbg !190
// call void @llvm.dbg.value(metadata i64 0, metadata !158, metadata !DIExpression()), !dbg !190
// store atomic i64 0, i64* @atom_2_X2_0 monotonic, align 8, !dbg !100
// ST: Guess
iw(0,5) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STIW
old_cw = cw(0,5);
cw(0,5) = get_rng(0,NCONTEXT-1);// 0 ASSIGN STCOM
// Check
ASSUME(active[iw(0,5)] == 0);
ASSUME(active[cw(0,5)] == 0);
ASSUME(sforbid(5,cw(0,5))== 0);
ASSUME(iw(0,5) >= 0);
ASSUME(iw(0,5) >= 0);
ASSUME(cw(0,5) >= iw(0,5));
ASSUME(cw(0,5) >= old_cw);
ASSUME(cw(0,5) >= cr(0,5));
ASSUME(cw(0,5) >= cl[0]);
ASSUME(cw(0,5) >= cisb[0]);
ASSUME(cw(0,5) >= cdy[0]);
ASSUME(cw(0,5) >= cdl[0]);
ASSUME(cw(0,5) >= cds[0]);
ASSUME(cw(0,5) >= cctrl[0]);
ASSUME(cw(0,5) >= caddr[0]);
// Update
caddr[0] = max(caddr[0],0);
buff(0,5) = 0;
mem(5,cw(0,5)) = 0;
co(5,cw(0,5))+=1;
delta(5,cw(0,5)) = -1;
ASSUME(creturn[0] >= cw(0,5));
// %call = call i32 @pthread_create(i64* noundef %thr0, %union.pthread_attr_t* noundef null, i8* (i8*)* noundef @t0, i8* noundef null) #7, !dbg !101
// dumbsy: Guess
old_cdy = cdy[0];
cdy[0] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[0] >= old_cdy);
ASSUME(cdy[0] >= cisb[0]);
ASSUME(cdy[0] >= cdl[0]);
ASSUME(cdy[0] >= cds[0]);
ASSUME(cdy[0] >= cctrl[0]);
ASSUME(cdy[0] >= cw(0,0+0));
ASSUME(cdy[0] >= cw(0,0+1));
ASSUME(cdy[0] >= cw(0,0+2));
ASSUME(cdy[0] >= cw(0,8+0));
ASSUME(cdy[0] >= cw(0,3+0));
ASSUME(cdy[0] >= cw(0,4+0));
ASSUME(cdy[0] >= cw(0,5+0));
ASSUME(cdy[0] >= cw(0,6+0));
ASSUME(cdy[0] >= cw(0,7+0));
ASSUME(cdy[0] >= cr(0,0+0));
ASSUME(cdy[0] >= cr(0,0+1));
ASSUME(cdy[0] >= cr(0,0+2));
ASSUME(cdy[0] >= cr(0,8+0));
ASSUME(cdy[0] >= cr(0,3+0));
ASSUME(cdy[0] >= cr(0,4+0));
ASSUME(cdy[0] >= cr(0,5+0));
ASSUME(cdy[0] >= cr(0,6+0));
ASSUME(cdy[0] >= cr(0,7+0));
ASSUME(creturn[0] >= cdy[0]);
ASSUME(cstart[1] >= cdy[0]);
// %call11 = call i32 @pthread_create(i64* noundef %thr1, %union.pthread_attr_t* noundef null, i8* (i8*)* noundef @t1, i8* noundef null) #7, !dbg !102
// dumbsy: Guess
old_cdy = cdy[0];
cdy[0] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[0] >= old_cdy);
ASSUME(cdy[0] >= cisb[0]);
ASSUME(cdy[0] >= cdl[0]);
ASSUME(cdy[0] >= cds[0]);
ASSUME(cdy[0] >= cctrl[0]);
ASSUME(cdy[0] >= cw(0,0+0));
ASSUME(cdy[0] >= cw(0,0+1));
ASSUME(cdy[0] >= cw(0,0+2));
ASSUME(cdy[0] >= cw(0,8+0));
ASSUME(cdy[0] >= cw(0,3+0));
ASSUME(cdy[0] >= cw(0,4+0));
ASSUME(cdy[0] >= cw(0,5+0));
ASSUME(cdy[0] >= cw(0,6+0));
ASSUME(cdy[0] >= cw(0,7+0));
ASSUME(cdy[0] >= cr(0,0+0));
ASSUME(cdy[0] >= cr(0,0+1));
ASSUME(cdy[0] >= cr(0,0+2));
ASSUME(cdy[0] >= cr(0,8+0));
ASSUME(cdy[0] >= cr(0,3+0));
ASSUME(cdy[0] >= cr(0,4+0));
ASSUME(cdy[0] >= cr(0,5+0));
ASSUME(cdy[0] >= cr(0,6+0));
ASSUME(cdy[0] >= cr(0,7+0));
ASSUME(creturn[0] >= cdy[0]);
ASSUME(cstart[2] >= cdy[0]);
// %call12 = call i32 @pthread_create(i64* noundef %thr2, %union.pthread_attr_t* noundef null, i8* (i8*)* noundef @t2, i8* noundef null) #7, !dbg !103
// dumbsy: Guess
old_cdy = cdy[0];
cdy[0] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[0] >= old_cdy);
ASSUME(cdy[0] >= cisb[0]);
ASSUME(cdy[0] >= cdl[0]);
ASSUME(cdy[0] >= cds[0]);
ASSUME(cdy[0] >= cctrl[0]);
ASSUME(cdy[0] >= cw(0,0+0));
ASSUME(cdy[0] >= cw(0,0+1));
ASSUME(cdy[0] >= cw(0,0+2));
ASSUME(cdy[0] >= cw(0,8+0));
ASSUME(cdy[0] >= cw(0,3+0));
ASSUME(cdy[0] >= cw(0,4+0));
ASSUME(cdy[0] >= cw(0,5+0));
ASSUME(cdy[0] >= cw(0,6+0));
ASSUME(cdy[0] >= cw(0,7+0));
ASSUME(cdy[0] >= cr(0,0+0));
ASSUME(cdy[0] >= cr(0,0+1));
ASSUME(cdy[0] >= cr(0,0+2));
ASSUME(cdy[0] >= cr(0,8+0));
ASSUME(cdy[0] >= cr(0,3+0));
ASSUME(cdy[0] >= cr(0,4+0));
ASSUME(cdy[0] >= cr(0,5+0));
ASSUME(cdy[0] >= cr(0,6+0));
ASSUME(cdy[0] >= cr(0,7+0));
ASSUME(creturn[0] >= cdy[0]);
ASSUME(cstart[3] >= cdy[0]);
// %3 = load i64, i64* %thr0, align 8, !dbg !104, !tbaa !105
// LD: Guess
old_cr = cr(0,6);
cr(0,6) = get_rng(0,NCONTEXT-1);// 0 ASSIGN LDCOM
// Check
ASSUME(active[cr(0,6)] == 0);
ASSUME(cr(0,6) >= iw(0,6));
ASSUME(cr(0,6) >= 0);
ASSUME(cr(0,6) >= cdy[0]);
ASSUME(cr(0,6) >= cisb[0]);
ASSUME(cr(0,6) >= cdl[0]);
ASSUME(cr(0,6) >= cl[0]);
// Update
creg_r4 = cr(0,6);
crmax(0,6) = max(crmax(0,6),cr(0,6));
caddr[0] = max(caddr[0],0);
if(cr(0,6) < cw(0,6)) {
r4 = buff(0,6);
} else {
if(pw(0,6) != co(6,cr(0,6))) {
ASSUME(cr(0,6) >= old_cr);
}
pw(0,6) = co(6,cr(0,6));
r4 = mem(6,cr(0,6));
}
ASSUME(creturn[0] >= cr(0,6));
// %call13 = call i32 @pthread_join(i64 noundef %3, i8** noundef null), !dbg !109
// dumbsy: Guess
old_cdy = cdy[0];
cdy[0] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[0] >= old_cdy);
ASSUME(cdy[0] >= cisb[0]);
ASSUME(cdy[0] >= cdl[0]);
ASSUME(cdy[0] >= cds[0]);
ASSUME(cdy[0] >= cctrl[0]);
ASSUME(cdy[0] >= cw(0,0+0));
ASSUME(cdy[0] >= cw(0,0+1));
ASSUME(cdy[0] >= cw(0,0+2));
ASSUME(cdy[0] >= cw(0,8+0));
ASSUME(cdy[0] >= cw(0,3+0));
ASSUME(cdy[0] >= cw(0,4+0));
ASSUME(cdy[0] >= cw(0,5+0));
ASSUME(cdy[0] >= cw(0,6+0));
ASSUME(cdy[0] >= cw(0,7+0));
ASSUME(cdy[0] >= cr(0,0+0));
ASSUME(cdy[0] >= cr(0,0+1));
ASSUME(cdy[0] >= cr(0,0+2));
ASSUME(cdy[0] >= cr(0,8+0));
ASSUME(cdy[0] >= cr(0,3+0));
ASSUME(cdy[0] >= cr(0,4+0));
ASSUME(cdy[0] >= cr(0,5+0));
ASSUME(cdy[0] >= cr(0,6+0));
ASSUME(cdy[0] >= cr(0,7+0));
ASSUME(creturn[0] >= cdy[0]);
ASSUME(cdy[0] >= creturn[1]);
// %4 = load i64, i64* %thr1, align 8, !dbg !110, !tbaa !105
// LD: Guess
old_cr = cr(0,7);
cr(0,7) = get_rng(0,NCONTEXT-1);// 0 ASSIGN LDCOM
// Check
ASSUME(active[cr(0,7)] == 0);
ASSUME(cr(0,7) >= iw(0,7));
ASSUME(cr(0,7) >= 0);
ASSUME(cr(0,7) >= cdy[0]);
ASSUME(cr(0,7) >= cisb[0]);
ASSUME(cr(0,7) >= cdl[0]);
ASSUME(cr(0,7) >= cl[0]);
// Update
creg_r5 = cr(0,7);
crmax(0,7) = max(crmax(0,7),cr(0,7));
caddr[0] = max(caddr[0],0);
if(cr(0,7) < cw(0,7)) {
r5 = buff(0,7);
} else {
if(pw(0,7) != co(7,cr(0,7))) {
ASSUME(cr(0,7) >= old_cr);
}
pw(0,7) = co(7,cr(0,7));
r5 = mem(7,cr(0,7));
}
ASSUME(creturn[0] >= cr(0,7));
// %call14 = call i32 @pthread_join(i64 noundef %4, i8** noundef null), !dbg !111
// dumbsy: Guess
old_cdy = cdy[0];
cdy[0] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[0] >= old_cdy);
ASSUME(cdy[0] >= cisb[0]);
ASSUME(cdy[0] >= cdl[0]);
ASSUME(cdy[0] >= cds[0]);
ASSUME(cdy[0] >= cctrl[0]);
ASSUME(cdy[0] >= cw(0,0+0));
ASSUME(cdy[0] >= cw(0,0+1));
ASSUME(cdy[0] >= cw(0,0+2));
ASSUME(cdy[0] >= cw(0,8+0));
ASSUME(cdy[0] >= cw(0,3+0));
ASSUME(cdy[0] >= cw(0,4+0));
ASSUME(cdy[0] >= cw(0,5+0));
ASSUME(cdy[0] >= cw(0,6+0));
ASSUME(cdy[0] >= cw(0,7+0));
ASSUME(cdy[0] >= cr(0,0+0));
ASSUME(cdy[0] >= cr(0,0+1));
ASSUME(cdy[0] >= cr(0,0+2));
ASSUME(cdy[0] >= cr(0,8+0));
ASSUME(cdy[0] >= cr(0,3+0));
ASSUME(cdy[0] >= cr(0,4+0));
ASSUME(cdy[0] >= cr(0,5+0));
ASSUME(cdy[0] >= cr(0,6+0));
ASSUME(cdy[0] >= cr(0,7+0));
ASSUME(creturn[0] >= cdy[0]);
ASSUME(cdy[0] >= creturn[2]);
// %5 = load i64, i64* %thr2, align 8, !dbg !112, !tbaa !105
// LD: Guess
old_cr = cr(0,8);
cr(0,8) = get_rng(0,NCONTEXT-1);// 0 ASSIGN LDCOM
// Check
ASSUME(active[cr(0,8)] == 0);
ASSUME(cr(0,8) >= iw(0,8));
ASSUME(cr(0,8) >= 0);
ASSUME(cr(0,8) >= cdy[0]);
ASSUME(cr(0,8) >= cisb[0]);
ASSUME(cr(0,8) >= cdl[0]);
ASSUME(cr(0,8) >= cl[0]);
// Update
creg_r6 = cr(0,8);
crmax(0,8) = max(crmax(0,8),cr(0,8));
caddr[0] = max(caddr[0],0);
if(cr(0,8) < cw(0,8)) {
r6 = buff(0,8);
} else {
if(pw(0,8) != co(8,cr(0,8))) {
ASSUME(cr(0,8) >= old_cr);
}
pw(0,8) = co(8,cr(0,8));
r6 = mem(8,cr(0,8));
}
ASSUME(creturn[0] >= cr(0,8));
// %call15 = call i32 @pthread_join(i64 noundef %5, i8** noundef null), !dbg !113
// dumbsy: Guess
old_cdy = cdy[0];
cdy[0] = get_rng(0,NCONTEXT-1);
// Check
ASSUME(cdy[0] >= old_cdy);
ASSUME(cdy[0] >= cisb[0]);
ASSUME(cdy[0] >= cdl[0]);
ASSUME(cdy[0] >= cds[0]);
ASSUME(cdy[0] >= cctrl[0]);
ASSUME(cdy[0] >= cw(0,0+0));
ASSUME(cdy[0] >= cw(0,0+1));
ASSUME(cdy[0] >= cw(0,0+2));
ASSUME(cdy[0] >= cw(0,8+0));
ASSUME(cdy[0] >= cw(0,3+0));
ASSUME(cdy[0] >= cw(0,4+0));
ASSUME(cdy[0] >= cw(0,5+0));
ASSUME(cdy[0] >= cw(0,6+0));
ASSUME(cdy[0] >= cw(0,7+0));
ASSUME(cdy[0] >= cr(0,0+0));
ASSUME(cdy[0] >= cr(0,0+1));
ASSUME(cdy[0] >= cr(0,0+2));
ASSUME(cdy[0] >= cr(0,8+0));
ASSUME(cdy[0] >= cr(0,3+0));
ASSUME(cdy[0] >= cr(0,4+0));
ASSUME(cdy[0] >= cr(0,5+0));
ASSUME(cdy[0] >= cr(0,6+0));
ASSUME(cdy[0] >= cr(0,7+0));
ASSUME(creturn[0] >= cdy[0]);
ASSUME(cdy[0] >= creturn[3]);
// call void @llvm.dbg.value(metadata i64* @atom_1_X0_1, metadata !160, metadata !DIExpression()), !dbg !205
// %6 = load atomic i64, i64* @atom_1_X0_1 seq_cst, align 8, !dbg !115
// LD: Guess
old_cr = cr(0,3);
cr(0,3) = get_rng(0,NCONTEXT-1);// 0 ASSIGN LDCOM
// Check
ASSUME(active[cr(0,3)] == 0);
ASSUME(cr(0,3) >= iw(0,3));
ASSUME(cr(0,3) >= 0);
ASSUME(cr(0,3) >= cdy[0]);
ASSUME(cr(0,3) >= cisb[0]);
ASSUME(cr(0,3) >= cdl[0]);
ASSUME(cr(0,3) >= cl[0]);
// Update
creg_r7 = cr(0,3);
crmax(0,3) = max(crmax(0,3),cr(0,3));
caddr[0] = max(caddr[0],0);
if(cr(0,3) < cw(0,3)) {
r7 = buff(0,3);
} else {
if(pw(0,3) != co(3,cr(0,3))) {
ASSUME(cr(0,3) >= old_cr);
}
pw(0,3) = co(3,cr(0,3));
r7 = mem(3,cr(0,3));
}
ASSUME(creturn[0] >= cr(0,3));
// call void @llvm.dbg.value(metadata i64 %6, metadata !162, metadata !DIExpression()), !dbg !205
// %conv = trunc i64 %6 to i32, !dbg !116
// call void @llvm.dbg.value(metadata i32 %conv, metadata !159, metadata !DIExpression()), !dbg !173
// call void @llvm.dbg.value(metadata i64* @atom_2_X0_1, metadata !164, metadata !DIExpression()), !dbg !208
// %7 = load atomic i64, i64* @atom_2_X0_1 seq_cst, align 8, !dbg !118
// LD: Guess
old_cr = cr(0,4);
cr(0,4) = get_rng(0,NCONTEXT-1);// 0 ASSIGN LDCOM
// Check
ASSUME(active[cr(0,4)] == 0);
ASSUME(cr(0,4) >= iw(0,4));
ASSUME(cr(0,4) >= 0);
ASSUME(cr(0,4) >= cdy[0]);
ASSUME(cr(0,4) >= cisb[0]);
ASSUME(cr(0,4) >= cdl[0]);
ASSUME(cr(0,4) >= cl[0]);
// Update
creg_r8 = cr(0,4);
crmax(0,4) = max(crmax(0,4),cr(0,4));
caddr[0] = max(caddr[0],0);
if(cr(0,4) < cw(0,4)) {
r8 = buff(0,4);
} else {
if(pw(0,4) != co(4,cr(0,4))) {
ASSUME(cr(0,4) >= old_cr);
}
pw(0,4) = co(4,cr(0,4));
r8 = mem(4,cr(0,4));
}
ASSUME(creturn[0] >= cr(0,4));
// call void @llvm.dbg.value(metadata i64 %7, metadata !166, metadata !DIExpression()), !dbg !208
// %conv19 = trunc i64 %7 to i32, !dbg !119
// call void @llvm.dbg.value(metadata i32 %conv19, metadata !163, metadata !DIExpression()), !dbg !173
// call void @llvm.dbg.value(metadata i64* @atom_2_X2_0, metadata !168, metadata !DIExpression()), !dbg !211
// %8 = load atomic i64, i64* @atom_2_X2_0 seq_cst, align 8, !dbg !121
// LD: Guess
old_cr = cr(0,5);
cr(0,5) = get_rng(0,NCONTEXT-1);// 0 ASSIGN LDCOM
// Check
ASSUME(active[cr(0,5)] == 0);
ASSUME(cr(0,5) >= iw(0,5));
ASSUME(cr(0,5) >= 0);
ASSUME(cr(0,5) >= cdy[0]);
ASSUME(cr(0,5) >= cisb[0]);
ASSUME(cr(0,5) >= cdl[0]);
ASSUME(cr(0,5) >= cl[0]);
// Update
creg_r9 = cr(0,5);
crmax(0,5) = max(crmax(0,5),cr(0,5));
caddr[0] = max(caddr[0],0);
if(cr(0,5) < cw(0,5)) {
r9 = buff(0,5);
} else {
if(pw(0,5) != co(5,cr(0,5))) {
ASSUME(cr(0,5) >= old_cr);
}
pw(0,5) = co(5,cr(0,5));
r9 = mem(5,cr(0,5));
}
ASSUME(creturn[0] >= cr(0,5));
// call void @llvm.dbg.value(metadata i64 %8, metadata !170, metadata !DIExpression()), !dbg !211
// %conv23 = trunc i64 %8 to i32, !dbg !122
// call void @llvm.dbg.value(metadata i32 %conv23, metadata !167, metadata !DIExpression()), !dbg !173
// %and = and i32 %conv19, %conv23, !dbg !123
creg_r10 = max(creg_r8,creg_r9);
ASSUME(active[creg_r10] == 0);
r10 = r8 & r9;
// call void @llvm.dbg.value(metadata i32 %and, metadata !171, metadata !DIExpression()), !dbg !173
// %and24 = and i32 %conv, %and, !dbg !124
creg_r11 = max(creg_r7,creg_r10);
ASSUME(active[creg_r11] == 0);
r11 = r7 & r10;
// call void @llvm.dbg.value(metadata i32 %and24, metadata !172, metadata !DIExpression()), !dbg !173
// %cmp = icmp eq i32 %and24, 1, !dbg !125
// br i1 %cmp, label %if.then, label %if.end, !dbg !127
old_cctrl = cctrl[0];
cctrl[0] = get_rng(0,NCONTEXT-1);
ASSUME(cctrl[0] >= old_cctrl);
ASSUME(cctrl[0] >= creg_r11);
ASSUME(cctrl[0] >= 0);
if((r11==1)) {
goto T0BLOCK1;
} else {
goto T0BLOCK2;
}
T0BLOCK1:
// call void @__assert_fail(i8* noundef getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i64 0, i64 0), i8* noundef getelementptr inbounds ([104 x i8], [104 x i8]* @.str.1, i64 0, i64 0), i32 noundef 74, i8* noundef getelementptr inbounds ([23 x i8], [23 x i8]* @__PRETTY_FUNCTION__.main, i64 0, i64 0)) #8, !dbg !128
// unreachable, !dbg !128
r12 = 1;
T0BLOCK2:
// %9 = bitcast i64* %thr2 to i8*, !dbg !131
// call void @llvm.lifetime.end.p0i8(i64 8, i8* %9) #7, !dbg !131
// %10 = bitcast i64* %thr1 to i8*, !dbg !131
// call void @llvm.lifetime.end.p0i8(i64 8, i8* %10) #7, !dbg !131
// %11 = bitcast i64* %thr0 to i8*, !dbg !131
// call void @llvm.lifetime.end.p0i8(i64 8, i8* %11) #7, !dbg !131
// ret i32 0, !dbg !132
ret_thread_0 = 0;
ASSERT(r12== 0);
}
| [
"tuan-phong.ngo@it.uu.se"
] | tuan-phong.ngo@it.uu.se |
7c856708503aec1bfe10c12834901d773b249882 | 10b4ba9e8707576a5b0210da43e6261c7e2f072e | /pat/成绩排名.cpp | 9714b5ae1ac315c0d3b7857aa4cdd173e8c8e73e | [] | no_license | liyingfei142118/Document2018 | 2ec1116cbe61ddfb63144cf06281f0c1b3b1823e | 662c5a6a50f5c7b6251027601c6f0a13307dca58 | refs/heads/master | 2020-07-03T10:31:23.981102 | 2019-08-12T08:08:34 | 2019-08-12T08:08:34 | 201,878,629 | 1 | 0 | null | null | null | null | GB18030 | C++ | false | false | 1,375 | cpp | /*读入n名学生的姓名、学号、成绩,分别输出成绩最高和成绩最低学生的姓名和学号。
输入格式:每个测试输入包含1个测试用例,格式为
第1行:正整数n
第2行:第1个学生的姓名 学号 成绩
第3行:第2个学生的姓名 学号 成绩
... ... ...
第n+1行:第n个学生的姓名 学号 成绩
其中姓名和学号均为不超过10个字符的字符串,成绩为0到100之间的一个整数,
这里保证在一组测试用例中没有两个学生的成绩是相同的。
输出格式:对每个测试用例输出2行,第1行是成绩最高学生的姓名和学号,
第2行是成绩最低学生的姓名和学号,字符串间有1空格。
输入样例:
3
Joe Math990112 89
Mike CS991301 100
Mary EE990830 95
输出样例:
Mike CS991301
Joe Math990112
*/
#include <iostream>
#include <string>
using namespace std;
// One student record for the grade-ranking exercise.
// `name` and `ID` hold at most 10 characters plus the terminating NUL;
// `grad` is the score, 0..100 per the problem statement.
struct student
{
	char name[11];
	char ID[11];
	int grad;
};
// Reads n student records (name, ID, grade) from stdin and prints the
// name and ID of the highest-scoring student on the first line and of the
// lowest-scoring student on the second line. Grades are guaranteed to be
// pairwise distinct by the problem statement.
//
// Fixes vs. the original:
//  - no variable-length array `student stu[n+1]` (non-standard C++);
//  - the original left `m`/`n1` uninitialized (undefined behaviour) when
//    the first grade equalled the sentinel (0 for max, 100 for min) --
//    here both extremes are seeded from the first record;
//  - only O(1) storage is needed, so the records are not kept.
int main()
{
	int n;
	cin >> n;
	std::string name, id;
	int grade;
	std::string best_name, best_id, worst_name, worst_id;
	int best_grade = 0, worst_grade = 0;
	for (int i = 0; i < n; i++)
	{
		cin >> name >> id >> grade;
		// i == 0 seeds both extremes so sentinel values cannot leave
		// them unset.
		if (i == 0 || grade > best_grade)
		{
			best_grade = grade;
			best_name = name;
			best_id = id;
		}
		if (i == 0 || grade < worst_grade)
		{
			worst_grade = grade;
			worst_name = name;
			worst_id = id;
		}
	}
	cout << best_name << " " << best_id << endl;
	cout << worst_name << " " << worst_id << endl;
}
| [
"liyingfei103@jobmail.vip"
] | liyingfei103@jobmail.vip |
90e3241c86b5ce4a32b1ada77a53bdf247fd2f06 | 22d5d10c1f67efe97b8854760b7934d8e16d269b | /LeetCodeCPP/47. PermutationsII/main.cpp | 7a88a97f926481b3ad11969dd754c48358f0cdd4 | [
"Apache-2.0"
] | permissive | 18600130137/leetcode | 42241ece7fce1536255d427a87897015b26fd16d | fd2dc72c0b85da50269732f0fcf91326c4787d3a | refs/heads/master | 2020-04-24T01:48:03.049019 | 2019-10-17T06:02:57 | 2019-10-17T06:02:57 | 171,612,908 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,591 | cpp | //
// main.cpp
// 47. PermutationsII
//
// Created by admin on 2019/3/18.
// Copyright © 2019年 liu. All rights reserved.
//
#include <iostream>
#include <vector>
using namespace std;
class Solution {
private:
    // Backtracking helper. `current` is the permutation built so far and
    // `used[i]` marks whether nums[i] has been placed. `nums` must be
    // sorted so equal values are adjacent, which enables duplicate
    // skipping. Results are appended to `result`.
    void backtrack(const std::vector<int>& nums,
                   std::vector<bool>& used,
                   std::vector<int>& current,
                   std::vector<std::vector<int>>& result) {
        if (current.size() == nums.size()) {
            result.push_back(current);
            return;
        }
        for (std::size_t i = 0; i < nums.size(); ++i) {
            if (used[i]) {
                continue;
            }
            // Among a run of equal values, only pick nums[i] if its left
            // twin is already used in this branch; this produces each
            // distinct permutation exactly once.
            if (i > 0 && nums[i] == nums[i - 1] && !used[i - 1]) {
                continue;
            }
            used[i] = true;
            current.push_back(nums[i]);
            backtrack(nums, used, current, result);
            current.pop_back();
            used[i] = false;
        }
    }
public:
    // Returns all unique permutations of `nums`, which may contain
    // duplicates. `nums` is sorted in place (as in the original code).
    // Replaces the previous pass-by-value recursion, which copied the
    // whole vector at every call, and removes the dead commented-out
    // variant of the helper.
    std::vector<std::vector<int>> permuteUnique(std::vector<int>& nums) {
        std::sort(nums.begin(), nums.end());
        std::vector<std::vector<int>> result;
        std::vector<bool> used(nums.size(), false);
        std::vector<int> current;
        current.reserve(nums.size());
        backtrack(nums, used, current, result);
        return result;
    }
};
int main(int argc, const char * argv[]) {
vector<int> input={1,1,2,2};
Solution so=Solution();
vector<vector<int>> ret=so.permuteUnique(input);
for(vector<int> item:ret){
for(int i:item){
cout<<i<<" ";
}
cout<<endl;
}
return 0;
}
| [
"guodongliu6@crediteses.cn"
] | guodongliu6@crediteses.cn |
0dd0f914eea606945a4af6c214d24c7b137c2754 | 7de2a17a61f83d3aea9d0078d2210d7dfd7960a3 | /src/ImProcGLTexture_ClrChk.h | a27c206ec597f80f46a3c66839beb9968224c504 | [] | no_license | hhcoder/GlCameraRendering | 688ceeed70289a7f8b8bfd1163a2da00cb434883 | 26ad341cfe075a532e629841e73054e03f31c8d7 | refs/heads/master | 2020-09-24T10:48:32.021361 | 2019-12-04T00:18:38 | 2019-12-04T00:18:38 | 225,741,857 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,487 | h | #ifndef _IMPROCGLTEXTURE_CLRCHK_H_
#define _IMPROCGLTEXTURE_CLRCHK_H_
#include "ImProcGLTexture.h"
#include "ImProcUtilColorChecker.h"
namespace ImProc
{
	// GL texture backed by a software-generated colour-checker image
	// (ImProc::img_rgb). The buffer is allocated once and re-uploaded to
	// the bound GL texture on every Update() call.
	//
	// NOTE(review): this class owns both pCC and textureId but does not
	// delete copy construction/assignment (rule of three) -- copying an
	// instance would double-free both resources. Confirm instances are
	// never copied, or delete the copy operations in the header.
	class GLTexture_ColorChecker : public GLTexture
	{
		public:
			// Allocates the iw x ih img_rgb backing buffer and creates +
			// binds one GL texture object.
			GLTexture_ColorChecker(GLuint iw, GLuint ih)
				: nFrameWidth(iw),
				nFrameHeight(ih),
				textureId(0),
				pCC(NULL)
			{
				pCC = new img_rgb(nFrameWidth, nFrameHeight);
				glGenTextures(1, &textureId);
				glBindTexture(GL_TEXTURE_2D, textureId);
			}
			// Uploads the colour-checker buffer as the texture image.
			// NOTE(review): the `iframe` parameter is ignored -- the
			// pixels always come from pCC->get_buf(); confirm that is the
			// intended behaviour for this subclass.
			virtual void Update(GLubyte* iframe)
			{
				GLubyte* pixels = pCC->get_buf();
				ActiveTexture0();
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, nFrameWidth, nFrameHeight, 0, GL_RGB,
					GL_UNSIGNED_BYTE, pixels);
			}
			// Releases the image buffer and the GL texture object.
			~GLTexture_ColorChecker()
			{
				if(NULL!=pCC)
					delete pCC;
				if(0!=textureId)
					glDeleteTextures(1, &textureId);
			}
		private:
			// Configures filtering/wrapping and binds textureId to
			// texture unit 0.
			void ActiveTexture0(void)
			{
				// Set up the interpolation method
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
				// Active this texture
				glActiveTexture(GL_TEXTURE0);
				glBindTexture(GL_TEXTURE_2D, textureId);
			}
		private:
			GLuint textureId;      // GL texture object name (0 = none)
			GLuint nFrameWidth;    // texture width in pixels
			GLuint nFrameHeight;   // texture height in pixels
		private:
			img_rgb* pCC;          // owned colour-checker image buffer
	}; // end class GLTexture_ColorChecker
};
#endif
| [
"hhwu.coder@outlook.com"
] | hhwu.coder@outlook.com |
7272179785d922b36366316ff38297b3d9f3e6a6 | 6d41a2bdaad0bf0399466417a2270fc73147c153 | /programs in April/0421/校门外的树/校门外的树/校门外的树.cpp | 8c9eb1bb3686600229ac0f3fa5d0f17e66d04d98 | [] | no_license | yuheng95/CPP_Project | b0c7aaf4af24ac158e0ab931f274ccb957e0850d | 5d5630bc21bc6d65e7b0e8dfbf6cf6e17ccfcecf | refs/heads/master | 2021-07-05T19:41:08.813794 | 2017-09-30T18:23:24 | 2017-09-30T18:23:24 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 411 | cpp | #include <iostream>;
using namespace std;
// "Trees outside the school gate": trees stand at every integer point
// 0..L of a road; M closed intervals [start, end] of trees are removed;
// prints how many trees remain.
//
// Fixes vs. the original: the fixed array `int a[10001]` silently
// overflowed for L > 10000 -- a std::vector sized from the input works
// for any L; the stray `;` after main's closing brace is removed.
int main() {
	int L, M = 0;
	cin >> L >> M;
	// alive[i] is true while the tree at position i is still standing.
	std::vector<bool> alive(L + 1, true);
	for (int region = 1; region <= M; region++) {
		int start = 0;
		int end = 0;
		cin >> start >> end;
		// Remove every tree in the closed interval [start, end].
		for (int pos = start; pos <= end; pos++) {
			alive[pos] = false;
		}
	}
	int remaining = 0;
	for (int pos = 0; pos <= L; pos++) {
		if (alive[pos]) {
			remaining++;
		}
	}
	cout << remaining << endl;
	return 0;
}
"aqwmx11@pku.edu.cn"
] | aqwmx11@pku.edu.cn |
6042e76ba3b1e9c11658256d013f9897f6a2b028 | d850e50d2cb97b85c524802201efb624889fe132 | /a095.cpp | af825e6fbf2642d8f257c077ff6fce1cdb1ac8fa | [] | no_license | rosynirvana/ZeroJudge | 347e8bd5b18adb20d8c39efa700056696c720f6c | c48e7daf99da3f8c5928d20b51510c4ddbdea161 | refs/heads/master | 2021-01-21T22:29:10.608134 | 2013-09-04T12:38:12 | 2013-09-04T12:38:12 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 176 | cpp | #include <iostream>
// For each (N, M) pair read from stdin, prints M+1 when the two values
// differ and M when they are equal, one result per line.
int main()
{
    int N, M;
    while (std::cin >> N) {
        std::cin >> M;
        const int answer = (M != N) ? M + 1 : M;
        std::cout << answer << "\n";
    }
    return 0;
}
"kongchuijin@gmail.com"
] | kongchuijin@gmail.com |
293b37023bbcdf27b6ae924ee5e55fc45b00fc1f | 5ede0e0fd1668416fc74fc5a3c997547b7708abc | /src/map_patterns.cpp | d93d1e1499a4c6bc0c71e3f9ead2eefb4c0c565b | [] | no_license | ethanfine/ia | d1f8671177cacb4bc351d5257e65c8cc05e6732d | 134ff030939fc3286545d7f58cc20cbfbc5547fa | refs/heads/develop | 2020-04-06T05:09:13.466507 | 2015-12-08T19:19:16 | 2015-12-08T19:19:16 | 47,472,684 | 0 | 0 | null | 2015-12-05T21:05:43 | 2015-12-05T21:05:42 | null | UTF-8 | C++ | false | false | 3,486 | cpp | #include "map_patterns.hpp"
#include "init.hpp"
#include <vector>
#include "map.hpp"
#include "feature_rigid.hpp"
#include "game_time.hpp"
namespace map_patterns
{
// Partitions the walkable floor cells belonging to `room` into two
// buckets:
//   adj_to_walls    - free cells lying flush against a straight wall
//                     section on exactly one side (see patterns below),
//   away_from_walls - free cells with no walk blockers in any direction.
// Cells next to a door, and cells matching no recognised pattern, end up
// in neither bucket. Both output vectors are overwritten.
void cells_in_room(const Room& room, std::vector<P>& adj_to_walls,
                   std::vector<P>& away_from_walls)
{
    TRACE_FUNC_BEGIN_VERBOSE;
    // Collect every cell inside the room's rectangle that is actually
    // mapped to this room and can both be walked on and hold a rigid.
    std::vector<P> pos_bucket;
    pos_bucket.clear();
    const Rect& r = room.r_;
    for (int x = r.p0.x; x <= r.p1.x; ++x)
    {
        for (int y = r.p0.y; y <= r.p1.y; ++y)
        {
            if (map::room_map[x][y] == &room)
            {
                auto* const f = map::cells[x][y].rigid;
                if (f->can_move_cmn() && f->can_have_rigid())
                {
                    pos_bucket.push_back(P(x, y));
                }
            }
        }
    }
    adj_to_walls.clear();
    away_from_walls.clear();
    for (P& pos : pos_bucket)
    {
        // Count walk blockers among the three neighbours on each side.
        const int NR_BLK_R = walk_blockers_in_dir(Dir::right, pos);
        const int NR_BLK_D = walk_blockers_in_dir(Dir::down, pos);
        const int NR_BLK_L = walk_blockers_in_dir(Dir::left, pos);
        const int NR_BLK_U = walk_blockers_in_dir(Dir::up, pos);
        const bool IS_ZERO_BLK_ALL_DIR =
            NR_BLK_R == 0 && NR_BLK_D == 0 && NR_BLK_L == 0 && NR_BLK_U == 0;
        // Fully open cell: nothing blocking in any direction.
        if (IS_ZERO_BLK_ALL_DIR)
        {
            away_from_walls.push_back(pos);
            continue;
        }
        // Skip cells with a door anywhere in their 3x3 neighbourhood
        // (note: the dx == dy == 0 case also checks the cell itself).
        bool is_door_adjacent = false;
        for (int dx = -1; dx <= 1; ++dx)
        {
            for (int dy = -1; dy <= 1; ++dy)
            {
                const auto* const f = map::cells[pos.x + dx][pos.y + dy].rigid;
                if (f->id() == Feature_id::door) {is_door_adjacent = true;}
            }
        }
        if (is_door_adjacent) {continue;}
        // "Adjacent to a wall" pattern: 3 blockers on one side, 1 on each
        // perpendicular side, 0 on the opposite side -- presumably this
        // selects cells against a straight wall while excluding corners
        // and niches (TODO(review): confirm intent).
        if (
            (NR_BLK_R == 3 && NR_BLK_U == 1 && NR_BLK_D == 1 && NR_BLK_L == 0) ||
            (NR_BLK_R == 1 && NR_BLK_U == 3 && NR_BLK_D == 0 && NR_BLK_L == 1) ||
            (NR_BLK_R == 1 && NR_BLK_U == 0 && NR_BLK_D == 3 && NR_BLK_L == 1) ||
            (NR_BLK_R == 0 && NR_BLK_U == 1 && NR_BLK_D == 1 && NR_BLK_L == 3))
        {
            adj_to_walls.push_back(pos);
            continue;
        }
    }
    TRACE_FUNC_END_VERBOSE;
}
// Counts how many of the three neighbouring cells on the given side of
// `pos` block common movement. Only the four cardinal directions sample
// anything; every other direction yields 0.
int walk_blockers_in_dir(const Dir dir, const P& pos)
{
    // Offset from `pos` to the sampled line of cells, per direction.
    int dx = 0;
    int dy = 0;
    switch (dir)
    {
    case Dir::right: dx = 1;  break;
    case Dir::left:  dx = -1; break;
    case Dir::down:  dy = 1;  break;
    case Dir::up:    dy = -1; break;
    default:
        // Diagonals and center: nothing to sample.
        return 0;
    }
    int nr_blockers = 0;
    for (int i = -1; i <= 1; ++i)
    {
        // Sweep along y when looking left/right, along x when up/down.
        const int x = (dx != 0) ? (pos.x + dx) : (pos.x + i);
        const int y = (dx != 0) ? (pos.y + i) : (pos.y + dy);
        const auto* const f = map::cells[x][y].rigid;
        if (!f->can_move_cmn())
        {
            nr_blockers += 1;
        }
    }
    return nr_blockers;
}
} //map_patterns
| [
"m.tornq@gmail.com"
] | m.tornq@gmail.com |
ec09b5e90c55664f190a29f17fa93e67396b7b1d | 3850eac3882e8753be5f8d2d33bbc45f261141ab | /linked_list/linked_list_matrix.cpp | b41f5709f6e51cce36f12c00033a26695274f5d9 | [] | no_license | ankitkumarsamota121/geeksforgeeks_practice | 0f2ab48bc76dceadc465ad8bf588a70db053f83c | 27cbca1d44e2e6105ae8729ab9f4ea1b4a97d13c | refs/heads/master | 2023-02-07T14:58:37.107833 | 2021-01-03T11:55:47 | 2021-01-03T11:55:47 | 255,017,249 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,257 | cpp | // { Driver Code Starts
#include <bits/stdc++.h>
#define MAX 20
using namespace std;
// Node of the linked matrix: `right` points to the next node in the same
// row, `down` to the node directly below in the same column. Both start
// out NULL.
struct Node
{
    int data;
    Node *right, *down;
    Node(int x)
    {
        data = x;
        right = NULL;
        down = NULL;
    }
};
// Prints the matrix in row-major order: walks the first column through
// the `down` links and each row through the `right` links, printing every
// value followed by a single space.
void display(Node *head)
{
    Node *Rp;          // walks across the current row
    Node *Dp = head;   // walks down the first column
    while (Dp)
    {
        Rp = Dp;
        while (Rp)
        {
            cout << Rp->data << " ";
            Rp = Rp->right;
        }
        Dp = Dp->down;
    }
}
Node *constructLinkedMatrix(int mat[MAX][MAX], int n);
// driver program
// Driver: reads t test cases; for each, reads n and an n x n matrix,
// builds the linked-list representation and prints it (or "-1" when the
// construction returned no head).
int main()
{
    int t;
    cin >> t;
    while (t--)
    {
        int n;
        cin >> n;
        int mat[MAX][MAX];
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                cin >> mat[i][j];
        Node *head = constructLinkedMatrix(mat, n);
        if (!head)
            cout << "-1";
        else
            display(head);
        cout << "\n";
    }
    return 0;
}
// } Driver Code Ends
/*structure of the node of the linked list is as
struct Node
{
int data;
Node* right, *down;
Node(int x){
data = x;
right = NULL;
down = NULL;
}
};
*/
// n is the size of the matrix
// function must return the pointer to the first element
// of the in linked list i.e. that should be the element at arr[0][0]
// Builds the linked representation of the n x n matrix `mat`: each node
// is wired to its right neighbour via `right` and to the node below via
// `down`. Returns the node holding mat[0][0], or NULL when n <= 0.
//
// Rewritten for clarity: the original juggled four pointers (p, q, start,
// t) across two interleaved row walks; here a per-column array of the
// previous row's nodes makes the `down` wiring direct.
Node *constructLinkedMatrix(int mat[MAX][MAX], int n)
{
    if (n <= 0) {
        return NULL;
    }
    Node *head = NULL;
    // prev_row[c] = the previous row's node in column c, so its `down`
    // pointer can be set when the node beneath it is created.
    Node *prev_row[MAX] = {NULL};
    for (int row = 0; row < n; ++row) {
        Node *left = NULL; // node created just before this one in the row
        for (int col = 0; col < n; ++col) {
            Node *node = new Node(mat[row][col]);
            if (head == NULL) {
                head = node; // very first node: mat[0][0]
            }
            if (left != NULL) {
                left->right = node;
            }
            if (prev_row[col] != NULL) {
                prev_row[col]->down = node;
            }
            prev_row[col] = node;
            left = node;
        }
    }
    return head;
}
| [
"ankitkumarsamota121@gmail.com"
] | ankitkumarsamota121@gmail.com |
01106bff80159e1d5a09fd11d3b2fe330c760509 | da8fc6a09a690cf111a1d88e9f5a2d749b81af27 | /StoneSword/src/StoneSword/Events/ApplicationEvent.h | 98d5beeb504d263ef56d2ba8f9a57eb42c4a689c | [
"Apache-2.0"
] | permissive | qqqcode/StoneSword | 0864b5b0016538805581e37e553edc21192ca1f8 | 0d6217b934508196327d3bd001b42f20d8ea9c7f | refs/heads/master | 2023-03-01T18:04:05.241472 | 2021-02-01T16:53:26 | 2021-02-01T16:53:26 | 325,945,127 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,340 | h | #pragma once
#include "Event.h"
namespace StoneSword {
	// Fired when the OS window is resized; carries the new client size.
	class STONESWORD_API WindowResizeEvent : public Event
	{
	public:
		WindowResizeEvent(unsigned int width, unsigned int height)
			: m_Width(width), m_Height(height) {}
		// New window width in pixels.
		unsigned int GetWidth() const { return m_Width; }
		// New window height in pixels.
		unsigned int GetHeight() const { return m_Height; }
		// Human-readable form, e.g. "WindowResizeEvent: 1280, 720".
		std::string ToString() const override
		{
			std::stringstream ss;
			ss << "WindowResizeEvent: " << m_Width << ", " << m_Height;
			return ss.str();
		}
		EVENT_CLASS_TYPE(WindowResize)
		EVENT_CLASS_CATEGORY(EventCategoryApplication)
	private:
		unsigned int m_Width, m_Height;
	};
	// Fired when the user requests the window to close.
	class STONESWORD_API WindowCloseEvent : public Event
	{
	public:
		WindowCloseEvent() = default;
		EVENT_CLASS_TYPE(WindowClose)
		EVENT_CLASS_CATEGORY(EventCategoryApplication)
	};
	// Fired once per application tick.
	class STONESWORD_API AppTickEvent : public Event
	{
	public:
		AppTickEvent() = default;
		EVENT_CLASS_TYPE(AppTick)
		EVENT_CLASS_CATEGORY(EventCategoryApplication)
	};
	// Fired once per application update step.
	class STONESWORD_API AppUpdateEvent : public Event
	{
	public:
		AppUpdateEvent() = default;
		EVENT_CLASS_TYPE(AppUpdate)
		EVENT_CLASS_CATEGORY(EventCategoryApplication)
	};
	// Fired once per render pass.
	class STONESWORD_API AppRenderEvent : public Event
	{
	public:
		AppRenderEvent() = default;
		EVENT_CLASS_TYPE(AppRender)
		EVENT_CLASS_CATEGORY(EventCategoryApplication)
	};
} | [
"1278323354@qq.com"
] | 1278323354@qq.com |
e08c9eb25af2e62a661b50b1bc8e79264e1d765e | 10ef710dda5acd5206be8ca0f0d6bf7f1c8075b7 | /Segundo cuatrimestre/8A - Duplicar una lista y mas/listaDuplica.h | d060b7de9eba30ef7cb3063fe10baaf2a2aa9a1e | [] | no_license | imartin28/EDA | 0aee47a11c148a7e7a2742e3498c317229f45d49 | f989f0e96ced60dfb3514b60e33767519481ab3b | refs/heads/master | 2020-08-02T06:41:38.426057 | 2019-09-27T08:01:59 | 2019-09-27T08:01:59 | 211,266,446 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,680 | h | //
// lista_duplica.h
// EDA_2
//
// Created by Irene Martin berlanga on 15/02/2019.
// Copyright © 2019 Irene Martin berlanga. All rights reserved.
//
#ifndef lista_duplica_h
#define lista_duplica_h
#include <iostream>
#include <iomanip>
#include <fstream>
#include "queue_eda.h"
// Singly linked queue (from queue_eda.h) extended with printing and an
// in-place node-duplication operation.
template <class T>
class listaDuplica : public queue<T> {
    using Nodo = typename queue<T>::Nodo; // expose the base node type here
public:
    // Prints the elements front-to-back separated by single spaces, with
    // no trailing space and no newline. Defaults to std::cout.
    void print(std::ostream & o = std::cout) const {
        if(!this->empty())
        {
            Nodo *actual = this->prim;
            while(actual->sig != nullptr)
            {
                o << actual->elem;
                o << " ";
                actual = actual->sig;
            }
            o << actual->elem;
        }
    }
    // Duplicates every node in place: each element ends up appearing
    // twice, the copy inserted immediately after the original
    // (a b -> a a b b). Runs in O(n); updates the inherited element
    // counter and tail pointer. (Dead commented-out code removed.)
    void duplica() {
        if(this->empty()){
            return;
        }
        Nodo *nodo_actual = this->prim;
        Nodo *nodo_nuevo = nullptr;
        while(nodo_actual != nullptr){
            // Insert the copy right behind the node it duplicates, then
            // jump over it to the next original node.
            nodo_nuevo = new Nodo(nodo_actual->elem, nodo_actual->sig);
            nodo_actual->sig = nodo_nuevo;
            nodo_actual = nodo_nuevo->sig;
            ++this->nelems;
        }
        // The last node created is the duplicate of the old tail.
        this->ult = nodo_nuevo;
    }
};
// Stream insertion for listaDuplica: delegates to print() and returns the
// stream for chaining.
template <class T>
inline std::ostream & operator<<(std::ostream & out, listaDuplica<T> const& lista)
{
    lista.print(out);
    return out;
}
#endif /* lista_duplica_h */
| [
"imart02@ucm.es"
] | imart02@ucm.es |
6c7ebc66296005eab8c9d6352eb790f8ee5ad069 | 4434644992afc0ced6a255d1701464fb82e19f91 | /w_autogyro/lcd_manager.cpp | f833726a09f1d01216717004803ee45950ba7691 | [] | no_license | albaniac/Sergey_gyro_mega128_mpu6050_lcd | 307d1cd205d71e5d7a8f95f91e4e67e52ced19e4 | 5284bc287306d2ec693ab4744d7b43ca79a824bb | refs/heads/master | 2020-04-10T11:26:34.632573 | 2017-12-29T13:47:40 | 2017-12-29T13:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,897 | cpp | #include "lcd_manager.h"
#include <stdio.h>
#include <string.h>
//=======================================================================================
// Default constructor: intentionally empty -- all state (row templates,
// refresh flags, LCD hardware) is set up later in Init().
LCDManager::LCDManager(){
};
//=======================================================================================
// Writes the X/Y readout into the middle of the two top LCD rows
// ("X:%4d" at column 5 of row 0, "Y:%4d" at column 5 of row 1).
// NOTE(review): sprintf terminates with a NUL at column 11 that nothing
// overwrites afterwards -- confirm LCDWriteString tolerates embedded NULs
// (the rows otherwise use '*' as their end marker, see Init()).
void LCDManager::SetXY (double x, double y){
	sprintf(&str_[0][5], "X:%4d", (int)x);
	sprintf(&str_[1][5], "Y:%4d", (int)y);
};
//=======================================================================================
// Front-left wheel duty, shown as "NNN%" at the start of row 0. The '%'
// written at index 3 deliberately overwrites sprintf's terminating NUL so
// the rest of the pre-seeded row template stays intact.
void LCDManager::SetLeftFirstWeelVal (double val){
	sprintf(&str_[0][0], "%3d", (int)val);
	str_[0][3] = '%';
};
//=======================================================================================
// Rear-left wheel duty, "NNN%" at the start of row 1 (same NUL trick).
void LCDManager::SetLeftLastWeelVal (double val){
	sprintf(&str_[1][0], "%3d", (int)val);
	str_[1][3] = '%';
};
//=======================================================================================
// Front-right wheel duty, "NNN%" at column 12 of row 0.
void LCDManager::SetRightFirstWeelVal (double val){
	sprintf(&str_[0][12], "%3d", (int)val);
	str_[0][15] = '%';
};
//=======================================================================================
// Rear-right wheel duty, "NNN%" at column 12 of row 1.
// NOTE(review): name breaks the convention twice (lower-case 's', and
// "Weel" for "Wheel" throughout) -- renaming would require touching the
// header and all callers.
void LCDManager::setRightLastWeelVal (double val){
	sprintf(&str_[1][12], "%3d", (int)val);
	str_[1][15] = '%';
};
//=======================================================================================
// Puts the mode name (plus the '*' end marker) at the start of row 2 and
// flags the row for redraw on the next Refresh().
// NOTE(review): sprintf is unbounded -- a name longer than the row buffer
// overruns str_; confirm callers only pass short fixed names.
void LCDManager::SetModeName (const char* mode_name){
	is_need_refresh_[2] = true;
	sprintf(&str_[2][0], "%s*", mode_name);
};
//=======================================================================================
// Blanks row 2's buffer.
// NOTE(review): does not set is_need_refresh_[2], so the cleared row is
// not pushed to the display until something else flags row 2 -- confirm
// this is intended.
void LCDManager::ClearModeName (){
	memcpy(&str_[2][0], clear_str_, LCD_STR_MAX_LEN);
};
//=======================================================================================
// Puts the menu name at the start of row 3 and flags it for redraw.
// Note: SetErrorText() writes the same row, so the two overwrite each
// other.
void LCDManager::SetMenuName (const char* menu_name){
	is_need_refresh_[3] = true;
	sprintf(&str_[3][0], "%s*", menu_name);
};
//=======================================================================================
// Blanks row 3's buffer (same missing-refresh-flag caveat as
// ClearModeName()).
void LCDManager::ClearMenuName (){
	memcpy(&str_[3][0], clear_str_, LCD_STR_MAX_LEN);
};
//=======================================================================================
// Puts the error text on row 3 (shared with the menu name) and flags it
// for redraw.
void LCDManager::SetErrorText (const char* err_text){
	is_need_refresh_[3] = true;
	sprintf(&str_[3][0], "%s*", err_text);
};
//=======================================================================================
// Blanks row 3's buffer (same missing-refresh-flag caveat as above).
void LCDManager::ClearErrorText (){
	memcpy(&str_[3][0], clear_str_, LCD_STR_MAX_LEN);
}
//=======================================================================================
// Pushes the row buffers to the LCD. Rows 0 and 1 (the wheel/position
// readouts) are rewritten unconditionally; rows 2 and 3 only when their
// refresh flag is set, in which case the row is blanked first and the
// flag is cleared.
void LCDManager::Refresh(){
	//LCD_Clear_lcd();
	//LCD_Write_String(0, 0, clear_str_);
	//LCD_Write_String(1, 0, clear_str_);
	//LCD_Write_String(2, 0, clear_str_);
	//LCD_Write_String(3, 0, clear_str_);
	LCDWriteString(0, 0, &str_[0][0]);
	LCDWriteString(1, 0, &str_[1][0]);
	if (is_need_refresh_[2]){
		LCDWriteString(2, 0, clear_str_);
		LCDWriteString(2, 0, &str_[2][0]);
		is_need_refresh_[2] = false;
	}
	if (is_need_refresh_[3]){
		LCDWriteString(3, 0, clear_str_);
		LCDWriteString(3, 0, &str_[3][0]);
		is_need_refresh_[3] = false;
	}
};
//=======================================================================================
// Seeds the row buffers with their fixed-layout templates (the '*' at the
// end presumably marks end-of-row for LCDWriteString -- the setters rely
// on the surrounding template characters staying in place), clears all
// refresh flags, and brings up the LCD hardware with backlight on.
// NOTE(review): the template literals are 17 chars + NUL; memcpy copies
// LCD_STR_MAX_LEN bytes -- confirm the constant matches the template
// length.
void LCDManager::Init(){
	memcpy(clear_str_ , "                *", LCD_STR_MAX_LEN);
	memcpy(&str_[0][0], "111% X:-179 222%*", LCD_STR_MAX_LEN);
	memcpy(&str_[1][0], "333% Y:-123 444%*", LCD_STR_MAX_LEN);
	memcpy(&str_[2][0], "                *", LCD_STR_MAX_LEN);
	memcpy(&str_[3][0], "                *", LCD_STR_MAX_LEN);
	for (unsigned char i = 0; i < LCD_STR_COUNT; i++){
		is_need_refresh_[i] = false;
	}
	LCDInit();
	LCDLigth(LCD_LIGHT_ON);
};
//=======================================================================================
// Single file-local instance, exported to the rest of the firmware
// through the g_lcd_manager_p pointer.
static LCDManager g_lcd_manager;
LCDManager * g_lcd_manager_p = &g_lcd_manager;
//=======================================================================================
| [
"blobby@radico.ru"
] | blobby@radico.ru |
795674d1fbc85a65e3222945bf988ba5f7a7e09b | 66330f7a1ff0b8447b4245474ab4de48727fd1c5 | /libs/multiprecision/plots/cpp_bin_float_tgamma_errors.cpp | 5e3dd11051ab81908028e9c1a12059142a4a9191 | [
"MIT"
] | permissive | everscalecodes/knapsack-snark | fd3cc6155125ae6ff0fc56aa979f84ba6a8c49c7 | 633515a13906407338a81b9874d964869ddec624 | refs/heads/main | 2023-07-18T06:05:22.319230 | 2021-08-31T16:10:16 | 2021-08-31T16:10:16 | 447,180,824 | 0 | 1 | MIT | 2022-01-12T10:53:21 | 2022-01-12T10:53:20 | null | UTF-8 | C++ | false | false | 2,044 | cpp |
// (C) Copyright Nick Thompson 2020.
// (C) Copyright John Maddock 2020.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <iostream>
#include <boost/math/tools/ulps_plot.hpp>
#include <boost/core/demangle.hpp>
#include <nil/crypto3/multiprecision/mpfr.hpp>
#include <nil/crypto3/multiprecision/cpp_bin_float.hpp>
using boost::math::tools::ulps_plot;
// Generate an SVG ULP-error plot for tgamma evaluated at cpp_bin_float_50
// precision, using mpfr_float_100 as the high-precision reference.
int main() {
    using PreciseReal = nil::crypto3::multiprecision::mpfr_float_100;
    using CoarseReal = nil::crypto3::multiprecision::cpp_bin_float_50;
    // (Removed: an unused `no_promote_policy` boost::math::policies typedef
    // that was never referenced anywhere in this function.)
    auto ai_coarse = [](CoarseReal const& x)->CoarseReal {
        return tgamma(x);
    };
    auto ai_precise = [](PreciseReal const& x)->PreciseReal {
        return tgamma(x);
    };
    std::string filename = "cpp_bin_float_tgamma.svg";
    int samples = 100000;
    // How many pixels wide do you want your .svg?
    int width = 700;
    // Near a root, we have unbounded relative error. So for functions with roots, we define an ULP clip:
    PreciseReal clip = 400;
    // Should we perturb the abscissas?
    bool perturb_abscissas = false;
    auto plot = ulps_plot<decltype(ai_precise), PreciseReal, CoarseReal>(ai_precise, CoarseReal(-20), CoarseReal(200), samples, perturb_abscissas);
    // Note the argument chaining:
    plot.clip(clip).width(width);
    plot.background_color("white").font_color("black");
    // Sometimes it's useful to set a title, but in many cases it's more useful to just use a caption.
    //std::string title = "Airy Ai ULP plot at " + boost::core::demangle(typeid(CoarseReal).name()) + " precision";
    //plot.title(title);
    plot.vertical_lines(6);
    plot.add_fn(ai_coarse);
    // You can write the plot to a stream:
    //std::cout << plot;
    // Or to a file:
    plot.write(filename);
}
| [
"curryrasul@gmail.com"
] | curryrasul@gmail.com |
f3304244e588c20c1a41348dec3ea815903f8c47 | b4925f354c0236406d07cdab4e47667f12f58067 | /distribuidos/VisualizadordeInterfaces/SocketDatagrama.cpp | 75fe0e82a65cba0b237977171ec5968562791e57 | [
"MIT"
] | permissive | MauricioCerv10/schoolhistory | 93be0e0adc57a79e5feea4665cac92d925edd9d8 | 7e5ef296909cc1e6daa54846e595b299e4be4e6e | refs/heads/master | 2023-07-01T16:14:25.918824 | 2021-07-30T03:12:15 | 2021-07-30T03:12:15 | 390,902,375 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,526 | cpp | #include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <strings.h>
#include <stdio.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>
#include "SocketDatagrama.h"
#include "PaqueteDatagrama.h"
using namespace std;
// Create a UDP socket bound to local port 7200 on all interfaces, and
// zero/prepare the foreign-address structure for later send/receive.
// `a` is forwarded as the protocol argument of socket() (normally 0).
// NOTE(review): the return values of socket() and bind() are not checked,
// so a failed setup goes unnoticed — confirm whether that is acceptable.
// NOTE(review): the local port is hard-coded to 7200.
SocketDatagrama::SocketDatagrama(int a){
    s=socket(AF_INET, SOCK_DGRAM, a);
    bzero((char *)&direccionLocal, sizeof(direccionLocal));
    direccionLocal.sin_family = AF_INET;
    direccionLocal.sin_addr.s_addr = INADDR_ANY;
    direccionLocal.sin_port = htons(7200);
    bind(s, (struct sockaddr *)&direccionLocal, sizeof(direccionLocal));
    bzero((char *)&direccionForanea, sizeof(direccionForanea));
    direccionForanea.sin_family = AF_INET;
}
// Close the underlying socket descriptor on destruction (RAII-style cleanup).
SocketDatagrama::~SocketDatagrama(){
    close(s);
}
// Block until a datagram arrives, then fill `p` with its payload plus the
// sender's IP address and port.
// NOTE(review): the recvfrom() result is ignored and the function always
// returns 0, so callers cannot distinguish success, truncation or failure.
// NOTE(review): the port is stored exactly as read from sin_port, i.e.
// still in network byte order (no ntohs) — confirm the consumer expects that.
int SocketDatagrama::recibe(PaqueteDatagrama &p){
    char dat[p.obtieneLongitud()];  // VLA sized by the packet's declared capacity
    unsigned int clileng = sizeof(direccionForanea);
    recvfrom(s, dat, p.obtieneLongitud()*sizeof(char), 0, (struct sockaddr *) &direccionForanea, &clileng);
    p.inicializaDatos(dat);
    char str[16];  // large enough for a dotted-quad IPv4 string + NUL
    inet_ntop(AF_INET, &direccionForanea.sin_addr.s_addr, str, 16);
    p.inicializaIp(str);
    p.inicializaPuerto(direccionForanea.sin_port);
    return 0;
}
// Send the payload of `p` to the destination IP/port stored inside it.
// Always returns 0; the sendto() result is not checked.
int SocketDatagrama::envia(PaqueteDatagrama &p){
    inet_pton(AF_INET, p.obtieneDireccion(), &direccionForanea.sin_addr);
    direccionForanea.sin_port = htons(p.obtienePuerto());
    sendto(s, p.obtieneDatos(), p.obtieneLongitud() * sizeof(char), 0, (struct sockaddr *) &direccionForanea, sizeof(direccionForanea));
    return 0;
} | [
"mauriciocervantesdelgadillo10@gmail.com"
] | mauriciocervantesdelgadillo10@gmail.com |
d28903d2fd712f64bec3d7a33765e3799a3513bf | d324dafd7b383d1fccac2e6f954d2c35264d84d8 | /multigpu_graphics_attila/src/trace/ACD/Implementation/ACDTexture3DImp.h | f21ec86142a602dfd6e55409b70c45e13349d39c | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"MIT"
] | permissive | flair2005/Scalable-Multi-GPU-Rendering | 847efbaddd7c091c7bea20ebec1f22fcd5d80022 | 1fe0fa74cee5891424db73654551335a7fd5380c | refs/heads/main | 2023-02-06T07:57:02.429875 | 2020-12-29T01:06:10 | 2020-12-29T01:06:10 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 4,426 | h | /**************************************************************************
*
* Copyright (c) 2002 - 2011 by Computer Architecture Department,
* Universitat Politecnica de Catalunya.
* All rights reserved.
*
* The contents of this file may not be disclosed to third parties,
* copied or duplicated in any form, in whole or in part, without the
* prior permission of the authors, Computer Architecture Department
* and Universitat Politecnica de Catalunya.
*
*/
#ifndef ACD_TEXTURE3D_IMP
#define ACD_TEXTURE3D_IMP
#include "ACDTexture3D.h"
#include "TextureMipmapChain.h"
#include "TextureMipmap.h"
#include "MemoryObject.h"
#include <set>
namespace acdlib
{
/**
 * Concrete 3D-texture implementation of the ACDTexture3D interface.
 *
 * Also derives from MemoryObject so that the texture's mipmap storage can
 * be exposed to the memory-management layer (memoryData / stringType).
 * Mipmaps are kept in a TextureMipmapChain; levels currently mapped via
 * map()/unmap() are tracked in _mappedMips.
 */
class ACDTexture3DImp : public ACDTexture3D, public MemoryObject
{
public:

    ACDTexture3DImp();

    /// Methods inherited from ACDResource interface
    virtual void setUsage(ACD_USAGE usage);
    virtual ACD_USAGE getUsage() const;
    virtual void setMemoryLayout(ACD_MEMORY_LAYOUT layout);
    virtual ACD_MEMORY_LAYOUT getMemoryLayout() const;
    virtual ACD_RESOURCE_TYPE getType() const;
    virtual void setPriority(acd_uint prio);
    virtual acd_uint getPriority() const;
    virtual acd_uint getSubresources() const;
    virtual acd_bool wellDefined() const;

    /// Methods inherited form ACDTexture interface
    virtual acd_uint getBaseLevel() const;
    virtual acd_uint getMaxLevel() const;
    virtual void setBaseLevel(acd_uint minMipLevel);
    virtual void setMaxLevel(acd_uint maxMipLevel);
    virtual acd_uint getSettedMipmaps();

    /// Methods inherited from ACDTexture2D interface
    virtual acd_uint getWidth(acd_uint mipmap) const;
    virtual acd_uint getHeight(acd_uint mipmap) const;
    virtual acd_uint getDepth(acd_uint mipLevel) const;
    virtual ACD_FORMAT getFormat(acd_uint mipmap) const;
    virtual acd_bool isMultisampled(acd_uint mipmap) const;
    virtual acd_uint getSamples(acd_uint mipmap) const;
    virtual acd_uint getTexelSize(acd_uint mipmap) const;

    // Define (or replace) a mip level from caller-supplied texel data.
    virtual void setData( acd_uint mipLevel,
                          acd_uint width,
                          acd_uint height,
                          acd_uint depth,
                          ACD_FORMAT format,
                          acd_uint rowPitch,
                          const acd_ubyte* srcTexelData,
                          acd_uint texSize,
                          acd_bool preloadData = false);

    // Define a mip level's geometry/format without initial data
    // (multisampled variant).
    virtual void setData(acd_uint mipLevel,
                         acd_uint width,
                         acd_uint height,
                         acd_uint depth,
                         acd_bool multisampling,
                         acd_uint samples,
                         ACD_FORMAT format);

    // Update a sub-volume of an already-defined mip level.
    virtual void updateData( acd_uint mipLevel,
                             acd_uint x,
                             acd_uint y,
                             acd_uint z,
                             acd_uint width,
                             acd_uint height,
                             acd_uint depth,
                             ACD_FORMAT format,
                             acd_uint rowPitch,
                             const acd_ubyte* srcTexelData,
                             acd_bool preloadData = false);

    // Map a mip level for direct CPU access; pitches are returned through
    // the out-parameters.
    virtual acd_bool map( acd_uint mipLevel,
                          ACD_MAP mapType,
                          acd_ubyte*& pData,
                          acd_uint& dataRowPitch,
                          acd_uint& dataPlanePitch);

    virtual acd_bool unmap(acd_uint mipLevel, acd_bool preloadData = false);

    /// Method required by MemoryObject derived classes
    virtual const acd_ubyte* memoryData(acd_uint region, acd_uint& memorySizeInBytes) const;

    virtual const acd_char* stringType() const;

    // Debug helper: dump one mipmap (identified by region) to a file.
    void dumpMipmap(acd_uint region, acd_ubyte* mipName);

    const acd_ubyte* getData(acd_uint mipLevel, acd_uint& memorySizeInBytes, acd_uint& rowPitch, acd_uint& planePitch) const;

    virtual void setSamplerID(acd_int id) { samplerID = id; }
    virtual acd_int getSamplerID() const { return samplerID; }

private:

    // Fetch a mipmap or fail with a diagnostic naming the calling method.
    const TextureMipmap* _getMipmap(acd_uint mipLevel, const acd_char* methodStr) const;

    acd_uint _baseLevel;                 // smallest usable mip level
    acd_uint _maxLevel;                  // largest usable mip level
    TextureMipmapChain _mips;            // storage for all defined mip levels
    std::set<acd_uint> _mappedMips;      // levels currently mapped via map()
    ACD_MEMORY_LAYOUT layout;
    acd_int samplerID;

}; // class ACDTexture3DImp
}
#endif // ACD_TEXTURE3D_IMP
| [
"renxiaowei66@gmail.com"
] | renxiaowei66@gmail.com |
394a805fa897aa08cd8375de5bebd9aad769eb0b | 3c1f699c1da70d1b5f3075d74887129acbedb949 | /include/BigUint.hpp | b6145521de07624d7fb4af8df074ddbe572f01e2 | [
"MIT"
] | permissive | peterzuger/BigInt | 8acebad47bbbb19a608663398563cf4ce273f17e | 2c106cbb24db5728f34d6e7748f423e9d4301b65 | refs/heads/master | 2021-05-18T01:56:55.779994 | 2020-07-19T08:42:15 | 2020-07-19T08:42:15 | 251,055,820 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 6,727 | hpp | /**
* @file BigInt/include/BigUint.hpp
* @author Peter Züger
* @date 29.03.2020
* @brief Library for representing big integers
*
* The MIT License (MIT)
*
* Copyright (c) 2020 Philippe Peter
* Copyright (c) 2020 Peter Züger
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef BIGINT_BIGUINT_HPP
#define BIGINT_BIGUINT_HPP
#include <array>
#include <climits>
#include <istream>
#include <limits>
#include <ostream>
namespace Big{
    // Fixed-width unsigned big integer of N bits, stored as an array of
    // unsigned int limbs. N must be a whole number of limbs (enforced by
    // the static_assert below). Most members are declared but not yet
    // defined in this header.
    template<std::size_t N>
    class BigUint{
        static_assert(!(N % (sizeof(unsigned int) * CHAR_BIT)),
                      "Big::BigUint: N must be a multiple of 'sizeof(unsigned int) * CHAR_BIT'");

        // Limb storage, N / bits-per-limb elements.
        std::array<unsigned int, N / (sizeof(unsigned int) * CHAR_BIT)> data;

    public:
        constexpr BigUint() = default;
        constexpr BigUint(const BigUint& other)noexcept;
        constexpr BigUint(BigUint&& other)noexcept;
        constexpr BigUint(unsigned long long other)noexcept;
        constexpr BigUint(float other)noexcept;
        constexpr BigUint(double other)noexcept;
        constexpr BigUint(long double other)noexcept;

        // Converting constructor from a BigUint of a different width.
        template<std::size_t M>
        constexpr BigUint(const BigUint<M>& other)noexcept;

        BigUint& operator=(const BigUint& other)noexcept;
        BigUint& operator=(BigUint&& other)noexcept;
        BigUint& operator=(unsigned long long other)noexcept;
        BigUint& operator=(float other)noexcept;
        BigUint& operator=(double other)noexcept;
        BigUint& operator=(long double other)noexcept;

        void swap(BigUint& other)noexcept;

        // Compound arithmetic / bitwise operators; the free binary
        // operators below are implemented in terms of these.
        constexpr BigUint& operator~()noexcept;
        constexpr BigUint& operator+=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator-=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator*=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator/=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator%=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator^=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator&=(const BigUint& rhs)noexcept;
        constexpr BigUint& operator|=(const BigUint& rhs)noexcept;
        template <class IntType>constexpr BigUint& operator<<=(IntType shift)noexcept;
        template <class IntType>constexpr BigUint& operator>>=(IntType shift)noexcept;

        constexpr BigUint& operator++()noexcept;
        constexpr BigUint operator++(int)noexcept;
        constexpr BigUint& operator--()noexcept;
        constexpr BigUint operator--(int)noexcept;

        // Stream operators need access to the private limb array.
        template<std::size_t M>
        friend std::ostream& operator<<(std::ostream& os, const BigUint<M>& obj);
        template<std::size_t M>
        friend std::istream& operator>>(std::istream& is, BigUint<M>& obj);
    };
    // Stream insertion — placeholder implementation.
    // TODO: actually serialize obj into os; currently nothing is written.
    template<std::size_t N>
    std::ostream& operator<<(std::ostream& os, const BigUint<N>& obj){
        // write obj to stream
        return os;
    }

    // Stream extraction — placeholder implementation.
    // TODO: parse a value into obj; the condition below is hard-wired to
    // true, so the stream is always put into the fail state.
    template<std::size_t N>
    std::istream& operator>>(std::istream& is, BigUint<N>& obj){
        // read obj from stream
        if( /* T could not be constructed */ true )
            is.setstate(std::ios::failbit);
        return is;
    }

    // ADL-found swap, forwarding to the member swap.
    template<std::size_t N>
    void swap(BigUint<N>& x, BigUint<N>& y)noexcept{
        x.swap(y);
    }
    // Binary arithmetic / bitwise operators.
    // Each takes lhs by value so the corresponding compound assignment
    // mutates a copy, which is then returned.
    template<std::size_t N>
    constexpr BigUint<N> operator+(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs += rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator-(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs -= rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator*(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs *= rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator/(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs /= rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator%(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs %= rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator^(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs ^= rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator&(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs &= rhs;
    }
    template<std::size_t N>
    constexpr BigUint<N> operator|(BigUint<N> lhs, const BigUint<N>& rhs)noexcept{
        return lhs |= rhs;
    }
template <std::size_t N, class IntType>
constexpr BigUint<N> operator<<(const BigUint<N>& lhs, IntType shift)noexcept{
return lhs <<= shift;
}
template <std::size_t N, class IntType>
constexpr BigUint<N> operator>>(const BigUint<N>& lhs, IntType shift)noexcept{
return lhs >>= shift;
}
    // Relational operators. Only operator< and operator== carry real
    // logic; the other four are derived from them in the canonical way.
    // NOTE(review): operator< and operator== are unimplemented stubs that
    // always return false — every comparison except != therefore yields
    // false, and != always yields true. Confirm before relying on these.
    template<std::size_t N>
    constexpr bool operator< (const BigUint<N>& lhs, const BigUint<N>& rhs) noexcept {
        return false; // TODO
    }
    template<std::size_t N>
    constexpr bool operator> (const BigUint<N>& lhs, const BigUint<N>& rhs) noexcept {
        return rhs < lhs;
    }
    template<std::size_t N>
    constexpr bool operator<=(const BigUint<N>& lhs, const BigUint<N>& rhs) noexcept {
        return !(lhs > rhs);
    }
    template<std::size_t N>
    constexpr bool operator>=(const BigUint<N>& lhs, const BigUint<N>& rhs) noexcept {
        return !(lhs < rhs);
    }
    template<std::size_t N>
    constexpr bool operator==(const BigUint<N>& lhs, const BigUint<N>& rhs) noexcept {
        return false; // TODO: compare limb arrays (needs friendship or an accessor)
    }
    template<std::size_t N>
    constexpr bool operator!=(const BigUint<N>& lhs, const BigUint<N>& rhs) noexcept {
        return !(lhs == rhs);
    }
};
#endif /* BIGINT_BIGUINT_HPP */
| [
"zueger.peter@icloud.com"
] | zueger.peter@icloud.com |
025ddbee6d4849149e13c1bbb8d7c6eed8b97873 | b0bdd09dbbaa05bcfb1c02263325188c4ba9c588 | /src/SLR1.cpp | 924b708c221a2aed4dfa51852c9bc8587d140a23 | [] | no_license | Nightbot1448/SLR1 | b26068ed3a49b13b8282dbb91cb8ebd605417686 | 20fa5523d192a19cdb45aef16620b6b41f34a5ce | refs/heads/master | 2021-07-19T06:11:31.784183 | 2021-01-23T12:40:30 | 2021-01-23T12:40:30 | 235,463,065 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,113 | cpp | #include "SLR_parser.h"
#include "SLR_base.h"
#include <iostream>
#include <fstream>
#ifdef __linux__
#include <getopt.h>
#endif
// Demo driver for the SLR(1) parser on a small arithmetic grammar.
//
// Options (Linux only — option parsing is compiled out on other systems,
// so the built-in defaults are used there):
//   -p / --print_table   print the SLR(1) parsing table while building it
//   -t / --print_tree    print the parse tree for each parsed string
//   -i / --input STR     parse STR instead of the built-in default
//   -f / --file PATH     parse every whitespace-separated token in PATH
int main(int argc, char **argv)
{
    bool print_parsing_table = false;
    bool print_tree = false;
    std::string input_string("-n+(n*n--n/n)+n");   // default expression to parse
    std::string input_file;
    const char* short_options = "pti:f:";
    int res=0;
    int option_index;
#ifdef __linux__
    const struct option long_options[] = {
        {"file",required_argument,NULL,'f'},
        {"input",required_argument,NULL,'i'},
        {"print_tree",no_argument,NULL,'t'},
        {"print_table",no_argument,NULL,'p'},
        {NULL,0,NULL,0}
    };
    while ((res=getopt_long(argc,argv,short_options,
            long_options,&option_index))!=-1){
        switch(res){
            case 'p': {
                print_parsing_table = true;
                break;
            }
            case 't': {
                print_tree = true;
                break;
            }
            case 'i': {
                input_string = optarg;
                break;
            }
            case 'f': {
                input_file = optarg;
                break;
            }
            case '?': default: {
                std::cout << "unknown option" << std::endl;
                break;
            }
        }
    }
#endif
    // Expression grammar:
    //   S -> E;  E -> T | E+T | E-T;  T -> F | T*F | T/F;  F -> (E) | -F | n
    Grammar grammar;
    grammar.emplace('S', "E");
    grammar.emplace('E', "T");
    grammar.emplace('E', "E+T");
    grammar.emplace('E', "E-T");
    grammar.emplace('T', "F");
    grammar.emplace('T', "T*F");
    grammar.emplace('T', "T/F");
    grammar.emplace('F', "(E)");
    grammar.emplace('F', "-F");
    grammar.emplace('F', "n");
    SLR1_parser parser;
    try{
        parser = SLR1_parser(grammar, print_parsing_table);
    }
    catch(std::logic_error &e){
        // NOTE(review): on failure we only report the error and then keep
        // using the default-constructed parser below — confirm intended.
        std::cout << e.what() << std::endl;
    }
    if (input_file.empty()) {
        std::cout << "Input string: " << input_string << std::endl
                  << "Result      : "<< (parser.parse(input_string, print_tree) ? "accepted" : "reject") << std::endl;
    }
    else {
        std::ifstream in(input_file, std::ios::in);
        if (in.is_open()) {
            std::string str;
            // Parse each whitespace-separated token independently.
            while (in >> str) {
                std::cout << "Input string: " << str << std::endl
                          << "Result      : " << (parser.parse(str, print_tree) ? "accepted" : "reject")
                          << std::endl << "---" << std::endl;
            }
        }
        else{
            // NOTE(review): exits with status 0 (success) even though the
            // file could not be opened — confirm intended.
            std::cout << "file wasn't open; exiting" << std::endl;
            return 0;
        }
    }
    return 0;
}
| [
"night1337bot@gmail.com"
] | night1337bot@gmail.com |
7c0ba1339830b09e5890de8a9ea60307d9c1aa35 | 02c506346de40061bc7bf3cc4873bbb19581606c | /client/examonline/scorepaper.h | c379bac39a81426d92e1d91351048eeb197a741a | [] | no_license | 787028221/exam | f95327f56e5018257eb16fbe2ad3d869115c2bcc | 7203a851a8bab4fe595b093215fe91a8eed42696 | refs/heads/master | 2019-01-20T07:14:55.384154 | 2016-05-21T13:34:36 | 2016-05-21T13:34:58 | 57,008,066 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 359 | h | #ifndef SCOREPAPER_H
#define SCOREPAPER_H
#include <QDialog>
#include<QtNetwork>
#include<QtNetwork/QTcpSocket>
namespace Ui {
class scorepaper;
}
// Dialog that displays a scored exam paper; the widgets come from the
// Qt Designer form Ui::scorepaper.
class scorepaper : public QDialog
{
    Q_OBJECT

public:
    explicit scorepaper(QWidget *parent = 0);
    ~scorepaper();

private:
    Ui::scorepaper *ui;   // generated designer form
    void showPaper();     // populate the dialog with the paper's content
};
#endif // SCOREPAPER_H
| [
"787028221@qq.com"
] | 787028221@qq.com |
0dd7e9f882b8a7bf3ebb6cbadae295070e560d55 | 80bee850d1197772d61e05d8febc014e9980d7c0 | /Addons/MostWanted/MostWanted_Client/Notifications.hpp | f3de98f67f3b232f5bdff8face5cd591b28655f8 | [
"MIT"
] | permissive | x-cessive/Exile | 0443bb201bda31201fadc9c0ac80823fb2d7a25d | c5d1f679879a183549e1c87d078d462cbba32c25 | refs/heads/master | 2021-11-29T08:40:00.286597 | 2021-11-14T17:36:51 | 2021-11-14T17:36:51 | 82,304,207 | 10 | 8 | null | 2017-04-11T14:44:21 | 2017-02-17T14:20:22 | SQF | UTF-8 | C++ | false | false | 1,248 | hpp | class MostWanted
{
displayName = "MostWanted";
	// Broadcast when a client places a new bounty.
	// %11 is the poptab amount (argument MostWanted_BountyAmount).
	class NewBounty
	{
		displayName = "New Bounty";
		description = "%3INMATES!%4%1A perspective client has set a bounty on a fellow inmate.%1The client is offering <t color='#ff0000'>%11</t> poptabs for their head.%1Your local Office Trader has the details and the contract if you choose to accept it.";
		image = "";
		noImage = true;
		tip = "";
		arguments[] = {
			"MostWanted_BountyAmount"
		};
	};
	// Sent to the hunter after completing their contract.
	// %11 is the payout amount (argument MostWanted_SuccessfulKill).
	class SuccessfulKill
	{
		displayName = "Kill Confirmed";
		description = "%3Ouch! That looked like that hurt.%4%1You have successfully completed your bounty contract.%1Talk to your local Office Trader to collect your bounty of <t color='#ff0000'>%11</t>.";
		image = "";
		noImage = true;
		tip = "";
		arguments[] = {
			"MostWanted_SuccessfulKill"
		};
	};
	// Broadcast when a bounty is claimed and the target's contracts clear.
	// %11 is the target's name (argument MostWanted_BountyName).
	class BountyClaimed
	{
		displayName = "Bounty Claimed";
		description = "%3Bounty Claimed!%4%1One very lucky inmate has claimed the bounty on <t color='#ff0000'>%11</t>.%1Contracts on this inmate have been cleared!%1Please visit your local Office Trader to get another contract.";
		image = "";
		noImage = true;
		tip = "";
		arguments[] = {
			"MostWanted_BountyName"
		};
	};
};
| [
"mrsage@xcsv.tv"
] | mrsage@xcsv.tv |
d16c73c25ed72c5d8b723bcfe281629015944900 | 1754c9ca732121677ac6a9637db31419d32dbcf1 | /dependencies/libsbml-vs2017-release-32/include/sbml/validator/OverdeterminedValidator.h | 8a965fee5242fbffa7e5ebbaa8e6ecb848cf7d14 | [
"BSD-2-Clause"
] | permissive | sys-bio/Libstructural | 1701e239e3f4f64674b86e9e1053e9c61fe868a7 | fb698bcaeaef95f0d07c010f80c84d2cb6e93793 | refs/heads/master | 2021-09-14T17:54:17.538528 | 2018-05-16T21:12:24 | 2018-05-16T21:12:24 | 114,693,721 | 3 | 1 | null | 2017-12-18T22:25:11 | 2017-12-18T22:25:10 | null | UTF-8 | C++ | false | false | 2,257 | h | /**
* @cond doxygenLibsbmlInternal
*
* @file OverdeterminedValidator.h
* @brief Performs consistency checks on an SBML model
* @author Sarah Keating
*
* <!--------------------------------------------------------------------------
* This file is part of libSBML. Please visit http://sbml.org for more
* information about SBML, and the latest version of libSBML.
*
* Copyright (C) 2013-2017 jointly by the following organizations:
* 1. California Institute of Technology, Pasadena, CA, USA
* 2. EMBL European Bioinformatics Institute (EMBL-EBI), Hinxton, UK
* 3. University of Heidelberg, Heidelberg, Germany
*
* Copyright (C) 2009-2013 jointly by the following organizations:
* 1. California Institute of Technology, Pasadena, CA, USA
* 2. EMBL European Bioinformatics Institute (EMBL-EBI), Hinxton, UK
*
* Copyright (C) 2006-2008 by the California Institute of Technology,
* Pasadena, CA, USA
*
* Copyright (C) 2002-2005 jointly by the following organizations:
* 1. California Institute of Technology, Pasadena, CA, USA
* 2. Japan Science and Technology Agency, Japan
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation. A copy of the license agreement is provided
* in the file named "LICENSE.txt" included with this software distribution and
* also available online as http://sbml.org/software/libsbml/license.html
* ---------------------------------------------------------------------- -->*/
#ifndef OverdeterminedValidator_h
#define OverdeterminedValidator_h
#ifdef __cplusplus
#include <sbml/validator/Validator.h>
#include <sbml/SBMLError.h>
LIBSBML_CPP_NAMESPACE_BEGIN
// Validator specialization that checks whether an SBML model is
// overdetermined; it registers itself under the
// LIBSBML_CAT_OVERDETERMINED_MODEL error category and installs its
// constraint set in init().
class OverdeterminedValidator: public Validator
{
public:

  OverdeterminedValidator () :
    Validator( LIBSBML_CAT_OVERDETERMINED_MODEL ) { }

  virtual ~OverdeterminedValidator () { }

  /**
   * Initializes this Validator with a set of Constraints.
   */
  virtual void init ();
};
LIBSBML_CPP_NAMESPACE_END
#endif /* __cplusplus */
#endif /* OverdeterminedValidator_h */
/** @endcond */
| [
"yosefmaru@gmail.com"
] | yosefmaru@gmail.com |
5951a6b0505e92a3c624a2e0b1d23de2ffa9f810 | e13b9e222a53ff2bd46716c2874944f6a4441ae8 | /tcpclientthread.cpp | b3d9c38540b142d9edd438aba9ca84f8f794645c | [] | no_license | lc641777437/collector_manager | ae6f654c6a8b37fede54b6ad47d9a0e98bf4c0f4 | 6077bb3f7a014d28717583efa4bd5657af9ecf2f | refs/heads/master | 2020-04-15T22:44:06.344773 | 2018-11-18T10:44:54 | 2018-11-18T10:44:54 | 68,067,082 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 15,629 | cpp | #include "mainwindow.h"
#include "ui_mainwindow.h"
#include "tcpclientthread.h"
extern QVector<double> times;
extern QVector<double> value[16]; // value is an array of vectors, i.e. a 2-D array (one series per channel)
extern int samplerate;
/***************** Initialization ************************/
// Construct the TCP client thread.
// `parent` is expected to be the MainWindow (it is downcast and kept for
// UI access); `message` is stored for later use. `id` is the per-second
// sample counter and `last_second` tracks the wall-clock second it
// belongs to (-1 = not started yet).
TcpClientThread::TcpClientThread(QString message, QObject *parent) :
    QThread(parent)
    , message(message)
{
    pMainWindow = static_cast<MainWindow *>(parent);
    id = 0;
    last_second = -1;
}
// Shut the worker thread down before destroying the object.
// Fix: tell the thread's event loop to stop (exit()) BEFORE blocking in
// wait(). The original called wait() first and exit() afterwards, which
// can deadlock: wait() blocks until the event loop finishes, but the loop
// is only asked to finish by exit().
TcpClientThread::~TcpClientThread()
{
    this->exit();
    this->wait();
    qDebug() <<"(tcpclientthread.cpp)"<< this<<"over";
}
/***************** Callback functions ********************/
// Callback: the device's TCP socket connected successfully.
// Stop the connect-timeout timer and update the UI state/button text.
void TcpClientThread::socketconnected()
{
    timer->stop();
    pMainWindow->changeState(STATE_CONNECT_SOCKET);
    pMainWindow->ui->pushButton_SocketConnect->setText("断开连接");
}
// Callback: bytes arrived from the device.
// Two protocol modes share this handler:
//   * before collection starts, the payload is a command reply framed
//     with an 0xA6 0xA6 header (byte 2 = command word);
//   * once collecting, the payload is sample data framed with 0xA5 0xA5,
//     52 bytes per frame (the next frame's header doubles as trailer).
void TcpClientThread::socketreadyread()
{
    // Device reply to a previously issued command word
    if(false == pMainWindow->isStartCollect)
    {
        CommandData.append(socket->readAll());
        while(1){
            if(CommandData.length() <= 2)break;
            // Frame match: look for the 0xA6 0xA6 header at the front
            if(CommandData[0] == 0XA6 && CommandData[1] == 0XA6){
                timer->stop();
                // Reply to "set parameters": confirm with a dialog
                if(CommandData[2] == CMD_SET_PARAM){
                    if(CommandData.length() < 3)return;//length is not enough
                    qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============= 设备对 '设置参数' 的回复: ================="<<endl<<endl;
                    QMessageBox::information(pMainWindow, tr("采集分析软件"), tr("设置参数成功!\n"));
                    if(pMainWindow->samplerateTmp != 0)pMainWindow->samplerate = pMainWindow->samplerateTmp;
                }
                // Reply to "get parameters": the device uploads its stored
                // settings; decode them and open the parameter dialog.
                // NOTE(review): only length >= 10 is checked here, but the
                // decoding below reads up to byte 489 — confirm the device
                // always sends the full record in one burst.
                else if(CommandData[2] == CMD_GET_PARAM){
                    if(CommandData.length() < 10)return;//length is not enough
                    qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============= 设备对 '获取参数' 的回复: ================="<<endl<<endl;
                    /********** project serial number (bytes 10..57) ******/
                    QByteArray proId;
                    for(int i = 10;i<58;i++){
                        proId.append(CommandData.at(i));
                    }
                    QString proIdStr = QString(proId);
                    qDebug()<<"qByte: "<<proId<<endl<<"str: "<<proIdStr<<endl;
                    /********** project name (bytes 58..105) **************/
                    QByteArray proName;
                    for(int i = 58;i<58+48;i++){
                        proName.append(CommandData.at(i));
                    }
                    QString proNameStr = QString(proName);
                    qDebug()<<"qByte: "<<proName<<endl<<"str: "<<proNameStr<<endl;
                    /********** test-point names (16 x 24 bytes) **********/
                    QString testPointNameList[16];
                    for(int j = 0;j<16;j++){
                        QByteArray testPointName;
                        for(int i = 10+48*2 + 24*j;i<10+48*2 + 24*(j+1);i++){
                            testPointName.append(CommandData.at(i));
                        }
                        QString testPointName_str = QString(testPointName);
                        testPointNameList[j] = testPointName_str;
                        qDebug()<<"测试点"<<j<<"原数据: "<<testPointName<<endl<<"测试点名称: "<<testPointName_str<<endl;
                    }
                    // Open the "set parameters" dialog pre-filled with the
                    // values the device reported.
                    Dialog* event = new Dialog(pMainWindow, CommandData[3],
                                               CommandData[4], CommandData[5],
                                               CommandData[6], CommandData[7],
                                               CommandData[8], CommandData[9],
                                               proIdStr,proNameStr,
                                               testPointNameList);
                    event->setModal(true);
                    event->show();
                    qDebug()<<"(tcpclientthread.cpp)参数设置对话框"<<endl;
                }
                // Device acknowledged "start collecting"
                else if(CommandData[2] == CMD_SEND_START){
                    if(CommandData.length() < 3)return;//length is not enough
                    pMainWindow->isStartCollect = true;
                    qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============= 设备对 '开始采集' 的回复: ================="<<endl<<endl;
                    QMessageBox::information(pMainWindow, tr("采集分析软件"), tr("已经开始采集!\n"));
                }
                // Device acknowledged "stop collecting"
                else if(CommandData[2] == CMD_SEND_STOP){
                    if(CommandData.length() < 3)return;//length is not enough
                    qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============= 设备对 '结束采集' 的回复: ================="<<endl<<endl;
                    QMessageBox::information(pMainWindow, tr("采集分析软件"), tr("已经结束采集!\n"));
                }
                // Device acknowledged "restore factory settings"
                else if(CommandData[2] == CMD_SET_FACTORY){
                    if(CommandData.length() < 3)return;//length is not enough
                    qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============= 设备对 '恢复出厂设置' 的回复: ================="<<endl<<endl;
                    QMessageBox::information(pMainWindow, tr("采集分析软件"), tr("设备恢复出厂设置成功!\n"));
                }
                // Device acknowledged "set server"
                else if(CommandData[2] == CMD_SET_SERVER){
                    if(CommandData.length() < 3)return;//length is not enough
                    qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============= 设备对 '设置服务器' 的回复: ================="<<endl<<endl;
                    QMessageBox::information(pMainWindow, tr("采集分析软件"), tr("设置服务器成功!\n"));
                }
                if(CommandData[2] == 0x80){
                    if(CommandData.length() < 3)return;//length is not enough
                    qDebug()<<"(tcpclientthread.cpp)结束包:"<<CommandData<<endl;
                }else if(CommandData[2] == 0x81){
                    if(CommandData.length() < 3)return;//length is not enough
                    qDebug()<<"(tcpclientthread.cpp)开始包:"<<CommandData<<endl;
                }
                else qDebug()<<"(tcpclientthread.cpp)命令字长度: "<<CommandData.length()<<",回复内容:"<<CommandData.toHex();
                // Clear the receive buffer
                CommandData.clear();
                break;
            }
            else
            {
                // Drop the leading junk byte and keep scanning
                CommandData.remove(0,1);
            }
        }
    }
    // Device is streaming sample data
    else{
        int i = 0;
        // Accumulate data: each frame is 4 + 16*3 = 52 bytes
        ReadData.append(socket->readAll());
        qDebug()<<endl<<endl<<"(tcpclientthread.cpp)============== 设备 回复数据: ==========="<<endl;
        // Find the start of data: scan for the 0xA5 0xA5 header
        while(1){//remove the nothing header
            if(i >= ReadData.length()){
                i=0;
                break;
            }
            if(ReadData[i] == 0XA5 && ReadData[i+1] == 0XA5){
                break;
            }
            i++;
        }
        // Drop any unrelated bytes before the header
        if(i != 0){
            ReadData.remove(0,i);
        }
        qDebug()<<"(tcpclientthread.cpp)数据长度: "<<ReadData.length()<<endl;
        // Consume complete frames: bytes 52/53 must be the next frame's
        // header, which validates the current 52-byte frame.
        while(1){
            if(ReadData.length() < 54)break;
            if(ReadData[0] == 0XA5 && ReadData[1] == 0XA5){
                if(ReadData[52] == 0XA5 && ReadData[53] == 0XA5){
                    ADValue_proc(ReadData);
                    ReadData.remove(0,52);
                }else{
                    ReadData.remove(0,2);
                    break;
                }
            }else{
                break;
            }
        }
    }
}
// Callback: the TCP command/connect timeout expired.
// Inform the user, discard any partial command reply and stop the timer.
void TcpClientThread::tcptimeout()
{
    QMessageBox::information(pMainWindow, tr("采集分析软件"), tr(" 设备无响应\n\n请检查设备电源或者网络是否连接正常!\n"));
    CommandData.clear();
    timer->stop();
}
/***************** Worker functions **********************/
// Decode one 52-byte sample frame: convert the 16 channels' raw ADC
// values to voltages and deflections, feed the plotting arrays, and
// append the results to the data files.
void TcpClientThread::ADValue_proc(QByteArray &ReadBuf)
{
    double data[16] = {0};
    double f[16] = {0}; // deflection values
    double V[16] = {0}; // voltage values
    int data1,data2,data3;
    pMainWindow->timeCount++;
    // Work out which channels are selected: frame bytes 2 and 3 are
    // bitmasks (bytes 0/1 are unused); a 0 bit means "selected".
    for(int j = 0; j < 2;j++){
        for(int i = 0; i < 8; i++){
            if(ReadBuf[j + 2] & (0x01<<(7-i))){
                pMainWindow->isChannal[j * 8 + i] = 0;
            }else{
                pMainWindow->isChannal[j * 8 + i] = 1;
            }
        }
    }
    // 16 samples follow, one per channel
    int tmpData = 0;
    // Convert each channel's raw sample to an H (water-column) value
    for(int i = 0;i < 16;i++){
        // Each sample is the next 3 bytes, big-endian
        data1 = ReadBuf[i * 3 + 4 + 0];
        data1 = data1<<16;
        data2 = ReadBuf[i * 3 + 4 + 1];
        data2 = data2<<8;
        data3 = ReadBuf[i * 3 + 4 + 2];
        tmpData = (data1&0x00ff0000)|(data2&0x0000ff00)|(data3&0x000000ff);
        // Sign-extend the 24-bit two's-complement value
        if(tmpData&0x00800000){
            tmpData = -(((~tmpData)&0x00ffffff) + 1);
        }
        // Piecewise-linear correction: the raw range is split into ten
        // 786432-count segments, each with its own calibration
        // coefficient. (The inner loop variable shadows the outer i.)
        double tmp = 0;
        double coefficient = 0.0;
        double values = tmpData;
        for(int i = 0; i < 10; i++){
            double tempValue = values - i * 786432.0;
            switch(i){
            case 0:
                coefficient = pMainWindow->coefficient1;
                break;
            case 1:
                coefficient = pMainWindow->coefficient2;
                break;
            case 2:
                coefficient = pMainWindow->coefficient3;
                break;
            case 3:
                coefficient = pMainWindow->coefficient4;
                break;
            case 4:
                coefficient = pMainWindow->coefficient5;
                break;
            case 5:
                coefficient = pMainWindow->coefficient6;
                break;
            case 6:
                coefficient = pMainWindow->coefficient7;
                break;
            case 7:
                coefficient = pMainWindow->coefficient8;
                break;
            case 8:
                coefficient = pMainWindow->coefficient9;
                break;
            case 9:
                coefficient = pMainWindow->coefficient10;
                break;
            }
            if(tempValue > 786432.0){
                tmp += 786432 * coefficient;
            } else if(tempValue > 0){
                tmp += tempValue * coefficient;
            } else{
                break;
            }
        }
        V[i] = tmp / 786432;
        // Map the corrected count to the sensor's pressure range, then to
        // a water-column height H = p / (rho * g).
        tmp = (tmp/393216.0 - 4)/16.0*(pMainWindow->PmaxList[i] - pMainWindow->PminList[i])
                + pMainWindow->PminList[i];
        data[i] = tmp / (9.8 * pMainWindow->Density);
        // While reading initial values, keep a running average as the
        // per-channel baseline H.
        if(pMainWindow->readInitialValue){
            if(pMainWindow->H[i] == 0) {
                pMainWindow->H[i] = data[i];
            }
            else{
                pMainWindow->H[i] = ( pMainWindow->H[i] + data[i] ) / 2.0;
            }
        }
    }
    if(pMainWindow->readInitialValue == false){
        // Deflection = channel's change from baseline, minus the
        // reference channel's change (removes common-mode drift).
        for(int i = 0;i<16;i++){
            f[i] = data[i] - pMainWindow->H[i] -
                    (data[pMainWindow->baseValue] - pMainWindow->H[pMainWindow->baseValue]);
        }
        // Feed the plotting arrays so the chart thread can draw
        if(times[times.length()-1] < MAX_SHOW_TIME){
            times.append(pMainWindow->timeCount * 1000.0 / pMainWindow->samplerate);
            // Append each channel's value to its per-channel series
            for(int i = 0;i < 16;i++){
                value[i].append(f[i]);
            }
        }else{
            // Window full: shift everything left so new values show
            time_MoveLeftInsert(pMainWindow->timeCount * 1000.0 / pMainWindow->samplerate);
            for(int i = 0;i < 16;i++){
                data_MoveLeftInsert(i,f[i]);
            }
        }
        // Append this sample to the deflection data file
        if(!pMainWindow->file.isOpen())pMainWindow->file.open( QIODevice::ReadWrite | QIODevice::Append |QIODevice::Text);
        QTextStream steam(&pMainWindow->file);
        // File 1 column: wall-clock time (h:m:s:ms)
        steam<<QTime::currentTime().toString()<<":"<<QTime::currentTime().msec()<<",";
        if(last_second == QTime::currentTime().second()) id++;
        else {
            id=0;
            last_second = QTime::currentTime().second();
        }
        // File 1 column: sample index within the current second
        steam<<id<<",";
        // File 1 columns: 16 deflection/voltage pairs ("-" if unselected)
        for(int i = 0; i < 16; i++){
            if(pMainWindow->isChannal[i])
                steam<<f[i]<<","<<V[i]<<",";
            else
                steam<<"-,-,";
        }
        // File 1 columns: 16 running averages (write path disabled —
        // the if(0) branch is dead; we only accumulate f_avg here)
        if(0){
            for(int i = 0; i < 16; i++){
                if(pMainWindow->isChannal[i]){
                    steam<<(f_avg[i]/200.0)<<",";
                    f_avg[i] = 0;
                }
                else steam<<"-,";
            }
        }else{
            for(int i = 0; i < 16; i++){
                if(pMainWindow->isChannal[i]){
                    f_avg[i] = f_avg[i] + f[i];
                }
            }
        }
        steam<<endl;
        if(!pMainWindow->vfile.isOpen())pMainWindow->vfile.open( QIODevice::ReadWrite | QIODevice::Append |QIODevice::Text);
        QTextStream steam2(&pMainWindow->vfile);
        // File 2 column: wall-clock time (h:m:s:ms)
        steam2<<QTime::currentTime().toString()<<":"<<QTime::currentTime().msec()<<",";
        // File 2 columns: 16 voltage values ("-" if unselected)
        for(int i = 0; i < 16; i++){
            if(pMainWindow->isChannal[i])
                steam2<<V[i]<<",";
            else
                steam2<<"-,";
        }
        steam2<<endl;
    }
}
// 辅助函数 -- 时间值左移
void TcpClientThread::time_MoveLeftInsert(double data)
{
for(int i = 0;i < times.length()-1;i++){
times[i] = times[i+1];
}
times[times.length()-1] = data;
}
// 辅助函数 -- 数据值左移
void TcpClientThread::data_MoveLeftInsert(int channal, double data)
{
for(int i = 0;i < times.length()-1;i++){
value[channal][i] = value[channal][i+1];
}
value[channal][times.length()-1] = data;
}
| [
"641777437@qq.com"
] | 641777437@qq.com |
5b3a5a427fb5c5934e5651f65e84a22cc9474932 | 20b49a6ef1fa417d67abef2d29a598c9e41c478e | /CSES/Graph Algorithms/highScore.cpp | 23f369dd0f7541a0647b7dcefc154b5f83a3e3d1 | [] | no_license | switchpiggy/Competitive_Programming | 956dac4a71fdf65de2959dd142a2032e2f0710e1 | beaaae4ece70889b0af1494d68c630a6e053558a | refs/heads/master | 2023-04-15T19:13:12.348433 | 2021-04-04T06:12:29 | 2021-04-04T06:12:29 | 290,905,106 | 1 | 3 | null | 2020-10-05T20:16:53 | 2020-08-27T23:38:48 | C++ | UTF-8 | C++ | false | false | 1,782 | cpp | #include <bits/stdc++.h>
using namespace std;
typedef long long int ll;
typedef long double ld;
#define benq queue
#define pbenq priority_queue
#define all(x) x.begin(), x.end()
#define sz(x) (ll)x.size()
#define m1(x) memset(x, 1, sizeof(x))
#define m0(x) memset(x, 0, sizeof(x))
#define mn(x) memset(x, -0x3f, sizeof(x));
#define inf(x) memset(x, 0x3f, sizeof(x))
#define MOD 1000000007
#define INF 0x3f3f3f3f3f3f3f3f
#define PI 3.14159265358979323846264338
#define flout cout << fixed << setprecision(12)
ll n, m, a, b, x, dist[2507];
vector<pair<pair<ll, ll>, ll>> v;
vector<ll> adj[100007], adj2[100007];
bool bad[100007], vis[100007], r[100007];
bool dfs(ll x) {
vis[x] = 1;
if(bad[x] && r[x]) return 1;
for(ll i : adj[x]) {
if(vis[i]) continue;
if(dfs(i)) return 1;
}
return 0;
}
void dfs2(ll x) {
vis[x] = r[x] = 1;
for(ll i : adj2[x]) {
if(vis[i]) continue;
dfs2(i);
}
}
int main() {
ios_base::sync_with_stdio(0);
cin.tie(0);
cin >> n >> m;
mn(dist);
for(ll i = 0; i < m; ++i) {
cin >> a >> b >> x;
v.push_back({{a, b}, x});
adj[b].push_back(a);
adj2[a].push_back(b);
}
dist[1] = 0;
for(ll i = 0; i < n - 1; ++i) {
for(auto j : v) {
if(dist[j.first.first] != -INF) dist[j.first.second] = max(dist[j.first.second], dist[j.first.first] + j.second);
}
}
for(auto j : v) {
if(dist[j.first.first] != -INF && dist[j.first.second] < dist[j.first.first] + j.second) {
bad[j.first.first] = 1;
dist[j.first.second] = dist[j.first.first] + j.second;
}
}
dfs2(1);
m0(vis);
if(dfs(n)) cout << "-1\n";
else cout << dist[n] << '\n';
return 0;
} | [
"switchpiggy@users.noreply.github.com"
] | switchpiggy@users.noreply.github.com |
282a40574da52846ce82302f4033862a5e950873 | 8f726a302a527a43a656c6c8a791fe59a73b029d | /Chapter_1/1.5_One_Away/Levenshtein_distance.cpp | ea7a0e3935f07322a73615bb15ddb56cd688e207 | [] | no_license | aaraki/CtCI | f9d22117e6bc495695839ddd193744e5ef1b18c7 | 6f904251f2e7def3f24e05034ce24341bfc9351f | refs/heads/master | 2022-04-18T15:31:33.421149 | 2020-03-09T02:38:23 | 2020-03-09T02:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,050 | cpp | #include <iostream>
#include <vector>
#include <list>
#include <stack>
#include <queue>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <string>
#include <algorithm>
using namespace std;
bool oneEditAway(string s, string t) {
int n = (int)s.size();
int m = (int)t.size();
vector<vector<int>> dp(n + 1, vector<int>(m + 1, INT32_MAX));
for (int i = 0; i <= n; i++) dp[i][0] = i;
for (int i = 0; i <= m; i++) dp[0][i] = i;
for (int i = 1; i <= n; i++) {
for (int j = 1; j <= m; j++) {
dp[i][j] = min(dp[i][j], dp[i - 1][j] + 1);
dp[i][j] = min(dp[i][j], dp[i][j - 1] + 1);
dp[i][j] = min(dp[i][j], dp[i - 1][j - 1] + (s[i - 1] != t[j - 1]));
}
}
return dp[n][m] <= 1;
}
int main() {
cout << oneEditAway("pale", "ple") << endl; // true
cout << oneEditAway("pales", "pale") << endl; // true;
cout << oneEditAway("pale", "bale") << endl; // true
cout << oneEditAway("pale", "bae") << endl; //false
return 0;
} | [
"tatsuhiro.no.jones@gmail.com"
] | tatsuhiro.no.jones@gmail.com |
4b0998ab7bbe1b25038d28cae141b8b6ee204a93 | c27e82cde645bb5bb33c0c2c5f418dc3ba7a491c | /src/shell/command_parser/structures/OutAppend.cpp | 214edb500e8161b5327fb67600f4d7773d5f1de0 | [] | no_license | pik694/UXP1A_shell | 8729cb28507dc5f9a0029226b44b10b0519b821d | f6efd8d1cd3ebc8f0e85505da429c4c63566d9ff | refs/heads/master | 2020-03-13T13:58:17.432490 | 2018-06-14T17:22:14 | 2018-06-14T17:22:14 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 181 | cpp | //
// Created by Daniel Bigos on 13.06.18.
//
#include "OutAppend.h"
using namespace shell::parser::structures;
OutAppend::OutAppend( const std::string &path ) : Out( path ) { }
| [
"daniel.bigos96@gmail.com"
] | daniel.bigos96@gmail.com |
4453a277bdf96bebd35d6c10b85c6df1a42f075a | d52399169d3ce1ca274583241ed471fcac857ec9 | /OS/labs/lab12/os12COM/lab12/lab12/CFactory.h | 9bf083a713ffb6b9e29b57045799236337a2a0cb | [] | no_license | sshkodunishka/thirdCourse2Sem | bc7d7af3b8376aa0265a926041d1765c9173cd78 | 0eb3bb66af0e7ccc24c82a616167f08803ea3c2e | refs/heads/main | 2023-05-06T18:36:05.330071 | 2021-06-03T05:06:08 | 2021-06-03T05:06:08 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 461 | h | #pragma once
#include <objbase.h>
class CFactory : public IClassFactory
{
public :
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void** ppv);
virtual ULONG STDMETHODCALLTYPE AddRef(void);
virtual ULONG STDMETHODCALLTYPE Release(void);
virtual HRESULT STDMETHODCALLTYPE CreateInstance(IUnknown* pUO, const IID& id, void** ppv);
virtual HRESULT STDMETHODCALLTYPE LockServer(BOOL b);
CFactory();
~CFactory();
private:
ULONG m_Ref;
}; | [
"anton.borisov.17@mail.ru"
] | anton.borisov.17@mail.ru |
a878d481ae5d8be7753c0657d55a4a200e43ef93 | 2b60f6b0c50b5637206c3051be1b9a2d70979406 | /src/version.cpp | 9dcd85625ccabdfcd39f7ab4563f751c27524b97 | [
"MIT"
] | permissive | dogecrap/dogecrap | 97976d180b6d73af3176fa43e2aef78ea5b2058a | 307721834e9dc525b060414851252927143e5ccb | refs/heads/master | 2021-03-12T22:41:19.332301 | 2014-01-22T21:48:27 | 2014-01-22T21:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,672 | cpp | // Copyright (c) 2012 The Bitcoin developers
// Copyright (c) 2012 Litecoin Developers
// Distributed under the MIT/X11 software license, see the acrapmpanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <string>
#include "version.h"
// Name of client reported in the 'version' message. Report the same name
// for both bitcoind and bitcoin-qt, to make it harder for attackers to
// target servers or GUI users specifically.
const std::string CLIENT_NAME("Satoshi");
// Client version number
#define CLIENT_VERSION_SUFFIX "-foo"
// The following part of the code determines the CLIENT_BUILD variable.
// Several mechanisms are used for this:
// * first, if HAVE_BUILD_INFO is defined, include build.h, a file that is
// generated by the build environment, possibly containing the output
// of git-describe in a macro called BUILD_DESC
// * secondly, if this is an exported version of the code, GIT_ARCHIVE will
// be defined (automatically using the export-subst git attribute), and
// GIT_COMMIT will contain the commit id.
// * then, three options exist for determining CLIENT_BUILD:
// * if BUILD_DESC is defined, use that literally (output of git-describe)
// * if not, but GIT_COMMIT is defined, use v[maj].[min].[rev].[build]-g[commit]
// * otherwise, use v[maj].[min].[rev].[build]-unk
// finally CLIENT_VERSION_SUFFIX is added
// First, include build.h if requested
#ifdef HAVE_BUILD_INFO
# include "build.h"
#endif
// git will put "#define GIT_ARCHIVE 1" on the next line inside archives. $Format:%n#define GIT_ARCHIVE 1$
#ifdef GIT_ARCHIVE
# define GIT_COMMIT_ID "$Format:%h$"
# define GIT_COMMIT_DATE "$Format:%cD"
#endif
#define STRINGIFY(s) #s
#define BUILD_DESC_FROM_COMMIT(maj,min,rev,build,commit) \
"v" STRINGIFY(maj) "." STRINGIFY(min) "." STRINGIFY(rev) "." STRINGIFY(build) "-g" commit
#define BUILD_DESC_FROM_UNKNOWN(maj,min,rev,build) \
"v" STRINGIFY(maj) "." STRINGIFY(min) "." STRINGIFY(rev) "." STRINGIFY(build) "-unk"
#ifndef BUILD_DESC
# ifdef GIT_COMMIT_ID
# define BUILD_DESC BUILD_DESC_FROM_COMMIT(CLIENT_VERSION_MAJOR, CLIENT_VERSION_MINOR, CLIENT_VERSION_REVISION, CLIENT_VERSION_BUILD, GIT_COMMIT_ID)
# else
# define BUILD_DESC BUILD_DESC_FROM_UNKNOWN(CLIENT_VERSION_MAJOR, CLIENT_VERSION_MINOR, CLIENT_VERSION_REVISION, CLIENT_VERSION_BUILD)
# endif
#endif
#ifndef BUILD_DATE
# ifdef GIT_COMMIT_DATE
# define BUILD_DATE GIT_COMMIT_DATE
# else
# define BUILD_DATE __DATE__ ", " __TIME__
# endif
#endif
const std::string CLIENT_BUILD(BUILD_DESC CLIENT_VERSION_SUFFIX);
const std::string CLIENT_DATE(BUILD_DATE);
| [
"you@example.com"
] | you@example.com |
a0ae71ec28c9e9436e81fb0fadabda6d923ddd27 | 17353cfd2c984f2b57ab09dce5b793f34b051f19 | /unsorted_include_todo/Title/Section.h | c34ffd80181d3d4f3e76fde755292d151f5c7e80 | [] | no_license | mxygon/pikmin2 | 573df84b127b27f1c5db6be22680b63fd34565d5 | fa16b706d562d3f276406d8a87e01ad541515737 | refs/heads/main | 2023-09-02T06:56:56.216154 | 2021-11-12T09:34:26 | 2021-11-12T09:34:26 | 427,367,127 | 1 | 0 | null | 2021-11-12T13:19:54 | 2021-11-12T13:19:53 | null | UTF-8 | C++ | false | false | 2,110 | h | #ifndef _TITLE_SECTION_H
#define _TITLE_SECTION_H
namespace Game {
struct BaseHIOSection {
virtual void _00() = 0; // _00
virtual void _04() = 0; // _04
virtual void _08() = 0; // _08
virtual void _0C() = 0; // _0C
virtual void _10() = 0; // _10
virtual void _14() = 0; // _14
virtual void _18() = 0; // _18
virtual void _1C() = 0; // _1C
virtual void _20() = 0; // _20
virtual void _24() = 0; // _24
virtual void _28() = 0; // _28
virtual void _2C() = 0; // _2C
virtual void _30() = 0; // _30
virtual void _34() = 0; // _34
virtual void _38() = 0; // _38
virtual void _3C() = 0; // _3C
virtual void initHIO(HIORootNode*); // _40
virtual void refreshHIO(); // _44
// _00 VTBL
};
} // namespace Game
namespace Title {
struct Section : public BaseHIOSection {
virtual ~Section(); // _00
virtual void run(); // _04
virtual void update(); // _08
virtual void draw(Graphics&); // _0C
virtual void init(); // _10
virtual void drawInit(Graphics&); // _14
virtual void drawInit(Graphics&, EDrawInitMode); // _18
virtual void doExit(); // _1C
virtual void forceFinish(); // _20
virtual void forceReset(); // _24
virtual void getCurrentSection(); // _28
virtual void doLoadingStart(); // _2C
virtual void doLoading(); // _30
virtual void doUpdate(); // _34
virtual void doDraw(Graphics&); // _38
virtual void isFinishable(); // _3C
virtual void initHIO(HIORootNode*); // _40
virtual void refreshHIO(); // _44
virtual void loadResource(); // _48
// _00 VTBL
};
} // namespace Title
#endif
| [
"84647527+intns@users.noreply.github.com"
] | 84647527+intns@users.noreply.github.com |
fad642e3a822a58e175e378996f2695c176435d9 | fa2069464c2ab9866fe6d5dd656dc48670037ba0 | /include/pixiu/request_utils.hpp | 6f005184c6de35bfdf1a3f71605bf6d4895311dc | [
"MIT"
] | permissive | blockspacer/pixiu | 2b4f881094f3876dd9a30d8acf431d5020f5a041 | a75f06a363df0bdec37ff270b67ee877bfaed03a | refs/heads/master | 2022-03-11T08:54:56.031718 | 2019-08-22T19:20:34 | 2019-08-22T19:20:34 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 495 | hpp | #pragma once
#include "client/request_param.hpp"
namespace pixiu {
using request_param = client_bits::request_param;
constexpr struct MakeRequest {
auto operator()(
const boost::beast::http::verb method,
const std::string& host,
boost::string_view target,
int version,
nlohmann::json param
) const {
request_param rp;
rp.target = target;
rp.method = method;
rp.param = std::move(param);
return rp.make_request(host, version);
}
} make_request;
} | [
"CHChang810716@gmail.com"
] | CHChang810716@gmail.com |
9b4dc0f311dfe13070d74a507f6b6da8d0d854b8 | 883ab39434c0a31cb0f04b8fe7f5e7761f1939ca | /main.cpp | 30814549176d8e791c19bad9cddd6f0fc072659f | [] | no_license | jiubing/QtDemo | 7b88130df00190971ee4c570caa37e4c43cdbbf7 | 58433760aabfa3689b1b0b28a9c74a847f9aaa23 | refs/heads/master | 2022-12-04T11:00:33.676335 | 2020-08-29T01:28:23 | 2020-08-29T01:28:23 | 291,176,460 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 185 | cpp | #include "mainwindow.h"
#include <QApplication>
//hhhhhhhhhhhhhhhhhhhhhhh
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
w.show();
return a.exec();
}
| [
"1761960198@qq.co"
] | 1761960198@qq.co |
b2261580a5edb6c594a0609140c72747e0ef074d | fc38a55144a0ad33bd94301e2d06abd65bd2da3c | /thirdparty/cgal/CGAL-4.13/include/CGAL/Minkowski_sum_2/Minkowski_sum_by_reduced_convolution_2.h | ef37f9d9336bc1fb96224b6e811a8fe50238c63a | [
"LGPL-2.0-or-later",
"LGPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-commercial-license",
"MIT",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-only",
"LGPL-2.1-or-later",
"LicenseRef-scancode-proprietary-license",
"Licens... | permissive | bobpepin/dust3d | 20fc2fa4380865bc6376724f0843100accd4b08d | 6dcc6b1675cb49ef3fac4a58845f9c9025aa4c9f | refs/heads/master | 2022-11-30T06:00:10.020207 | 2020-08-09T09:54:29 | 2020-08-09T09:54:29 | 286,051,200 | 0 | 0 | MIT | 2020-08-08T13:45:15 | 2020-08-08T13:45:14 | null | UTF-8 | C++ | false | false | 17,395 | h | // Copyright (c) 2015 Tel-Aviv University (Israel).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
// You can redistribute it and/or modify it under the terms of the GNU
// General Public License as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// Licensees holding a valid commercial license may use this file in
// accordance with the commercial license agreement provided with the software.
//
// This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
// WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
//
// $URL$
// $Id$
// SPDX-License-Identifier: GPL-3.0+
//
// Author(s): Sebastian Morr <sebastian@morr.cc>
#ifndef CGAL_MINKOWSKI_SUM_BY_REDUCED_CONVOLUTION_2_H
#define CGAL_MINKOWSKI_SUM_BY_REDUCED_CONVOLUTION_2_H
#include <CGAL/license/Minkowski_sum_2.h>
#include <CGAL/basic.h>
#include <CGAL/Arrangement_with_history_2.h>
#include <CGAL/Arr_segment_traits_2.h>
#include <CGAL/Minkowski_sum_2/AABB_collision_detector_2.h>
#include <queue>
#include <boost/unordered_set.hpp>
#include <boost/unordered_map.hpp>
namespace CGAL {
// This algorithm was first described by Evan Behar and Jyh-Ming Lien in "Fast
// and Robust 2D Minkowski Sum Using Reduced Convolution", IROS 2011.
// This implementation is based on Alon Baram's 2013 master's thesis "Polygonal
// Minkowski Sums via Convolution: Theory and Practice" at Tel-Aviv University.
template <typename Kernel_, typename Container_>
class Minkowski_sum_by_reduced_convolution_2
{
private:
typedef Kernel_ Kernel;
typedef Container_ Container;
// Basic types:
typedef CGAL::Polygon_2<Kernel, Container> Polygon_2;
typedef CGAL::Polygon_with_holes_2<Kernel, Container> Polygon_with_holes_2;
typedef typename Kernel::Point_2 Point_2;
typedef typename Kernel::Vector_2 Vector_2;
typedef typename Kernel::Direction_2 Direction_2;
typedef typename Kernel::Triangle_2 Triangle_2;
typedef typename Kernel::FT FT;
// Segment-related types:
typedef Arr_segment_traits_2<Kernel> Traits_2;
typedef typename Traits_2::X_monotone_curve_2 Segment_2;
typedef std::list<Segment_2> Segment_list;
typedef Arr_default_dcel<Traits_2> Dcel;
typedef std::pair<int, int> State;
// Arrangement-related types:
typedef Arrangement_with_history_2<Traits_2, Dcel> Arrangement_history_2;
typedef typename Arrangement_history_2::Halfedge_handle Halfedge_handle;
typedef typename Arrangement_history_2::Face_iterator Face_iterator;
typedef typename Arrangement_history_2::Face_handle Face_handle;
typedef typename Arrangement_history_2::Ccb_halfedge_circulator
Ccb_halfedge_circulator;
typedef typename Arrangement_history_2::Originating_curve_iterator
Originating_curve_iterator;
typedef typename Arrangement_history_2::Inner_ccb_iterator Inner_ccb_iterator;
// Function object types:
typename Kernel::Construct_translated_point_2 f_add;
typename Kernel::Construct_vector_2 f_vector;
typename Kernel::Construct_direction_2 f_direction;
typename Kernel::Orientation_2 f_orientation;
typename Kernel::Compare_xy_2 f_compare_xy;
typename Kernel::Counterclockwise_in_between_2 f_ccw_in_between;
public:
Minkowski_sum_by_reduced_convolution_2()
{
// Obtain kernel functors
Kernel ker;
f_add = ker.construct_translated_point_2_object();
f_vector = ker.construct_vector_2_object();
f_direction = ker.construct_direction_2_object();
f_orientation = ker.orientation_2_object();
f_compare_xy = ker.compare_xy_2_object();
f_ccw_in_between = ker.counterclockwise_in_between_2_object();
}
template <typename OutputIterator>
void operator()(const Polygon_2& pgn1, const Polygon_2& pgn2,
Polygon_2& outer_boundary, OutputIterator holes) const
{
CGAL_precondition(pgn1.is_simple());
CGAL_precondition(pgn2.is_simple());
CGAL_precondition(pgn1.orientation() == COUNTERCLOCKWISE);
CGAL_precondition(pgn2.orientation() == COUNTERCLOCKWISE);
const Polygon_with_holes_2 pwh1(pgn1);
const Polygon_with_holes_2 pwh2(pgn2);
common_operator(pwh1, pwh2, outer_boundary, holes);
}
template <typename OutputIterator>
void operator()(const Polygon_with_holes_2& pgn1,
const Polygon_with_holes_2& pgn2,
Polygon_2& outer_boundary, OutputIterator holes) const
{
common_operator(pgn1, pgn2, outer_boundary, holes);
}
template <typename OutputIterator>
void operator()(const Polygon_2& pgn1,
const Polygon_with_holes_2& pgn2,
Polygon_2& outer_boundary, OutputIterator holes) const
{
CGAL_precondition(pgn1.is_simple());
CGAL_precondition(pgn1.orientation() == COUNTERCLOCKWISE);
const Polygon_with_holes_2 pwh1(pgn1);
common_operator(pwh1, pgn2, outer_boundary, holes);
}
private:
template <typename OutputIterator>
void common_operator(const Polygon_with_holes_2& pgn1,
const Polygon_with_holes_2& pgn2,
Polygon_2& outer_boundary, OutputIterator holes) const
{
// If the outer boundaries of both summands are empty the Minkowski sum is
// the entire plane.
if (pgn1.outer_boundary().is_empty() && pgn2.outer_boundary().is_empty())
return;
// Initialize collision detector. It operates on pgn2 and on the inversed
// pgn1:
const Polygon_with_holes_2 inversed_pgn1 =
transform(Aff_transformation_2<Kernel>(SCALING, -1), pgn1);
AABB_collision_detector_2<Kernel, Container>
collision_detector(pgn2, inversed_pgn1);
// Compute the reduced convolution (see section 4.1 of Alon's master's
// thesis)
Segment_list reduced_convolution;
build_reduced_convolution(pgn1, pgn2, reduced_convolution);
// Insert the segments into an arrangement
Arrangement_history_2 arr;
insert(arr, reduced_convolution.begin(), reduced_convolution.end());
// Trace the outer loop and put it in 'outer_boundary'
// If one of the summand does not have an outer boundary, then the Minkowski
// sum does not have an outer boundary either.
bool is_outer_boundary_empty = pgn1.outer_boundary().is_empty() ||
pgn2.outer_boundary().is_empty();
if (! is_outer_boundary_empty) get_outer_loop(arr, outer_boundary);
// Check for each face whether it is a hole in the M-sum. If it is, add it
// to 'holes'. See chapter 3 of of Alon's master's thesis.
for (Face_iterator fit = arr.faces_begin(); fit != arr.faces_end(); ++fit) {
// Check whether the face is on the M-sum's border.
// If the face contains holes, it can't be on the Minkowski sum's border
if (0 < fit->number_of_holes()) continue;
// The face needs to be orientable
if (! test_face_orientation(arr, fit)) continue;
// When the reversed polygon 1, translated by a point inside of this face,
// collides with polygon 2, this cannot be a hole
if (! is_outer_boundary_empty) {
Point_2 inner_point = get_point_in_face(fit);
if (collision_detector.check_collision(inner_point)) continue;
}
add_face(fit, holes);
}
}
// Builds the reduced convolution for each pair of loop in the two
// polygons-with-holes.
void build_reduced_convolution(const Polygon_with_holes_2& pgnwh1,
const Polygon_with_holes_2& pgnwh2,
Segment_list& reduced_convolution) const
{
for (std::size_t x = 0; x < 1+pgnwh1.number_of_holes(); ++x)
{
for (std::size_t y = 0; y < 1+pgnwh2.number_of_holes(); ++y)
{
if ((x != 0) && (y != 0))
{
continue;
}
Polygon_2 pgn1, pgn2;
if (x == 0) {
pgn1 = pgnwh1.outer_boundary();
}
else {
typename Polygon_with_holes_2::Hole_const_iterator it1 =
pgnwh1.holes_begin();
for (std::size_t count = 0; count < x-1; count++) { it1++; }
pgn1 = *it1;
}
if (y == 0) {
pgn2 = pgnwh2.outer_boundary();
}
else {
typename Polygon_with_holes_2::Hole_const_iterator it2 =
pgnwh2.holes_begin();
for (std::size_t count = 0; count < y-1; count++) { it2++; }
pgn2 = *it2;
}
build_reduced_convolution(pgn1, pgn2, reduced_convolution);
}
}
}
// Builds the reduced convolution using a fiber grid approach. For each
// starting vertex, try to add two outgoing next states. If a visited
// vertex is reached, then do not explore further. This is a BFS-like
// iteration beginning from each vertex in the first column of the fiber
// grid.
void build_reduced_convolution(const Polygon_2& pgn1, const Polygon_2& pgn2,
Segment_list& reduced_convolution) const
{
int n1 = static_cast<int>(pgn1.size());
int n2 = static_cast<int>(pgn2.size());
if ((n1 == 0) || (n2 == 0)) return;
std::vector<Point_2> p1_vertices = vertices_of_polygon(pgn1);
std::vector<Point_2> p2_vertices = vertices_of_polygon(pgn2);
// Init the direcions of both polygons
std::vector<Direction_2> p1_dirs = directions_of_polygon(p1_vertices);
std::vector<Direction_2> p2_dirs = directions_of_polygon(p2_vertices);
// Contains states that were already visited
boost::unordered_set<State> visited_states;
// Init the queue with vertices from the first column
std::queue<State> state_queue;
for (int i = n1-1; i >= 0; --i)
{
state_queue.push(State(i, 0));
}
while (state_queue.size() > 0)
{
State curr_state = state_queue.front();
state_queue.pop();
int i1 = curr_state.first;
int i2 = curr_state.second;
// If this state was already visited, skip it
if (visited_states.count(curr_state) > 0)
{
continue;
}
visited_states.insert(curr_state);
int next_i1 = (i1+1) % n1;
int next_i2 = (i2+1) % n2;
int prev_i1 = (n1+i1-1) % n1;
int prev_i2 = (n2+i2-1) % n2;
// Try two transitions: From (i,j) to (i+1,j) and to (i,j+1). Add
// the respective segments, if they are in the reduced convolution.
for(int step_in_pgn1 = 0; step_in_pgn1 <= 1; step_in_pgn1++)
{
int new_i1, new_i2;
if (step_in_pgn1)
{
new_i1 = next_i1;
new_i2 = i2;
}
else
{
new_i1 = i1;
new_i2 = next_i2;
}
// If the segment's direction lies counterclockwise in between
// the other polygon's vertex' ingoing and outgoing directions,
// the segment belongs to the full convolution.
bool belongs_to_convolution;
if (step_in_pgn1)
{
belongs_to_convolution =
f_ccw_in_between(p1_dirs[i1], p2_dirs[prev_i2], p2_dirs[i2]) ||
p1_dirs[i1] == p2_dirs[i2];
}
else
{
belongs_to_convolution =
f_ccw_in_between(p2_dirs[i2], p1_dirs[prev_i1], p1_dirs[i1]) ||
p2_dirs[i2] == p1_dirs[prev_i1];
}
if (belongs_to_convolution)
{
state_queue.push(State(new_i1, new_i2));
// Only edges added to convex vertices can be on the M-sum's boundary.
// This filter only leaves the *reduced* convolution.
bool convex;
if (step_in_pgn1)
{
convex = is_convex(p2_vertices[prev_i2], p2_vertices[i2],
p2_vertices[next_i2]);
}
else
{
convex = is_convex(p1_vertices[prev_i1], p1_vertices[i1],
p1_vertices[next_i1]);
}
if (convex)
{
Point_2 start_point = get_point(i1, i2, p1_vertices, p2_vertices);
Point_2 end_point = get_point(new_i1, new_i2, p1_vertices,
p2_vertices);
reduced_convolution.push_back(Segment_2(start_point, end_point));
}
}
}
}
}
// Returns a vector of the polygon's vertices, in case that Container
// is std::list and we cannot use vertex(i).
std::vector<Point_2> vertices_of_polygon(const Polygon_2& p) const
{
std::vector<Point_2> vertices;
for (typename Polygon_2::Vertex_const_iterator it = p.vertices_begin();
it != p.vertices_end(); it++)
{
vertices.push_back(*it);
}
return vertices;
}
// Returns a sorted list of the polygon's edges
std::vector<Direction_2> directions_of_polygon(
const std::vector<Point_2>& points) const
{
std::vector<Direction_2> directions;
std::size_t n = points.size();
for (std::size_t i = 0; i < n-1; ++i)
{
directions.push_back(f_direction(f_vector(points[i], points[i+1])));
}
directions.push_back(f_direction(f_vector(points[n-1], points[0])));
return directions;
}
bool is_convex(const Point_2& prev, const Point_2& curr,
const Point_2& next) const
{
return f_orientation(prev, curr, next) == LEFT_TURN;
}
// Returns the point corresponding to a state (i,j).
Point_2 get_point(int i1, int i2, const std::vector<Point_2>& pgn1,
const std::vector<Point_2>& pgn2) const
{
return f_add(pgn1[i1], Vector_2(Point_2(ORIGIN), pgn2[i2]));
}
// Put the outer loop of the arrangement in 'outer_boundary'
void get_outer_loop(Arrangement_history_2& arr,
Polygon_2& outer_boundary) const
{
Inner_ccb_iterator icit = arr.unbounded_face()->inner_ccbs_begin();
Ccb_halfedge_circulator circ_start = *icit;
Ccb_halfedge_circulator circ = circ_start;
do
{
outer_boundary.push_back(circ->source()->point());
}
while (--circ != circ_start);
}
// Determine whether the face orientation is consistent.
bool test_face_orientation(const Arrangement_history_2& arr,
const Face_handle face) const
{
// The face needs to be orientable
Ccb_halfedge_circulator start = face->outer_ccb();
Ccb_halfedge_circulator circ = start;
do if (!do_original_edges_have_same_direction(arr, circ)) return false;
while (++circ != start);
return true;
}
// Add a face to 'holes'.
template <typename OutputIterator>
void add_face(const Face_handle face, OutputIterator holes) const
{
Polygon_2 pgn_hole;
Ccb_halfedge_circulator start = face->outer_ccb();
Ccb_halfedge_circulator circ = start;
do pgn_hole.push_back(circ->source()->point());
while (--circ != start);
*holes = pgn_hole;
++holes;
}
// Check whether the convolution's original edge(s) had the same direction as
// the arrangement's half edge
bool do_original_edges_have_same_direction(const Arrangement_history_2& arr,
const Halfedge_handle he) const
{
Originating_curve_iterator segment_itr;
for (segment_itr = arr.originating_curves_begin(he);
segment_itr != arr.originating_curves_end(he); ++segment_itr)
{
if (f_compare_xy(segment_itr->source(), segment_itr->target()) ==
(Comparison_result)he->direction())
{
return false;
}
}
return true;
}
// Return a point in the face's interior by finding a diagonal
Point_2 get_point_in_face(const Face_handle face) const
{
Ccb_halfedge_circulator current_edge = face->outer_ccb();
Ccb_halfedge_circulator next_edge = current_edge;
next_edge++;
Point_2 a, v, b;
// Move over the face's vertices until a convex corner is encountered:
do
{
a = current_edge->source()->point();
v = current_edge->target()->point();
b = next_edge->target()->point();
current_edge++;
next_edge++;
}
while (!is_convex(a, v, b));
Triangle_2 ear(a, v, b);
FT min_distance = -1;
const Point_2* min_q = 0;
// Of the remaining vertices, find the one inside of the "ear" with minimal
// distance to v:
while (++next_edge != current_edge)
{
const Point_2& q = next_edge->target()->point();
if (ear.has_on_bounded_side(q))
{
FT distance = squared_distance(q, v);
if ((min_q == 0) || (distance < min_distance))
{
min_distance = distance;
min_q = &q;
}
}
}
// If there was no vertex inside of the ear, return it's centroid.
// Otherwise, return a point between v and min_q.
return (min_q == 0) ? centroid(ear) : midpoint(v, *min_q);
}
template <typename Transformation>
Polygon_with_holes_2 transform(const Transformation& t,
const Polygon_with_holes_2& p) const
{
Polygon_with_holes_2 result(CGAL::transform(t, p.outer_boundary()));
typename Polygon_with_holes_2::Hole_const_iterator it = p.holes_begin();
while (it != p.holes_end())
{
Polygon_2 p2(it->vertices_begin(), it->vertices_end());
result.add_hole(CGAL::transform(t, p2));
++it;
}
return result;
}
};
} // namespace CGAL
#endif
| [
"huxingyi@msn.com"
] | huxingyi@msn.com |
95b16fcf7aba9cffc7917967cffa481ee9bb11be | ed2d635479472dd330176a8624b3189017b0cdc2 | /test/SplitterTest.cpp | 516353fd646aac96209addd888a923bcc9f3d7b5 | [] | no_license | Firobe/Packer | ac16da43e3e86f9401bdabb0f91f986a022d0220 | bd6b3cc824028c63e01c51831ebbb4b006a4b351 | refs/heads/master | 2021-01-21T16:27:47.159457 | 2017-04-13T22:20:44 | 2017-04-13T22:20:44 | 91,887,259 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 410 | cpp | #include <iostream>
#include "Outer.hpp"
#include "Splitter.hpp"
using namespace std;
int main() {
int width = 2000, height = 2000, nbSplit = 50;
Splitter splitter(width, height);
for (int i = 0 ; i < nbSplit ; i++)
splitter.split(Point(rand() % width, rand() % height), Point(rand() % width, rand() % height));
Layout l(splitter.getShapes());
cout << debugOutputSVG(l);
}
| [
"vrobles@enseirb-matmeca.fr"
] | vrobles@enseirb-matmeca.fr |
8234c7d2a02d2fc33ee3e2b4e27d27e82dff12f3 | dcea0b93c838a008367eee386b5eb10531e6d2a7 | /src/instructions/src/TESTinstruction.cpp | fb44d4f944cf2f951bdd20a2798dd94ba5a2865e | [] | no_license | lazav94/TwoPassAssembler | 084a7ccaa6a4f71737f1db9845e4456faf6d5763 | 7c3c1de07b8f42af56f1f2e7dc331960cbd7b4d6 | refs/heads/master | 2021-01-22T06:02:00.675495 | 2017-02-12T13:59:25 | 2017-02-12T13:59:25 | 81,729,123 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,912 | cpp | /*
* TESTinstruction.cpp
*
* Created on: Aug 11, 2016
* Author: laza
*/
#include "TESTinstruction.h"
TEST_instruction::TEST_instruction(int src, int dst) {
this->src = src;
this->dst = dst;
}
TEST_instruction::TEST_instruction(string instruction, string name,
string condition, bool psw_change, Elf32_Word op_code, int src, int dst) :
Instruction(instruction, name, condition, psw_change, op_code) {
this->src = src;
this->dst = dst;
}
TEST_instruction::~TEST_instruction() {
}
Elf32_Word TEST_instruction::read_instruction() {
vector<string> words;
Format::get_words(instuction, words);
int shift_reg_op_code = 19;
try {
int arg_pos = -1;
for (unsigned int i = 0; i < words.size(); i++)
if (Parser::is_memonic(words[i])) {
arg_pos = i;
break;
}
if (arg_pos == -1)
throw INSTRUCTION_NOT_FOUND;
unsigned int arg_dst = arg_pos + 1;
unsigned int arg_src = arg_pos + 2;
if (words.size() - arg_pos != arg_src + 1 - arg_pos)
throw EXCESS_ARGUMENTS;
if (Instruction_Parser::is_register(words[arg_dst])) {
this->dst = Instruction_Parser::read_register(words[arg_dst]);
if(this->dst == PC || this->dst == LR || this->dst == PSW)
throw NOT_ALLOWED_REG;
this->op_code |= this->dst << shift_reg_op_code;
} else
throw NOT_REG;
if (Instruction_Parser::is_register(words[arg_src])) {
this->src = Instruction_Parser::read_register(words[arg_src]);
if(this->src == PC || this->src == LR || this->src == PSW)
throw NOT_ALLOWED_REG;
shift_reg_op_code -= 5;
this->op_code |= this->src << shift_reg_op_code;
} else
throw NOT_REG;
return op_code;
} catch (int id) {
cout << "Exception TEST msg: " << exception_msgs[id] << endl;
} catch (...) {
cout << "TEST " << exception_msgs[UNKNOWN_EXCEPTION]
<< " Check TEST instruction!" << endl;
}
return 0;
}
| [
"lazav94@gmail.com"
] | lazav94@gmail.com |
c2fc37b3078a98d420aa542049f848fdacf5db5f | 040edc2bdbefe7c0d640a18d23f25a8761d62f40 | /25/src/densitysimcomp.cpp | aa9f386413385d855a37c4021098bf4a31155edc | [
"BSD-2-Clause"
] | permissive | johnrsibert/tagest | 9d3be8352f6bb5603fbd0eb6140589bb852ff40b | 0194b1fbafe062396cc32a0f5a4bbe824341e725 | refs/heads/master | 2021-01-24T00:18:24.231639 | 2018-01-16T19:10:25 | 2018-01-16T19:10:25 | 30,438,830 | 3 | 3 | null | 2016-12-08T17:59:28 | 2015-02-07T00:05:30 | C++ | UTF-8 | C++ | false | false | 5,052 | cpp | //$Id: halfcomp.cpp 2754 2011-01-02 20:57:07Z jsibert $
#include <fvar.hpp>
//#include "trace.h"
// Copy the seapodym field into the working density matrix, element by element.
void assignSeapodym(dmatrix& density, const dmatrix& seapodym)
{
  const int rmin = density.rowmin();
  const int rmax = density.rowmax();
  for (int r = rmin; r <= rmax; ++r)
  {
    const int cmin = density(r).indexmin();
    const int cmax = density(r).indexmax();
    for (int c = cmin; c <= cmax; ++c)
      density(r, c) = seapodym(r, c);
  }
}
// Initialise the density matrix from the initial field, element by element.
void startingDensity(dmatrix& density, const dmatrix& initial)
{
  const int rmin = density.rowmin();
  const int rmax = density.rowmax();
  for (int r = rmin; r <= rmax; ++r)
  {
    const int cmin = density(r).indexmin();
    const int cmax = density(r).indexmax();
    for (int c = cmin; c <= cmax; ++c)
      density(r, c) = initial(r, c);
  }
}
// Accumulate the starting density of every grid cell into its zone total;
// map(i,j) gives the zone index for cell (i,j).
void initialDensity(const dmatrix& density, const imatrix map, dvector& initial_density)
{
  int i1 = density.rowmin();
  int i2 = density.rowmax();
  for (int i = i1; i <= i2; i++)
  {
    int j1 = density(i).indexmin();
    int j2 = density(i).indexmax();
    for (int j = j1; j <= j2; j++)
    {
      int k = map(i, j);
      initial_density(k) += density(i, j);
    }
  }
}
// Add each cell's density into its zone's column for the current time step;
// curr_time is truncated to an integer column index of zone_density.
void densitycomp(const dmatrix& density, const imatrix map,
                 const double curr_time, dmatrix& zone_density)
{
  int i1 = density.rowmin();
  int i2 = density.rowmax();
  for (int i = i1; i <= i2; i++)
  {
    int j1 = density(i).indexmin();
    int j2 = density(i).indexmax();
    for (int j = j1; j <= j2; j++)
    {
      int k = map(i, j);
      zone_density(k, int(curr_time)) += density(i, j);
    }
  }
}
// Zero the density of every grid cell whose zone equals region_drop.
void remove_tags(dmatrix& density, const imatrix map, const int region_drop)
{
  const int rmin = density.rowmin();
  const int rmax = density.rowmax();
  for (int r = rmin; r <= rmax; ++r)
  {
    const int cmin = density(r).indexmin();
    const int cmax = density(r).indexmax();
    for (int c = cmin; c <= cmax; ++c)
    {
      if (map(r, c) == region_drop)
        density(r, c) = 0.0;
    }
  }
}
// Seed the "previous" zone totals from the initial totals, element-wise.
void initial_prev_zs(const dvector& sum0, dvector& prev_sum)
{
  for (int k = sum0.indexmin(); k <= sum0.indexmax(); ++k)
    prev_sum(k) = sum0(k);
}
// Per-cell half-life estimate: the first time a cell's density drops to
// `half` of its initial value, linearly interpolate between the previous
// and current time steps and record the crossing time in half_life(i,j).
// half_life < 0 marks "not yet determined".
void halfcomp(const dmatrix& density, dmatrix& prev_density,
              const double curr_time, double& prev_time,
              const double half, const dmatrix& ini_density,
              dmatrix& half_life)
{
  int i1 = density.rowmin();
  int i2 = density.rowmax();
  for (int i = i1; i <= i2; i++)
  {
    int j1 = density(i).indexmin();
    int j2 = density(i).indexmax();
    for (int j = j1; j <= j2; j++)
    {
      if ( (half_life(i,j) < 0.0) && ((density(i,j)/ini_density(i,j)) <= half) )
      {
        // slope of the density between the two samples, then solve
        // b*t + intercept = half for the crossing time t
        double b = (density(i,j)-prev_density(i,j))/(curr_time-prev_time);
        half_life(i,j) = (b*prev_time + half - prev_density(i,j))/b;
      }
    }
  }
  prev_density = density;
  // prev_time is deliberately not advanced here — presumably the caller
  // updates it once per step; confirm against the call site.
  //prev_time = curr_time;
}
// Fold the current field into a running time-weighted average:
// average = (prev_time*average + current) / curr_time.
// NOTE(review): this weighting is exact only if samples arrive at unit
// time intervals (curr_time == prev_time + 1) — confirm with the caller.
void update_average(const dmatrix& current, dmatrix& average,
                    const double& curr_time, double& prev_time)
{
  double w = curr_time;
  double w1 = prev_time;
  int i1 = average.rowmin();
  int i2 = average.rowmax();
  for (int i = i1; i <= i2; i++)
  {
    int j1 = average(i).indexmin();
    int j2 = average(i).indexmax();
    for (int j = j1; j <= j2; j++)
    {
      average(i,j) = (w1*average(i,j)+current(i,j))/w;
    }
  }
  // prev_time advance is left to the caller.
  //prev_time = curr_time;
}
// Zone-aggregated half-life estimate: sum the density per zone, take the
// first call (sum0 all zero) as the baseline, and thereafter interpolate
// the time at which each zone's relative total first falls to `half`.
void halfcomp(const dmatrix& density, const imatrix map, dvector& sum0,
              dvector& prev_sum, dvector& cur_sum,
              const double cur_time, double& prev_time,
              const double half, dvector& half_life)
{
  // current per-zone totals
  cur_sum.initialize();
  int i1 = density.rowmin();
  int i2 = density.rowmax();
  for (int i = i1; i <= i2; i++)
  {
    int j1 = density(i).indexmin();
    int j2 = density(i).indexmax();
    for (int j = j1; j <= j2; j++)
    {
      int k = map(i,j);
      cur_sum(k) += density(i,j);
    }
  }
  if (sum(sum0) <= 0.0)
  {
    // first call: record the baseline totals
    sum0 = cur_sum;
  }
  else
  {
    int k1 = sum0.indexmin();
    int k2 = sum0.indexmax();
    for (int k = k1; k <= k2; k++)
    {
      // only zones with a positive baseline whose half-life is still unknown
      if ( (half_life(k) < 0.0) &&
           (sum0(k) > 0.0) &&
           (cur_sum(k)/sum0(k) <= half) )
      {
        // linear interpolation of the relative total between the two samples
        double yp = prev_sum(k)/sum0(k);
        double yc = cur_sum(k)/sum0(k);
        double b = (yp-yc)/(prev_time-cur_time);
        half_life(k) = (b*prev_time-yp+half)/b;
        /*
        if (k == 25)
        {
           TTRACE(yp,yc)
           TTRACE(prev_time,cur_time)
           TTRACE(b,half_life(k))
        }
        */
      }
    }
  }
  // prev_time advance is left to the caller.
  //prev_time = cur_time;
  prev_sum = cur_sum;
}
// Vector overload of the running time-weighted average (see matrix version):
// average = (prev_time*average + current) / curr_time for every element.
void update_average(const dvector& current, dvector& average,
                    const double& curr_time, double& prev_time)
{
  double w = curr_time;
  double w1 = prev_time;
  int i1 = average.indexmin();
  int i2 = average.indexmax();
  for (int i = i1; i <= i2; i++)
  {
    //if (current(i) > 0.0)
    average(i) = (w1*average(i)+current(i))/w;
  }
  // prev_time advance is left to the caller.
  //prev_time = curr_time;
}
| [
"thomasp@spc.int"
] | thomasp@spc.int |
cdc55fe3b78c98ec20d72136661b2c13507d970b | a46add2ebf128c4dbe8346a59a874f2ce6c833a6 | /source/Logger/Level.hpp | c207ff52090b2c040eeecf435ea128be02e27454 | [
"MIT"
] | permissive | kurocha/logger | 3a0f4c20d8de0e53f97f9d41b0c53c22184d54cd | e34a8b976fc983b904c091a7d75671192d015179 | refs/heads/master | 2020-12-03T07:52:44.159969 | 2019-09-14T05:18:57 | 2019-09-14T05:18:57 | 95,637,102 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,308 | hpp | //
// Level.hpp
// File file is part of the "Logger" project and released under the MIT License.
//
// Created by Samuel Williams on 28/6/2017.
// Copyright, 2017, by Samuel Williams. All rights reserved.
//
#pragma once
#include <type_traits>
namespace Logger
{
	// Bit-flag severity levels; combine and test them with the operators below.
	enum class Level : unsigned {
		ERROR = 1,
		WARN = 2,
		INFO = 4,
		DEBUG = 8,
		ALL = (1 | 2 | 4 | 8)
	};
	
	// Union of two level masks.
	inline Level operator|(Level a, Level b)
	{
		using U = std::underlying_type<Level>::type;
		return static_cast<Level>(static_cast<U>(a) | static_cast<U>(b));
	}
	
	// In-place union; reuses operator| above.
	inline Level& operator|=(Level & a, Level b)
	{
		a = a | b;
		return a;
	}
	
	// Membership test: true when the two masks share at least one bit.
	inline bool operator&(Level a, Level b)
	{
		using U = std::underlying_type<Level>::type;
		return (static_cast<U>(a) & static_cast<U>(b)) != 0;
	}
	
	// In-place intersection.
	inline Level& operator&=(Level & a, Level b)
	{
		using U = std::underlying_type<Level>::type;
		a = static_cast<Level>(static_cast<U>(a) & static_cast<U>(b));
		return a;
	}
	
	// Bitwise complement of a mask.
	inline Level operator~(Level a)
	{
		using U = std::underlying_type<Level>::type;
		return static_cast<Level>(~static_cast<U>(a));
	}
	
	// Human-readable name for a level (defined out of line).
	const char * level_name(Level level) noexcept;
}
| [
"samuel.williams@oriontransfer.co.nz"
] | samuel.williams@oriontransfer.co.nz |
4c10f87ba91b73d99b5d4b1c23f14de89de77614 | 5a54b68c4936c8e2c4af67bb280517c1932e991e | /uavobjectwidget/uavobjectbrowserfactory.cpp | 572dc3abfa123cc2ee5c094715c48a0265cda6eb | [] | no_license | 519984307/testpilot | 538a0384dca703f7c5906f4525854c31a343eecc | 798d39afd9d39724049980d619aa4505fc67fab6 | refs/heads/master | 2023-03-17T03:29:35.283448 | 2018-02-10T12:30:48 | 2018-02-10T12:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,277 | cpp | /**
******************************************************************************
*
* @file uavobjectbrowserfactory.cpp
* @author The OpenPilot Team, http://www.openpilot.org Copyright (C) 2010.
* @addtogroup GCSPlugins GCS Plugins
* @{
* @addtogroup UAVObjectBrowserPlugin UAVObject Browser Plugin
* @{
* @brief The UAVObject Browser gadget plugin
*****************************************************************************/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "uavobjectbrowserfactory.h"
#include "uavobjectbrowserwidget.h"
#include "uavobjectbrowser.h"
#include "uavobjectbrowserconfiguration.h"
#include "uavobjectbrowseroptionspage.h"
#include <coreplugin/iuavgadget.h>
// Register the factory under the fixed class id "UAVObjectBrowser" with a
// translatable display name.
UAVObjectBrowserFactory::UAVObjectBrowserFactory(QObject *parent) :
    IUAVGadgetFactory(QString("UAVObjectBrowser"), tr("UAVObject Browser"), parent)
{}
// Nothing to release: all child widgets are parent-owned by Qt.
UAVObjectBrowserFactory::~UAVObjectBrowserFactory()
{}
// Create a new browser gadget wrapping a freshly built widget; ownership
// follows Qt's parent/child model via `parent`.
Core::IUAVGadget *UAVObjectBrowserFactory::createGadget(QWidget *parent)
{
    UAVObjectBrowserWidget *gadgetWidget = new UAVObjectBrowserWidget(parent);
    return new UAVObjectBrowser(QString("UAVObjectBrowser"), gadgetWidget, parent);
}
// Build a configuration object, restoring persisted state from qSettings.
IUAVGadgetConfiguration *UAVObjectBrowserFactory::createConfiguration(QSettings *qSettings)
{
    return new UAVObjectBrowserConfiguration(QString("UAVObjectBrowser"), qSettings);
}
// Build the options page for a browser configuration; the qobject_cast
// yields null (and an unusable page) if `config` is of the wrong type.
IOptionsPage *UAVObjectBrowserFactory::createOptionsPage(IUAVGadgetConfiguration *config)
{
    return new UAVObjectBrowserOptionsPage(qobject_cast<UAVObjectBrowserConfiguration *>(config));
}
| [
"teching.ko@gmail.com"
] | teching.ko@gmail.com |
4090a260992308622b1cc2ff12cf77a61c46e34c | c7f14ba53098a55e94780678c0ba815cf7954930 | /Project 3 - boulder blast/BoulderBlast/BoulderBlast/backup during exit i really needa nap/Actor.cpp | 867f24aca5b30f5d316e9fd26c44c90215bd3081 | [] | no_license | TheodoreNguyen/CS32 | 0a07f29bba944a76e3f8b6b1e1d530ccdd900dc0 | 9b95a20f8572e439f8d4d97c1d06acdc1c8ffb63 | refs/heads/master | 2021-01-12T12:35:47.460095 | 2015-11-06T04:55:03 | 2015-11-06T04:55:03 | 31,944,858 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,527 | cpp | #include "Actor.h"
#include "StudentWorld.h"
// Students: Add code to this file (if you wish), Actor.h, StudentWorld.h, and StudentWorld.cpp
// Stub: level-completion hook, currently unimplemented.
void Exit::leveldone()
{
}
// Per-tick exit update: once the exit is revealed and the player stands on
// it, play the level-finished sound and award the completion score plus the
// player's remaining time bonus.
void Exit::doSomething()
{
	if (isVisible())
	{
		// fix: original read "getWorld()->getPlayer->getX()" — the call
		// parentheses on getPlayer were missing, which does not compile.
		if (getWorld()->getPlayer()->getX() == getX() && getWorld()->getPlayer()->getY() == getY())
		{
			getWorld()->GameWorld::playSound(SOUND_FINISHED_LEVEL);
			getWorld()->increaseScore(2000);
			getWorld()->increaseScore(getWorld()->getPlayer()->getBonus());
		}
	}
}
// Health is stored out of a maximum of 20; report it as a 0-100 integer.
int Player::getHpPercentage()
{
	// fix: the original computed m_hp / 20 first, which truncates to 0 for
	// any hp below 20 when m_hp is integral; scale before dividing instead.
	double hp = (m_hp * 100.0) / 20.0;
	return static_cast<int>(hp);
}
// Decrement the level bonus by one, never letting it go negative.
void Player::reduceBonus()
{
	if (m_bonus > 0)
		--m_bonus;
}
// Per-tick player update: die when out of hit points; otherwise consume one
// key press and move if the target cell is inside the view and the world
// allows it. Escape kills the player to abandon the level.
void Player::doSomething()
{
	if (m_hp <= 0)
	{
		setDead();
		return;
	}
	int input = 0;
	if (this->getWorld()->getKey(input))
	{
		switch (input)
		{
			// NOTE(review): in every case the facing direction is updated even
			// when the move is blocked, letting the player turn in place —
			// confirm this is intended.
		case KEY_PRESS_DOWN:
			if (getY() > 0 && getWorld()->canIMoveThere(getX(), getY() - 1))
				moveTo(getX(), getY() - 1);
			setDirection(down);
			break;
		case KEY_PRESS_UP:
			if (getY() < VIEW_HEIGHT - 1 && getWorld()->canIMoveThere(getX(), getY() + 1))
				moveTo(getX(), getY() + 1);
			setDirection(up);
			break;
		case KEY_PRESS_LEFT:
			if (getX() > 0 && getWorld()->canIMoveThere(getX() - 1, getY()))
				moveTo(getX() - 1, getY());
			setDirection(left);
			break;
		case KEY_PRESS_RIGHT:
			if (getX() < VIEW_WIDTH - 1 && getWorld()->canIMoveThere(getX() + 1, getY()))
				moveTo(getX() + 1, getY());
			setDirection(right);
			break;
		case KEY_PRESS_ESCAPE:
			// abandon the level
			setDead();
			return;
		}
	}
}
"theodore.h.nguyen@outlook.com"
] | theodore.h.nguyen@outlook.com |
f7fd718ba06470dca586f0b4f654b2669774b6fd | 277a8953fb34dcade615d30b65d5571800fffbe8 | /src/main.cpp | 28b5915804f40e62f2f3f4107dbbcd66eec63c4b | [] | no_license | julien-besancon/OOP_arcade_2019 | 528ccd7bed416fdd8bfbab0903c3fca4ebe5e72b | ce2677eb68bf6c2199e0cad135f28df05e219f1d | refs/heads/master | 2021-05-26T11:11:26.235031 | 2020-04-08T18:30:11 | 2020-04-08T18:30:11 | 254,108,123 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 818 | cpp | /*
** EPITECH PROJECT, 2020
** OOP_arcade_2019 [WSL: Ubuntu]
** File description:
** main
*/
#include <dlfcn.h>
#include <stdio.h>
#include "Core.hpp"
/*
** Run the current game's loop, restarting it or switching to the previous /
** next game as requested, until the player quits.
**
** fix: the original recursed into itself on every restart or game switch,
** growing the call stack without bound; this iterative form is equivalent.
*/
void launch_game(Core &core)
{
    while (true) {
        input c = core.game->game_loop(core);
        if (c == restart)
            continue;
        if (c == next_game) {
            core.next_game();
            continue;
        }
        if (c == prev_game) {
            core.prev_game();
            continue;
        }
        break;
    }
}
// Entry point. Usage: ./arcade <path to dynamic library>.
// Returns 84 (Epitech error convention) on bad usage.
int main(int ac, char **av)
{
    if (ac != 2) {
        std::cerr << "Incorrect number of arguments !" << std::endl
        << "Usage : ./arcade [Path to Dynamic library]" << std::endl;
        return (84);
    }
    Core core(av[1]);
    launch_game(core);
    // Destroy the loaded game/graphics objects before unloading their .so
    // handles, since their vtables live inside the shared objects.
    delete core.graph;
    delete core.game;
    dlclose(core._game_handle);
    dlclose(core._graph_handle);
}
"yanis.auer@epitech.eu"
] | yanis.auer@epitech.eu |
e6fa14bdac9bcd3c0d39dcdebd772db4032f7619 | ab86dd1ea843aaf24040bee448f28c9c3ecba33b | /src/dao.cpp | 518fc4bd5762cec2da190c437292489b20128929 | [
"Apache-2.0"
] | permissive | subbyte/sdld | 14253ea51f92c60a9047ed345c5a70f586b9ad4e | 39707290fc148ddc935979cbbfdd8639035b9f95 | refs/heads/master | 2021-01-22T09:54:12.053742 | 2013-11-07T17:48:35 | 2013-11-07T17:48:35 | 14,211,007 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,668 | cpp | #include <fstream>
#include <cstdio>
#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>
#include "dao.h"
using namespace std;
// Build a DAO over the directory `dirname`; `shingle_len` is stored and
// later passed to each FPS produced by next(). The directory listing is
// read eagerly.
DAO::DAO(char *dirname, uint32_t shingle_len)
{
    m_shingle_len = shingle_len;
    m_dirname = dirname;
    read_filelist();
}
// Release the heap-copied basenames collected by read_filelist().
DAO::~DAO()
{
    for(auto it = m_filenames.begin(); it != m_filenames.end(); ++it)
    {
        delete[] *it;
    }
    m_filenames.clear();
}
// List every entry of m_dirname whose name does not start with '.', copy
// each basename onto the heap into m_filenames, and reset the iteration
// cursor used by next(). Throws a C-string on an unreadable directory.
void DAO::read_filelist()
{
    char *basename;
    DIR *dir = opendir(m_dirname);
    if (dir == NULL)
    {
        throw "Directory Invalid.";
    }
    struct dirent *direntry;
    while ((direntry = readdir(dir)))
    {
        if (direntry->d_name[0] != '.')
        // avoid directories such as ".", "..", ".svn"
        {
            basename = new char[strlen(direntry->d_name) + 1];
            strcpy(basename, direntry->d_name);
            m_filenames.push_back(basename);
        }
    }
    closedir(dir);
    m_it = m_filenames.begin();
}
// Return a fingerprint set (FPS) for the next file in the directory
// listing, or NULL once all files are consumed. Ownership of `basename`
// and `content` is handed to the FPS object.
FPS * DAO::next()
{
    char *filename;
    char *basename;
    ifstream file;
    uint32_t file_size;
    char *content;
    if (m_it == m_filenames.end())
    {
        return NULL;
    }
    basename = *m_it;
    ++m_it;
    // Build "<dir>/<basename>" and slurp the whole file into memory.
    filename = new char[strlen(m_dirname) + strlen("/") + strlen(basename) + 1];
    strcpy(filename, m_dirname);
    strcat(filename, "/");
    strcat(filename, basename);
    file.open(filename, ios::in|ios::binary);
    file.seekg(0, ios::end);
    file_size = file.tellg();
    file.seekg(0, ios::beg);
    content = new char[file_size];
    file.read(content, file_size);
    file.close();
    delete[] filename; // fix: the path buffer was leaked on every call
    return new FPS(basename, content, file_size, m_shingle_len);
}
| [
"subbyte@gmail.com"
] | subbyte@gmail.com |
c26c0d55e48228f06762b775339116ab9416d3d1 | 44f04b8f2b6c0dba51f1e998985d3a9e8540715f | /UESTC/1058/E.cpp | 7c2f5cc3302aa1bf9f2566cb30256e55bc3ad123 | [] | no_license | GuessEver/ACMICPCSolutions | 2dd318a45939711eff1dd208cffc05a029b38130 | 909927778efd77ca9ec8e18aed3ff22c167d2a33 | refs/heads/master | 2020-05-18T20:52:32.359955 | 2015-11-04T08:31:43 | 2015-11-04T08:31:43 | 29,720,059 | 5 | 3 | null | null | null | null | UTF-8 | C++ | false | false | 2,249 | cpp | #include <cstdio>
#include <cctype>
#include <cstring>
#include <algorithm>
const int N = 100000 + 10;
int W, H, n, m;
// Sweep-line event: the horizontal span [x1, x2) of a rectangle edge at
// height y; sign is +1 where coverage starts and -1 where it ends.
// Events are processed bottom-up, ties broken by x1.
struct Edge{
	int x1, x2, y, sign;
	bool operator < (const Edge &b) const
	{
		if(y == b.y) return x1 < b.x1;
		return y < b.y;
	}
}edge1[N * 2], edge2[N * 2];
int num1, num2;
int val[N * 4], len[N * 4];
// Fast unsigned-integer reader: skips non-digit characters, then parses a
// run of digits from stdin. No sign handling.
int nextInt()
{
	char ch; int res = 0;
	while(!isdigit((ch = getchar()))) ;
	do res = (res << 3) + (res << 1) + ch - '0'; // res = res*10 + digit
	while(isdigit((ch = getchar())));
	return res;
}
// Segment-tree push-up for node p covering [l, r): when the node's own
// cover counter is positive the whole interval is covered; otherwise the
// covered length is the sum of the children (0 at a leaf).
void update(int p, int l, int r)
{
	if(val[p] == 0)
	{
		if(l + 1 == r) len[p] = 0;
		else len[p] = len[p*2] + len[p*2+1];
	}
	else len[p] = r - l;
}
// Add c (+1/-1) to the cover count of interval [a, b) within node p's
// range [l, r); classic union-of-rectangles segment tree without lazy
// propagation (counters are only ever incremented/decremented in pairs).
void insert(int p, int l, int r, int a, int b, int c)
{
	if(a <= l && b >= r)
	{
		val[p] += c;
		update(p, l, r);
		return;
	}
	int mid = (l + r) / 2;
	if(a < mid) insert(p*2, l, mid, a, b, c);
	if(b > mid) insert(p*2+1, mid, r, a, b, c);
	update(p, l, r);
}
// Sweep the sorted edge events bottom-up, accumulating the area covered by
// at least one (expanded) rectangle; len[1] is the covered x-length of the
// current strip. MAX is first shrunk by m-1 to account for window slack.
// Returns the UNcovered area of the (MAX-1) x (MAXY-1) region.
long long solve(Edge *edge, int num, int MAX, int MAXY)
{
	MAX = std::max(1, MAX - (m - 1));
	//val[1] = 0; len[1] = 0;
	memset(val, 0, sizeof(val));
	memset(len, 0, sizeof(len));
	std::sort(edge+1, edge+num+1);
	long long res = 0;
	for(int i = 1; i <= num; i++)
	{
		int leny = edge[i+1].y - edge[i].y; // height of the strip up to the next event
		//printf("[nowx: %d ~ %d] [nowy = %d] [%s] : leny = %d\n", edge[i].x1, edge[i].x2, edge[i].y, edge[i].sign == 1 ? "+" : "-", leny);
		insert(1, 1, MAX, edge[i].x1, std::min(MAX, edge[i].x2), edge[i].sign);
		if(i < num) res += 1ll * leny * len[1];
		//printf("covered = %d, sol-res = %lld\n", len[1], res);
	}
	return 1ll * (MAX - 1) * (MAXY - 1) - res;
}
// Read the grid size, rectangle count n and window size m, expand every
// rectangle by m-1 in the sweep direction (both orientations when m > 1),
// and sum the uncovered areas returned by solve() for the answer.
int main()
{
	//scanf("%d%d%d%d", &W, &H, &n, &m);
	W = nextInt(); H = nextInt();
	n = nextInt(); m = nextInt();
	W++; H++;
	for(int i = 1; i <= n; i++)
	{
		int x1, x2, y1, y2;
		//scanf("%d%d%d%d", &x1, &y1, &x2, &y2);
		x1 = nextInt(); y1 = nextInt();
		x2 = nextInt(); y2 = nextInt();
		x2++; y2++;
		// edge1: sweep over y with x-spans widened left by m-1
		edge1[++num1] = (Edge){std::max(1, x1-m+1), x2, y1, 1};
		edge1[++num1] = (Edge){std::max(1, x1-m+1), x2, y2, -1};
		// edge2: the transposed sweep, used only when m > 1
		edge2[++num2] = (Edge){std::max(1, y1-m+1), y2, x1, 1};
		edge2[++num2] = (Edge){std::max(1, y1-m+1), y2, x2, -1};
	}
	long long res = 0;
	res += solve(edge1, num1, W, H);
	if(m > 1) res += solve(edge2, num2, H, W);
	printf("%lld\n", res);
	return 0;
}
| [
"jiangzh777@163.com"
] | jiangzh777@163.com |
12d4cdef2347b1c4ff86778d9047352e24115f01 | 510da926846b27824b55cb1bcca05907f247eb5f | /ListNodeSerializationCpp/main.cpp | 361d66f11b8920bf8cd65f023b31ffe813897919 | [] | no_license | VasilchukVV/ListNodeCpp | 207c25652c5289cbca0dafa097e485b8b3618a50 | 3e417231fd58e0c98171b6d66dfeee0452d22ca7 | refs/heads/master | 2022-11-18T18:19:51.987109 | 2020-07-20T03:44:12 | 2020-07-20T03:44:12 | 281,005,484 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,540 | cpp | #include "stdafx.h"
#include <sstream>
using namespace list_node_sample;
ostream& serialize(ostream& stream, const node_info& node_info);
istream& deserialize(istream& stream, node_info& node_info);
//-------------------------------------------------------------------------------------------------------------
// Round-trip demo: generate a small list, deep-copy it, serialize the copy
// to a string, deserialize that string back, then print the original and
// the clone. Any failing HRESULT-style result aborts with that code.
int main()
{
	const auto serializer = create_list_node_serializer(serialize, deserialize);
	list_node_ptr source;
	const auto mode = data_generation_mode::all;
	auto result = generate_list_node(3, mode, source);
	if (FAILED(result))
		return result;
	list_node_ptr clone;
	result = serializer->deep_copy(source, clone);
	if (FAILED(result))
		return result;
	ostringstream oss;
	result = serializer->serialize(oss, clone);
	if (FAILED(result))
		return result;
	oss.flush();
	const string data = oss.str();
	cout << data;
	std::istringstream iss(data);
	list_node_ptr copy;
	result = serializer->deserialize(iss, copy);
	if (FAILED(result))
		return result;
	cout << source;
	cout << clone;
	return 0;
}
//-------------------------------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------------------------------
// Write a node's id and data as space-terminated tokens (the format read
// back by deserialize).
ostream& serialize(ostream& stream, const node_info& node_info)
{
    stream << node_info.id << " " << node_info.data << " ";
    return stream;
}
//-------------------------------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------------------------------
// Read a node's id and data back from the whitespace-separated stream
// produced by serialize.
istream& deserialize(istream& stream, node_info& node_info)
{
    stream >> node_info.id >> node_info.data;
    return stream;
}
| [
"VasilchukVV@gmail.com"
] | VasilchukVV@gmail.com |
bb7eb530f62f5575622455f40d9cdb85a8b46607 | b549c903ec613321b16f01818a4a861f22afc41c | /src/QDownloaderPrivate.h | 28e4b168dec61a27d19f2b0b504ed666cfa8d6f1 | [] | no_license | jeandet/QDownloader | 206ba381ee584f6c5910923fe1a0ee71b9da5deb | 3725b25223247a60d61e56e572a2cabe6437f533 | refs/heads/master | 2021-01-23T23:02:37.441686 | 2017-09-09T13:00:58 | 2017-09-09T13:00:58 | 102,950,821 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,613 | h | /*------------------------------------------------------------------------------
-- This file is a part of the QDownloader library
-- Copyright (C) 2017, Plasma Physics Laboratory - CNRS
--
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation; either version 2 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program; if not, write to the Free Software
-- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-------------------------------------------------------------------------------*/
/*-- Author : Alexis Jeandet
-- Mail : alexis.jeandet@member.fsf.org
----------------------------------------------------------------------------*/
#ifndef QDOWNLOADERPRIVATE_H
#define QDOWNLOADERPRIVATE_H
#include <QNetworkAccessManager>
#include <QByteArray>
#include <QUrl>
// Private implementation of QDownloader, owning the Qt network manager.
class QDownloaderPrivate
{
public:
    QDownloaderPrivate(){}
    // Deliberately empty copy constructor: QNetworkAccessManager is not
    // copyable, so a copied instance simply gets its own fresh manager.
    QDownloaderPrivate ( const QDownloaderPrivate & ){}
    // Blocking fetch of `url` — presumably returns the response body;
    // implementation lives in the .cpp.
    QByteArray get(const QUrl& url);
    // Asynchronous fetch of `url`; `callback` receives the downloaded data.
    void get_async(const QUrl& url, std::function<void(QByteArray data)> callback);
private:
    QNetworkAccessManager p_access_mngr;
};
#endif
| [
"alexis.jeandet@member.fsf.org"
] | alexis.jeandet@member.fsf.org |
541abdb22dfaabbe5de0031df96be623728d4750 | a550ff2a385e7a7498c02ac831116e4ff38f2a1c | /c++ sol/USACO/2018-2019/January/Platinum/redistricting.cpp | e021f2f2c7e70bc46358131ab95a41203b47527f | [] | no_license | TausifIqbal/CPcode | 28854eca64813bf01369ec553558a8cf87111537 | 23292111132752f4639037ebada288f338101e32 | refs/heads/master | 2023-04-07T19:38:52.373971 | 2021-04-13T23:11:36 | 2021-04-13T23:11:36 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 902 | cpp | #include <iostream>
#include <cstdio>
#include <algorithm>
#include <set>
using namespace std;
#define endl '\n'
// USACO "Redistricting": split n cows (H/G string) into consecutive
// districts of size <= k, minimizing the number of districts where G's are
// at least as numerous as H's.
// dif[i] = prefix balance of the first i cows (+1 per H, -1 per G);
// dp[i]  = minimum cost over the first i cows. A district (j, i] costs
// +1 unless dif[i] > dif[j]. bestdp holds dp values of the last k split
// points; bestdif[v] holds dif values at split points with dp == v, so the
// cheapest predecessor with the most favorable balance is found in O(log).
int main(){
	freopen("redistricting.in", "r", stdin);
	freopen("redistricting.out", "w", stdout);
	ios::sync_with_stdio(false);
	cin.tie(NULL);
	int n, k;
	cin >> n >> k;
	int dif[n + 1], dp[n + 1];
	dif[0] = 0;
	dp[0] = 0;
	for(int i = 1; i <= n; i++){
		char c;
		cin >> c;
		dif[i] = dif[i - 1] + (c == 'H' ? 1 : -1);
	}
	multiset<int> bestdp;
	multiset<int> bestdif[n + 1];
	bestdp.insert(0);
	bestdif[0].insert(0);
	for(int i = 1; i <= n; i++){
		int minv = *bestdp.begin();
		// pay nothing only if some split point with cost minv has a
		// strictly smaller prefix balance (H-majority district)
		dp[i] = minv + (*bestdif[minv].begin() < dif[i] ? 0 : 1);
		bestdp.insert(dp[i]);
		bestdif[dp[i]].insert(dif[i]);
		if(i >= k){
			// slide the window: position i-k is no longer a valid split point
			bestdp.erase(bestdp.find(dp[i - k]));
			bestdif[dp[i - k]].erase(bestdif[dp[i - k]].find(dif[i - k]));
		}
	}
	cout << dp[n] << endl;
	return 0;
}
"super_j@att.net"
] | super_j@att.net |
aa600d0ac987f72998e58ebf2aec43749946f30f | 5330918e825f8d373d3907962ba28215182389c3 | /CMGTools/H2TauTau/interface/DiObjectUpdateFactory.h | 86fe248404e18b758d7592c671ceaf7c6fca93de | [] | no_license | perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | C++ | UTF-8 | C++ | false | false | 7,190 | h | #ifndef DIOBJECTUPDATEFACTORY_H_
#define DIOBJECTUPDATEFACTORY_H_
#include "CMGTools/H2TauTau/interface/DiTauObjectFactory.h"
#include "TLorentzVector.h"
#include "DataFormats/Math/interface/deltaR.h"
namespace cmg{
  // T is for example a di-object<U, S>
  //
  // Factory that re-reads a di-object collection, applies energy-scale
  // shifts to legs that are cmg::Tau (an overall nSigma*uncertainty shift
  // plus decay-mode- and pt-dependent corrections), and propagates the
  // resulting transverse-momentum change to the MET. The shift/propagation
  // flags are read from the ParameterSet.
  template< typename T>
  class DiObjectUpdateFactory : public cmg::Factory< T > {
  public:
    DiObjectUpdateFactory(const edm::ParameterSet& ps):
      // diObjectFactory_( ps ),
      diObjectLabel_ (ps.getParameter<edm::InputTag>("diObjectCollection")),
      //metLabel_ (ps.getParameter<edm::InputTag>("metCollection")),
      nSigma_ (ps.getParameter<double>("nSigma")),
      uncertainty_ (ps.getParameter<double>("uncertainty")),
      shift1ProngNoPi0_ (ps.getParameter<double>("shift1ProngNoPi0")),
      shift1Prong1Pi0_ (ps.getParameter<double>("shift1Prong1Pi0")),
      ptDependence1Pi0_ (ps.getParameter<double>("ptDependence1Pi0")),
      shift3Prong_ (ps.getParameter<double>("shift3Prong")),
      ptDependence3Prong_(ps.getParameter<double>("ptDependence3Prong")),
      shiftMet_ (ps.getParameter<bool>("shiftMet")),
      shiftTaus_ (ps.getParameter<bool>("shiftTaus"))
      {}

    //need to override from Factory to insert "typename"
    typedef typename cmg::Factory< T >::event_ptr event_ptr;
    virtual event_ptr create(const edm::Event&, const edm::EventSetup&);

  private:
    // const DiObjectFactory< typename T::type1, typename T::type2 > diObjectFactory_;
    const edm::InputTag diObjectLabel_;   // input di-object collection
    // const edm::InputTag metLabel_;
    double nSigma_;               // number of sigmas of the overall tau-ES shift
    double uncertainty_;          // one-sigma tau energy-scale uncertainty
    double shift1ProngNoPi0_;     // extra shift, 1-prong no pi0 decays
    double shift1Prong1Pi0_;      // extra shift, 1-prong + pi0 decays
    double ptDependence1Pi0_;     // pt slope of the 1-prong+pi0 shift
    double shift3Prong_;          // extra shift, 3-prong decays
    double ptDependence3Prong_;   // pt slope of the 3-prong shift
    bool shiftMet_ ;              // propagate the shift to the MET
    bool shiftTaus_ ;             // apply the shift to the tau legs
  };

} // namespace cmg
template< typename T >
typename cmg::DiObjectUpdateFactory<T>::event_ptr cmg::DiObjectUpdateFactory<T>::create(const edm::Event& iEvent, const edm::EventSetup&){
typedef std::vector< T > collection;
edm::Handle<collection> diObjects;
iEvent.getByLabel(diObjectLabel_,diObjects);
edm::Handle< std::vector<reco::GenParticle> > genparticles;
iEvent.getByLabel("genParticlesPruned",genparticles);
typename cmg::DiObjectUpdateFactory<T>::event_ptr result(new collection);
unsigned index = 0;
for(typename collection::const_iterator it = diObjects->begin(); it != diObjects->end(); ++it, ++index ){
const T& diObject = *it;
// assert( index < metCands->size() );
typename T::type1 leg1(diObject.leg1());
typename T::type2 leg2(diObject.leg2());
// reco::LeafCandidate met = reco::LeafCandidate( metCands->at(index) );
reco::LeafCandidate met = diObject.met();
float shift1 = 0.;
float shift2 = 0.;
if(typeid(typename T::type1)==typeid(cmg::Tau))
{
shift1 = (nSigma_ * uncertainty_);
const cmg::Tau& tau1 = dynamic_cast<const cmg::Tau&>(diObject.leg1());
if((tau1.decayMode()==0)&&(shift1ProngNoPi0_!=0))
shift1+=shift1ProngNoPi0_;
//Also allow decay mode 2 according to synchronisation twiki
if((tau1.decayMode()==1 || tau1.decayMode()==2)&&(shift1Prong1Pi0_!=0))
shift1+=shift1Prong1Pi0_+ptDependence1Pi0_*TMath::Min(TMath::Max(diObject.leg1().pt()-45.,0.),10.);
if((tau1.decayMode()==10)&&(shift3Prong_!=0))
shift1+=shift3Prong_+ptDependence3Prong_*TMath::Min(TMath::Max(diObject.leg1().pt()-32.,0.),18.);
}
if(typeid(typename T::type2)==typeid(cmg::Tau))
{
shift2 = (nSigma_ * uncertainty_);
const cmg::Tau& tau2 = dynamic_cast<const cmg::Tau&>(diObject.leg2());
if((tau2.decayMode()==0)&&(shift1ProngNoPi0_!=0))
shift2+=shift1ProngNoPi0_;
//Also allow decay mode 2 according to synchronisation twiki
if((tau2.decayMode()==1 || tau2.decayMode()==2)&&(shift1Prong1Pi0_!=0))
shift2+=shift1Prong1Pi0_+ptDependence1Pi0_*TMath::Min(TMath::Max(diObject.leg2().pt()-45.,0.),10.);
if((tau2.decayMode()==10)&&(shift3Prong_!=0))
shift2+=shift3Prong_+ptDependence3Prong_*TMath::Min(TMath::Max(diObject.leg2().pt()-32.,0.),18.);
}
// the tauES shift must be applied to *real* taus only
bool l1genMatched = false ;
bool l2genMatched = false ;
for ( size_t i=0; i< genparticles->size(); ++i)
{
const reco::GenParticle &p = (*genparticles)[i];
int id = p.pdgId() ;
int status = p.status() ;
int motherId = 0 ;
if ( p.numberOfMothers()>0 ) {
//std::cout << __LINE__ << "]\tnum of mothers " << p.numberOfMothers() << "\tmy mom " << p.mother()->pdgId() << std::endl ;
motherId = p.mother()->pdgId() ;
}
// PDG Id: e 11, mu 13, tau 15, Z 23, h 25, H 35, A 35
if ( status == 3 && abs(id) == 15 && (motherId == 23 || motherId == 25 || motherId == 35 || motherId == 36 )){
// match leg 1
if(typeid(typename T::type1)==typeid(cmg::Tau)){
const cmg::Tau& tau1 = dynamic_cast<const cmg::Tau&>(diObject.leg1());
if (deltaR(tau1.eta(),tau1.phi(),p.eta(),p.phi())<0.3) {
l1genMatched = true ;
//std::cout << __LINE__ << "]\tleg1 matched to a tau" << std::endl ;
}
}
// match leg 2
if(typeid(typename T::type2)==typeid(cmg::Tau)){
const cmg::Tau& tau2 = dynamic_cast<const cmg::Tau&>(diObject.leg2());
if (deltaR(tau2.eta(),tau2.phi(),p.eta(),p.phi())<0.3) {
l2genMatched = true ;
//std::cout << __LINE__ << "]\tleg2 matched to a tau" << std::endl ;
}
}
}
}
reco::Candidate::LorentzVector leg1Vec = diObject.leg1().p4();
reco::Candidate::LorentzVector leg2Vec = diObject.leg2().p4();
reco::Candidate::LorentzVector metVec = met.p4();
float dpx = 0.;
float dpy = 0.;
// if genMatched compute the transverse momentum variation
dpx = l1genMatched * leg1Vec.px() * shift1 + l2genMatched * leg2Vec.px() * shift2;
dpy = l1genMatched * leg1Vec.py() * shift1 + l2genMatched * leg2Vec.py() * shift2;
// if genMatched apply the shift
if (l1genMatched) leg1Vec *= (1. + shift1);
if (l2genMatched) leg2Vec *= (1. + shift2);
// apply the tranverse momentum correction to the MET
math::XYZTLorentzVector deltaTauP4(dpx,dpy,0,0);
math::XYZTLorentzVector scaledmetP4 = metVec - deltaTauP4;
TLorentzVector metVecNew;
metVecNew.SetPtEtaPhiM(scaledmetP4.Pt(),scaledmetP4.Eta(),scaledmetP4.Phi(),0.);
if (shiftTaus_ ){ leg1.setP4(leg1Vec); }
if (shiftTaus_ ){ leg2.setP4(leg2Vec); }
if (shiftMet_ ){ met.setP4(reco::Candidate::LorentzVector(metVecNew.Px(),metVecNew.Py(),metVecNew.Pz(),metVecNew.E())); }
// T diObjectNew = T(leg1,leg2);
result->push_back(T(diObject));
// diObjectFactory_.set( std::make_pair(leg1, leg2), met, & result->back() );
DiTauObjectFactory< typename T::type1, typename T::type2 >::set( std::make_pair(leg1, leg2), met, &result->back() );
}
return result;
}
#endif /*DIOBJECTUPDATEFACTORY_H_*/
| [
"colin.bernet@cern.ch"
] | colin.bernet@cern.ch |
20c06a975fad8ae967c7a546d2cf25409fb0666d | f81664ad23806f837b154cd9c193b4b0a4cbecb9 | /vs2003_cd01/Program Files/Microsoft Visual Studio .NET 2003/Vc7/VCWizards/ClassWiz/MFC/Simple/Templates/1033/oproppg.cpp | 8b77dde9b7884f85167bcf5583be1c89fdbbd182 | [] | no_license | HowlTheHusky/vs2003 | 7b3c5a412e76025f203b7a2bf93daed546834e68 | 2f9e0d77ddb69453626459221128d941c31a2330 | refs/heads/master | 2021-06-28T13:57:57.230418 | 2017-09-18T13:39:52 | 2017-09-18T13:39:52 | 103,944,102 | 0 | 4 | null | null | null | null | UTF-8 | C++ | false | false | 1,545 | cpp | // [!output IMPL_FILE] : implementation file
//
#include "stdafx.h"
#include "[!output PROJECT_NAME].h"
#include "[!output HEADER_FILE]"
[!if !MERGE_FILE]
#ifdef _DEBUG
#define new DEBUG_NEW
#endif
[!endif]
// [!output CLASS_NAME] dialog
IMPLEMENT_DYNCREATE([!output CLASS_NAME], COlePropertyPage)
// Message map
BEGIN_MESSAGE_MAP([!output CLASS_NAME], COlePropertyPage)
END_MESSAGE_MAP()
// Initialize class factory and guid
// {[!output CLSID_REGISTRY_FORMAT]}
IMPLEMENT_OLECREATE_EX([!output CLASS_NAME], "[!output TYPEID]",
[!output CLSID_IMPLEMENT_OLECREATE_FORMAT])
// [!output CLASS_NAME]::[!output CLASS_NAME]Factory::UpdateRegistry -
// Adds or removes system registry entries for [!output CLASS_NAME]
BOOL [!output CLASS_NAME]::[!output CLASS_NAME]Factory::UpdateRegistry(BOOL bRegister)
{
// TODO: Define string resource for page type; replace '0' below with ID.
if (bRegister)
return AfxOleRegisterPropertyPageClass(AfxGetInstanceHandle(),
m_clsid, 0);
else
return AfxOleUnregisterClass(m_clsid, NULL);
}
// [!output CLASS_NAME]::[!output CLASS_NAME] - Constructor
// TODO: Define string resource for page caption; replace '0' below with ID.
[!output CLASS_NAME]::[!output CLASS_NAME]() :
COlePropertyPage(IDD, 0)
{
[!if ACCESSIBILITY]
EnableActiveAccessibility();
[!endif]
}
// [!output CLASS_NAME]::DoDataExchange - Moves data between page and properties
void [!output CLASS_NAME]::DoDataExchange(CDataExchange* pDX)
{
DDP_PostProcessing(pDX);
}
// [!output CLASS_NAME] message handlers
| [
"32062494+HowlTheHusky@users.noreply.github.com"
] | 32062494+HowlTheHusky@users.noreply.github.com |
e2c1aab4b03299431f4a066d5b19bfc8f0f52178 | 78a5c4c4a6881c76816769e3bae4f971c27bed82 | /Room.h | c0fe805b4a81edb6cd32feaf6d607dc3fddab919 | [] | no_license | StiveMan1/PSS_University_Access_System | 56ce0ec6076499aaffce538b034a438c95db19dd | d4dd509e0f198b813b8e55034cc6d26d4dc7b9e4 | refs/heads/main | 2023-03-19T23:18:33.076015 | 2021-03-17T13:04:07 | 2021-03-17T13:04:07 | 348,708,901 | 2 | 0 | null | 2021-03-17T13:02:47 | 2021-03-17T12:53:38 | C++ | UTF-8 | C++ | false | false | 1,981 | h | //
// Created by 04024 on 05.03.2021.
//
#ifndef PSS_AS_ROOM_H
#define PSS_AS_ROOM_H
#include "Users/Person.h"
// A university room with a base access level plus a dynamic list of users
// granted special access.
// NOTE(review): the class still lacks copy control (Rule of Three) for its
// owned array — copying a Room would alias the buffer; confirm rooms are
// never copied, or add copy ctor/assignment.
class Room {
private:
    char* name;                       // room display name (not owned)
    unsigned int standardAccess = 1;  // minimum access level required to enter
    int specialAccessedUsers_len = 0, specialAccessedUsers_count = 1;
    unsigned int* specialAccessedUsers = new unsigned int[1];

    // Double the capacity of the special-access list.
    void resizeAccess(){
        unsigned int* newSpecialAccessedUsers = new unsigned int[specialAccessedUsers_count * 2];
        for(int i = 0; i < specialAccessedUsers_count; i++){
            newSpecialAccessedUsers[i] = specialAccessedUsers[i];
        }
        specialAccessedUsers_count <<= 1;
        delete[] specialAccessedUsers; // fix: old buffer was leaked
        specialAccessedUsers = newSpecialAccessedUsers;
    }
public:
    Room(){}
    Room(char* Name, int Room_id, unsigned int StandardAccess) {
        this->name = Name;
        this->standardAccess = StandardAccess;
        this->room_id = Room_id;
    }
    // Grant user `id` special access (no-op if already present).
    void addInAccessed(unsigned int id){
        for(int i = 0; i < specialAccessedUsers_len; i++){
            if(specialAccessedUsers[i] == id){
                return;
            }
        }
        if(specialAccessedUsers_len + 1 == specialAccessedUsers_count){
            resizeAccess();
        }
        specialAccessedUsers[specialAccessedUsers_len] = id;
        specialAccessedUsers_len++;
    }
    // Revoke user `id`'s special access.
    void removeAccess(unsigned int id){
        unsigned int* newSpecialAccessedUsers = new unsigned int[specialAccessedUsers_count];
        int removed = 0;
        for(int i = 0; i < specialAccessedUsers_len; i++){
            if(specialAccessedUsers[i] != id){
                newSpecialAccessedUsers[i - removed] = specialAccessedUsers[i];
            }else{
                removed++;
            }
        }
        delete[] specialAccessedUsers; // fix: old buffer was leaked
        specialAccessedUsers = newSpecialAccessedUsers;
        // fix: length was decremented unconditionally, silently dropping the
        // last element whenever `id` was not actually in the list
        specialAccessedUsers_len -= removed;
    }
    int room_id;
    bool canEnter(const Person person);
    // Human-readable name of the room's required access level.
    char* getAccessLevel(){
        return Person::AccessLevel(this->standardAccess);
    }
    char* getName(){
        return this->name;
    }
};
#endif //PSS_AS_ROOM_H
| [
"39464416+StiveMan1@users.noreply.github.com"
] | 39464416+StiveMan1@users.noreply.github.com |
c847a4374dec5232ecf6e422f6a3fa374d3fcdcd | 0fceffd86e5eebcca6c9de8c6e15ffa355b34cc2 | /HavokOpenGL/HavokOpenGL/HvkOGLObj.cpp | 2ee09c3c098248f989b645cea90bc7cacbfbbfdb | [] | no_license | lxq2537664558/BallDropper | b40d9a3f731d7b0ddc18ebd6fd016f365072c738 | 424f871540b2282c62b4f00ad2506a8fd40cf9cc | refs/heads/master | 2021-01-22T20:00:20.860109 | 2014-05-02T17:22:04 | 2014-05-02T17:22:04 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,083 | cpp | #include "HvkOGLObj.h"
HvkOGLObj::HvkOGLObj(HavokObj* havokObj){
hObj = havokObj;
angle = 0;
r = g = b = 0.5f;
}
HvkOGLObj::HvkOGLObj(HavokObj* havokObj, char* img){
hObj = havokObj;
angle = 0;
r = g = b = 0.5f;
LoadTextures *lt = new LoadTextures(img); //for texture
texture = lt->getTexture();
delete lt;
}
void HvkOGLObj::setRGB(float r, float g, float b){
this->r = r;
this->g = g;
this->b = b;
}
void HvkOGLObj::update(){
if(hObj->getRigidBody()){
hObj->setPos(Vector(hObj->getRigidBody()->getPosition().getComponent(0), //get physics obj pos
hObj->getRigidBody()->getPosition().getComponent(1),
hObj->getRigidBody()->getPosition().getComponent(2)));
hkQuaternion quaternion = hObj->getRigidBody()->getRotation(); //get orientation as a quaternion
if(quaternion.hasValidAxis()){
angle = quaternion.getAngle() * 180.0f / HK_REAL_PI; //convert to degrees for OpenGL rotation
hkVector4 axis;
quaternion.getAxis(axis);
hObj->setDir(Vector(axis.getSimdAt(0), axis.getSimdAt(1), axis.getSimdAt(2)));
}
}
}
HvkOGLObj::~HvkOGLObj(void){
}
| [
"rseymour8@gmail.com"
] | rseymour8@gmail.com |
af2945c2abba4ecc1c062fffaf2a65e8f64b94d7 | 85b9ce4fb88972d9b86dce594ae4fb3acfcd0a4b | /build/Android/Release/Global Pot/app/src/main/jni/_root.RecipePage.Template3.Template4.cpp | 0060301041397bfad94b84abd4a2d533efce5ba3 | [] | no_license | bgirr/Global-Pot_App | 16431a99e26f1c60dc16223fb388d9fd525cb5fa | c96c5a8fb95acde66fc286bcd9a5cdf160ba8b1b | refs/heads/master | 2021-01-09T06:29:18.255583 | 2017-02-21T23:27:47 | 2017-02-21T23:27:47 | 80,985,681 | 0 | 0 | null | 2017-02-21T23:27:48 | 2017-02-05T10:29:14 | C++ | UTF-8 | C++ | false | false | 10,435 | cpp | // This file was generated based on 'F:\Global Pot_App\.uno\ux11\RecipePage.g.uno'.
// WARNING: Changes might be lost if you edit this file directly.
#include <_root.GlobalPot_FuseControlsImage_Url_Property.h>
#include <_root.GlobalPot_FuseControlsTextControl_Value_Property.h>
#include <_root.GlobalPot_UnoUXStringConcatOperator_Right_Property.h>
#include <_root.MainView.h>
#include <_root.RecipePage.Template3.h>
#include <_root.RecipePage.Template3.Template4.h>
#include <Fuse.Binding.h>
#include <Fuse.Controls.Control.h>
#include <Fuse.Controls.DockPanel.h>
#include <Fuse.Controls.Grid.h>
#include <Fuse.Controls.Image.h>
#include <Fuse.Controls.Panel.h>
#include <Fuse.Controls.Rectangle.h>
#include <Fuse.Controls.Text.h>
#include <Fuse.Controls.TextControl.h>
#include <Fuse.Drawing.Brush.h>
#include <Fuse.Drawing.StaticSolidColor.h>
#include <Fuse.Elements.Element.h>
#include <Fuse.Font.h>
#include <Fuse.Layouts.Dock.h>
#include <Fuse.Node.h>
#include <Fuse.Reactive.DataBinding-1.h>
#include <Fuse.Visual.h>
#include <Uno.Bool.h>
#include <Uno.Collections.ICollection-1.h>
#include <Uno.Collections.IList-1.h>
#include <Uno.Float.h>
#include <Uno.Float4.h>
#include <Uno.Int.h>
#include <Uno.Object.h>
#include <Uno.String.h>
#include <Uno.UX.Property.h>
#include <Uno.UX.Property-1.h>
#include <Uno.UX.Selector.h>
#include <Uno.UX.Size.h>
#include <Uno.UX.StringConcatOperator.h>
#include <Uno.UX.Unit.h>
static uString* STRINGS[7];
static uType* TYPES[3];
namespace g{
// public partial sealed class RecipePage.Template3.Template4 :236
// {
// static Template4() :250
static void RecipePage__Template3__Template4__cctor__fn(uType* __type)
{
RecipePage__Template3__Template4::__selector0_ = ::g::Uno::UX::Selector__op_Implicit(::STRINGS[0/*"Url"*/]);
RecipePage__Template3__Template4::__selector1_ = ::g::Uno::UX::Selector__op_Implicit(::STRINGS[1/*"Right"*/]);
RecipePage__Template3__Template4::__selector2_ = ::g::Uno::UX::Selector__op_Implicit(::STRINGS[2/*"Value"*/]);
RecipePage__Template3__Template4::__selector3_ = ::g::Uno::UX::Selector__op_Implicit(::STRINGS[3/*"__gen9"*/]);
}
static void RecipePage__Template3__Template4_build(uType* type)
{
::STRINGS[0] = uString::Const("Url");
::STRINGS[1] = uString::Const("Right");
::STRINGS[2] = uString::Const("Value");
::STRINGS[3] = uString::Const("__gen9");
::STRINGS[4] = uString::Const("ingredient.iconUrl");
::STRINGS[5] = uString::Const("ingredient.nameDe");
::STRINGS[6] = uString::Const("https://cookingtest-cookingtest.rhcloud.com/static/resource/img/icon/");
::TYPES[0] = ::g::Fuse::Reactive::DataBinding_typeof()->MakeType(::g::Uno::String_typeof(), NULL);
::TYPES[1] = ::g::Uno::Collections::ICollection_typeof()->MakeType(::g::Fuse::Node_typeof(), NULL);
::TYPES[2] = ::g::Uno::Collections::ICollection_typeof()->MakeType(::g::Fuse::Binding_typeof(), NULL);
type->SetFields(2,
::g::Uno::UX::Property1_typeof()->MakeType(::g::Uno::String_typeof(), NULL), offsetof(::g::RecipePage__Template3__Template4, __gen10_Right_inst1), 0,
::g::Uno::UX::Property1_typeof()->MakeType(::g::Uno::String_typeof(), NULL), offsetof(::g::RecipePage__Template3__Template4, __gen9_Url_inst1), 0,
::g::RecipePage__Template3_typeof(), offsetof(::g::RecipePage__Template3__Template4, __parent1), uFieldFlagsWeak,
::g::Fuse::Controls::Rectangle_typeof(), offsetof(::g::RecipePage__Template3__Template4, __parentInstance1), uFieldFlagsWeak,
::g::Uno::UX::Property1_typeof()->MakeType(::g::Uno::String_typeof(), NULL), offsetof(::g::RecipePage__Template3__Template4, temp_Value_inst), 0,
::g::Uno::UX::Selector_typeof(), (uintptr_t)&::g::RecipePage__Template3__Template4::__selector0_, uFieldFlagsStatic,
::g::Uno::UX::Selector_typeof(), (uintptr_t)&::g::RecipePage__Template3__Template4::__selector1_, uFieldFlagsStatic,
::g::Uno::UX::Selector_typeof(), (uintptr_t)&::g::RecipePage__Template3__Template4::__selector2_, uFieldFlagsStatic,
::g::Uno::UX::Selector_typeof(), (uintptr_t)&::g::RecipePage__Template3__Template4::__selector3_, uFieldFlagsStatic);
}
::g::Uno::UX::Template_type* RecipePage__Template3__Template4_typeof()
{
static uSStrong< ::g::Uno::UX::Template_type*> type;
if (type != NULL) return type;
uTypeOptions options;
options.BaseDefinition = ::g::Uno::UX::Template_typeof();
options.FieldCount = 11;
options.ObjectSize = sizeof(RecipePage__Template3__Template4);
options.TypeSize = sizeof(::g::Uno::UX::Template_type);
type = (::g::Uno::UX::Template_type*)uClassType::New("RecipePage.Template3.Template4", options);
type->fp_build_ = RecipePage__Template3__Template4_build;
type->fp_cctor_ = RecipePage__Template3__Template4__cctor__fn;
type->fp_New1 = (void(*)(::g::Uno::UX::Template*, uObject**))RecipePage__Template3__Template4__New1_fn;
return type;
}
// public Template4(RecipePage.Template3 parent, Fuse.Controls.Rectangle parentInstance) :240
void RecipePage__Template3__Template4__ctor_1_fn(RecipePage__Template3__Template4* __this, ::g::RecipePage__Template3* parent, ::g::Fuse::Controls::Rectangle* parentInstance)
{
__this->ctor_1(parent, parentInstance);
}
// public override sealed object New() :253
void RecipePage__Template3__Template4__New1_fn(RecipePage__Template3__Template4* __this, uObject** __retval)
{
::g::Fuse::Controls::DockPanel* self = ::g::Fuse::Controls::DockPanel::New4();
::g::Fuse::Controls::Image* __gen91 = ::g::Fuse::Controls::Image::New3();
__this->__gen9_Url_inst1 = ::g::GlobalPot_FuseControlsImage_Url_Property::New1(__gen91, RecipePage__Template3__Template4::__selector0());
::g::Uno::UX::StringConcatOperator* __gen101 = ::g::Uno::UX::StringConcatOperator::New2();
__this->__gen10_Right_inst1 = ::g::GlobalPot_UnoUXStringConcatOperator_Right_Property::New1(__gen101, RecipePage__Template3__Template4::__selector1());
::g::Fuse::Controls::Text* temp = ::g::Fuse::Controls::Text::New3();
__this->temp_Value_inst = ::g::GlobalPot_FuseControlsTextControl_Value_Property::New1(temp, RecipePage__Template3__Template4::__selector2());
::g::Fuse::Controls::Panel* temp1 = ::g::Fuse::Controls::Panel::New3();
::g::Fuse::Reactive::DataBinding* temp2 = (::g::Fuse::Reactive::DataBinding*)::g::Fuse::Reactive::DataBinding::New1(::TYPES[0/*Fuse.Reactive.DataBinding<string>*/], __this->__gen10_Right_inst1, ::STRINGS[4/*"ingredient....*/]);
::g::Fuse::Controls::Panel* temp3 = ::g::Fuse::Controls::Panel::New3();
::g::Fuse::Reactive::DataBinding* temp4 = (::g::Fuse::Reactive::DataBinding*)::g::Fuse::Reactive::DataBinding::New1(::TYPES[0/*Fuse.Reactive.DataBinding<string>*/], __this->temp_Value_inst, ::STRINGS[5/*"ingredient....*/]);
::g::Fuse::Drawing::StaticSolidColor* temp5 = ::g::Fuse::Drawing::StaticSolidColor::New2(::g::Uno::Float4__New2(0.0f, 0.2313726f, 0.3490196f, 1.0f));
temp1->MinWidth(::g::Uno::UX::Size__New1(40.0f, 1));
temp1->MinHeight(::g::Uno::UX::Size__New1(30.0f, 1));
temp1->Margin(::g::Uno::Float4__New2(10.0f, 0.0f, 0.0f, 0.0f));
::g::Fuse::Controls::DockPanel::SetDock(temp1, 0);
::g::Fuse::Controls::Grid::SetColumn(temp1, 2);
::g::Uno::Collections::ICollection::Add_ex(uInterface(uPtr(temp1->Children()), ::TYPES[1/*Uno.Collections.ICollection<Fuse.Node>*/]), __gen91);
__gen91->MaxWidth(::g::Uno::UX::Size__New1(30.0f, 1));
__gen91->MaxHeight(::g::Uno::UX::Size__New1(30.0f, 1));
__gen91->Margin(::g::Uno::Float4__New2(5.0f, 0.0f, 0.0f, 0.0f));
__gen91->Name(RecipePage__Template3__Template4::__selector3());
::g::Fuse::Controls::DockPanel::SetDock(__gen91, 0);
::g::Uno::Collections::ICollection::Add_ex(uInterface(uPtr(__gen91->Bindings()), ::TYPES[2/*Uno.Collections.ICollection<Fuse.Binding>*/]), temp2);
__gen101->Left(::STRINGS[6/*"https://coo...*/]);
__gen101->Target(__this->__gen9_Url_inst1);
temp3->MinWidth(::g::Uno::UX::Size__New1(40.0f, 1));
temp3->MinHeight(::g::Uno::UX::Size__New1(30.0f, 1));
temp3->Margin(::g::Uno::Float4__New2(40.0f, 15.0f, 0.0f, 0.0f));
::g::Fuse::Controls::Grid::SetColumn(temp3, 1);
::g::Uno::Collections::ICollection::Add_ex(uInterface(uPtr(temp3->Children()), ::TYPES[1/*Uno.Collections.ICollection<Fuse.Node>*/]), temp);
temp->Color(::g::Uno::Float4__New2(1.0f, 1.0f, 1.0f, 1.0f));
temp->Margin(::g::Uno::Float4__New2(0.0f, 0.0f, 0.0f, 0.0f));
temp->Font(::g::MainView::Roboto());
::g::Uno::Collections::ICollection::Add_ex(uInterface(uPtr(temp->Bindings()), ::TYPES[2/*Uno.Collections.ICollection<Fuse.Binding>*/]), temp4);
self->Background(temp5);
::g::Uno::Collections::ICollection::Add_ex(uInterface(uPtr(self->Children()), ::TYPES[1/*Uno.Collections.ICollection<Fuse.Node>*/]), temp1);
::g::Uno::Collections::ICollection::Add_ex(uInterface(uPtr(self->Children()), ::TYPES[1/*Uno.Collections.ICollection<Fuse.Node>*/]), temp3);
return *__retval = self, void();
}
// public Template4 New(RecipePage.Template3 parent, Fuse.Controls.Rectangle parentInstance) :240
void RecipePage__Template3__Template4__New2_fn(::g::RecipePage__Template3* parent, ::g::Fuse::Controls::Rectangle* parentInstance, RecipePage__Template3__Template4** __retval)
{
*__retval = RecipePage__Template3__Template4::New2(parent, parentInstance);
}
::g::Uno::UX::Selector RecipePage__Template3__Template4::__selector0_;
::g::Uno::UX::Selector RecipePage__Template3__Template4::__selector1_;
::g::Uno::UX::Selector RecipePage__Template3__Template4::__selector2_;
::g::Uno::UX::Selector RecipePage__Template3__Template4::__selector3_;
// public Template4(RecipePage.Template3 parent, Fuse.Controls.Rectangle parentInstance) [instance] :240
void RecipePage__Template3__Template4::ctor_1(::g::RecipePage__Template3* parent, ::g::Fuse::Controls::Rectangle* parentInstance)
{
ctor_(NULL, false);
__parent1 = parent;
__parentInstance1 = parentInstance;
}
// public Template4 New(RecipePage.Template3 parent, Fuse.Controls.Rectangle parentInstance) [static] :240
RecipePage__Template3__Template4* RecipePage__Template3__Template4::New2(::g::RecipePage__Template3* parent, ::g::Fuse::Controls::Rectangle* parentInstance)
{
RecipePage__Template3__Template4* obj1 = (RecipePage__Template3__Template4*)uNew(RecipePage__Template3__Template4_typeof());
obj1->ctor_1(parent, parentInstance);
return obj1;
}
// }
} // ::g
| [
"girr.benjamin@gmail.com"
] | girr.benjamin@gmail.com |
d5244461c524a17b9fb42cb7a4295fb6bd4be99b | 63f7f32a914a2096a9a82b50dc29dd21a877b84d | /GeneratedFiles/soapBasicHttpBinding_USCOREIRouteServiceObject.h | 3fe56bf4d96b75b073f046f4eb81ced6193f9263 | [] | no_license | dabiaoluo/VirtualEarthBitmapDownload | 069da5ad2ce3e45dde62364691e4529a8eaf5ad0 | f886f07431ae66d469c888aa1f54ecf2bb950f7b | refs/heads/master | 2021-01-14T12:58:05.021911 | 2014-11-04T03:42:55 | 2014-11-04T03:42:55 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 5,637 | h | /* soapBasicHttpBinding_USCOREIRouteServiceObject.h
Generated by gSOAP 2.7.13 from VirtualEarth.h
Copyright(C) 2000-2009, Robert van Engelen, Genivia Inc. All Rights Reserved.
This part of the software is released under one of the following licenses:
GPL, the gSOAP public license, or Genivia's license for commercial use.
*/
#ifndef soapBasicHttpBinding_USCOREIRouteServiceObject_H
#define soapBasicHttpBinding_USCOREIRouteServiceObject_H
#include "soapH.h"
/******************************************************************************\
* *
* Service Object *
* *
\******************************************************************************/
class BasicHttpBinding_USCOREIRouteServiceService : public soap
{ public:
BasicHttpBinding_USCOREIRouteServiceService()
{ static const struct Namespace namespaces[] =
{
{"SOAP-ENV", "http://schemas.xmlsoap.org/soap/envelope/", "http://www.w3.org/*/soap-envelope", NULL},
{"SOAP-ENC", "http://schemas.xmlsoap.org/soap/encoding/", "http://www.w3.org/*/soap-encoding", NULL},
{"xsi", "http://www.w3.org/2001/XMLSchema-instance", "http://www.w3.org/*/XMLSchema-instance", NULL},
{"xsd", "http://www.w3.org/2001/XMLSchema", "http://www.w3.org/*/XMLSchema", NULL},
{"ns4", "http://dev.virtualearth.net/webservices/v1/common", NULL, NULL},
{"ns5", "http://schemas.microsoft.com/2003/10/Serialization/", NULL, NULL},
{"ns7", "http://schemas.microsoft.com/2003/10/Serialization/Arrays", NULL, NULL},
{"ns1", "http://s.mappoint.net/mappoint-30/", NULL, NULL},
{"ns10", "http://dev.virtualearth.net/webservices/v1/geocode", NULL, NULL},
{"ns9", "http://dev.virtualearth.net/webservices/v1/geocode/contracts", NULL, NULL},
{"ns13", "http://dev.virtualearth.net/webservices/v1/imagery", NULL, NULL},
{"ns12", "http://dev.virtualearth.net/webservices/v1/imagery/contracts", NULL, NULL},
{"ns16", "http://dev.virtualearth.net/webservices/v1/route", NULL, NULL},
{"ns15", "http://dev.virtualearth.net/webservices/v1/route/contracts", NULL, NULL},
{"ns6", "http://dev.virtualearth.net/webservices/v1/search", NULL, NULL},
{"ns3", "http://dev.virtualearth.net/webservices/v1/search/contracts", NULL, NULL},
{NULL, NULL, NULL, NULL}
};
if (!this->namespaces) this->namespaces = namespaces; };
virtual ~BasicHttpBinding_USCOREIRouteServiceService() { };
/// Bind service to port (returns master socket or SOAP_INVALID_SOCKET)
virtual SOAP_SOCKET bind(const char *host, int port, int backlog) { return soap_bind(this, host, port, backlog); };
/// Accept next request (returns socket or SOAP_INVALID_SOCKET)
virtual SOAP_SOCKET accept() { return soap_accept(this); };
/// Serve this request (returns error code or SOAP_OK)
virtual int serve() { return soap_serve(this); };
};
/******************************************************************************\
* *
* Service Operations (you should define these globally) *
* *
\******************************************************************************/
SOAP_FMAC5 int SOAP_FMAC6 __ns1__GetVersionInfo(struct soap*, _ns1__GetVersionInfo *ns1__GetVersionInfo, _ns1__GetVersionInfoResponse *ns1__GetVersionInfoResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns1__GetCountryRegionInfo(struct soap*, _ns1__GetCountryRegionInfo *ns1__GetCountryRegionInfo, _ns1__GetCountryRegionInfoResponse *ns1__GetCountryRegionInfoResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns1__GetEntityTypes(struct soap*, _ns1__GetEntityTypes *ns1__GetEntityTypes, _ns1__GetEntityTypesResponse *ns1__GetEntityTypesResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns1__GetDataSourceInfo(struct soap*, _ns1__GetDataSourceInfo *ns1__GetDataSourceInfo, _ns1__GetDataSourceInfoResponse *ns1__GetDataSourceInfoResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns1__GetGreatCircleDistances(struct soap*, _ns1__GetGreatCircleDistances *ns1__GetGreatCircleDistances, _ns1__GetGreatCircleDistancesResponse *ns1__GetGreatCircleDistancesResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns1__GetClientToken(struct soap*, _ns1__GetClientToken *ns1__GetClientToken, _ns1__GetClientTokenResponse *ns1__GetClientTokenResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns10__Geocode(struct soap*, _ns9__Geocode *ns9__Geocode, _ns9__GeocodeResponse *ns9__GeocodeResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns10__ReverseGeocode(struct soap*, _ns9__ReverseGeocode *ns9__ReverseGeocode, _ns9__ReverseGeocodeResponse *ns9__ReverseGeocodeResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns13__GetImageryMetadata(struct soap*, _ns12__GetImageryMetadata *ns12__GetImageryMetadata, _ns12__GetImageryMetadataResponse *ns12__GetImageryMetadataResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns13__GetMapUri(struct soap*, _ns12__GetMapUri *ns12__GetMapUri, _ns12__GetMapUriResponse *ns12__GetMapUriResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns16__CalculateRoute(struct soap*, _ns15__CalculateRoute *ns15__CalculateRoute, _ns15__CalculateRouteResponse *ns15__CalculateRouteResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns16__CalculateRoutesFromMajorRoads(struct soap*, _ns15__CalculateRoutesFromMajorRoads *ns15__CalculateRoutesFromMajorRoads, _ns15__CalculateRoutesFromMajorRoadsResponse *ns15__CalculateRoutesFromMajorRoadsResponse);
SOAP_FMAC5 int SOAP_FMAC6 __ns6__Search(struct soap*, _ns3__Search *ns3__Search, _ns3__SearchResponse *ns3__SearchResponse);
#endif
| [
"wanglauping@gmail.com"
] | wanglauping@gmail.com |
b2126d4f68cf9679ace3cd57fca87fb0d5f398e3 | 6c71067a78989f1d287368a5537609fd3f4200f3 | /qgis/include/ui/ui_qgsidentifyresultsbase.h | 8bd4dff2a1b0a401e6a7db53b9ce175b654d0372 | [] | no_license | 3DGISKing/MarineAlarmSystem | c12569d4f300dd8cc783fea4b66de69d8ea17139 | 0151edbe0fbb422d95a8675f3bbbfc1d215dfb27 | refs/heads/master | 2021-06-15T20:53:23.547139 | 2017-03-30T20:16:28 | 2017-03-30T20:16:28 | 86,743,988 | 1 | 2 | null | null | null | null | UTF-8 | C++ | false | false | 16,556 | h | /********************************************************************************
** Form generated from reading UI file 'qgsidentifyresultsbase.ui'
**
** Created: Sat Jan 17 20:06:26 2015
** by: Qt User Interface Compiler version 4.8.0
**
** WARNING! All changes made in this file will be lost when recompiling UI file!
********************************************************************************/
#ifndef UI_QGSIDENTIFYRESULTSBASE_H
#define UI_QGSIDENTIFYRESULTSBASE_H
#include <QtCore/QVariant>
#include <QtGui/QAction>
#include <QtGui/QApplication>
#include <QtGui/QButtonGroup>
#include <QtGui/QCheckBox>
#include <QtGui/QComboBox>
#include <QtGui/QDialog>
#include <QtGui/QHBoxLayout>
#include <QtGui/QHeaderView>
#include <QtGui/QLabel>
#include <QtGui/QSpacerItem>
#include <QtGui/QStackedWidget>
#include <QtGui/QTableWidget>
#include <QtGui/QToolButton>
#include <QtGui/QTreeWidget>
#include <QtGui/QVBoxLayout>
#include <QtGui/QWidget>
#include "qwt_plot.h"
QT_BEGIN_NAMESPACE
class Ui_QgsIdentifyResultsBase
{
public:
QVBoxLayout *verticalLayout_4;
QStackedWidget *stackedWidget;
QWidget *stackedWidgetPage1;
QVBoxLayout *verticalLayout;
QHBoxLayout *horizontalLayout;
QToolButton *mExpandToolButton;
QToolButton *mCollapseToolButton;
QToolButton *mExpandNewToolButton;
QToolButton *mOpenFormButton;
QToolButton *mClearToolButton;
QToolButton *mCopyToolButton;
QToolButton *mPrintToolButton;
QSpacerItem *horizontalSpacer;
QTreeWidget *lstResults;
QHBoxLayout *horizontalLayout_2;
QLabel *lblIdentifyMode;
QComboBox *cmbIdentifyMode;
QSpacerItem *horizontalSpacer_43;
QCheckBox *cbxAutoFeatureForm;
QWidget *stackedWidgetPage2;
QVBoxLayout *verticalLayout_2;
QTableWidget *tblResults;
QWidget *stackedWidgetPage3;
QVBoxLayout *verticalLayout_3;
QwtPlot *mPlot;
QHBoxLayout *horizontalLayout_3;
QLabel *lblViewMode;
QComboBox *cmbViewMode;
QSpacerItem *horizontalSpacer_2;
QToolButton *mHelpToolButton;
    // Builds the complete widget hierarchy for the Identify Results dialog.
    // Generated by uic from qgsidentifyresultsbase.ui; do not hand-edit the
    // logic (it is regenerated on every build of the .ui file).
    //
    // Layout overview:
    //   - a QStackedWidget with three alternative result views:
    //       page 1: tree of results + button toolbar + identify-mode row
    //       page 2: flat table (Layer / FID / Attribute / Value)
    //       page 3: QwtPlot graph (used for raster value plots)
    //   - a footer (shared by all pages) with the view-mode combo and Help.
    void setupUi(QDialog *QgsIdentifyResultsBase)
    {
        if (QgsIdentifyResultsBase->objectName().isEmpty())
            QgsIdentifyResultsBase->setObjectName(QString::fromUtf8("QgsIdentifyResultsBase"));
        QgsIdentifyResultsBase->resize(355, 390);
        // Outer layout: zero margins/spacing so the stacked pages fill the dialog.
        verticalLayout_4 = new QVBoxLayout(QgsIdentifyResultsBase);
        verticalLayout_4->setSpacing(0);
        verticalLayout_4->setContentsMargins(0, 0, 0, 0);
        verticalLayout_4->setObjectName(QString::fromUtf8("verticalLayout_4"));
        // Stacked widget holding the three mutually exclusive result views.
        stackedWidget = new QStackedWidget(QgsIdentifyResultsBase);
        stackedWidget->setObjectName(QString::fromUtf8("stackedWidget"));
        // --- Page 1: tree view of identify results ---
        stackedWidgetPage1 = new QWidget();
        stackedWidgetPage1->setObjectName(QString::fromUtf8("stackedWidgetPage1"));
        verticalLayout = new QVBoxLayout(stackedWidgetPage1);
        verticalLayout->setSpacing(0);
        verticalLayout->setContentsMargins(0, 0, 0, 0);
        verticalLayout->setObjectName(QString::fromUtf8("verticalLayout"));
        // Toolbar row: expand / collapse / expand-new / open-form / clear /
        // copy / print buttons, left-aligned via a trailing spacer.
        horizontalLayout = new QHBoxLayout();
        horizontalLayout->setSpacing(6);
        horizontalLayout->setObjectName(QString::fromUtf8("horizontalLayout"));
        horizontalLayout->setContentsMargins(5, -1, 5, 6);
        mExpandToolButton = new QToolButton(stackedWidgetPage1);
        mExpandToolButton->setObjectName(QString::fromUtf8("mExpandToolButton"));
        // NOTE(review): icon paths are inconsistent in this file -- some use
        // Qt resource paths (":/images/...") while others, like this one, use
        // filesystem-relative paths ("../../images/..."), which only resolve
        // if the working directory happens to match. Likely an inconsistency
        // in the source .ui file; confirm against the icons that actually
        // appear at runtime.
        QIcon icon;
        icon.addFile(QString::fromUtf8("../../images/themes/default/mActionExpandTree.png"), QSize(), QIcon::Normal, QIcon::Off);
        mExpandToolButton->setIcon(icon);
        mExpandToolButton->setAutoRaise(true);

        horizontalLayout->addWidget(mExpandToolButton);

        mCollapseToolButton = new QToolButton(stackedWidgetPage1);
        mCollapseToolButton->setObjectName(QString::fromUtf8("mCollapseToolButton"));
        QIcon icon1;
        icon1.addFile(QString::fromUtf8("../../images/themes/default/mActionCollapseTree.png"), QSize(), QIcon::Normal, QIcon::Off);
        mCollapseToolButton->setIcon(icon1);
        mCollapseToolButton->setAutoRaise(true);

        horizontalLayout->addWidget(mCollapseToolButton);

        // Checkable toggle: when on, newly added results arrive expanded.
        mExpandNewToolButton = new QToolButton(stackedWidgetPage1);
        mExpandNewToolButton->setObjectName(QString::fromUtf8("mExpandNewToolButton"));
        QIcon icon2;
        icon2.addFile(QString::fromUtf8("../../images/themes/default/mActionExpandNewTree.png"), QSize(), QIcon::Normal, QIcon::Off);
        mExpandNewToolButton->setIcon(icon2);
        mExpandNewToolButton->setCheckable(true);
        mExpandNewToolButton->setAutoRaise(true);

        horizontalLayout->addWidget(mExpandNewToolButton);

        mOpenFormButton = new QToolButton(stackedWidgetPage1);
        mOpenFormButton->setObjectName(QString::fromUtf8("mOpenFormButton"));
        QIcon icon3;
        icon3.addFile(QString::fromUtf8(":/images/themes/default/mActionPropertyItem.png"), QSize(), QIcon::Normal, QIcon::Off);
        mOpenFormButton->setIcon(icon3);
        mOpenFormButton->setAutoRaise(true);

        horizontalLayout->addWidget(mOpenFormButton);

        mClearToolButton = new QToolButton(stackedWidgetPage1);
        mClearToolButton->setObjectName(QString::fromUtf8("mClearToolButton"));
        QIcon icon4;
        icon4.addFile(QString::fromUtf8(":/images/themes/default/mActionDeselectAll.svg"), QSize(), QIcon::Normal, QIcon::Off);
        mClearToolButton->setIcon(icon4);
        mClearToolButton->setCheckable(false);
        mClearToolButton->setAutoRaise(true);

        horizontalLayout->addWidget(mClearToolButton);

        mCopyToolButton = new QToolButton(stackedWidgetPage1);
        mCopyToolButton->setObjectName(QString::fromUtf8("mCopyToolButton"));
        QIcon icon5;
        icon5.addFile(QString::fromUtf8(":/images/themes/default/mActionEditCopy.png"), QSize(), QIcon::Normal, QIcon::Off);
        mCopyToolButton->setIcon(icon5);
        mCopyToolButton->setCheckable(false);
        mCopyToolButton->setAutoRaise(true);

        horizontalLayout->addWidget(mCopyToolButton);

        mPrintToolButton = new QToolButton(stackedWidgetPage1);
        mPrintToolButton->setObjectName(QString::fromUtf8("mPrintToolButton"));
        mPrintToolButton->setEnabled(true);
        QIcon icon6;
        icon6.addFile(QString::fromUtf8("../../images/themes/default/mActionFilePrint.png"), QSize(), QIcon::Normal, QIcon::Off);
        mPrintToolButton->setIcon(icon6);
        mPrintToolButton->setCheckable(false);
        mPrintToolButton->setAutoRaise(true);

        horizontalLayout->addWidget(mPrintToolButton);

        // Spacer pushes the buttons above to the left edge of the row.
        horizontalSpacer = new QSpacerItem(40, 20, QSizePolicy::Expanding, QSizePolicy::Minimum);

        horizontalLayout->addItem(horizontalSpacer);


        verticalLayout->addLayout(horizontalLayout);

        // Result tree: sortable, alternating row colours; the header item's
        // "1" text is a uic placeholder (real columns are set up at runtime).
        lstResults = new QTreeWidget(stackedWidgetPage1);
        QTreeWidgetItem *__qtreewidgetitem = new QTreeWidgetItem();
        __qtreewidgetitem->setText(0, QString::fromUtf8("1"));
        lstResults->setHeaderItem(__qtreewidgetitem);
        lstResults->setObjectName(QString::fromUtf8("lstResults"));
        lstResults->setLineWidth(2);
        lstResults->setAlternatingRowColors(true);
        lstResults->setSortingEnabled(true);

        verticalLayout->addWidget(lstResults);

        // Bottom row of page 1: identify-mode label + combo, spacer, and the
        // "auto open form" checkbox.
        horizontalLayout_2 = new QHBoxLayout();
        horizontalLayout_2->setSpacing(6);
        horizontalLayout_2->setObjectName(QString::fromUtf8("horizontalLayout_2"));
        horizontalLayout_2->setContentsMargins(5, 5, 5, -1);
        lblIdentifyMode = new QLabel(stackedWidgetPage1);
        lblIdentifyMode->setObjectName(QString::fromUtf8("lblIdentifyMode"));

        horizontalLayout_2->addWidget(lblIdentifyMode);

        cmbIdentifyMode = new QComboBox(stackedWidgetPage1);
        cmbIdentifyMode->setObjectName(QString::fromUtf8("cmbIdentifyMode"));
        // Keep the combo at its preferred width but a fixed height.
        QSizePolicy sizePolicy(QSizePolicy::Preferred, QSizePolicy::Fixed);
        sizePolicy.setHorizontalStretch(0);
        sizePolicy.setVerticalStretch(0);
        sizePolicy.setHeightForWidth(cmbIdentifyMode->sizePolicy().hasHeightForWidth());
        cmbIdentifyMode->setSizePolicy(sizePolicy);

        horizontalLayout_2->addWidget(cmbIdentifyMode);

        horizontalSpacer_43 = new QSpacerItem(40, 20, QSizePolicy::Expanding, QSizePolicy::Minimum);

        horizontalLayout_2->addItem(horizontalSpacer_43);

        cbxAutoFeatureForm = new QCheckBox(stackedWidgetPage1);
        cbxAutoFeatureForm->setObjectName(QString::fromUtf8("cbxAutoFeatureForm"));

        horizontalLayout_2->addWidget(cbxAutoFeatureForm);


        verticalLayout->addLayout(horizontalLayout_2);

        stackedWidget->addWidget(stackedWidgetPage1);
        // --- Page 2: flat, read-only results table with four fixed columns
        // (Layer / FID / Attribute / Value; captions set in retranslateUi) ---
        stackedWidgetPage2 = new QWidget();
        stackedWidgetPage2->setObjectName(QString::fromUtf8("stackedWidgetPage2"));
        verticalLayout_2 = new QVBoxLayout(stackedWidgetPage2);
        verticalLayout_2->setSpacing(0);
        verticalLayout_2->setContentsMargins(0, 0, 0, 0);
        verticalLayout_2->setObjectName(QString::fromUtf8("verticalLayout_2"));
        tblResults = new QTableWidget(stackedWidgetPage2);
        if (tblResults->columnCount() < 4)
            tblResults->setColumnCount(4);
        QTableWidgetItem *__qtablewidgetitem = new QTableWidgetItem();
        tblResults->setHorizontalHeaderItem(0, __qtablewidgetitem);
        QTableWidgetItem *__qtablewidgetitem1 = new QTableWidgetItem();
        tblResults->setHorizontalHeaderItem(1, __qtablewidgetitem1);
        QTableWidgetItem *__qtablewidgetitem2 = new QTableWidgetItem();
        tblResults->setHorizontalHeaderItem(2, __qtablewidgetitem2);
        QTableWidgetItem *__qtablewidgetitem3 = new QTableWidgetItem();
        tblResults->setHorizontalHeaderItem(3, __qtablewidgetitem3);
        tblResults->setObjectName(QString::fromUtf8("tblResults"));
        // Table is display-only: no in-place editing, no sorting.
        tblResults->setEditTriggers(QAbstractItemView::NoEditTriggers);
        tblResults->setSortingEnabled(false);

        verticalLayout_2->addWidget(tblResults);

        stackedWidget->addWidget(stackedWidgetPage2);
        // --- Page 3: graph view (QwtPlot), filled at runtime ---
        stackedWidgetPage3 = new QWidget();
        stackedWidgetPage3->setObjectName(QString::fromUtf8("stackedWidgetPage3"));
        verticalLayout_3 = new QVBoxLayout(stackedWidgetPage3);
        verticalLayout_3->setSpacing(6);
        verticalLayout_3->setContentsMargins(11, 11, 11, 11);
        verticalLayout_3->setObjectName(QString::fromUtf8("verticalLayout_3"));
        mPlot = new QwtPlot(stackedWidgetPage3);
        mPlot->setObjectName(QString::fromUtf8("mPlot"));

        verticalLayout_3->addWidget(mPlot);

        stackedWidget->addWidget(stackedWidgetPage3);

        verticalLayout_4->addWidget(stackedWidget);

        // Footer below the stack (visible on every page): view-mode label +
        // combo on the left, Help button on the right.
        horizontalLayout_3 = new QHBoxLayout();
        horizontalLayout_3->setSpacing(6);
        horizontalLayout_3->setObjectName(QString::fromUtf8("horizontalLayout_3"));
        horizontalLayout_3->setContentsMargins(5, -1, 5, -1);
        lblViewMode = new QLabel(QgsIdentifyResultsBase);
        lblViewMode->setObjectName(QString::fromUtf8("lblViewMode"));

        horizontalLayout_3->addWidget(lblViewMode);

        cmbViewMode = new QComboBox(QgsIdentifyResultsBase);
        cmbViewMode->setObjectName(QString::fromUtf8("cmbViewMode"));

        horizontalLayout_3->addWidget(cmbViewMode);

        horizontalSpacer_2 = new QSpacerItem(58, 38, QSizePolicy::Expanding, QSizePolicy::Minimum);

        horizontalLayout_3->addItem(horizontalSpacer_2);

        mHelpToolButton = new QToolButton(QgsIdentifyResultsBase);
        mHelpToolButton->setObjectName(QString::fromUtf8("mHelpToolButton"));
        mHelpToolButton->setEnabled(true);
        mHelpToolButton->setCheckable(false);

        horizontalLayout_3->addWidget(mHelpToolButton);


        verticalLayout_4->addLayout(horizontalLayout_3);


        // Apply all user-visible (translatable) strings, then start on the
        // tree-view page and wire up on_<object>_<signal>() auto-connections.
        retranslateUi(QgsIdentifyResultsBase);

        stackedWidget->setCurrentIndex(0);


        QMetaObject::connectSlotsByName(QgsIdentifyResultsBase);
    } // setupUi
void retranslateUi(QDialog *QgsIdentifyResultsBase)
{
QgsIdentifyResultsBase->setWindowTitle(QApplication::translate("QgsIdentifyResultsBase", "Identify Results", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
mExpandToolButton->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Expand tree", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
mExpandToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
mCollapseToolButton->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Collapse tree", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
mCollapseToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
mExpandNewToolButton->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "New results will be expanded by default.", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
mExpandNewToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
mOpenFormButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
mClearToolButton->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Clear Results", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
mClearToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
mCopyToolButton->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Copy selected feature to clipboard.", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
mCopyToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
mPrintToolButton->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Print selected HTML response.", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
mPrintToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "...", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
lblIdentifyMode->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Select identify mode", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
lblIdentifyMode->setText(QApplication::translate("QgsIdentifyResultsBase", "Mode", 0, QApplication::UnicodeUTF8));
cbxAutoFeatureForm->setText(QApplication::translate("QgsIdentifyResultsBase", "Auto open form", 0, QApplication::UnicodeUTF8));
QTableWidgetItem *___qtablewidgetitem = tblResults->horizontalHeaderItem(0);
___qtablewidgetitem->setText(QApplication::translate("QgsIdentifyResultsBase", "Layer", 0, QApplication::UnicodeUTF8));
QTableWidgetItem *___qtablewidgetitem1 = tblResults->horizontalHeaderItem(1);
___qtablewidgetitem1->setText(QApplication::translate("QgsIdentifyResultsBase", "FID", 0, QApplication::UnicodeUTF8));
QTableWidgetItem *___qtablewidgetitem2 = tblResults->horizontalHeaderItem(2);
___qtablewidgetitem2->setText(QApplication::translate("QgsIdentifyResultsBase", "Attribute", 0, QApplication::UnicodeUTF8));
QTableWidgetItem *___qtablewidgetitem3 = tblResults->horizontalHeaderItem(3);
___qtablewidgetitem3->setText(QApplication::translate("QgsIdentifyResultsBase", "Value", 0, QApplication::UnicodeUTF8));
#ifndef QT_NO_TOOLTIP
lblViewMode->setToolTip(QApplication::translate("QgsIdentifyResultsBase", "Select view mode for raster layers", 0, QApplication::UnicodeUTF8));
#endif // QT_NO_TOOLTIP
lblViewMode->setText(QApplication::translate("QgsIdentifyResultsBase", "View", 0, QApplication::UnicodeUTF8));
mHelpToolButton->setText(QApplication::translate("QgsIdentifyResultsBase", "Help", 0, QApplication::UnicodeUTF8));
} // retranslateUi
};
namespace Ui {
class QgsIdentifyResultsBase: public Ui_QgsIdentifyResultsBase {};
} // namespace Ui
QT_END_NAMESPACE
#endif // UI_QGSIDENTIFYRESULTSBASE_H
| [
"wugis3@yahoo.com"
] | wugis3@yahoo.com |
aae72d3002da609f42c2d9e6c64dc6b5d837b43f | 0f753714aab2b0e90469f17af834e94e2faa9c0f | /src/progs/recon_PBC_real.cpp | 36475dcbac8e5a8731c17cb3da6d90fd877d9111 | [] | no_license | npadmana/baorecon | b90ee4f2fb3738beb9e8ea1d5e654f6f4a0dc8eb | 1147c4b9f3a46c3b84a6c2f25960d70402e57390 | refs/heads/master | 2016-09-08T01:22:03.082693 | 2014-11-28T02:40:44 | 2014-11-28T02:40:44 | 2,431,362 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 7,385 | cpp | #include <iostream>
#include <cmath>
#include <sstream>
#include <iomanip>
#include "Recon.h"
static char help[] = "recon_PBC_real -configfn <configuration file>\n";
using namespace std;
int main(int argc, char *args[]) {
PetscErrorCode ierr;
ierr=PetscInitialize(&argc,&args,(char *) 0, help); CHKERRQ(ierr);
// Get MPI rank
int rank; MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
// Read in configuration file
char configfn[200];
PetscBool flg;
// See if we need help
PetscOptionsHasName(NULL, "-help", &flg);
if (flg) exit(0);
PetscOptionsGetString(NULL, "-configfn", configfn, 200, &flg);
if (!flg) RAISE_ERR(99,"Specify configuration file");
// Read in parameters
PBCParams pars(string(configfn), "PBCreal");
// Define the potential solver
PotentialSolve psolve(pars.Ngrid, pars.Lbox, pars.recon.maxit);
psolve.SetupOperator(REALPBC);
// Loop over files
list<PBCParams::fn>::iterator files;
for (files = pars.fnlist.begin(); files !=pars.fnlist.end(); ++files)
{
/* ******************************
* First we get the various options and print out useful information
* ********************************/
ostringstream hdr;
hdr << "# Input file is " << files->in << endl;
hdr << "# Output file is " << files->out << endl;
hdr << "# Ngrid=" << setw(5) << pars.Ngrid << endl;
hdr << "# boxsize=" << setw(8) << fixed << setprecision(2) << pars.Lbox << endl;
hdr << "# bias=" << setw(8) << setprecision(2) << pars.recon.bias << endl;
hdr << "# smooth=" << setw(8) << setprecision(2) << pars.recon.smooth << endl;
hdr << "# " << setw(4) << pars.xi.Nbins << " Xi bins from " << setw(8) << setprecision(2) << pars.xi.rmin
<< " with spacing of " << pars.xi.dr << endl;
hdr << "# " << "Correlation function smoothed with a smoothing scale of" << setw(8) << setprecision(2)
<< pars.xi.smooth << endl;
PetscPrintf(PETSC_COMM_WORLD, (hdr.str()).c_str());
/****************************************
* Read in the particle data here and slab decompose
****************************************/
Particle pp;
DensityGrid dg(pars.Ngrid, pars.Lbox);
pp.TPMReadSerial(files->in.c_str(), pars.Lbox);
PetscPrintf(PETSC_COMM_WORLD,"Read in %i particles.....\n",pp.npart);
pp.SlabDecompose(dg);
// Test slab decomp
bool good = dg.TestSlabDecompose(pp);
if (good)
{PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Slab decomposition succeeded on process %i\n",rank);}
else
{PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Slab decomposition FAILED on process %i\n",rank);}
PetscSynchronizedFlush(PETSC_COMM_WORLD);
if (!good) RAISE_ERR(99, "Slab decomposition failed");
/*************************************************
* Now we start working on the grid
* ***********************************************/
Vec grid, gridr;
PkStruct xi1(pars.xi.rmin, pars.xi.dr, pars.xi.Nbins);
grid=dg.Allocate();
//CIC
dg.CIC(grid, pp);
VecDuplicate(grid, &gridr); VecCopy(grid, gridr); //FFTs are destructive!
dg.XiFFT(gridr, pars.xi.smooth, xi1);
// Smooth
dg.GaussSmooth(grid, pars.recon.smooth);
PetscPrintf(PETSC_COMM_WORLD,"Initial correlation function computed....\n");
/************************************************
* Now we solve for the potential
************************************************/
// Allocate potential solver
Vec pot;
pot = dg.Allocate();
if (psolve.Solve(grid, pot, pars.recon.bias)) {
// If the potential calculation converged
PetscPrintf(PETSC_COMM_WORLD,"Potential calculated....\n");
/************************************************
* Now we shift data and randoms
************************************************/
// Generate random particles
Vec dp, qx, qy, qz;
Particle pr;
pr.RandomInit(pp.npart*pars.recon.nrandomfac, pars.Lbox, 1931);
pr.SlabDecompose(dg);
// Compute derivatives at data positions and shift
dp = dg.Deriv(pot, 0); qx = dg.Interp3d(dp, pp); _mydestroy(dp);
dp = dg.Deriv(pot, 1); qy = dg.Interp3d(dp, pp); _mydestroy(dp);
dp = dg.Deriv(pot, 2); qz = dg.Interp3d(dp, pp); _mydestroy(dp);
// Print some statistics
double sum[3];
VecSum(qx,&sum[0]); VecSum(qy, &sum[1]); VecSum(qz, &sum[2]);
for (int ii=0; ii < 3; ++ii) sum[ii] /= pp.npart;
PetscPrintf(PETSC_COMM_WORLD, "Mean x,y,z displacements on particles is : %10.4f,%10.4f,%10.4f\n",sum[0],sum[1],sum[2]);
VecNorm(qx,NORM_2,&sum[0]); VecNorm(qy, NORM_2,&sum[1]); VecNorm(qz, NORM_2,&sum[2]);
for (int ii=0; ii < 3; ++ii) sum[ii] /= sqrt(pp.npart);
PetscPrintf(PETSC_COMM_WORLD, "RMS x,y,z displacements on particles is : %10.4f,%10.4f,%10.4f\n",sum[0],sum[1],sum[2]);
VecAXPY(pp.px, -1.0, qx);
VecAXPY(pp.py, -1.0, qy);
VecAXPY(pp.pz, -1.0, qz);
// Cleanup
_mydestroy(qx); _mydestroy(qy); _mydestroy(qz);
// Do the same for the randoms
dp = dg.Deriv(pot, 0); qx = dg.Interp3d(dp, pr); _mydestroy(dp);
dp = dg.Deriv(pot, 1); qy = dg.Interp3d(dp, pr); _mydestroy(dp);
dp = dg.Deriv(pot, 2); qz = dg.Interp3d(dp, pr); _mydestroy(dp);
VecSum(qx,&sum[0]); VecSum(qy, &sum[1]); VecSum(qz, &sum[2]);
for (int ii=0; ii < 3; ++ii) sum[ii] /= pr.npart;
PetscPrintf(PETSC_COMM_WORLD, "Mean x,y,z displacements on randoms is : %10.4f,%10.4f,%10.4f\n",sum[0],sum[1],sum[2]);
VecNorm(qx,NORM_2,&sum[0]); VecNorm(qy, NORM_2,&sum[1]); VecNorm(qz, NORM_2,&sum[2]);
for (int ii=0; ii < 3; ++ii) sum[ii] /= sqrt(pr.npart);
PetscPrintf(PETSC_COMM_WORLD, "RMS x,y,z displacements on randoms is : %10.4f,%10.4f,%10.4f\n",sum[0],sum[1],sum[2]);
VecAXPY(pr.px, -1.0, qx);
VecAXPY(pr.py, -1.0, qy);
VecAXPY(pr.pz, -1.0, qz);
PetscPrintf(PETSC_COMM_WORLD,"Displacements calculated....\n");
// Clean up
_mydestroy(qx); _mydestroy(qy); _mydestroy(qz);
// Shifted data and random grid
pp.SlabDecompose(dg); pr.SlabDecompose(dg);
dg.CIC(grid, pp); dg.CIC(gridr, pr);
VecAXPY(grid, -1.0, gridr);
// Correlation fn
PkStruct xi2(pars.xi.rmin, pars.xi.dr, pars.xi.Nbins);
dg.XiFFT(grid, pars.xi.smooth, xi2);
// Outputs
FILE *fp;
double _rvec, _xi1, _xi2, _n1;
PetscFOpen(PETSC_COMM_WORLD,files->out.c_str(),"w", &fp);
PetscFPrintf(PETSC_COMM_WORLD, fp, (hdr.str()).c_str());
for (int ii = xi1.lo; ii < xi1.hi; ++ii) {
_xi1 = xi1(ii, _rvec, _n1);
_xi2 = xi2(ii);
if (_n1>0) PetscSynchronizedFPrintf(PETSC_COMM_WORLD, fp, "%6i %9.3f %15.8e %15.8e\n",ii,_rvec,_xi1,_xi2);
}
PetscSynchronizedFlush(PETSC_COMM_WORLD);
PetscFClose(PETSC_COMM_WORLD,fp);
}
// Cleanup
_mydestroy(grid);_mydestroy(gridr); _mydestroy(pot);
}
// Only call this when everything is out of scope
ierr=PetscFinalize(); CHKERRQ(ierr);
}
| [
"nikhil.padmanabhan@yale.edu"
] | nikhil.padmanabhan@yale.edu |
d1d08d73b8efe9e21c5a868f8d28bcc8798472ca | da99e80d9c88b7783e2a5154ef6b9217fbf611e9 | /MyAstroids/MoveComponent.cpp | 9990e0a72b2f5059bbfcf6238afac9d69f7abc47 | [] | no_license | markvanbuyten/MyAstroids | 983d14fc81818b40caf401a10eff506911467687 | 4d98cb2b9e051c8e2d2f1403057e7152c646468f | refs/heads/master | 2020-04-23T17:19:24.295896 | 2019-02-21T20:04:34 | 2019-02-21T20:04:34 | 171,327,780 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 797 | cpp | #include "MoveComponent.h"
#include "Actor.h"
MoveComponent::MoveComponent(Actor * owner, int updateOrder)
:Component(owner, updateOrder)
,mAngularSpeed(0.0f)
,mForwardSpeed(0.0f)
{
}
void MoveComponent::Update(float deltaTime)
{
if (!Math::NearZero(mAngularSpeed))
{
float rotation = mOwner->GetRotation();
rotation += mAngularSpeed * deltaTime;
mOwner->SetRotation(rotation);
}
if (!Math::NearZero(mForwardSpeed))
{
Vector2 position = mOwner->GetPosition();
position += mOwner->GetForward() * mForwardSpeed * deltaTime;
if (position.x < 0.0f) { position.x = 1022.0f; }
else if (position.x > 1024.0f) { position.x = 2.0f; }
if (position.y < 0.0f) { position.y = 766.0f; }
else if (position.y > 768.0f) { position.y = 2.0f; }
mOwner->SetPosition(position);
}
}
| [
"markvanbuyten@gmail.com"
] | markvanbuyten@gmail.com |
f6fdd6f2df29a676f54867054083733d44e3a38c | fda4e3f7ecbb8f7b2e3c92682776c20b013f4e97 | /libssoa/src/registry/registryservicerequest.cpp | 4c1fbdb5409e5151c98d006afd1cdac9d7d4f046 | [] | no_license | antoniomacri/sisop-soa | 7f1ae01c6a56013fb38d76fc641ee2c9465e0995 | bbe7932ce838ce8cab2bee7c3fc8df4d67b75aac | refs/heads/master | 2021-01-11T01:23:02.021624 | 2016-10-12T21:02:41 | 2016-10-12T21:02:41 | 70,739,532 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 791 | cpp | /*
* registryservicerequest.cpp
*/
#include <ssoa/registry/registryservicerequest.h>
#include <sstream>
#include <yaml-cpp/yaml.h>
using std::string;
namespace ssoa
{
RegistryMessage * RegistryServiceRequest::fromYaml(const YAML::Node& node)
{
if (node["type"].to<string>() != messageType())
throw std::logic_error("Message type mismatch");
string service = node["service"].to<string>();
return new RegistryServiceRequest(service);
}
string RegistryServiceRequest::toYaml() const
{
YAML::Emitter e;
e << YAML::BeginMap;
e << YAML::Key << "type" << YAML::Value << messageType();
e << YAML::Key << "service" << YAML::Value << service;
e << YAML::EndMap;
return e.c_str();
}
}
| [
"ing.antonio.macri@gmail.com"
] | ing.antonio.macri@gmail.com |
7305c4bfd048c32cc8babae08b531c81df6dcb8d | d61b37777151cae274b071f6af1a89356273cb61 | /CDAC/DS/DoubleEndedQueue.cpp | db9362f14b5ebd191f0a6db0682958fe00ddc591 | [] | no_license | Arun-A-Patil/CDAC | bb63cb308d5ba02011a9aa13ca962bbba8ee2e8a | fd1775ba4dc22931a3ca6fe9243182fb47fdf794 | refs/heads/master | 2023-02-25T09:17:29.000689 | 2021-02-02T17:42:59 | 2021-02-02T17:42:59 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,885 | cpp | #include<iostream>
#include<stdlib.h>
using namespace std;
class Queue{
private:
int size;
int *arr;
int front;
int rear;
public:
Queue(){
size=0;
front=-1;
rear=-1;
}
Queue(int size){
this->size=size;
arr = new int[size];
front=-1;
rear=-1;
}
bool isEmpty(){
if((front == -1 && rear == -1) || front>rear){
return true;
}else
{
return false;
}
}
bool isFull(){
if(front == 0 && rear >= size-1)
return true;
else
{
return false;
}
}
void enqueueAtEnd(int data){
if(!isFull()){
if(rear >= size-1){
cout<<"Insertion is not possible at end"<<endl;
}else{
if(front == -1){
front = 0;
}
rear++;
arr[rear]=data;
}
}else
{
cout<<"Queue is full"<<endl;
}
}
void enqueueAtStart(int data){
if(!isFull()){
if(front == -1)
{
front = 0;
rear++;
arr[rear]=data;
}else if(front != 0){
front--;
arr[front]=data;
}else{
cout<<"Insertion is not possible at start"<<endl;
}
}else{
cout<<"Queue is full"<<endl;
}
}
void dequeueAtStart(){
if(!isEmpty()){
if(front <= rear){
cout<<"Dequeued element: "<<arr[front]<<endl;
front++;
}else{
cout<<"Deletion is not possible from start"<<endl;
}
}else{
cout<<"Queue is empty"<<endl;
}
}
void dequeueAtEnd(){
if(!isEmpty()){
if(front <= rear){
cout<<"Dequeued element: "<<arr[rear]<<endl;
rear--;
}else{
cout<<"Deletion is not possible from end"<<endl;
}
}else{
cout<<"Queue is empty"<<endl;
}
}
void display(){
if(!isEmpty()){
for(int i=front;i<=rear;i++)
cout<<arr[i]<<" ";
cout<<endl;
}else{
cout<<"Queue is empty"<<endl;
}
}
};
int main(){
int size,data,choice;
cout<<"Enter the size of queue: "<<endl;
cin>>size;
Queue q(size);
while(1){
cout<<"1. Enqueue at start\n2. Dequeue from start\n3. Enqueue at end\n4. Dequeue at end\n5. Display\n6. Exit"<<endl;
cout<<"Enter the choice: ";
cin>>choice;
switch(choice){
case 1:
cout<<"Enter the data: ";
cin>>data;
q.enqueueAtStart(data);
break;
case 2:
q.dequeueAtStart();
break;
case 3:
cout<<"Enter the data: ";
cin>>data;
q.enqueueAtEnd(data);
break;
case 4:
q.dequeueAtEnd();
break;
case 5:
q.display();
break;
case 6:
exit(0);
default:
cout<<"Please enter correct choice"<<endl;
break;
}
}
} | [
"nipun_madaan@yahoo.com"
] | nipun_madaan@yahoo.com |
3a149f7f2613ce0141859772dfff7da06f089291 | b8eb1b1e8db64dd69bab51a038131c6ef2cb7b1e | /src/Pawn.h | bf92ed9d4c08e59c5e44c6a8ad9a696f15465627 | [] | no_license | Gasia44/Advanced-Object-Oriented-Programming | f12252b1e5ab7f1b6b693c7cc6e090ee5e8e5f00 | 2fbd1464412ac3f94b427a9b54e56692dcbb14d0 | refs/heads/master | 2021-09-08T13:15:49.928299 | 2021-09-02T06:21:23 | 2021-09-02T06:21:23 | 225,483,804 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 270 | h | //
// Created by gasia on 12/13/19.
//
#ifndef CHESS_PROJECT_PAWN_H
#define CHESS_PROJECT_PAWN_H
#pragma once
#include "Figure.h"
class Pawn: public Figure {
public:
Pawn(bool);
~Pawn();
bool canMove(Square*, Square*);
};
#endif //CHESS_PROJECT_PAWN_H
| [
"gasiaatashian@gmail.com"
] | gasiaatashian@gmail.com |
80ec58caf299bd175668323a90f64d35c139bb96 | 6680910326e975c20fbe36e1aa31d35539d97c75 | /progbase2/build-SR_2_Lukianets_Mykhailo-Desktop_Qt_5_8_0_GCC_64bit-Debug/moc_createdata.cpp | 190814ade599af583bcf072ddfb6823bd4b53f28 | [] | no_license | TGIfr/courses | 9c927ac23921ec9a6f6503e0e7831f47c28060e3 | 99c4bc9b77a41f731d61bf7d5fa2616c610e737d | refs/heads/master | 2020-03-14T08:45:50.322966 | 2018-05-01T11:35:12 | 2018-05-01T11:35:12 | 131,532,044 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,699 | cpp | /****************************************************************************
** Meta object code from reading C++ file 'createdata.h'
**
** Created by: The Qt Meta Object Compiler version 67 (Qt 5.8.0)
**
** WARNING! All changes made in this file will be lost!
*****************************************************************************/
#include "../SR_2_Lukianets_Mykhailo/createdata.h"
#include <QtCore/qbytearray.h>
#include <QtCore/qmetatype.h>
#if !defined(Q_MOC_OUTPUT_REVISION)
#error "The header file 'createdata.h' doesn't include <QObject>."
#elif Q_MOC_OUTPUT_REVISION != 67
#error "This file was generated using the moc from 5.8.0. It"
#error "cannot be used with the include files from this version of Qt."
#error "(The moc has changed too much.)"
#endif
QT_BEGIN_MOC_NAMESPACE
QT_WARNING_PUSH
QT_WARNING_DISABLE_DEPRECATED
struct qt_meta_stringdata_CreateData_t {
QByteArrayData data[1];
char stringdata0[11];
};
#define QT_MOC_LITERAL(idx, ofs, len) \
Q_STATIC_BYTE_ARRAY_DATA_HEADER_INITIALIZER_WITH_OFFSET(len, \
qptrdiff(offsetof(qt_meta_stringdata_CreateData_t, stringdata0) + ofs \
- idx * sizeof(QByteArrayData)) \
)
static const qt_meta_stringdata_CreateData_t qt_meta_stringdata_CreateData = {
{
QT_MOC_LITERAL(0, 0, 10) // "CreateData"
},
"CreateData"
};
#undef QT_MOC_LITERAL
static const uint qt_meta_data_CreateData[] = {
// content:
7, // revision
0, // classname
0, 0, // classinfo
0, 0, // methods
0, 0, // properties
0, 0, // enums/sets
0, 0, // constructors
0, // flags
0, // signalCount
0 // eod
};
void CreateData::qt_static_metacall(QObject *_o, QMetaObject::Call _c, int _id, void **_a)
{
Q_UNUSED(_o);
Q_UNUSED(_id);
Q_UNUSED(_c);
Q_UNUSED(_a);
}
const QMetaObject CreateData::staticMetaObject = {
{ &QDialog::staticMetaObject, qt_meta_stringdata_CreateData.data,
qt_meta_data_CreateData, qt_static_metacall, Q_NULLPTR, Q_NULLPTR}
};
const QMetaObject *CreateData::metaObject() const
{
return QObject::d_ptr->metaObject ? QObject::d_ptr->dynamicMetaObject() : &staticMetaObject;
}
void *CreateData::qt_metacast(const char *_clname)
{
if (!_clname) return Q_NULLPTR;
if (!strcmp(_clname, qt_meta_stringdata_CreateData.stringdata0))
return static_cast<void*>(const_cast< CreateData*>(this));
return QDialog::qt_metacast(_clname);
}
int CreateData::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
_id = QDialog::qt_metacall(_c, _id, _a);
if (_id < 0)
return _id;
return _id;
}
QT_WARNING_POP
QT_END_MOC_NAMESPACE
| [
"mihail.lukjanec@gmail.com"
] | mihail.lukjanec@gmail.com |
c6303206455e0c0390cbb348f183b71b81582e38 | 2e676e3b1cebfbb9d20f9b935ceacd507c57d36a | /Octave/octave-4.2.1/include/octave-4.2.1/octave/lo-array-gripes.h | a934fa28d420492cf46ff03251c790bed103aeae | [
"MIT",
"Zlib",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | vohrahul/ML-ang-coursera | 239469e763b290aa178b7aa8a86eda08e4e7f4be | 4c24fd2ecfb9f3de7df15e3a9f75627f782f9915 | refs/heads/master | 2022-12-28T03:45:54.810173 | 2020-10-16T12:33:25 | 2020-10-16T12:33:25 | 304,620,441 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,752 | h | /*
Copyright (C) 2000-2017 John W. Eaton
This file is part of Octave.
Octave is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
Octave is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with Octave; see the file COPYING. If not, see
<http://www.gnu.org/licenses/>.
*/
// FIXME: All gripe_XXX functions deprecated in 4.2. Remove file in 4.6
#if ! defined (octave_lo_array_gripes_h)
#define octave_lo_array_gripes_h 1
#include "octave-config.h"
#include "lo-array-errwarn.h"
#include "dim-vector.h"
#include "quit.h"
OCTAVE_DEPRECATED ("use 'octave::err_nan_to_logical_conversion' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_nan_to_logical_conversion (void);
OCTAVE_DEPRECATED ("use 'octave::err_nan_to_character_conversion' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_nan_to_character_conversion (void);
OCTAVE_DEPRECATED ("use 'octave::err_nonconformant' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_nonconformant (const char *op,
octave_idx_type op1_len,
octave_idx_type op2_len);
OCTAVE_DEPRECATED ("use 'octave::err_nonconformant' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_nonconformant (const char *op,
octave_idx_type op1_nr, octave_idx_type op1_nc,
octave_idx_type op2_nr, octave_idx_type op2_nc);
OCTAVE_DEPRECATED ("use 'octave::err_nonconformant' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_nonconformant (const char *op, const dim_vector& op1_dims,
const dim_vector& op2_dims);
OCTAVE_DEPRECATED ("use 'octave::err_index_out_of_range' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_index_out_of_range (int nd, int dim,
octave_idx_type iext, octave_idx_type ext,
const dim_vector& d);
OCTAVE_DEPRECATED ("use 'octave::err_index_out_of_range' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_index_out_of_range (int nd, int dim,
octave_idx_type iext, octave_idx_type ext);
OCTAVE_DEPRECATED ("use 'octave::err_del_index_out_of_range' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_del_index_out_of_range (bool is1d, octave_idx_type iext,
octave_idx_type ext);
OCTAVE_DEPRECATED ("use 'octave::err_invalid_index' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_invalid_index (double, octave_idx_type nd = 0,
octave_idx_type dim = 0,
const std::string& var = "");
OCTAVE_DEPRECATED ("use 'octave::err_invalid_index' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_invalid_index (octave_idx_type n, octave_idx_type nd = 0,
octave_idx_type dim = 0,
const std::string& var = "");
OCTAVE_DEPRECATED ("use 'octave::err_invalid_index' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_invalid_index (const std::string& idx, octave_idx_type nd = 0,
octave_idx_type dim = 0,
const std::string& var = "");
OCTAVE_DEPRECATED ("use 'octave::err_invalid_resize' instead")
OCTAVE_NORETURN OCTAVE_API extern void
gripe_invalid_resize (void);
OCTAVE_DEPRECATED ("use 'octave::err_singular_matrix' instead")
OCTAVE_API extern void
gripe_singular_matrix (double rcond = 0.0);
#endif
| [
"rvohra91@gmail.com"
] | rvohra91@gmail.com |
44a558aac6fd6019698897640c03d082068f6695 | 11ffb89cf392367646a88844982d0dc06453eb7c | /Homework06/Dragon.h | 3dfdf5399012e7a91655a56afb8a87beb0640eda | [] | no_license | prog1261-2020/homework-atoledanoh | 0114bad2f773fab9191d678e3c1a9feba7e18453 | f600099876e01daede6f39a3bac3a9d68acb4f3c | refs/heads/master | 2020-12-09T22:09:01.933624 | 2020-03-22T23:52:55 | 2020-03-22T23:52:55 | 233,429,881 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 192 | h | #pragma once
#include "Animal.h"
class Dragon : public Animal {
public:
Dragon(std::string n);
int pet();
void speak() const override;
void move() override;
private:
int petted = 0;
};
| [
"55004603+atoledanoh@users.noreply.github.com"
] | 55004603+atoledanoh@users.noreply.github.com |
3f245f098ab21aba54b1f6f2150ba888fdddb01e | 24f26275ffcd9324998d7570ea9fda82578eeb9e | /extensions/browser/content_verifier/content_hash_unittest.cc | 1d560cbe4a012e5c5b3bf2fb4911bfbf6a359e92 | [
"BSD-3-Clause"
] | permissive | Vizionnation/chromenohistory | 70a51193c8538d7b995000a1b2a654e70603040f | 146feeb85985a6835f4b8826ad67be9195455402 | refs/heads/master | 2022-12-15T07:02:54.461083 | 2019-10-25T15:07:06 | 2019-10-25T15:07:06 | 217,557,501 | 2 | 1 | BSD-3-Clause | 2022-11-19T06:53:07 | 2019-10-25T14:58:54 | null | UTF-8 | C++ | false | false | 8,889 | cc | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "extensions/browser/content_verifier/content_hash.h"
#include "base/base64url.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/json/json_writer.h"
#include "crypto/rsa_private_key.h"
#include "crypto/sha2.h"
#include "crypto/signature_creator.h"
#include "extensions/browser/computed_hashes.h"
#include "extensions/browser/content_hash_tree.h"
#include "extensions/browser/content_verifier/test_utils.h"
#include "extensions/browser/content_verifier_delegate.h"
#include "extensions/browser/extension_file_task_runner.h"
#include "extensions/browser/extensions_test.h"
#include "extensions/browser/verified_contents.h"
#include "extensions/common/constants.h"
#include "extensions/common/file_util.h"
#include "extensions/common/value_builder.h"
#include "extensions/test/test_extension_dir.h"
namespace extensions {
// Helper class to create directory with extension files, including signed
// hashes for content verification.
class TestExtensionBuilder {
public:
TestExtensionBuilder()
: test_content_verifier_key_(crypto::RSAPrivateKey::Create(2048)),
// We have to provide explicit extension id in verified_contents.json.
extension_id_(32, 'a') {
base::CreateDirectory(
extension_dir_.UnpackedPath().Append(kMetadataFolder));
}
void WriteManifest() {
extension_dir_.WriteManifest(DictionaryBuilder()
.Set("manifest_version", 2)
.Set("name", "Test extension")
.Set("version", "1.0")
.ToJSON());
}
void WriteResource(base::FilePath::StringType relative_path,
std::string contents) {
extension_dir_.WriteFile(relative_path, contents);
extension_resources_.emplace_back(base::FilePath(std::move(relative_path)),
std::move(contents));
}
void WriteComputedHashes() {
int block_size = extension_misc::kContentVerificationDefaultBlockSize;
ComputedHashes::Writer computed_hashes_writer;
for (const auto& resource : extension_resources_) {
std::vector<std::string> hashes;
ComputedHashes::ComputeHashesForContent(resource.contents, block_size,
&hashes);
computed_hashes_writer.AddHashes(resource.relative_path, block_size,
hashes);
}
ASSERT_TRUE(computed_hashes_writer.WriteToFile(
file_util::GetComputedHashesPath(extension_dir_.UnpackedPath())));
}
void WriteVerifiedContents() {
std::unique_ptr<base::Value> payload = CreateVerifiedContents();
std::string payload_value;
ASSERT_TRUE(base::JSONWriter::Write(*payload, &payload_value));
std::string payload_b64;
base::Base64UrlEncode(
payload_value, base::Base64UrlEncodePolicy::OMIT_PADDING, &payload_b64);
std::string signature_sha256 = crypto::SHA256HashString("." + payload_b64);
std::vector<uint8_t> signature_source(signature_sha256.begin(),
signature_sha256.end());
std::vector<uint8_t> signature_value;
ASSERT_TRUE(crypto::SignatureCreator::Sign(
test_content_verifier_key_.get(), crypto::SignatureCreator::SHA256,
signature_source.data(), signature_source.size(), &signature_value));
std::string signature_b64;
base::Base64UrlEncode(
std::string(signature_value.begin(), signature_value.end()),
base::Base64UrlEncodePolicy::OMIT_PADDING, &signature_b64);
std::unique_ptr<base::Value> signatures =
ListBuilder()
.Append(DictionaryBuilder()
.Set("header",
DictionaryBuilder().Set("kid", "webstore").Build())
.Set("protected", "")
.Set("signature", signature_b64)
.Build())
.Build();
std::unique_ptr<base::Value> verified_contents =
ListBuilder()
.Append(DictionaryBuilder()
.Set("description", "treehash per file")
.Set("signed_content",
DictionaryBuilder()
.Set("payload", payload_b64)
.Set("signatures", std::move(signatures))
.Build())
.Build())
.Build();
std::string json;
ASSERT_TRUE(base::JSONWriter::Write(*verified_contents, &json));
base::FilePath verified_contents_path =
file_util::GetVerifiedContentsPath(extension_dir_.UnpackedPath());
ASSERT_EQ(
static_cast<int>(json.size()),
base::WriteFile(verified_contents_path, json.data(), json.size()));
}
std::vector<uint8_t> GetTestContentVerifierPublicKey() {
std::vector<uint8_t> public_key;
test_content_verifier_key_->ExportPublicKey(&public_key);
return public_key;
}
base::FilePath extension_path() const {
return extension_dir_.UnpackedPath();
}
const ExtensionId& extension_id() const { return extension_id_; }
private:
struct ExtensionResource {
ExtensionResource(base::FilePath relative_path, std::string contents)
: relative_path(std::move(relative_path)),
contents(std::move(contents)) {}
base::FilePath relative_path;
std::string contents;
};
std::unique_ptr<base::Value> CreateVerifiedContents() {
int block_size = extension_misc::kContentVerificationDefaultBlockSize;
ListBuilder files;
for (const auto& resource : extension_resources_) {
base::FilePath::StringType path =
VerifiedContents::NormalizeResourcePath(resource.relative_path);
std::string tree_hash =
ContentHash::ComputeTreeHashForContent(resource.contents, block_size);
std::string tree_hash_b64;
base::Base64UrlEncode(
tree_hash, base::Base64UrlEncodePolicy::OMIT_PADDING, &tree_hash_b64);
files.Append(DictionaryBuilder()
.Set("path", path)
.Set("root_hash", tree_hash_b64)
.Build());
}
return DictionaryBuilder()
.Set("item_id", extension_id_)
.Set("item_version", "1.0")
.Set("content_hashes",
ListBuilder()
.Append(DictionaryBuilder()
.Set("format", "treehash")
.Set("block_size", block_size)
.Set("hash_block_size", block_size)
.Set("files", files.Build())
.Build())
.Build())
.Build();
}
std::unique_ptr<crypto::RSAPrivateKey> test_content_verifier_key_;
ExtensionId extension_id_;
std::vector<ExtensionResource> extension_resources_;
TestExtensionDir extension_dir_;
DISALLOW_COPY_AND_ASSIGN(TestExtensionBuilder);
};
class ContentHashUnittest : public ExtensionsTest {
protected:
ContentHashUnittest() = default;
std::unique_ptr<ContentHashResult> CreateContentHash(
Extension* extension,
ContentVerifierDelegate::VerifierSourceType source_type,
const std::vector<uint8_t>& content_verifier_public_key) {
ContentHash::FetchKey key(
extension->id(), extension->path(), extension->version(),
mojo::NullRemote() /* url_loader_factory_remote */,
GURL() /* fetch_url */, content_verifier_public_key);
return ContentHashWaiter().CreateAndWaitForCallback(std::move(key),
source_type);
}
scoped_refptr<Extension> LoadExtension(const TestExtensionBuilder& builder) {
std::string error;
scoped_refptr<Extension> extension = file_util::LoadExtension(
builder.extension_path(), builder.extension_id(), Manifest::INTERNAL,
0 /* flags */, &error);
if (!extension)
ADD_FAILURE() << " error:'" << error << "'";
return extension;
}
};
TEST_F(ContentHashUnittest, ExtensionWithSignedHashes) {
TestExtensionBuilder builder;
builder.WriteManifest();
builder.WriteResource(FILE_PATH_LITERAL("background.js"),
"console.log('Nothing special');");
builder.WriteVerifiedContents();
scoped_refptr<Extension> extension = LoadExtension(builder);
ASSERT_NE(nullptr, extension);
std::unique_ptr<ContentHashResult> result = CreateContentHash(
extension.get(),
ContentVerifierDelegate::VerifierSourceType::SIGNED_HASHES,
builder.GetTestContentVerifierPublicKey());
DCHECK(result);
EXPECT_TRUE(result->success);
}
} // namespace extensions
| [
"rjkroege@chromium.org"
] | rjkroege@chromium.org |
2518b1d9f829e02d4a6f05dadbb1ffce590166fa | 6d6865725ceec1287dc18a75ed843e6190d58e66 | /String/stringtest.cpp | 2462c6a414ff49ab35e483b1bb5078aaf39af8f9 | [] | no_license | patrickclark9/Esercizi | 0edbdd5e7916f56bd83f2d810e487fd4f07e93e4 | 44b4f2dbd145a788c1d359098d22db6a8085e0ab | refs/heads/master | 2023-03-15T08:44:35.732366 | 2021-03-16T09:24:44 | 2021-03-16T09:24:44 | 346,989,454 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 522 | cpp | #include <string>
#include <iostream>
#include "string.hpp"
int main()
{
eostring n;
std::string s1 = "ciao";
std::string s2 = "buonasera";
std::string s3 = "latte";
std::string s4 = "caffett";
std::string s5 = "carlot";
std::string s6 = "pari";
char c;
n.insertString(s1);
n.insertString(s2);
n.insertString(s3);
n.insertString(s4);
n.insertString(s5);
n.insertString(s6);
n.removeEvenC();
n.nrEven();
n.printOdd();
n.printEven();
return 0;
} | [
"patrickclark@outlook.it"
] | patrickclark@outlook.it |
63b26bbaee1633aa57fc358047709d4daf3f37b1 | d25f34c7c2feedc72bcc6c7af2b624e1c1605eee | /modules/common/vehicle_state/vehicle_state_provider.h | 59c1a10d414d23b567fa9e9f1160927733661acb | [
"Apache-2.0"
] | permissive | wangzhongchuan1973/2020-w1 | aefecada26376f49cc88b652378920647ae3af78 | f9fb019d6b5ebbba4b4e9cf5e98544fa9bc3c7a6 | refs/heads/master | 2022-12-11T14:06:04.564465 | 2020-03-08T07:11:25 | 2020-03-08T07:11:25 | 245,765,118 | 0 | 0 | Apache-2.0 | 2022-12-07T02:18:33 | 2020-03-08T06:37:00 | C++ | UTF-8 | C++ | false | false | 5,447 | h | /******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/**
* @file vehicle_state.h
*
* @brief Declaration of the class VehicleStateProvider.
*/
#ifndef MODULES_COMMON_VEHICLE_STATE_VEHICLE_STATE_PROVIDER_H_
#define MODULES_COMMON_VEHICLE_STATE_VEHICLE_STATE_PROVIDER_H_
#include <memory>
#include <string>
#include "modules/canbus/proto/chassis.pb.h"
#include "modules/common/proto/vehicle_state.pb.h"
#include "modules/localization/proto/localization.pb.h"
#include "modules/common/macro.h"
#include "modules/common/math/box2d.h"
#include "modules/common/math/vec2d.h"
#include "modules/common/status/status.h"
/**
* @namespace apollo::common
* @brief apollo::common
*/
namespace apollo {
namespace common {
/**
* @class VehicleStateProvider
* @brief The class of vehicle state.
* It includes basic information and computation
* about the state of the vehicle.
*/
class VehicleStateProvider {
public:
/**
* @brief Constructor by information of localization and chassis.
* @param localization Localization information of the vehicle.
* @param chassis Chassis information of the vehicle.
*/
Status Update(const localization::LocalizationEstimate& localization,
const canbus::Chassis& chassis);
/**
* @brief Update VehicleStateProvider instance by protobuf files.
* @param localization_file the localization protobuf file.
* @param chassis_file The chassis protobuf file
*/
void Update(const std::string& localization_file,
const std::string& chassis_file);
double timestamp() const;
const localization::Pose& pose() const;
/**
* @brief Default destructor.
*/
virtual ~VehicleStateProvider() = default;
/**
* @brief Get the x-coordinate of vehicle position.
* @return The x-coordinate of vehicle position.
*/
double x() const;
/**
* @brief Get the y-coordinate of vehicle position.
* @return The y-coordinate of vehicle position.
*/
double y() const;
/**
* @brief Get the z coordinate of vehicle position.
* @return The z coordinate of vehicle position.
*/
double z() const;
double kappa() const;
/**
* @brief Get the vehicle roll angle.
* @return The euler roll angle.
*/
double roll() const;
/**
* @brief Get the vehicle pitch angle.
* @return The euler pitch angle.
*/
double pitch() const;
/**
* @brief Get the vehicle yaw angle.
* As of now, use the heading instead of yaw angle.
* Heading angle with East as zero, yaw angle has North as zero
* @return The euler yaw angle.
*/
double yaw() const;
/**
* @brief Get the heading of vehicle position, which is the angle
* between the vehicle's heading direction and the x-axis.
* @return The angle between the vehicle's heading direction
* and the x-axis.
*/
double heading() const;
/**
* @brief Get the vehicle's linear velocity.
* @return The vehicle's linear velocity.
*/
double linear_velocity() const;
/**
* @brief Get the vehicle's angular velocity.
* @return The vehicle's angular velocity.
*/
double angular_velocity() const;
/**
* @brief Get the vehicle's linear acceleration.
* @return The vehicle's linear acceleration.
*/
double linear_acceleration() const;
/**
* @brief Get the vehicle's gear position.
* @return The vehicle's gear position.
*/
double gear() const;
/**
* @brief Set the vehicle's linear velocity.
* @param linear_velocity The value to set the vehicle's linear velocity.
*/
void set_linear_velocity(const double linear_velocity);
/**
* @brief Estimate future position from current position and heading,
* along a period of time, by constant linear velocity,
* linear acceleration, angular velocity.
* @param t The length of time period.
* @return The estimated future position in time t.
*/
math::Vec2d EstimateFuturePosition(const double t) const;
/**
* @brief Compute the position of center of mass(COM) of the vehicle,
* given the distance from rear wheels to the center of mass.
* @param rear_to_com_distance Distance from rear wheels to
* the vehicle's center of mass.
* @return The position of the vehicle's center of mass.
*/
math::Vec2d ComputeCOMPosition(const double rear_to_com_distance) const;
const VehicleState& vehicle_state() const;
private:
bool ConstructExceptLinearVelocity(
const localization::LocalizationEstimate& localization);
common::VehicleState vehicle_state_;
DECLARE_SINGLETON(VehicleStateProvider);
};
} // namespace common
} // namespace apollo
#endif // MODULES_COMMON_VEHICLE_STATE_VEHICLE_STATE_PROVIDER_H_
| [
"wzc1973@163.com"
] | wzc1973@163.com |
cb0e7e99d3af9f27c262008c880d91b5bbdcbac8 | 0ed3ff478ea539c975438027f91737b2f0995736 | /garli/tags/0.952b2/src/utility.h | 1d7bc2c16fcbc54318e7c2c4c30b9994c51d19f9 | [] | no_license | zwickl/garli | a275d6fad15c0d3e5d12fe6821efbfff1e1c7520 | 65fb3e7e967109d1c0a3a9b3fa5eeb0df89285ff | refs/heads/master | 2022-09-24T16:39:32.643748 | 2022-09-15T23:11:54 | 2022-09-15T23:11:54 | 39,407,170 | 4 | 3 | null | null | null | null | UTF-8 | C++ | false | false | 5,705 | h | #ifndef GAML_UTIL_HPP
#define GAML_UTIL_HPP
// code from MTH for allocating flattened matrices
#include "memchk.h"
#include <stdlib.h>
#include <cassert>
#define DBL_ALIGN 32
template<typename T> T ***New3DArray(unsigned f , unsigned s , unsigned t);
template<typename T> T **New2DArray(unsigned f , unsigned s);
template<typename T> void Delete3DArray (T ***temp);
template<typename T> void Delete2DArray (T **temp);
//aligned versions
template<typename T> T ***New3DAlignedArray(unsigned f , unsigned s , unsigned t, unsigned a);
template<typename T> T **New2DAlignedArray(unsigned f , unsigned s, unsigned a);
template<typename T> void Delete3DAlignedArray (T ***temp);
template<typename T> void Delete2DAlignedArray (T **temp);
template<typename T> T *NewAlignedArray(unsigned len, unsigned align ){
#ifdef _MSC_VER
return (T*) _aligned_malloc(sizeof(T)*len, align);
#endif
}
template<typename T> void DeleteAlignedArray(T *a){
#ifdef _MSC_VER
_aligned_free(a);
#endif
}
/*--------------------------------------------------------------------------------------------------------------------------
| Allocates a three dimensional array of FLOAT_TYPEs as one contiguous block of memory
| the dimensions are f two dimensional arrays that are s by t.
| the array is set up so that
| for(i = 0 ; i < f ; i++)
| for (j = 0 ; j < s ; j++)
| for (k = 0 ; k < t; k++)
| array[i][j][k];
| would be the same order of access as:
|
| T *temp = **array;
| for (i = 0 ; i < f*s*t ; i++)
| {
| *temp++;
| }
*/
template<typename T> T ***New3DArray(unsigned f , unsigned s , unsigned t)
{
assert(f > 0 && s > 0 && t> 0);
T ***temp;
temp = new T **[f];
*temp = new T *[f * s];
**temp = new T[f * s * t];
for (unsigned sIt = 1 ; sIt < s ; sIt++)
temp[0][sIt] = temp[0][sIt-1] + t ;
for (unsigned fIt = 1 ; fIt < f ; fIt ++)
{
temp[fIt] = temp[fIt -1] + s ;
temp[fIt][0] = temp[fIt -1][0] + (s*t);
for (unsigned sIt = 1 ; sIt < s ; sIt++)
temp[fIt][sIt] = temp[fIt][sIt-1] + t ;
}
return temp;
}
/*--------------------------------------------------------------------------------------------------------------------------
| Delete a Three Dimensional Array that has been allocated using New3DArray
*/
template<typename T> void Delete3DArray (T ***temp)
{
assert(temp); //these asserts aren't necessary, but right now I can't think of a case in which they'd fail other than following an allocation error
assert(*temp);
assert(**temp);
if (temp)
{
if (*temp)
{
if (**temp)
delete [] **temp;
delete [] * temp;
}
delete [] temp;
}
}
/*--------------------------------------------------------------------------------------------------------------------------
| Allocates a two dimensional array of FLOAT_TYPEs as one contiguous block of memory
| the dimensions are f by s.
| the array is set up so that
|
| for(i = 0 ; i < f ; i++)
| for (j = 0 ; j < s ; j++)
| array[i][j];
|
| would be the same order of access as:
|
| T *temp = **array;
| for (i = 0 ; i < f*s*t ; i++)
| *temp++;
*/
template<typename T> T **New2DArray(unsigned f , unsigned s)
{
assert(f > 0 && s > 0);
T **temp;
temp = new T *[f];
*temp = new T [f * s];
for (unsigned fIt = 1 ; fIt < f ; fIt ++)
temp[fIt] = temp[fIt -1] + s ;
return temp;
}
/*--------------------------------------------------------------------------------------------------------------------------
| Delete a 2 Dimensional Array New2DArray
*/
template<typename T> inline void Delete2DArray (T **temp)
{
assert(temp); //these asserts aren't necessary, but right now I can't think of a case in which they'd fail other than following an allocation error
assert(*temp);
if (temp)
{
if (*temp)
delete [] * temp;
delete [] temp;
}
}
//aligned version
template<typename T> T ***New3DAlignedArray(unsigned f , unsigned s , unsigned t)
{
assert(f > 0 && s > 0 && t> 0);
T ***temp;
temp = new T **[f];
*temp = new T *[f * s];
**temp = new T[f * s * t];
**temp = NewAlignedArray<T>(f * s * t, DBL_ALIGN);
for (unsigned sIt = 1 ; sIt < s ; sIt++)
temp[0][sIt] = temp[0][sIt-1] + t ;
for (unsigned fIt = 1 ; fIt < f ; fIt ++)
{
temp[fIt] = temp[fIt -1] + s ;
temp[fIt][0] = temp[fIt -1][0] + (s*t);
for (unsigned sIt = 1 ; sIt < s ; sIt++)
temp[fIt][sIt] = temp[fIt][sIt-1] + t ;
}
return temp;
}
/*--------------------------------------------------------------------------------------------------------------------------
| Delete a Three Dimensional Array that has been allocated using New3DArray
*/
template<typename T> void Delete3DAlignedArray (T ***temp)
{
assert(temp); //these asserts aren't necessary, but right now I can't think of a case in which they'd fail other than following an allocation error
assert(*temp);
assert(**temp);
if (temp)
{
if (*temp)
{
if (**temp)
DeleteAlignedArray(**temp);
delete [] * temp;
}
delete [] temp;
}
}
template<typename T> T **New2DAlignedArray(unsigned f , unsigned s)
{
assert(f > 0 && s > 0);
T **temp;
temp = new T *[f];
*temp = NewAlignedArray<T>(f * s, DBL_ALIGN);
for (unsigned fIt = 1 ; fIt < f ; fIt ++)
temp[fIt] = temp[fIt -1] + s ;
return temp;
}
/*--------------------------------------------------------------------------------------------------------------------------
| Delete a 2 Dimensional Array New2DArray
*/
template<typename T> inline void Delete2DAlignedArray (T **temp)
{
assert(temp); //these asserts aren't necessary, but right now I can't think of a case in which they'd fail other than following an allocation error
assert(*temp);
if (temp)
{
if (*temp)
DeleteAlignedArray(*temp);
delete [] temp;
}
}
#endif //
| [
"zwickl@f68b6e6a-deb8-11de-9f73-7335da764322"
] | zwickl@f68b6e6a-deb8-11de-9f73-7335da764322 |
643f7ae71c761d9173ed5494783e46541626109a | 801f7ed77fb05b1a19df738ad7903c3e3b302692 | /refactoringOptimisation/differentiatedCAD/occt-min-topo-src/src/TDataStd/TDataStd_TreeNode.hxx | f9459a4fa828f4abfdf116479a5225844642d901 | [] | no_license | salvAuri/optimisationRefactoring | 9507bdb837cabe10099d9481bb10a7e65331aa9d | e39e19da548cb5b9c0885753fe2e3a306632d2ba | refs/heads/master | 2021-01-20T03:47:54.825311 | 2017-04-27T11:31:24 | 2017-04-27T11:31:24 | 89,588,404 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 9,500 | hxx | // Created on: 1999-06-10
// Created by: Vladislav ROMASHKO
// Copyright (c) 1999 Matra Datavision
// Copyright (c) 1999-2014 OPEN CASCADE SAS
//
// This file is part of Open CASCADE Technology software library.
//
// This library is free software; you can redistribute it and/or modify it under
// the terms of the GNU Lesser General Public License version 2.1 as published
// by the Free Software Foundation, with special exception defined in the file
// OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
// distribution for complete text of the license and disclaimer of any warranty.
//
// Alternatively, this file may be used under the terms of Open CASCADE
// commercial license or contractual agreement.
#ifndef _TDataStd_TreeNode_HeaderFile
#define _TDataStd_TreeNode_HeaderFile
#include <Standard.hxx>
#include <Standard_Type.hxx>
#include <TDataStd_PtrTreeNode.hxx>
#include <Standard_GUID.hxx>
#include <TDF_Attribute.hxx>
#include <Standard_Boolean.hxx>
#include <Standard_Integer.hxx>
#include <Standard_OStream.hxx>
class TDataStd_ChildNodeIterator;
class TDF_Label;
class Standard_GUID;
class TDF_AttributeDelta;
class TDF_Attribute;
class TDF_RelocationTable;
class TDF_DataSet;
class TDataStd_TreeNode;
DEFINE_STANDARD_HANDLE(TDataStd_TreeNode, TDF_Attribute)
//! Allows you to define an explicit tree of labels
//! which you can also edit.
//! Without this class, the data structure cannot be fully edited.
//! This service is required if for presentation
//! purposes, you want to create an application with
//! a tree which allows you to organize and link data
//! as a function of application features.
class TDataStd_TreeNode : public TDF_Attribute
{
public:
//! class methods working on the node
//! ===================================
//! Returns true if the tree node T is found on the label L.
//! Otherwise, false is returned.
Standard_EXPORT static Standard_Boolean Find (const TDF_Label& L, Handle(TDataStd_TreeNode)& T);
//! Finds or Creates a TreeNode attribute on the label <L>
//! with the default tree ID, returned by the method
//! <GetDefaultTreeID>. Returns the created/found TreeNode
//! attribute.
Standard_EXPORT static Handle(TDataStd_TreeNode) Set (const TDF_Label& L);
//! Finds or Creates a TreeNode attribute on the label
//! <L>, with an explicit tree ID. <ExplicitTreeID> is
//! the ID returned by <TDF_Attribute::ID> method.
//! Returns the found/created TreeNode attribute.
Standard_EXPORT static Handle(TDataStd_TreeNode) Set (const TDF_Label& L, const Standard_GUID& ExplicitTreeID);
//! returns a default tree ID. this ID is used by the
//! <Set> method without explicit tree ID.
//! Instance methods:
//! ================
Standard_EXPORT static const Standard_GUID& GetDefaultTreeID();
Standard_EXPORT TDataStd_TreeNode();
//! Insert the TreeNode <Child> as last child of <me>. If
//! the insertion is successful <me> becomes the Father of <Child>.
Standard_EXPORT Standard_Boolean Append (const Handle(TDataStd_TreeNode)& Child);
//! Insert the the TreeNode <Child> as first child of
//! <me>. If the insertion is successful <me> becomes the Father of <Child>
Standard_EXPORT Standard_Boolean Prepend (const Handle(TDataStd_TreeNode)& Child);
//! Inserts the TreeNode <Node> before <me>. If insertion is successful <me>
//! and <Node> belongs to the same Father.
Standard_EXPORT Standard_Boolean InsertBefore (const Handle(TDataStd_TreeNode)& Node);
//! Inserts the TreeNode <Node> after <me>. If insertion is successful <me>
//! and <Node> belongs to the same Father.
Standard_EXPORT Standard_Boolean InsertAfter (const Handle(TDataStd_TreeNode)& Node);
//! Removes this tree node attribute from its father
//! node. The result is that this attribute becomes a root node.
Standard_EXPORT Standard_Boolean Remove();
//! Returns the depth of this tree node in the overall tree node structure.
//! In other words, the number of father tree nodes of this one is returned.
Standard_EXPORT Standard_Integer Depth() const;
//! Returns the number of child nodes.
//! If <allLevels> is true, the method counts children of all levels
//! (children of children ...)
Standard_EXPORT Standard_Integer NbChildren (const Standard_Boolean allLevels = Standard_False) const;
//! Returns true if this tree node attribute is an
//! ascendant of of. In other words, if it is a father or
//! the father of a father of of.
Standard_EXPORT Standard_Boolean IsAscendant (const Handle(TDataStd_TreeNode)& of) const;
//! Returns true if this tree node attribute is a
//! descendant of of. In other words, if it is a child or
//! the child of a child of of.
Standard_EXPORT Standard_Boolean IsDescendant (const Handle(TDataStd_TreeNode)& of) const;
//! Returns true if this tree node attribute is the
//! ultimate father in the tree.
Standard_EXPORT Standard_Boolean IsRoot() const;
//! Returns the ultimate father of this tree node attribute.
Standard_EXPORT Handle(TDataStd_TreeNode) Root() const;
//! Returns true if this tree node attribute is a father of of.
Standard_EXPORT Standard_Boolean IsFather (const Handle(TDataStd_TreeNode)& of) const;
//! Returns true if this tree node attribute is a child of of.
Standard_EXPORT Standard_Boolean IsChild (const Handle(TDataStd_TreeNode)& of) const;
//! Returns true if this tree node attribute has a father tree node.
Standard_Boolean HasFather() const;
//! Returns the father TreeNode of <me>. Null if root.
Standard_EXPORT Handle(TDataStd_TreeNode) Father() const;
//! Returns true if this tree node attribute has a next tree node.
Standard_Boolean HasNext() const;
//! Returns the next tree node in this tree node attribute.
//! Warning
//! This tree node is null if it is the last one in this
//! tree node attribute.Returns the next TreeNode of <me>. Null if last.
Standard_EXPORT Handle(TDataStd_TreeNode) Next() const;
//! Returns true if this tree node attribute has a previous tree node.
Standard_Boolean HasPrevious() const;
//! Returns the previous tree node of this tree node attribute.
//! Warning
//! This tree node is null if it is the first one in this tree node attribute.
Standard_EXPORT Handle(TDataStd_TreeNode) Previous() const;
//! Returns true if this tree node attribute has a first child tree node.
Standard_Boolean HasFirst() const;
//! Returns the first child tree node in this tree node object.
Standard_EXPORT Handle(TDataStd_TreeNode) First() const;
//! Returns true if this tree node attribute has a last child tree node.
Standard_Boolean HasLast() const;
//! Returns the last child tree node in this tree node object.
Standard_EXPORT Handle(TDataStd_TreeNode) Last();
//! Returns the last child tree node in this tree node object.
//! to set fields
//! =============
Standard_EXPORT Handle(TDataStd_TreeNode) FindLast();
Standard_EXPORT void SetTreeID (const Standard_GUID& explicitID);
Standard_EXPORT void SetFather (const Handle(TDataStd_TreeNode)& F);
Standard_EXPORT void SetNext (const Handle(TDataStd_TreeNode)& F);
Standard_EXPORT void SetPrevious (const Handle(TDataStd_TreeNode)& F);
Standard_EXPORT void SetFirst (const Handle(TDataStd_TreeNode)& F);
//! TreeNode callback:
//! ==================
Standard_EXPORT void SetLast (const Handle(TDataStd_TreeNode)& F);
//! Connect the TreeNode to its father child list
Standard_EXPORT virtual void AfterAddition() Standard_OVERRIDE;
//! Disconnect the TreeNode from its Father child list
Standard_EXPORT virtual void BeforeForget() Standard_OVERRIDE;
//! Reconnect the TreeNode to its father child list.
Standard_EXPORT virtual void AfterResume() Standard_OVERRIDE;
//! Disconnect the TreeNode, if necessary.
Standard_EXPORT virtual Standard_Boolean BeforeUndo (const Handle(TDF_AttributeDelta)& anAttDelta, const Standard_Boolean forceIt = Standard_False) Standard_OVERRIDE;
//! Reconnect the TreeNode, if necessary.
//! Implementation of Attribute methods:
//! ===================================
Standard_EXPORT virtual Standard_Boolean AfterUndo (const Handle(TDF_AttributeDelta)& anAttDelta, const Standard_Boolean forceIt = Standard_False) Standard_OVERRIDE;
//! Returns the tree ID (default or explicit one depending
//! onthe Set method used).
Standard_EXPORT const Standard_GUID& ID() const;
Standard_EXPORT virtual void Restore (const Handle(TDF_Attribute)& with);
Standard_EXPORT virtual void Paste (const Handle(TDF_Attribute)& into, const Handle(TDF_RelocationTable)& RT) const;
Standard_EXPORT virtual Handle(TDF_Attribute) NewEmpty() const Standard_OVERRIDE;
Standard_EXPORT virtual void References (const Handle(TDF_DataSet)& aDataSet) const Standard_OVERRIDE;
Standard_EXPORT virtual Standard_OStream& Dump (Standard_OStream& anOS) const Standard_OVERRIDE;
friend class TDataStd_ChildNodeIterator;
DEFINE_STANDARD_RTTI(TDataStd_TreeNode,TDF_Attribute)
protected:
private:
TDataStd_PtrTreeNode myFather;
TDataStd_PtrTreeNode myPrevious;
TDataStd_PtrTreeNode myNext;
TDataStd_PtrTreeNode myFirst;
TDataStd_PtrTreeNode myLast;
Standard_GUID myTreeID;
};
#include <TDataStd_TreeNode.lxx>
#endif // _TDataStd_TreeNode_HeaderFile
| [
"salvatore.auriemma@opencascade.com"
] | salvatore.auriemma@opencascade.com |
45cfd7784c06345055942f91af91aa16cea6e46b | f40b6f8755c776d617fd1abe2aa8d8f9f748fde6 | /src/GameBoard/Card.cpp | 69330efe944b00df108314f1b121f3401c116052 | [] | no_license | Harian-Elyoth/Smoke | 4c8668a011c0b34e2f0c0951dd27f3b90994581b | 4a67ef39eace21b92a25287d32e636fbd68bcb7e | refs/heads/master | 2020-09-11T15:51:45.982786 | 2019-12-13T13:13:49 | 2019-12-13T13:13:49 | 222,116,545 | 0 | 0 | null | 2019-11-20T15:54:17 | 2019-11-16T15:10:04 | C++ | UTF-8 | C++ | false | false | 961 | cpp | #include "Card.h"
Card::Card () {
initAttributes();
}
Card::Card (int c, std::string n, int id, std::string tr, std::string ty):
cost(c), name(n), owner(id), tribe(tr), type(ty){}
Card::~Card () { }
std::ostream& operator<<(std::ostream& os, const Card& c){
os << c.cost << " | " << c.name << std::endl;
return os;
}
bool Card::operator!=(const Card& c){
if(cost != c.cost) return true;
if(name != c.name) return true;
if(owner != c.owner) return true;
if(tribe != c.tribe) return true;
if(type != c.type) return true;
return false;
}
bool Card::operator==(const Card& c){
if(cost != c.cost) return false;
if(name != c.name) return false;
if(owner != c.owner) return false;
if(tribe != c.tribe) return false;
if(type != c.type) return false;
return true;
}
void Card::initAttributes () {
cost = 0;
name = "ERROR";
owner = -1;
tribe = "ERROR";
type = "ERROR";
return;
}
| [
"axel@arctique.fr"
] | axel@arctique.fr |
d56a62d3c018b034023c6d986d2aebf0ce791abd | 30e1dc84fe8c54d26ef4a1aff000a83af6f612be | /src/external/boost/boost_1_68_0/tools/bcp/copy_path.cpp | c65c1b3b5e72ed56916436a23ac4020c84b3099b | [
"BSL-1.0",
"LicenseRef-scancode-other-permissive",
"Zlib",
"Spencer-86",
"LicenseRef-scancode-mit-old-style",
"Jam",
"GPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-bison-exception-2.0",
"LicenseRef-scancode-stlport-4.5",
"LicenseRef-scancode-public-domain",
... | permissive | Sitispeaks/turicreate | 0bda7c21ee97f5ae7dc09502f6a72abcb729536d | d42280b16cb466a608e7e723d8edfbe5977253b6 | refs/heads/main | 2023-05-19T17:55:21.938724 | 2021-06-14T17:53:17 | 2021-06-14T17:53:17 | 385,034,849 | 1 | 0 | BSD-3-Clause | 2021-07-11T19:23:21 | 2021-07-11T19:23:20 | null | UTF-8 | C++ | false | false | 7,871 | cpp | /*
*
* Copyright (c) 2003 Dr John Maddock
* Use, modification and distribution is subject to the
* Boost Software License, Version 1.0. (See accompanying file
* LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*
* This file implements the following:
* void bcp_implementation::copy_path(const fs::path& p)
* void bcp_implementation::create_path(const fs::path& p)
*/
#include "bcp_imp.hpp"
#include "fileview.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/regex.hpp>
#include <fstream>
#include <iterator>
#include <algorithm>
#include <iostream>
struct get_new_library_name
{
get_new_library_name(const std::string& n) : m_new_name(n) {}
template <class I>
std::string operator()(const boost::match_results<I>& what)
{
std::string s = what[0];
std::string::size_type n = s.find("boost");
if(n == std::string::npos)
{
s.insert(0, m_new_name);
}
else
{
s.replace(n, 5, m_new_name);
}
return s;
}
private:
std::string m_new_name;
};
void bcp_implementation::copy_path(const fs::path& p)
{
assert(!fs::is_directory(m_boost_path / p));
if(fs::exists(m_dest_path / p))
{
std::cout << "Copying (and overwriting) file: " << p.string() << "\n";
fs::remove(m_dest_path / p);
}
else
std::cout << "Copying file: " << p.string() << "\n";
//
// create the path to the new file if it doesn't already exist:
//
create_path(p.branch_path());
//
// do text based copy if requested:
//
if((p.leaf() == "Jamroot") && m_namespace_name.size())
{
static std::vector<char> v1, v2;
v1.clear();
v2.clear();
boost::filesystem::ifstream is((m_boost_path / p));
std::copy(std::istreambuf_iterator<char>(is), std::istreambuf_iterator<char>(), std::back_inserter(v1));
static boost::regex libname_matcher;
if(libname_matcher.empty())
{
libname_matcher.assign("boost_");
}
regex_replace(std::back_inserter(v2), v1.begin(), v1.end(), libname_matcher, m_namespace_name + "_");
std::swap(v1, v2);
v2.clear();
boost::filesystem::ofstream os;
if(m_unix_lines)
os.open((m_dest_path / p), std::ios_base::binary | std::ios_base::out);
else
os.open((m_dest_path / p), std::ios_base::out);
os.write(&*v1.begin(), v1.size());
os.close();
}
else if(m_namespace_name.size() && m_lib_names.size() && is_jam_file(p))
{
static std::vector<char> v1, v2;
v1.clear();
v2.clear();
boost::filesystem::ifstream is((m_boost_path / p));
std::copy(std::istreambuf_iterator<char>(is), std::istreambuf_iterator<char>(), std::back_inserter(v1));
static boost::regex libname_matcher;
if(libname_matcher.empty())
{
std::string re = "\\<";
re += *m_lib_names.begin();
for(std::set<std::string>::const_iterator i = ++m_lib_names.begin(); i != m_lib_names.end(); ++i)
{
re += "|" + *i;
}
re += "\\>";
libname_matcher.assign(re);
}
regex_replace(std::back_inserter(v2), v1.begin(), v1.end(), libname_matcher, get_new_library_name(m_namespace_name));
std::swap(v1, v2);
v2.clear();
boost::filesystem::ofstream os;
if(m_unix_lines)
os.open((m_dest_path / p), std::ios_base::binary | std::ios_base::out);
else
os.open((m_dest_path / p), std::ios_base::out);
os.write(&*v1.begin(), v1.size());
os.close();
}
else if(m_namespace_name.size() && is_source_file(p))
{
//
// v1 hold the current content, v2 is temp buffer.
// Each time we do a search and replace the new content
// ends up in v2: we then swap v1 and v2, and clear v2.
//
static std::vector<char> v1, v2;
v1.clear();
v2.clear();
boost::filesystem::ifstream is((m_boost_path / p));
std::copy(std::istreambuf_iterator<char>(is), std::istreambuf_iterator<char>(), std::back_inserter(v1));
static const boost::regex namespace_matcher(
"(?|"
"(namespace\\s+)boost(_\\w+)?(?:(\\s*::\\s*)phoenix)?"
"|"
"(namespace\\s+(?:detail::)?)(adstl|phoenix|rapidxml)\\>"
"|"
"()\\<boost((?:_(?!intrusive_tags)\\w+)?\\s*(?:::))(?:(\\s*)phoenix)?"
"|"
"()\\<((?:adstl|phoenix|rapidxml)\\s*(?:::))"
"|"
"(namespace\\s+\\w+\\s*=\\s*(?:::\\s*)?)boost(_\\w+)?(?:(\\s*::\\s*)phoenix)?"
"|"
"(namespace\\s+\\w+\\s*=\\s*(?:::\\s*)?(?:\\w+\\s*::\\s*)?)(adstl|phoenix|rapidxml)\\>"
"|"
"(^\\s*#\\s*define\\s+\\w+\\s+)boost((?:_\\w+)?\\s*)$"
"|"
"(^\\s*#\\s*define[^\\n]+)((?:adstl|phoenix|rapidxml)\\s*)$"
"|"
"()boost(_asio_detail_posix_thread_function|_regex_free_static_mutex)"
"|"
"()\\<(lw_thread_routine|at_thread_exit|on_process_enter|on_process_exit|on_thread_enter|on_thread_exit|tss_cleanup_implemented)\\>"
"|"
"(BOOST_CLASS_REQUIRE4?[^;]*)boost((?:_\\w+)?\\s*,)"
"|"
"(::tr1::|TR1_DECL\\s+)boost(_\\w+\\s*)" // math tr1
"|"
"(\\(\\s*)boost(\\s*\\))\\s*(\\(\\s*)phoenix(\\s*\\))"
"|"
"(\\(\\s*)boost(\\s*\\))"
")"
);
regex_replace(std::back_inserter(v2), v1.begin(), v1.end(), namespace_matcher, "$1" + m_namespace_name + "$2(?3$3" + m_namespace_name + "phoenix?4$4)", boost::regex_constants::format_all);
std::swap(v1, v2);
v2.clear();
if(m_namespace_alias)
{
static const boost::regex namespace_alias(
/*
"namespace\\s+" + m_namespace_name +
"\\s*"
"("
"\\{"
"(?:"
"(?>[^\\{\\}/]+)"
"(?>"
"(?:"
"(?1)"
"|//[^\\n]+$"
"|/[^/]"
"|(?:^\\s*#[^\\n]*"
"(?:(?<=\\\\)\\n[^\\n]*)*)"
")"
"[^\\{\\}]+"
")*"
")*"
"\\}"
")"
*/
/*
"(namespace\\s+" + m_namespace_name +
"\\s*\\{.*"
"\\})([^\\{\\};]*)\\z"
*/
"(namespace)(\\s+)(" + m_namespace_name + ")"
"(adstl|phoenix|rapidxml)?(\\s*\\{)"
);
regex_replace(std::back_inserter(v2), v1.begin(), v1.end(), namespace_alias,
"$1 $3$4 {} $1 (?4$4:boost) = $3$4; $1$2$3$4$5", boost::regex_constants::format_all);
std::swap(v1, v2);
v2.clear();
}
boost::filesystem::ofstream os;
if(m_unix_lines)
os.open((m_dest_path / p), std::ios_base::binary | std::ios_base::out);
else
os.open((m_dest_path / p), std::ios_base::out);
if(v1.size())
os.write(&*v1.begin(), v1.size());
os.close();
}
else if(m_unix_lines && !is_binary_file(p))
{
boost::filesystem::ifstream is((m_boost_path / p));
std::istreambuf_iterator<char> isi(is);
std::istreambuf_iterator<char> end;
boost::filesystem::ofstream os((m_dest_path / p), std::ios_base::binary | std::ios_base::out);
std::ostreambuf_iterator<char> osi(os);
std::copy(isi, end, osi);
}
else
{
// binary copy:
fs::copy_file(m_boost_path / p, m_dest_path / p);
}
}
void bcp_implementation::create_path(const fs::path& p)
{
if(!fs::exists(m_dest_path / p))
{
// recurse then create the path:
create_path(p.branch_path());
fs::create_directory(m_dest_path / p);
}
}
| [
"znation@apple.com"
] | znation@apple.com |
a38a4a156ff1a5f78bfd385dd1174b05c6205593 | a4a018a69e15e2edd43d1c4331b611c46f7a9277 | /CodeChef/FCTRL.cpp | 9657dce29ac9565a8daf5a5f1229e02ad7cb84ee | [] | no_license | sanchitkum/algorithmic-solutions | 207765580d02869cd59ec577db8662a6752a40e2 | b1eebee2ca3a012caf5f3cb6aa5cd799113218e8 | refs/heads/master | 2016-09-05T17:17:55.031903 | 2016-03-01T02:10:18 | 2016-03-01T02:10:18 | 40,711,722 | 11 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 231 | cpp | #include<stdio.h>
int main()
{
int n,i,cnt;
long unsigned no,div;
scanf("%d",&n);
for(i=1;i<=n;i++)
{
cnt=0;
scanf("%lu",&no);
for(div=5;div<=no;div*=5)
cnt=cnt+(int)(no/div);
printf("%lu\n",cnt);
}
return 0;
} | [
"sanchitkum@gmail.com"
] | sanchitkum@gmail.com |
5643a447e254d4fecdd84417c2204a63d79677c1 | 33cb74df7b3fa922a18453a01cf1040e9b0f40bb | /include/stxmodel/GstmtInvokeSub.h | 0d6ff19441867a425a8d43d3251393451cafda38 | [] | no_license | OPRoS/TaskEngine | 8f9f7a65ac38127a796f0f258aa7f95644937d5f | cfc703a23f31865220a9f66e82188fc05d14d9a9 | refs/heads/master | 2021-01-21T23:33:51.611221 | 2013-08-27T05:39:16 | 2013-08-27T05:39:16 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | C++ | false | false | 1,163 | h | /*************************************************************************************************
** Project: OPRoS
** Package: OPRoS Task Executor
** Date: 2010.10.30
** Author: Rockwon Kim (rwkim@etri.re.kr)
** Copyright: Copyright (C) 2010 ETRI
** License: OPRoS Source Codes License (www.opros.or.kr)
*************************************************************************************************/
#pragma once
#include <iostream>
#include "stxmodel/Gstmt.h"
#include "stxmodel/Gtoken.h"
#include "stxmodel/GstmtCall.h"
// Statement-model node for an "invoke sub-behavior" statement in the OPRoS
// task script. Stores the call to execute, the token naming the next target,
// an optional boolean-variable token, and a period value for periodic runs.
class GstmtInvokeSub : public Gstmt
{
private:
	Gcall* m_behavior;		//goto behavior, connector
	Gtoken* m_nextTarget;	//next target
	Gtoken* m_boolVar;		// boolean-variable token (exact role not visible here -- TODO confirm against the parser)
	int m_period;			// attribute of the "run"/"with" clause (translated from the Korean original)
public:
	// file/eol locate the statement in the source; blockname/blockpath name the enclosing block.
	GstmtInvokeSub(std::string file, int eol, std::string blockname, std::string blockpath);
	virtual ~GstmtInvokeSub(void);
	char* toString(){return NULL;};   // not implemented: always returns NULL
	void print(unsigned int);         // debug dump, indented by the given depth
	void setInvokeCall(Gcall*);
	void setNextTarget(Gtoken*);
	void setBoolVar(Gtoken*);
	Gcall* getInvokeCall();
	Gtoken* getNextTarget();
	Gtoken* getBoolVar();
	void setPeriod(int period);
	int getPeriod();
};
| [
"yudonguk@naver.com"
] | yudonguk@naver.com |
2f75e2add20d9b66f0fe3b600aef1c82580cbde6 | c91e272cfd99013ef9d56acfa95f5aed39d5c943 | /codes-20180817T022617Z-001/ARM_codes/LEDS/led2/led2.cp | 2927ab07c5d6713a2130907580671b4a7f7c74a6 | [] | no_license | KhaledAbdelgalil/ITI-internship | 334a4d26e686f107abaaf7a002bfaf3a757f445a | 8393d2584948647914fa958d483e59f1ef41cdba | refs/heads/master | 2021-10-23T00:55:19.068152 | 2019-03-14T01:22:05 | 2019-03-14T01:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 566 | cp | #line 1 "C:/Users/pc2/Desktop/embded/microCforArm/led2/led2.c"
/* Blink demo (mikroC PRO for ARM): configure ports A-E as digital
 * outputs, drive every pin high, then toggle all pins every 500 ms. */
void main() {
     GPIO_Digital_Output(&GPIOA_ODR,_GPIO_PINMASK_ALL);
     GPIO_Digital_Output(&GPIOB_ODR,_GPIO_PINMASK_ALL);
     GPIO_Digital_Output(&GPIOC_ODR,_GPIO_PINMASK_ALL);
     GPIO_Digital_Output(&GPIOD_ODR,_GPIO_PINMASK_ALL);
     GPIO_Digital_Output(&GPIOE_ODR,_GPIO_PINMASK_ALL);
     GPIOA_ODR = 0xFFFF;
     GPIOB_ODR = 0xFFFF;
     GPIOC_ODR = 0xFFFF;
     GPIOD_ODR = 0xFFFF;
     GPIOE_ODR = 0xFFFF;
     while(1)
     {
           /* '^=' toggles the pins; the original "~=" (and "ODR~ =")
              is not a valid C operator and could not have compiled. */
           GPIOA_ODR ^= 0xFFFF;
           GPIOB_ODR ^= 0xFFFF;
           GPIOC_ODR ^= 0xFFFF;
           GPIOD_ODR ^= 0xFFFF;
           GPIOE_ODR ^= 0xFFFF;
           Delay_ms(500);
     }
}  /* closing brace of main restored -- missing in the original */
| [
"khaled.abdelgalil96@gmail.com"
] | khaled.abdelgalil96@gmail.com |
6960861ae1ecef55a4b0ec18b60e4fbe1f49d3b2 | 46f53e9a564192eed2f40dc927af6448f8608d13 | /content/renderer/pepper/plugin_power_saver_helper_browsertest.cc | 7444d1cc45c7c10fa0f6276dede62472832c8a63 | [
"BSD-3-Clause"
] | permissive | sgraham/nope | deb2d106a090d71ae882ac1e32e7c371f42eaca9 | f974e0c234388a330aab71a3e5bbf33c4dcfc33c | refs/heads/master | 2022-12-21T01:44:15.776329 | 2015-03-23T17:25:47 | 2015-03-23T17:25:47 | 32,344,868 | 2 | 2 | null | null | null | null | UTF-8 | C++ | false | false | 6,280 | cc | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/run_loop.h"
#include "content/common/frame_messages.h"
#include "content/common/view_message_enums.h"
#include "content/public/common/content_constants.h"
#include "content/public/test/render_view_test.h"
#include "content/renderer/pepper/plugin_power_saver_helper.h"
#include "content/renderer/render_frame_impl.h"
#include "content/renderer/render_view_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/web/WebDocument.h"
#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebPluginParams.h"
#include "url/gurl.h"
namespace content {
// RenderViewTest fixture that exposes the main frame's PluginPowerSaverHelper
// and the render thread's IPC test sink, so tests can drive throttling
// decisions and inspect the IPC messages that result.
class PluginPowerSaverHelperTest : public RenderViewTest {
 public:
  PluginPowerSaverHelperTest() : sink_(NULL) {}
  // Main render frame of the test view, downcast to the impl type.
  RenderFrameImpl* frame() {
    return static_cast<RenderFrameImpl*>(view_->GetMainRenderFrame());
  }
  // Power-saver helper owned by the main frame (object under test).
  PluginPowerSaverHelper* helper() {
    return frame()->plugin_power_saver_helper();
  }
  void SetUp() override {
    RenderViewTest::SetUp();
    sink_ = &render_thread_->sink();  // capture outgoing IPC for inspection
  }
  // Builds WebPluginParams carrying poster/height/width attributes for |url|.
  blink::WebPluginParams MakeParams(const std::string& url,
                                    const std::string& poster,
                                    const std::string& width,
                                    const std::string& height) {
    const size_t size = 3;
    blink::WebVector<blink::WebString> names(size);
    blink::WebVector<blink::WebString> values(size);
    blink::WebPluginParams params;
    params.url = GURL(url);
    params.attributeNames.swap(names);
    params.attributeValues.swap(values);
    params.attributeNames[0] = "poster";
    params.attributeNames[1] = "height";
    params.attributeNames[2] = "width";
    params.attributeValues[0] = blink::WebString::fromUTF8(poster);
    params.attributeValues[1] = blink::WebString::fromUTF8(height);
    params.attributeValues[2] = blink::WebString::fromUTF8(width);
    return params;
  }
 protected:
  IPC::TestSink* sink_;  // not owned; points into render_thread_
  DISALLOW_COPY_AND_ASSIGN(PluginPowerSaverHelperTest);
};
// Same-origin content (empty GURL here) is never throttled, at any size.
TEST_F(PluginPowerSaverHelperTest, AllowSameOrigin) {
  EXPECT_FALSE(helper()->ShouldThrottleContent(GURL(), kFlashPluginName, 100,
                                               100, nullptr));
  EXPECT_FALSE(helper()->ShouldThrottleContent(GURL(), kFlashPluginName, 1000,
                                               1000, nullptr));
}
// Cross-origin content is throttled unless it is large enough to be
// considered main content (the out-param reports the "main content" verdict).
TEST_F(PluginPowerSaverHelperTest, DisallowCrossOriginUnlessLarge) {
  bool cross_origin_main_content = false;
  EXPECT_TRUE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                              kFlashPluginName, 100, 100,
                                              &cross_origin_main_content));
  EXPECT_FALSE(cross_origin_main_content);
  EXPECT_FALSE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                               kFlashPluginName, 1000, 1000,
                                               &cross_origin_main_content));
  EXPECT_TRUE(cross_origin_main_content);
}
// Tiny content (a few pixels) is never throttled regardless of origin;
// 10x10 is apparently the first size that is throttled cross-origin.
TEST_F(PluginPowerSaverHelperTest, AlwaysAllowTinyContent) {
  bool cross_origin_main_content = false;
  EXPECT_FALSE(
      helper()->ShouldThrottleContent(GURL(), kFlashPluginName, 1, 1, nullptr));
  EXPECT_FALSE(cross_origin_main_content);
  EXPECT_FALSE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                               kFlashPluginName, 1, 1,
                                               &cross_origin_main_content));
  EXPECT_FALSE(cross_origin_main_content);
  EXPECT_FALSE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                               kFlashPluginName, 5, 5,
                                               &cross_origin_main_content));
  EXPECT_FALSE(cross_origin_main_content);
  EXPECT_TRUE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                              kFlashPluginName, 10, 10,
                                              &cross_origin_main_content));
  EXPECT_FALSE(cross_origin_main_content);
}
// Whitelisting an origin stops throttling for it and notifies the browser
// via a FrameHostMsg_PluginContentOriginAllowed IPC.
TEST_F(PluginPowerSaverHelperTest, TemporaryOriginWhitelist) {
  bool cross_origin_main_content = false;
  EXPECT_TRUE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                              kFlashPluginName, 100, 100,
                                              &cross_origin_main_content));
  EXPECT_FALSE(cross_origin_main_content);
  // Clear out other messages so we find just the plugin power saver IPCs.
  sink_->ClearMessages();
  helper()->WhitelistContentOrigin(GURL("http://b.com"));
  EXPECT_FALSE(helper()->ShouldThrottleContent(GURL("http://b.com"),
                                               kFlashPluginName, 100, 100,
                                               &cross_origin_main_content));
  EXPECT_FALSE(cross_origin_main_content);
  // Test that we've sent an IPC to the browser.
  ASSERT_EQ(1u, sink_->message_count());
  const IPC::Message* msg = sink_->GetMessageAt(0);
  EXPECT_EQ(FrameHostMsg_PluginContentOriginAllowed::ID, msg->type());
  FrameHostMsg_PluginContentOriginAllowed::Param params;
  FrameHostMsg_PluginContentOriginAllowed::Read(msg, &params);
  EXPECT_EQ(GURL("http://b.com"), get<0>(params));
}
// A plugin registered as peripheral is unthrottled (its closure runs) when
// the browser later pushes a whitelist containing its origin.
TEST_F(PluginPowerSaverHelperTest, UnthrottleOnExPostFactoWhitelist) {
  base::RunLoop loop;
  frame()->RegisterPeripheralPlugin(GURL("http://other.com"),
                                    loop.QuitClosure());
  std::set<GURL> origin_whitelist;
  origin_whitelist.insert(GURL("http://other.com"));
  frame()->OnMessageReceived(FrameMsg_UpdatePluginContentOriginWhitelist(
      frame()->GetRoutingID(), origin_whitelist));
  // Runs until the unthrottle closure is run.
  loop.Run();
}
// Navigating away clears the temporary origin whitelist.
TEST_F(PluginPowerSaverHelperTest, ClearWhitelistOnNavigate) {
  helper()->WhitelistContentOrigin(GURL("http://b.com"));
  EXPECT_FALSE(helper()->ShouldThrottleContent(
      GURL("http://b.com"), kFlashPluginName, 100, 100, nullptr));
  LoadHTML("<html></html>");
  EXPECT_TRUE(helper()->ShouldThrottleContent(
      GURL("http://b.com"), kFlashPluginName, 100, 100, nullptr));
}
}  // namespace content
| [
"scottmg@chromium.org"
] | scottmg@chromium.org |
726757763bf4ef065e82a8d8b9536cebd788e2f7 | 84b2888af824cdfaf14d1f03ce592d924889d90f | /myFramework/material.cpp | 880c36366d43202eaa150ed0498635f8bbe9d0b8 | [] | no_license | GermanScientist/Framework | 26d8a5a3b2f2647765ab9c67f46bf5587faa7743 | 48f925bc4f4058b93bfac989e80921b5ab367c5c | refs/heads/master | 2023-08-16T23:42:58.946416 | 2023-08-09T14:56:41 | 2023-08-09T14:56:41 | 313,586,894 | 0 | 1 | null | 2021-03-16T19:31:07 | 2020-11-17T10:41:12 | C++ | UTF-8 | C++ | false | false | 338 | cpp | #include <myFramework/material.h>
//Constructor
Material::Material()
{
texture = -1;
//Load shaders
shader = new Shader();
// Load the texture using the texture loader
textureloader = new Textureloader();
}
//Destructor
Material::~Material()
{
//Deletes texture loader
delete textureloader;
//Deletes shader
delete shader;
} | [
"jack_hulspas@hotmail.com"
] | jack_hulspas@hotmail.com |
e5e440387ee130a7e555c23d79630738ea39db0b | e99c20155e9b08c7e7598a3f85ccaedbd127f632 | / sjtu-project-pipe/thirdparties/VTK.Net/src/Imaging/vtkImageMathematics.h | 993201e88baa9b5961bdc4fe154e46220835c162 | [
"BSD-3-Clause"
] | permissive | unidevop/sjtu-project-pipe | 38f00462d501d9b1134ce736bdfbfe4f9d075e4a | 5a09f098db834d5276a2921d861ef549961decbe | refs/heads/master | 2020-05-16T21:32:47.772410 | 2012-03-19T01:24:14 | 2012-03-19T01:24:14 | 38,281,086 | 1 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 5,030 | h | /*=========================================================================
Program: Visualization Toolkit
Module: $RCSfile: vtkImageMathematics.h,v $
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// .NAME vtkImageMathematics - Add, subtract, multiply, divide, invert, sin, cos, exp, log.
// .SECTION Description
// vtkImageMathematics implements basic mathematic operations SetOperation is
// used to select the filters behavior. The filter can take two or one
// input.
#ifndef __vtkImageMathematics_h
#define __vtkImageMathematics_h
// Operation options.
#define VTK_ADD 0
#define VTK_SUBTRACT 1
#define VTK_MULTIPLY 2
#define VTK_DIVIDE 3
#define VTK_INVERT 4
#define VTK_SIN 5
#define VTK_COS 6
#define VTK_EXP 7
#define VTK_LOG 8
#define VTK_ABS 9
#define VTK_SQR 10
#define VTK_SQRT 11
#define VTK_MIN 12
#define VTK_MAX 13
#define VTK_ATAN 14
#define VTK_ATAN2 15
#define VTK_MULTIPLYBYK 16
#define VTK_ADDC 17
#define VTK_CONJUGATE 18
#define VTK_COMPLEX_MULTIPLY 19
#define VTK_REPLACECBYK 20
#include "vtkThreadedImageAlgorithm.h"
// Threaded image filter applying one of the VTK_* operations above to one
// input (unary ops) or two inputs (binary ops); selected via SetOperation.
class VTK_IMAGING_EXPORT vtkImageMathematics : public vtkThreadedImageAlgorithm
{
public:
  static vtkImageMathematics *New();
  vtkTypeRevisionMacro(vtkImageMathematics,vtkThreadedImageAlgorithm);
  void PrintSelf(ostream& os, vtkIndent indent);
  // Description:
  // Set/Get the Operation to perform.
  vtkSetMacro(Operation,int);
  vtkGetMacro(Operation,int);
  // Convenience setters, one per VTK_* operation code defined above.
  void SetOperationToAdd() {this->SetOperation(VTK_ADD);};
  void SetOperationToSubtract() {this->SetOperation(VTK_SUBTRACT);};
  void SetOperationToMultiply() {this->SetOperation(VTK_MULTIPLY);};
  void SetOperationToDivide() {this->SetOperation(VTK_DIVIDE);};
  void SetOperationToConjugate() {this->SetOperation(VTK_CONJUGATE);};
  void SetOperationToComplexMultiply()
    {this->SetOperation(VTK_COMPLEX_MULTIPLY);};
  void SetOperationToInvert() {this->SetOperation(VTK_INVERT);};
  void SetOperationToSin() {this->SetOperation(VTK_SIN);};
  void SetOperationToCos() {this->SetOperation(VTK_COS);};
  void SetOperationToExp() {this->SetOperation(VTK_EXP);};
  void SetOperationToLog() {this->SetOperation(VTK_LOG);};
  void SetOperationToAbsoluteValue() {this->SetOperation(VTK_ABS);};
  void SetOperationToSquare() {this->SetOperation(VTK_SQR);};
  void SetOperationToSquareRoot() {this->SetOperation(VTK_SQRT);};
  void SetOperationToMin() {this->SetOperation(VTK_MIN);};
  void SetOperationToMax() {this->SetOperation(VTK_MAX);};
  void SetOperationToATAN() {this->SetOperation(VTK_ATAN);};
  void SetOperationToATAN2() {this->SetOperation(VTK_ATAN2);};
  void SetOperationToMultiplyByK() {this->SetOperation(VTK_MULTIPLYBYK);};
  void SetOperationToAddConstant() {this->SetOperation(VTK_ADDC);};
  void SetOperationToReplaceCByK() {this->SetOperation(VTK_REPLACECBYK);};
  // Constants used by the *ByK / AddConstant / ReplaceCByK operations.
  vtkSetMacro(ConstantK,double);
  vtkGetMacro(ConstantK,double);
  vtkSetMacro(ConstantC,double);
  vtkGetMacro(ConstantC,double);
  // How to handle divide by zero
  vtkSetMacro(DivideByZeroToC,int);
  vtkGetMacro(DivideByZeroToC,int);
  vtkBooleanMacro(DivideByZeroToC,int);
  // Description:
  // Set the two inputs to this filter
  virtual void SetInput1(vtkDataObject *in) { this->SetInput(0,in); }
  virtual void SetInput2(vtkDataObject *in) { this->SetInput(1,in); }
protected:
  vtkImageMathematics();
  ~vtkImageMathematics() {};
  int Operation;        // one of the VTK_* codes above
  double ConstantK;
  double ConstantC;
  int DivideByZeroToC;  // nonzero: x/0 yields ConstantC instead of an error value
  virtual int RequestInformation (vtkInformation *,
                                  vtkInformationVector **,
                                  vtkInformationVector *);
  virtual void ThreadedRequestData(vtkInformation *request,
                                   vtkInformationVector **inputVector,
                                   vtkInformationVector *outputVector,
                                   vtkImageData ***inData,
                                   vtkImageData **outData,
                                   int extent[6], int threadId);
  virtual int FillInputPortInformation(int port, vtkInformation* info);
private:
  vtkImageMathematics(const vtkImageMathematics&);  // Not implemented.
  void operator=(const vtkImageMathematics&);  // Not implemented.
};
#endif
| [
"useminmin@gmail.com"
] | useminmin@gmail.com |
81c75580fd54a9c32e73c5a36f51a38f3e0860a5 | 25b218c6fa6f3b0d232140b429106ae8d27e1cfd | /hw_impl_2/ad_grid_update.hpp | b9961e48cca657ab7ed3d2ee48ff7cdf11bed0c0 | [] | no_license | djmmoss/SAD | 108bf9816bbe83cf083fe6ad297a24ce5cd086e8 | 3c0b974e5bff788091389d40ac07934be23a8188 | refs/heads/master | 2021-05-16T04:20:27.478159 | 2016-01-15T02:53:38 | 2016-01-15T02:53:38 | 25,385,078 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 139 | hpp | #ifndef AD_GRID_UPDATE_H_
#define AD_GRID_UPDATE_H_
// HLS top-level: consumes grid indices from |inData| and streams updated raw
// values to |outData| (stream/axi types are declared elsewhere in the project).
void ad_grid_update(stream<axiIndex> &inData, stream<axiRawValues> &outData);
#endif
| [
"djm.moss@gmail.com"
] | djm.moss@gmail.com |
57c5e0bb12cb93b29c0179bf2ab7b940d346a03c | 7f461f4a8be5e42a20b150ac170b040e6a068e1f | /library/sequence/lcslen-string.cpp | 74cc8a7302f4f5998aa1dbb0c54e0bef73fc089e | [] | no_license | idaten459/competitive-programming | abf46a811bef848173899fac2d66cd253f523832 | c46813bc8f77d600cd585f5fefb5a48cc60ce95f | refs/heads/master | 2023-05-04T11:34:03.732318 | 2021-05-29T10:28:49 | 2021-05-29T10:28:49 | 185,630,869 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 623 | cpp | /*
LCS(最長共通部分列)のlcs長
vectorを引数にとる
stringではlcs-stringを参照
計算量はO(st)
*/
/*
 * Length of the longest common subsequence (LCS) of two strings.
 * Classic O(|s| * |t|) dynamic program over a (|s|+1) x (|t|+1) table.
 *
 * The template parameter is unused but kept (now defaulted) for backward
 * compatibility with callers that wrote lcslen<T>(s, t).  In the original,
 * T could never be deduced, so plain lcslen(s, t) did not even compile;
 * with the default it now works.  Strings are taken by const reference.
 */
template<typename T = char>
int lcslen(const std::string& s, const std::string& t){
    const std::size_t ls = s.size();
    const std::size_t lt = t.size();
    std::vector<std::vector<int>> lcs(ls + 1, std::vector<int>(lt + 1, 0));
    for(std::size_t i = 1; i <= ls; ++i){
        for(std::size_t j = 1; j <= lt; ++j){
            if(s[i-1] == t[j-1]){
                // Matching characters extend the best subsequence so far.
                lcs[i][j] = lcs[i-1][j-1] + 1;
            }else{
                lcs[i][j] = std::max(lcs[i-1][j], lcs[i][j-1]);
            }
        }
    }
    return lcs[ls][lt];
}
"shukai2013@gmail.com"
] | shukai2013@gmail.com |
42ff1a6a87e991900bb6cda519acee229450dc84 | fe47c9f5c6e79fdf8994a9262520d33e29e0f9b2 | /buttons/buttons.ino | c277a5eddfff943a0356e16e941d4da22b87fd6f | [] | no_license | pedrorv/arduino-courses | 7ae65b76bcdd3cf0e90004e3682870743a44b91f | 81374e3cb9721a57113d71d3eff6cc064d257dec | refs/heads/master | 2020-05-21T02:37:22.990353 | 2017-07-13T20:26:20 | 2017-07-13T20:26:20 | 84,561,609 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 277 | ino | const int ledPin = 13;
const int inputPin = 2;
// One-time hardware setup: the button pin is configured as an input and
// the LED pin as an output.
void setup() {
  pinMode(inputPin, INPUT);   // push button
  pinMode(ledPin, OUTPUT);    // indicator LED
}
// Continuously mirror the button state onto the LED.
void loop() {
  int buttonState = digitalRead(inputPin);
  digitalWrite(ledPin, buttonState);
}
| [
"pedroreisv@gmail.com"
] | pedroreisv@gmail.com |
1a8ac8fbc73fedae45580ea982ba0df272ed944a | 9ec8f808b70b30e6dea8c2a98b4a269b7a4021a6 | /meshshaderclass.h | 1f7a41a10db728d7401e176d06a4313e150969b3 | [] | no_license | adomalewski/DirectXGame | 5daaeb78ca308aff8a7ce24dc7bd5cc253d779ab | 6d706c7450eba9a69bba670a33d9da414cded83c | refs/heads/master | 2021-01-13T12:35:37.839306 | 2017-01-22T20:11:24 | 2017-01-22T20:11:24 | 72,575,223 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,740 | h | #ifndef MESHSHADERCLASS_H
#define MESHSHADERCLASS_H
#include <d3d11.h>
#include <d3dx10math.h>
#include <d3dx11async.h>
#include <fstream>
using namespace std;
// Direct3D 11 shader wrapper for textured/lit meshes: compiles the shaders,
// owns the constant buffers it feeds them, and issues the indexed draw.
class MeshShaderClass
{
private:
	// Per-frame transform constants (world/view/projection), uploaded to the vertex shader.
	struct MatrixBufferType
	{
		D3DXMATRIX world;
		D3DXMATRIX view;
		D3DXMATRIX projection;
	};
	// Per-mesh material constants; padding keeps the 16-byte cbuffer alignment.
	struct MeshBufferType
	{
		D3DXVECTOR4 difColor;
		bool hasTexture;
		D3DXVECTOR3 padding;
	};
	// Camera position for specular lighting; padded to 16 bytes.
	struct CameraBufferType
	{
		D3DXVECTOR3 cameraPosition;
		float padding;
	};
	// Directional light parameters.
	struct LightBufferType
	{
		D3DXVECTOR4 ambientColor;
		D3DXVECTOR4 diffuseColor;
		D3DXVECTOR3 lightDirection;
		float specularPower;
		D3DXVECTOR4 specularColor;
	};
public:
	MeshShaderClass();
	MeshShaderClass(const MeshShaderClass&);
	~MeshShaderClass();
	bool Initialize(ID3D11Device*, HWND);
	void Shutdown();
	// Sets all shader parameters, then draws the given index range.
	bool Render(ID3D11DeviceContext*, int, int, D3DXMATRIX, D3DXMATRIX, D3DXMATRIX, ID3D11ShaderResourceView*, D3DXVECTOR3,
		D3DXVECTOR4, D3DXVECTOR4, D3DXVECTOR3, D3DXVECTOR4, float, D3DXVECTOR4, bool);
private:
	bool InitializeShader(ID3D11Device*, HWND, LPCSTR, LPCSTR);
	void ShutdownShader();
	void OutputShaderErrorMessage(ID3D10Blob*, HWND, LPCSTR);
	bool SetShaderParameters(ID3D11DeviceContext*, D3DXMATRIX, D3DXMATRIX, D3DXMATRIX, ID3D11ShaderResourceView*, D3DXVECTOR3,
		D3DXVECTOR4, D3DXVECTOR4, D3DXVECTOR3, D3DXVECTOR4, float, D3DXVECTOR4, bool);
	void RenderShader(ID3D11DeviceContext*, int, int);
private:
	ID3D11VertexShader* m_vertexShader;
	ID3D11PixelShader* m_pixelShader;
	ID3D11InputLayout* m_layout;
	ID3D11SamplerState* m_sampleState;
	ID3D11Buffer* m_matrixBuffer;
	ID3D11Buffer* m_meshBuffer;
	ID3D11Buffer* m_cameraBuffer;
	ID3D11Buffer* m_lightBuffer;
	ID3D11RasterizerState* RSCullNone;   // rasterizer state with culling disabled
};
#endif // MESHSHADERCLASS_H
| [
"adomalewski@adomalewski-wro.pgs-soft.com"
] | adomalewski@adomalewski-wro.pgs-soft.com |
363764ed50a60230a0cacc2743500f112a9e29bf | 0a3940d6f8c083900db072876f3afd769bbfdb71 | /2019 July 28/soldiers.cpp | cb5cc76fe924850866eafcc62181000837b34eb6 | [] | no_license | HelioStrike/Arjuna-Code | 174a104d4c8687535ae06ca28d71188eeb95784e | 1679ba15990116e6abba80979f228b0860ce8dc4 | refs/heads/master | 2020-04-21T23:38:11.147929 | 2019-09-08T05:05:36 | 2019-09-08T05:05:36 | 169,952,501 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,269 | cpp | #include <bits/stdc++.h>
#define FOR(i,a,b) for(int i = (a); i < (b); i++)
using namespace std;
string s,t,p,q;
int x,dp[10010];
// Parse a decimal digit string into its integer value (no sign/overflow
// handling; inputs here are 4-digit lock states).
int stoin(std::string k)
{
    int value = 0;
    for(char c : k)
        value = value * 10 + (c - '0');
    return value;
}
// Decimal representation of k. Note: to_str(0) yields "" -- the caller
// pads results with leading zeros afterwards, so this matches its needs.
std::string to_str(int k)
{
    std::string digits;
    while(k){
        digits.push_back(char('0' + k % 10));
        k /= 10;
    }
    std::reverse(digits.begin(), digits.end());
    return digits;
}
// Combination-lock style puzzle: states are 4-digit codes 0000..9999; a move
// increments or decrements one digit (with wraparound). All whitespace-
// separated tokens on stdin except the LAST are forbidden states; the last
// token is the target. Prints the minimum number of moves from 0000 to the
// target avoiding forbidden states, or -1 if unreachable.
int main()
{
	// memset with byte 16 fills dp with 0x10101010 == 269488144, used as
	// "infinity"; dp[i] == -1 marks a forbidden state. dp[0] is the start.
	// t starts as "10001" so the first loop iteration marks dp[10001],
	// an otherwise-unused sentinel slot (array is sized 10010).
	t="10001"; memset(dp,16,sizeof(dp)); dp[0]=dp[10000]=0;
	// Each iteration marks the PREVIOUS token forbidden, so after the loop
	// t holds the final (target) token, which is never marked.
	while(cin>>s) dp[stoin(t)]=-1,t=s;
	if(dp[0]==-1) { cout<<-1<<'\n'; return 0; }
	// Forward pass: relax "+1 on one digit" moves in increasing state order.
	// NOTE(review): two sweeps over a fixed order approximate a BFS; this
	// presumably suffices for the intended test data -- confirm vs. full BFS.
	FOR(i,0,10000)
	{
		if(dp[i]==-1) continue;
		p=to_str(i);
		while(p.length()<4) p="0"+p;   // left-pad to 4 digits
		FOR(j,0,4)
		{
			q=p; q[j]+=1;
			if(q[j]>'9') q[j]='0';      // digit wraps 9 -> 0
			x=stoin(q);
			if(dp[x]==-1) continue;     // forbidden state: skip
			dp[x]=min(dp[x],dp[i]+1);
		}
	}
	// Backward pass: relax "-1 on one digit" moves in decreasing state order.
	for(int i=10000;i>0;i--)
	{
		if(dp[i]==-1) continue;
		p=to_str(i);
		while(p.length()<4) p="0"+p;
		if(p.length()>4) p=p.substr(1,4);   // defensive trim (only i==10000 pads to 5)
		FOR(j,0,4)
		{
			q=p; q[j]-=1;
			if(q[j]<'0') q[j]='9';      // digit wraps 0 -> 9
			x=stoin(q);
			if(dp[x]==-1) continue;
			dp[x]=min(dp[x],dp[i]+1);
		}
	}
	// 269488144 is the untouched memset "infinity" -> target unreachable.
	cout<<(dp[stoin(t)]==269488144?-1:dp[stoin(t)])<<'\n';
	return 0;
}
"rageofepicfury@gmail.com"
] | rageofepicfury@gmail.com |
d522adb8e3921863ee872208cc1177dbded78987 | f83ef53177180ebfeb5a3e230aa29794f52ce1fc | /ACE/ACE_wrappers/TAO/TAO_IDL/be_include/be_visitor_interface_fwd/interface_fwd_ch.h | f40ecff308d8ba96415093612384b44e20ba6cfd | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-sun-iiop"
] | permissive | msrLi/portingSources | fe7528b3fd08eed4a1b41383c88ee5c09c2294ef | 57d561730ab27804a3172b33807f2bffbc9e52ae | refs/heads/master | 2021-07-08T01:22:29.604203 | 2019-07-10T13:07:06 | 2019-07-10T13:07:06 | 196,183,165 | 2 | 1 | Apache-2.0 | 2020-10-13T14:30:53 | 2019-07-10T10:16:46 | null | UTF-8 | C++ | false | false | 951 | h | /* -*- c++ -*- */
//=============================================================================
/**
* @file interface_fwd_ch.h
*
* Concrete visitor for the Interface Forward node.
* This one provides code generation for interface forward node.
*
* @author Aniruddha Gokhale
*/
//=============================================================================
#ifndef _BE_INTERFACE_INTERFACE_FWD_CH_H_
#define _BE_INTERFACE_INTERFACE_FWD_CH_H_
/**
* @class be_visitor_interface_fwd_ch
*
* @brief be_visitor_interface_fwd_ch
*
* This is the visitor for interface_fwd for the header file
*/
/**
 * @class be_visitor_interface_fwd_ch
 *
 * @brief be_visitor_interface_fwd_ch
 *
 * This is the visitor for interface_fwd for the header file
 */
class be_visitor_interface_fwd_ch : public be_visitor_decl
{
public:
  /// constructor; @param ctx code-generation context shared by all visitors
  be_visitor_interface_fwd_ch (be_visitor_context *ctx);
  /// destructor
  ~be_visitor_interface_fwd_ch (void);
  /// visit interface_fwd: emits the forward declaration into the client header.
  virtual int visit_interface_fwd (be_interface_fwd *node);
};
#endif /* _BE_INTERFACE_INTERFACE_FWD_CH_H_ */
| [
"lihuibin705@163.com"
] | lihuibin705@163.com |
3904dfbe0f23a0af81734bd1b6576b83ab71f46e | 5700dac204805d113492a1147ac07fde952a4952 | /T3000/DialogT3.cpp | 5af365fd0cef76048119e500162c79fa31c355fe | [
"MIT"
] | permissive | Doanlmit/T3000_Building_Automation_System | 2e1cfe7154cb4fea8e7311765e3dbe29f1a2c264 | f7824c1772a04624f1de29e6e92cf20e5597aa36 | refs/heads/master | 2021-01-20T16:22:12.710466 | 2017-05-09T08:56:21 | 2017-05-09T08:56:21 | 90,835,334 | 1 | 0 | null | 2017-05-10T07:43:06 | 2017-05-10T07:43:06 | null | GB18030 | C++ | false | false | 8,091 | cpp | // DialogT3.cpp : implementation file
//
#include "stdafx.h"
#include "T3000.h"
#include "DialogT3.h"
// CDialogT3
IMPLEMENT_DYNCREATE(CDialogT3, CFormView)
// Construct the T3 form view: attach the dialog template and zero every
// DDX-bound numeric field; the model string starts empty.
CDialogT3::CDialogT3()
	: CFormView(CDialogT3::IDD)
	, m_address(0)
	, m_firmware(0)
	, m_serial(0)
	, m_hardware(0)
	, m_model(_T(""))
	, m_picVersion(0)
{
}
// No owned resources to release.
CDialogT3::~CDialogT3()
{
}
// MFC DDX: bind the numeric/string members and the two MSFlexGrid controls
// to their dialog resource IDs (used by UpdateData in ShowDialogData).
void CDialogT3::DoDataExchange(CDataExchange* pDX)
{
	CFormView::DoDataExchange(pDX);
	DDX_Text(pDX, IDC_EDIT_T3ADDRESS, m_address);
	DDX_Text(pDX, IDC_EDIT_T3FIRMWARE, m_firmware);
	DDX_Text(pDX, IDC_EDIT_T3SERIAL, m_serial);
	DDX_Text(pDX, IDC_EDIT_T3HARDWARE, m_hardware);
	DDX_Text(pDX, IDC_EDIT_T3MODEL, m_model);
	DDX_Text(pDX, IDC_EDIT_T3PICVERSION, m_picVersion);
	DDX_Control(pDX, IDC_MSFLEXGRID_INPUT, m_msflexgrid_input);
	DDX_Control(pDX, IDC_MSFLEXGRID_OUTPUT, m_msflexgrid_output);
}
BEGIN_MESSAGE_MAP(CDialogT3, CFormView)
END_MESSAGE_MAP()
// CDialogT3 diagnostics
#ifdef _DEBUG
void CDialogT3::AssertValid() const
{
CFormView::AssertValid();
}
#ifndef _WIN32_WCE
void CDialogT3::Dump(CDumpContext& dc) const
{
CFormView::Dump(dc);
}
#endif
#endif //_DEBUG
// CDialogT3 message handlers
// First-update hook; would populate the Input/Output MSFlexGrid layouts.
// NOTE(review): the first statement returns immediately, so EVERYTHING below
// it is unreachable dead code -- presumably a deliberate (temporary?) disable.
// Chinese comments below are translated to English.
void CDialogT3::OnInitialUpdate()
{
	return CFormView::OnInitialUpdate();
	//============================================================================================================ Input-section grid of the dialog
	// Column (horizontal) headers.
	m_msflexgrid_input.put_TextMatrix(0,1,_T("Input Name"));
	m_msflexgrid_input.put_TextMatrix(0,2,_T("Value"));
	//m_msflexgrid_input.put_TextMatrix(0,3,_T("Hand/Off/Auto"));
	m_msflexgrid_input.put_TextMatrix(0,3,_T("Input Filter"));
	m_msflexgrid_input.put_TextMatrix(0,4,_T("Range"));
	m_msflexgrid_input.put_TextMatrix(0,5,_T("Function"));
	// Set the number of columns/rows.
	m_msflexgrid_input.put_Cols(6);
	m_msflexgrid_input.put_Rows(9);
	// Set column widths.
	m_msflexgrid_input.put_ColWidth(0,400);
	//m_msflexgrid_input.put_ColWidth(3,1500);
	// Center-align every column.
	for (int col=0;col<6;col++)
	{
		m_msflexgrid_input.put_ColAlignment(col,4);
	}
	// Alternate row background colors.
	for(int i=1;i<9;i++) // row count
	{
		for(int k=0;k<6;k++) // column count
		{
			if (i%2==1)
			{
				m_msflexgrid_input.put_Row(i);m_msflexgrid_input.put_Col(k);m_msflexgrid_input.put_CellBackColor(RGB(255,255,255));
			}
			else
			{
				m_msflexgrid_input.put_Row(i);m_msflexgrid_input.put_Col(k);m_msflexgrid_input.put_CellBackColor(COLOR_CELL);
			}
		}
	}
	// Gray background for the Range column (original comment said
	// "columns 3,4 rows 11-26"; the code actually grays column 4, rows 1-8).
	for (int i=1;i<9;i++)
	{
		m_msflexgrid_input.put_TextMatrix(i,4,_T("°C"));
		m_msflexgrid_input.put_Row(i);
		m_msflexgrid_input.put_Col(4);
		m_msflexgrid_input.put_CellBackColor(FLEXGRID_CELL_GRAY_COLOR); // gray
	}
	// Row (vertical) headers: "Input 1".."Input 8".
	CString str;
	for(int i=1;i<=8;i++)
	{
		str.Format(_T("%d"),i);
		m_msflexgrid_input.put_TextMatrix(i,0,str);
		str = _T("Input ")+str;
		m_msflexgrid_input.put_TextMatrix(i,1,str);
		m_msflexgrid_input.put_TextMatrix(i,5,_T("Default"));
	}
	// Row headers for inputs 11+; NOTE: the condition (i=11; i<9) is never
	// true, so this loop body never executes.
	str =_T("");
	for(int i=11;i<9;i++)
	{
		str.Format(_T("%d"),i);
		m_msflexgrid_input.put_TextMatrix(i,0,str);
		str = _T("Input ")+str;
		m_msflexgrid_input.put_TextMatrix(i,1,str);
		m_msflexgrid_input.put_TextMatrix(i,3,_T("Auto"));
		m_msflexgrid_input.put_TextMatrix(i,4,_T("On/Off"));
		m_msflexgrid_input.put_TextMatrix(i,5,_T("Default"));
	}
	//unsigned char writevalue[1];
	//writevalue[0] = 0;
	//int flg = Write_Multi(g_tstat_id,writevalue,144,10);
	//TRACE(_T("flg=%d\n"),flg);
	//ASSERT(flg>0);
	//============================================================================================================ Output-section grid of the dialog
	// Set the number of rows/columns.
	m_msflexgrid_output.put_Rows(14);
	m_msflexgrid_output.put_Cols(6);
	// Set column widths.
	m_msflexgrid_output.put_ColWidth(0,400);
	m_msflexgrid_output.put_ColWidth(3,1500);
	// Column (horizontal) headers.
	m_msflexgrid_output.put_TextMatrix(0,1,_T("Input Name"));
	m_msflexgrid_output.put_TextMatrix(0,2,_T("Value"));
	m_msflexgrid_output.put_TextMatrix(0,3,_T("Auto/Manual"));
	m_msflexgrid_output.put_TextMatrix(0,4,_T("Range"));
	m_msflexgrid_output.put_TextMatrix(0,5,_T("Function"));
	// Center-align every column.
	for (int col=0;col<6;col++)
	{
		m_msflexgrid_output.put_ColAlignment(col,4);
	}
	// Alternate row background colors.
	for(int i=1;i<14;i++) // row count
	{
		for(int k=0;k<6;k++) // column count
		{
			if (i%2==1)
			{
				m_msflexgrid_output.put_Row(i);m_msflexgrid_output.put_Col(k);m_msflexgrid_output.put_CellBackColor(RGB(255,255,255));
			}
			else
			{
				m_msflexgrid_output.put_Row(i);m_msflexgrid_output.put_Col(k);m_msflexgrid_output.put_CellBackColor(COLOR_CELL);
			}
		}
	}
	// Row (vertical) headers: "Output 1".."Output 13", plus range column.
	CString str_output;
	for(int i=1;i<=13;i++)
	{
		str_output.Format(_T("%d"),i);
		m_msflexgrid_output.put_TextMatrix(i,0,str_output);
		str_output = _T("Output ")+str_output;
		m_msflexgrid_output.put_TextMatrix(i,1,str_output);
		//m_msflexgrid_output.put_TextMatrix(i,3,_T("Off"));
		m_msflexgrid_output.put_TextMatrix(i,4,_T("On/Off"));
		m_msflexgrid_output.put_TextMatrix(i,5,_T("Default"));
		m_msflexgrid_output.put_Row(i);
		m_msflexgrid_output.put_Col(4);
		m_msflexgrid_output.put_CellBackColor(FLEXGRID_CELL_GRAY_COLOR);
	}
}
// Refresh entry point called externally; simply repopulates the view
// from the latest register snapshot.
void CDialogT3::Fresh()
{
	ShowDialogData();
}
// Populate the dialog from the global Modbus register snapshot
// (multi_register_value): device identity fields, per-input values and
// filters, and per-output states with their Auto/Manual switch positions.
// Chinese comments below are translated to English.
void CDialogT3::ShowDialogData()
{
	// Device identity registers (indices per the T3 register map).
	m_address= multi_register_value[6];
	m_firmware= multi_register_value[5];
	m_serial= get_serialnumber(); //Address 0-3
	m_hardware= multi_register_value[4];
	if (multi_register_value[7] == 20)
		m_model= _T("T3-8AI13O");
	m_picVersion= multi_register_value[9];
	UpdateData(FALSE);   // push members into the controls
	//183 1 Range Setting for each input.
	//184 1 183 correspond to input1,
	//185 1 184 correspond to input2, etc.
	//186 1 0 = raw data,
	//187 1 1 = 10K Celsius,
	//188 1 2 = 10K Fahrenheit,
	//189 1 3 = 0-100%
	//190 1 4 = ON/OFF,
	// 5 = OFF/ON
	// 6 = Pulse Input
	// 7 = Lighting Control
	// Force every input's range register to 1 (10K Celsius).
	for (int i = 183;i<=190;i++)
	{
		int retRangSet =write_one(g_tstat_id,i,1);
		TRACE(_T("retRangSet=%d\n"),retRangSet);
	}
	CString strinput,stroutput,strfilter;
	//Input Register address 118-133 eg:input1-> 118 high word 119 low word
	// Each input value spans two registers (high word then low word).
	int numrow = 1;
	for (int i = 1;i<=16;i++)
	{
		int temphigh = multi_register_value[i+117]<<16;
		i++;   // advance to the low word of the same input
		int templow = multi_register_value[i+117]&0x0000ffff;
		int temp = temphigh|templow;
		strinput.Format(_T("%d°C"),temp);
		m_msflexgrid_input.put_TextMatrix(numrow,2,strinput);
		//Input Filter address 191-198
		strfilter.Format(_T("%d"),multi_register_value[190+numrow]);
		m_msflexgrid_input.put_TextMatrix(numrow,3,strfilter);
		numrow++;
	}
	//Output Register address 100-112
	// Nonzero register value -> output is On.
	for (int i = 1;i<=13;i++)
	{
		if (multi_register_value[i+99] == 0)
		{
			m_msflexgrid_output.put_TextMatrix(i,2,_T("Off"));
		}
		else if(multi_register_value[i+99] > 0)
		{
			m_msflexgrid_output.put_TextMatrix(i,2,_T("On"));
		}
	}
	// Show Auto/Manual column.
	//116 2 Switch Status, 1 Register output 1-8
	//117 2 Switch Status, 2 Register output 9-13
	// Registers 116/117 encode the physical HAND/AUTO switch positions as
	// bit pairs per output (translated from the Chinese bit-layout tables):
	// with all switches at HAND the odd bits of each pair are set; with all
	// at AUTO the even bits are set. Outputs 1-8 live in reg 116 (scanned
	// from the high pair down), outputs 9-13 in reg 117 starting at bit 6.
	int temp = 1;
	for (int i = 8;i>=1;i--)
	{
		// Manual bit of this output's pair.
		if((multi_register_value[116]&temp)!= 0)
			m_msflexgrid_output.put_TextMatrix(i,3,_T("Manual"));
		temp = temp<<1;
		if ((multi_register_value[116]&temp)!=0)
			m_msflexgrid_output.put_TextMatrix(i,3,_T("Auto"));
		temp = temp<<1;
	}
	temp = 1;
	temp = temp<<6;   // outputs 9-13 start at bit 6 of register 117
	for (int i = 13;i>=9;i--)
	{
		// Manual bit of this output's pair.
		if((multi_register_value[117]&temp)!= 0)
			m_msflexgrid_output.put_TextMatrix(i,3,_T("Manual"));
		temp = temp<<1;
		if ((multi_register_value[117]&temp)!=0)
			m_msflexgrid_output.put_TextMatrix(i,3,_T("Auto"));
		temp = temp<<1;
	}
}
"register@temcocontrols.com"
] | register@temcocontrols.com |
a90b5158640e4114f5a4609c34cb44363f503cf0 | 3a164e9ef0d99d006fd5ff6d67d4a11e3e7b2856 | /race_to_expiry_chtc/eh-sim/libtorch/include/ATen/core/TensorMethods.h | 6b25d5ad90e74c92ad7cae90f12ae7fd57151f13 | [
"MIT"
] | permissive | abhijiths94/r2e_results | bb320ac388a0bbd1eeae23218b97024991c9bc69 | 96def5313b7f46122e6c814c91971521fd6d858f | refs/heads/master | 2021-05-17T22:43:10.798201 | 2020-03-29T19:39:47 | 2020-03-29T19:39:47 | 250,985,394 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 280,198 | h | #pragma once
#include <c10/core/Scalar.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/QScheme.h>
#include <c10/macros/Macros.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/intrusive_ptr.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/NamedTensor.h>
#include <ATen/core/LegacyTypeDispatch.h>
#ifdef USE_STATIC_DISPATCH
#include <ATen/TypeDefault.h>
#include <ATen/CPUType.h>
#include <ATen/QuantizedCPUType.h>
#include <ATen/SparseCPUType.h>
#endif
namespace at {
struct Quantizer;
// This is temporary typedef to enable Quantizer in aten native function API
// we'll remove them when we are actually exposing Quantizer class
// to frontend
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
// Convenience device/dtype converters: each is a thin wrapper over
// Tensor::to() with non_blocking=false and copy=false (i.e. may return
// *this unchanged when no conversion is needed).
inline Tensor Tensor::cpu() const {
  return to(options().device(DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false);
}
// TODO: The Python version also accepts arguments
inline Tensor Tensor::cuda() const {
  return to(options().device(DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false);
}
inline Tensor Tensor::hip() const {
  return to(options().device(DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false);
}
// Change only the scalar type, keeping device/layout.
inline Tensor Tensor::toType(ScalarType t) const {
  return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false);
}
// TODO: Deprecate me
inline Tensor Tensor::toBackend(Backend b) const {
  return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
}
// TensorOptions snapshot (dtype/device/layout) describing this tensor.
inline TensorOptions Tensor::options() const {
  return TensorOptions().dtype(dtype())
                        .device(device())
                        .layout(layout());
}
// all static inline to allow for inlining of the non-dynamic part of dispatch
// Auto-generated dispatch stubs (from native_functions.yaml): with
// USE_STATIC_DISPATCH each method calls TypeDefault directly under an
// AutoNonVariableTypeMode guard; otherwise it looks up the schema in the
// c10 Dispatcher and calls through the unboxed kernel. Do not edit by hand.
inline void Tensor::backward(const Tensor & gradient, bool keep_graph, bool create_graph) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    TypeDefault::backward(const_cast<Tensor&>(*this), gradient, keep_graph, create_graph);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::backward", "");
    return op.callUnboxed<void, const Tensor &, const Tensor &, bool, bool>(const_cast<Tensor&>(*this), gradient, keep_graph, create_graph);
#endif
}
inline void Tensor::set_data(const Tensor & new_data) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    TypeDefault::set_data(const_cast<Tensor&>(*this), new_data);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::set_data", "");
    return op.callUnboxed<void, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), new_data);
#endif
}
inline Tensor Tensor::data() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::data(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::data", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline bool Tensor::is_leaf() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_leaf(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_leaf", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline int64_t Tensor::output_nr() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::output_nr(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::output_nr", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline int64_t Tensor::_version() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::_version(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_version", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::requires_grad_(bool _requires_grad) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::requires_grad_(const_cast<Tensor&>(*this), _requires_grad);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::requires_grad_", "");
    return op.callUnboxed<Tensor &, Tensor &, bool>(const_cast<Tensor&>(*this), _requires_grad);
#endif
}
// --- Named-tensor operations (dimension names) ------------------------------

// In-place rename of dimensions; nullopt clears the names. Returns *this.
inline Tensor & Tensor::rename_(c10::optional<DimnameList> names) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::rename_(const_cast<Tensor&>(*this), names);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::rename_", "");
    return op.callUnboxed<Tensor &, Tensor &, c10::optional<DimnameList>>(const_cast<Tensor&>(*this), names);
#endif
}
// Out-of-place rename of dimensions.
inline Tensor Tensor::rename(c10::optional<DimnameList> names) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::rename(const_cast<Tensor&>(*this), names);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::rename", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<DimnameList>>(const_cast<Tensor&>(*this), names);
#endif
}
// Permute dimensions to match the given name order.
inline Tensor Tensor::align_to(DimnameList names) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::align_to(const_cast<Tensor&>(*this), names);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::align_to", "");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList>(const_cast<Tensor&>(*this), names);
#endif
}
// Overload dispatched under the "ellipsis_idx" schema name: the second string
// argument to findSchemaOrThrow is the overload name, distinguishing it from
// the plain align_to above.
inline Tensor Tensor::align_to(DimnameList order, int64_t ellipsis_idx) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::align_to(const_cast<Tensor&>(*this), order, ellipsis_idx);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::align_to", "ellipsis_idx");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, int64_t>(const_cast<Tensor&>(*this), order, ellipsis_idx);
#endif
}
// Align this tensor's named dimensions to match another tensor's.
inline Tensor Tensor::align_as(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::align_as(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::align_as", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Refine (add/check) dimension names on this tensor.
inline Tensor Tensor::refine_names(DimnameList names) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::refine_names(const_cast<Tensor&>(*this), names);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::refine_names", "");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList>(const_cast<Tensor&>(*this), names);
#endif
}
// Unflatten a named dimension into the given sizes/names ("Dimname" overload).
inline Tensor Tensor::unflatten(Dimname dim, IntArrayRef sizes, DimnameList names) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::unflatten(const_cast<Tensor&>(*this), dim, sizes, names);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unflatten", "Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, IntArrayRef, DimnameList>(const_cast<Tensor&>(*this), dim, sizes, names);
#endif
}
// Unflatten a positional dimension ("int" overload).
inline Tensor Tensor::unflatten(int64_t dim, IntArrayRef sizes, DimnameList names) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::unflatten(const_cast<Tensor&>(*this), dim, sizes, names);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unflatten", "int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, IntArrayRef, DimnameList>(const_cast<Tensor&>(*this), dim, sizes, names);
#endif
}
// --- Elementwise unary ops --------------------------------------------------
// Each op comes in an out-of-place form (returns a new Tensor) and, where a
// trailing-underscore sibling exists, an in-place form returning *this.

inline Tensor Tensor::abs() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::abs(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::abs", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::abs_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::abs_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::abs_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::angle() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::angle(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::angle", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::real() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::real(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::real", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::imag() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::imag(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::imag", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::conj() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::conj(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::conj", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::acos() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::acos(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::acos", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::acos_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::acos_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::acos_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// --- add / addmv / addr -----------------------------------------------------
// Ops with per-backend kernels: under static dispatch these switch on the
// backend derived from this tensor's dispatch key set, erroring out (AT_ERROR)
// for any backend without a compiled-in kernel. The findSchemaOrThrow overload
// string ("Tensor" vs "Scalar") selects between the two add overloads.

// this + alpha * other (elementwise).
inline Tensor Tensor::add(const Tensor & other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::add(const_cast<Tensor&>(*this), other, alpha);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::add(const_cast<Tensor&>(*this), other, alpha);
            break;
        default:
            AT_ERROR("add not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::add", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// In-place variant of add(Tensor); returns *this.
inline Tensor & Tensor::add_(const Tensor & other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::add_(const_cast<Tensor&>(*this), other, alpha);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::add_(const_cast<Tensor&>(*this), other, alpha);
            break;
        default:
            AT_ERROR("add_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::add_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// this + alpha * other (scalar overload).
inline Tensor Tensor::add(Scalar other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::add(const_cast<Tensor&>(*this), other, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::add", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// In-place variant of add(Scalar); returns *this.
inline Tensor & Tensor::add_(Scalar other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::add_(const_cast<Tensor&>(*this), other, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::add_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// beta * this + alpha * (mat @ vec)  — matrix-vector product accumulate.
inline Tensor Tensor::addmv(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::addmv(const_cast<Tensor&>(*this), mat, vec, beta, alpha);
            break;
        default:
            AT_ERROR("addmv not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addmv", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), mat, vec, beta, alpha);
#endif
}
// In-place variant of addmv; returns *this.
inline Tensor & Tensor::addmv_(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::addmv_(const_cast<Tensor&>(*this), mat, vec, beta, alpha);
            break;
        default:
            AT_ERROR("addmv_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addmv_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), mat, vec, beta, alpha);
#endif
}
// beta * this + alpha * outer(vec1, vec2).
inline Tensor Tensor::addr(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::addr(const_cast<Tensor&>(*this), vec1, vec2, beta, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addr", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), vec1, vec2, beta, alpha);
#endif
}
// In-place variant of addr; returns *this.
inline Tensor & Tensor::addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::addr_(const_cast<Tensor&>(*this), vec1, vec2, beta, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addr_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), vec1, vec2, beta, alpha);
#endif
}
// --- Reductions: all / any / allclose / argmax / argmin ---------------------
// "dim"/"dimname" in findSchemaOrThrow is the schema overload name selecting
// between positional-dim and named-dim variants.

inline Tensor Tensor::all(int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::all(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::all", "dim");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::all(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::all(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::all", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
// Elementwise closeness test within rtol/atol; returns a single bool.
inline bool Tensor::allclose(const Tensor & other, double rtol, double atol, bool equal_nan) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::allclose(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::allclose", "");
    return op.callUnboxed<bool, const Tensor &, const Tensor &, double, double, bool>(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
#endif
}
inline Tensor Tensor::any(int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::any(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::any", "dim");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::any(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::any(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::any", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
// Index of the maximum; dim == nullopt reduces over all elements.
inline Tensor Tensor::argmax(c10::optional<int64_t> dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::argmax(const_cast<Tensor&>(*this), dim, keepdim);
            break;
        default:
            AT_ERROR("argmax not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::argmax", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<int64_t>, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
// Index of the minimum; dim == nullopt reduces over all elements.
inline Tensor Tensor::argmin(c10::optional<int64_t> dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::argmin(const_cast<Tensor&>(*this), dim, keepdim);
            break;
        default:
            AT_ERROR("argmin not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::argmin", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<int64_t>, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
// --- as_strided / asin / atan -----------------------------------------------

// View of this tensor's storage with explicit size/stride/offset. Static
// dispatch also has a QuantizedCPU kernel here (unlike most ops above).
inline Tensor Tensor::as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::as_strided(const_cast<Tensor&>(*this), size, stride, storage_offset);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::as_strided(const_cast<Tensor&>(*this), size, stride, storage_offset);
            break;
        default:
            AT_ERROR("as_strided not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::as_strided", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, IntArrayRef, c10::optional<int64_t>>(const_cast<Tensor&>(*this), size, stride, storage_offset);
#endif
}
// In-place restride of this tensor; returns *this.
inline Tensor & Tensor::as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::as_strided_(const_cast<Tensor&>(*this), size, stride, storage_offset);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::as_strided_", "");
    return op.callUnboxed<Tensor &, Tensor &, IntArrayRef, IntArrayRef, c10::optional<int64_t>>(const_cast<Tensor&>(*this), size, stride, storage_offset);
#endif
}
inline Tensor Tensor::asin() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::asin(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::asin", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::asin_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::asin_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::asin_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::atan() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::atan(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::atan", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// In-place atan; note the static-dispatch path is backend-switched (CPU only)
// while the out-of-place atan above goes through TypeDefault.
inline Tensor & Tensor::atan_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::atan_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("atan_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::atan_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// --- baddbmm / bernoulli / bincount -----------------------------------------

// beta * this + alpha * bmm(batch1, batch2), batched.
inline Tensor Tensor::baddbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::baddbmm(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
            break;
        default:
            AT_ERROR("baddbmm not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::baddbmm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
#endif
}
// In-place variant of baddbmm; returns *this.
inline Tensor & Tensor::baddbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::baddbmm_(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
            break;
        default:
            AT_ERROR("baddbmm_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::baddbmm_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
#endif
}
// Bernoulli sampling using this tensor's values as probabilities; generator
// may be null (uses the default RNG — raw pointer is the generated convention).
inline Tensor Tensor::bernoulli(Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bernoulli(const_cast<Tensor&>(*this), generator);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bernoulli", "");
    return op.callUnboxed<Tensor, const Tensor &, Generator *>(const_cast<Tensor&>(*this), generator);
#endif
}
// In-place Bernoulli draw with per-element probabilities from p ("Tensor" overload).
inline Tensor & Tensor::bernoulli_(const Tensor & p, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::bernoulli_(const_cast<Tensor&>(*this), p, generator);
            break;
        default:
            AT_ERROR("bernoulli_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bernoulli_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, Generator *>(const_cast<Tensor&>(*this), p, generator);
#endif
}
// In-place Bernoulli draw with a single probability p ("float" overload).
inline Tensor & Tensor::bernoulli_(double p, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::bernoulli_(const_cast<Tensor&>(*this), p, generator);
            break;
        default:
            AT_ERROR("bernoulli_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bernoulli_", "float");
    return op.callUnboxed<Tensor &, Tensor &, double, Generator *>(const_cast<Tensor&>(*this), p, generator);
#endif
}
// Out-of-place Bernoulli draw with scalar probability p ("p" overload).
inline Tensor Tensor::bernoulli(double p, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bernoulli(const_cast<Tensor&>(*this), p, generator);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bernoulli", "p");
    return op.callUnboxed<Tensor, const Tensor &, double, Generator *>(const_cast<Tensor&>(*this), p, generator);
#endif
}
// Histogram of integer values, optionally weighted; at least minlength bins.
inline Tensor Tensor::bincount(const Tensor & weights, int64_t minlength) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::bincount(const_cast<Tensor&>(*this), weights, minlength);
            break;
        default:
            AT_ERROR("bincount not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bincount", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, int64_t>(const_cast<Tensor&>(*this), weights, minlength);
#endif
}
// --- Bitwise / logical elementwise ops --------------------------------------
// Out-of-place forms return a new Tensor; trailing-underscore forms mutate
// *this and return it.

inline Tensor Tensor::bitwise_not() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_not(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_not", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::bitwise_not_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_not_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_not_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::logical_not() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_not(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_not", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::logical_not_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_not_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_not_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::logical_xor(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_xor(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_xor", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::logical_xor_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_xor_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_xor_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::logical_and(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_and(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_and", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::logical_and_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_and_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_and_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::logical_or(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_or(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_or", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::logical_or_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logical_or_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logical_or_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// --- bmm / ceil / chunk / clamp ---------------------------------------------

// Batched matrix multiply with mat2.
inline Tensor Tensor::bmm(const Tensor & mat2) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::bmm(const_cast<Tensor&>(*this), mat2);
            break;
        default:
            AT_ERROR("bmm not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bmm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mat2);
#endif
}
inline Tensor Tensor::ceil() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ceil(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ceil", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::ceil_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ceil_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ceil_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Split into `chunks` pieces along `dim`; returns views, not copies
// (per aten::chunk semantics — see ATen docs).
inline std::vector<Tensor> Tensor::chunk(int64_t chunks, int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::chunk(const_cast<Tensor&>(*this), chunks, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::chunk", "");
    return op.callUnboxed<std::vector<Tensor>, const Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), chunks, dim);
#endif
}
// Clamp values into [min, max]; either bound may be nullopt (one-sided clamp).
inline Tensor Tensor::clamp(c10::optional<Scalar> min, c10::optional<Scalar> max) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::clamp(const_cast<Tensor&>(*this), min, max);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::clamp(const_cast<Tensor&>(*this), min, max);
            break;
        default:
            AT_ERROR("clamp not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clamp", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<Scalar>, c10::optional<Scalar>>(const_cast<Tensor&>(*this), min, max);
#endif
}
// In-place clamp; returns *this.
inline Tensor & Tensor::clamp_(c10::optional<Scalar> min, c10::optional<Scalar> max) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::clamp_(const_cast<Tensor&>(*this), min, max);
            break;
        default:
            AT_ERROR("clamp_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clamp_", "");
    return op.callUnboxed<Tensor &, Tensor &, c10::optional<Scalar>, c10::optional<Scalar>>(const_cast<Tensor&>(*this), min, max);
#endif
}
inline Tensor Tensor::clamp_max(Scalar max) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::clamp_max(const_cast<Tensor&>(*this), max);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clamp_max", "");
return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), max);
#endif
}
inline Tensor & Tensor::clamp_max_(Scalar max) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::clamp_max_(const_cast<Tensor&>(*this), max);
break;
default:
AT_ERROR("clamp_max_ not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clamp_max_", "");
return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), max);
#endif
}
inline Tensor Tensor::clamp_min(Scalar min) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::clamp_min(const_cast<Tensor&>(*this), min);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clamp_min", "");
return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), min);
#endif
}
inline Tensor & Tensor::clamp_min_(Scalar min) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::clamp_min_(const_cast<Tensor&>(*this), min);
break;
default:
AT_ERROR("clamp_min_ not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clamp_min_", "");
return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), min);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: contiguous / copy_ / cos / cosh.
// Pattern: under USE_STATIC_DISPATCH, disable the autograd dispatch key and
// call either TypeDefault (backend-agnostic) or a per-backend switch
// (CPU-only here; other backends AT_ERROR). Otherwise resolve the
// "aten::<op>" schema once (static OperatorHandle) and call it unboxed.
// const_cast is the generated way of passing a const method's *this to
// kernels declared with a mutable Tensor& first argument.
// ---------------------------------------------------------------------------
// Return a tensor laid out per `memory_format`; TypeDefault static dispatch.
inline Tensor Tensor::contiguous(MemoryFormat memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::contiguous(const_cast<Tensor&>(*this), memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::contiguous", "");
    return op.callUnboxed<Tensor, const Tensor &, MemoryFormat>(const_cast<Tensor&>(*this), memory_format);
#endif
}
// In-place copy from `src` (optionally non-blocking); TypeDefault path.
inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::copy_(const_cast<Tensor&>(*this), src, non_blocking);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::copy_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, bool>(const_cast<Tensor&>(*this), src, non_blocking);
#endif
}
// Elementwise cosine (out-of-place); TypeDefault path.
inline Tensor Tensor::cos() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cos(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cos", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Elementwise cosine (in-place); static dispatch is CPU-only.
inline Tensor & Tensor::cos_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::cos_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("cos_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cos_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Elementwise hyperbolic cosine (out-of-place); TypeDefault path.
inline Tensor Tensor::cosh() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cosh(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cosh", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Elementwise hyperbolic cosine (in-place); static dispatch is CPU-only.
inline Tensor & Tensor::cosh_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::cosh_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("cosh_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cosh_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: cumulative ops (cummax / cummin /
// cumprod / cumsum), each with an int64_t-dim overload and a named-dimension
// (Dimname) overload. All use the TypeDefault static path; the dynamic path
// resolves the "aten::<op>" schema — the Dimname overloads use the "dimname"
// overload name in findSchemaOrThrow, the int64_t overloads use "".
// cummax/cummin return (values, indices) as a tuple.
// ---------------------------------------------------------------------------
inline std::tuple<Tensor,Tensor> Tensor::cummax(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cummax(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cummax", "");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::cummax(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cummax(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cummax", "dimname");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::cummin(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cummin(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cummin", "");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::cummin(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cummin(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cummin", "dimname");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
// cumprod/cumsum additionally take an optional output dtype.
inline Tensor Tensor::cumprod(int64_t dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cumprod(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cumprod", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
inline Tensor Tensor::cumprod(Dimname dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cumprod(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cumprod", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
inline Tensor Tensor::cumsum(int64_t dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cumsum(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cumsum", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
inline Tensor Tensor::cumsum(Dimname dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cumsum(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cumsum", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: det / diag_embed / diagflat / diagonal /
// fill_diagonal_. All use the TypeDefault static-dispatch path; the dynamic
// path resolves the "aten::<op>" schema once per process and calls it
// unboxed. The named-dimension diagonal overload uses the "Dimname" overload
// name when looking up the schema.
// ---------------------------------------------------------------------------
inline Tensor Tensor::det() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::det(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::det", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::diag_embed(const_cast<Tensor&>(*this), offset, dim1, dim2);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::diag_embed", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, int64_t>(const_cast<Tensor&>(*this), offset, dim1, dim2);
#endif
}
inline Tensor Tensor::diagflat(int64_t offset) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::diagflat(const_cast<Tensor&>(*this), offset);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::diagflat", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), offset);
#endif
}
inline Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::diagonal(const_cast<Tensor&>(*this), offset, dim1, dim2);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::diagonal", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, int64_t>(const_cast<Tensor&>(*this), offset, dim1, dim2);
#endif
}
// Named-dimension variant: note the parameter order (outdim, dim1, dim2,
// offset) differs from the positional overload above.
inline Tensor Tensor::diagonal(Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::diagonal(const_cast<Tensor&>(*this), outdim, dim1, dim2, offset);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::diagonal", "Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, Dimname, Dimname, int64_t>(const_cast<Tensor&>(*this), outdim, dim1, dim2, offset);
#endif
}
// In-place fill of the diagonal with `fill_value` (wrap controls behavior on
// tall matrices per the aten op contract); returns *this.
inline Tensor & Tensor::fill_diagonal_(Scalar fill_value, bool wrap) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::fill_diagonal_(const_cast<Tensor&>(*this), fill_value, wrap);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fill_diagonal_", "");
    return op.callUnboxed<Tensor &, Tensor &, Scalar, bool>(const_cast<Tensor&>(*this), fill_value, wrap);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: div / div_ (Tensor and Scalar overloads)
// and dot. The tensor-tensor division variants have static-dispatch cases
// for both dense CPU and sparse CPU; dot is CPU-only; the Scalar overloads
// go through TypeDefault. Dynamic path: schema lookup by overload name
// ("Tensor" / "Scalar" / "") then an unboxed call.
// ---------------------------------------------------------------------------
// Elementwise division by another tensor; CPU and SparseCPU supported under
// static dispatch.
inline Tensor Tensor::div(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::div(const_cast<Tensor&>(*this), other);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::div(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("div not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::div", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place elementwise division by another tensor; CPU and SparseCPU cases.
inline Tensor & Tensor::div_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::div_(const_cast<Tensor&>(*this), other);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::div_(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("div_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::div_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Division by a scalar (out-of-place); TypeDefault static path.
inline Tensor Tensor::div(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::div(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::div", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Division by a scalar (in-place); TypeDefault static path.
inline Tensor & Tensor::div_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::div_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::div_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Dot product with `tensor`; static dispatch is CPU-only.
inline Tensor Tensor::dot(const Tensor & tensor) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::dot(const_cast<Tensor&>(*this), tensor);
            break;
        default:
            AT_ERROR("dot not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::dot", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), tensor);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: factory methods (new_empty / new_full /
// new_zeros) that create a fresh tensor from an existing one, plus the
// in-place resize_. All use the TypeDefault static-dispatch path; the
// dynamic path resolves the "aten::<op>" schema once and calls it unboxed.
// ---------------------------------------------------------------------------
inline Tensor Tensor::new_empty(IntArrayRef size, const TensorOptions & options) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::new_empty(const_cast<Tensor&>(*this), size, options);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::new_empty", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, const TensorOptions &>(const_cast<Tensor&>(*this), size, options);
#endif
}
inline Tensor Tensor::new_full(IntArrayRef size, Scalar fill_value, const TensorOptions & options) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::new_full(const_cast<Tensor&>(*this), size, fill_value, options);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::new_full", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, Scalar, const TensorOptions &>(const_cast<Tensor&>(*this), size, fill_value, options);
#endif
}
inline Tensor Tensor::new_zeros(IntArrayRef size, const TensorOptions & options) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::new_zeros(const_cast<Tensor&>(*this), size, options);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::new_zeros", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, const TensorOptions &>(const_cast<Tensor&>(*this), size, options);
#endif
}
// In-place resize to `size` with an optional memory format; returns *this.
inline Tensor & Tensor::resize_(IntArrayRef size, c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::resize_(const_cast<Tensor&>(*this), size, memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::resize_", "");
    return op.callUnboxed<Tensor &, Tensor &, IntArrayRef, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), size, memory_format);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: elementwise unary ops (erf / erfc /
// exp / expm1) in out-of-place and in-place pairs, plus expand. Out-of-place
// variants (and expm1_ / expand) use the TypeDefault static path; the
// in-place erf_/erfc_/exp_ have a CPU-only backend switch that AT_ERRORs on
// other backends. Dynamic path: one-time "aten::<op>" schema lookup, then an
// unboxed call.
// ---------------------------------------------------------------------------
inline Tensor Tensor::erf() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::erf(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::erf", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::erf_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::erf_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("erf_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::erf_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::erfc() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::erfc(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::erfc", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::erfc_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::erfc_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("erfc_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::erfc_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::exp() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::exp(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::exp", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::exp_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::exp_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("exp_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::exp_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::expm1() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::expm1(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::expm1", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Note: unlike erf_/erfc_/exp_, expm1_ has no backend switch — it goes
// through TypeDefault even under static dispatch.
inline Tensor & Tensor::expm1_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::expm1_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::expm1_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// View-producing broadcast expansion to `size`; `implicit` is forwarded to
// the aten op unchanged.
inline Tensor Tensor::expand(IntArrayRef size, bool implicit) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::expand(const_cast<Tensor&>(*this), size, implicit);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::expand", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool>(const_cast<Tensor&>(*this), size, implicit);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: expand_as plus the four flatten
// overloads. All use the TypeDefault static path; the dynamic path looks up
// "aten::flatten" under distinct overload names ("using_ints",
// "named_out_dim", "using_names", "DimnameList") that mirror the C++
// parameter lists.
// ---------------------------------------------------------------------------
inline Tensor Tensor::expand_as(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::expand_as(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::expand_as", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Flatten dims [start_dim, end_dim] by positional index.
inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::flatten(const_cast<Tensor&>(*this), start_dim, end_dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::flatten", "using_ints");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), start_dim, end_dim);
#endif
}
// Positional range, named output dimension.
inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim, Dimname out_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::flatten(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::flatten", "named_out_dim");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, Dimname>(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
#endif
}
// Named range, named output dimension.
inline Tensor Tensor::flatten(Dimname start_dim, Dimname end_dim, Dimname out_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::flatten(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::flatten", "using_names");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, Dimname, Dimname>(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
#endif
}
// Explicit list of named dims to collapse into one named output dim.
inline Tensor Tensor::flatten(DimnameList dims, Dimname out_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::flatten(const_cast<Tensor&>(*this), dims, out_dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::flatten", "DimnameList");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, Dimname>(const_cast<Tensor&>(*this), dims, out_dim);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: fill_ (Scalar/Tensor), floor / floor_,
// frac / frac_, and ger. All but ger use the TypeDefault static path; ger
// has a CPU-only backend switch. Dynamic path: one-time "aten::<op>" schema
// lookup (fill_ distinguishes "Scalar" vs "Tensor" overloads) followed by an
// unboxed call.
// ---------------------------------------------------------------------------
// Fill every element with a scalar value; returns *this.
inline Tensor & Tensor::fill_(Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::fill_(const_cast<Tensor&>(*this), value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fill_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), value);
#endif
}
// Fill with a value taken from a tensor; returns *this.
inline Tensor & Tensor::fill_(const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::fill_(const_cast<Tensor&>(*this), value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fill_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), value);
#endif
}
inline Tensor Tensor::floor() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::floor(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::floor", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::floor_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::floor_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::floor_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::frac() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::frac(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::frac", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::frac_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::frac_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::frac_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Outer product with vec2; static dispatch is CPU-only, other backends
// AT_ERROR.
inline Tensor Tensor::ger(const Tensor & vec2) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::ger(const_cast<Tensor&>(*this), vec2);
            break;
        default:
            AT_ERROR("ger not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ger", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), vec2);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: FFT family (fft / ifft / rfft / irfft).
// All use the TypeDefault static-dispatch path; the dynamic path resolves
// the "aten::<op>" schema once and calls it unboxed. Parameters are
// forwarded verbatim to the aten op (signal_ndim, normalized, and for the
// real variants onesided / signal_sizes).
// ---------------------------------------------------------------------------
inline Tensor Tensor::fft(int64_t signal_ndim, bool normalized) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::fft(const_cast<Tensor&>(*this), signal_ndim, normalized);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fft", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), signal_ndim, normalized);
#endif
}
inline Tensor Tensor::ifft(int64_t signal_ndim, bool normalized) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ifft(const_cast<Tensor&>(*this), signal_ndim, normalized);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ifft", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), signal_ndim, normalized);
#endif
}
inline Tensor Tensor::rfft(int64_t signal_ndim, bool normalized, bool onesided) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::rfft(const_cast<Tensor&>(*this), signal_ndim, normalized, onesided);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::rfft", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool, bool>(const_cast<Tensor&>(*this), signal_ndim, normalized, onesided);
#endif
}
inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::irfft(const_cast<Tensor&>(*this), signal_ndim, normalized, onesided, signal_sizes);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::irfft", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool, bool, IntArrayRef>(const_cast<Tensor&>(*this), signal_ndim, normalized, onesided, signal_sizes);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: advanced-indexing family (index,
// index_copy_/index_copy with int64_t and Dimname overloads,
// index_put_/index_put). All use the TypeDefault static-dispatch path; the
// dynamic path resolves the "aten::<op>" schema once — Dimname overloads
// look up the "dimname" overload name, index looks up "Tensor" — then
// calls it unboxed.
// ---------------------------------------------------------------------------
// Advanced indexing with a list of index tensors.
inline Tensor Tensor::index(TensorList indices) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index(const_cast<Tensor&>(*this), indices);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, TensorList>(const_cast<Tensor&>(*this), indices);
#endif
}
// In-place copy of `source` rows into positions `index` along `dim`.
inline Tensor & Tensor::index_copy_(int64_t dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_copy_(const_cast<Tensor&>(*this), dim, index, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_copy_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
// Out-of-place counterpart of index_copy_.
inline Tensor Tensor::index_copy(int64_t dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_copy(const_cast<Tensor&>(*this), dim, index, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_copy", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
inline Tensor & Tensor::index_copy_(Dimname dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_copy_(const_cast<Tensor&>(*this), dim, index, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_copy_", "dimname");
    return op.callUnboxed<Tensor &, Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
inline Tensor Tensor::index_copy(Dimname dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_copy(const_cast<Tensor&>(*this), dim, index, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_copy", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
// In-place scatter of `values` at `indices`; `accumulate` selects add vs
// overwrite semantics in the underlying aten op.
inline Tensor & Tensor::index_put_(TensorList indices, const Tensor & values, bool accumulate) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_put_(const_cast<Tensor&>(*this), indices, values, accumulate);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_put_", "");
    return op.callUnboxed<Tensor &, Tensor &, TensorList, const Tensor &, bool>(const_cast<Tensor&>(*this), indices, values, accumulate);
#endif
}
// Out-of-place counterpart of index_put_.
inline Tensor Tensor::index_put(TensorList indices, const Tensor & values, bool accumulate) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_put(const_cast<Tensor&>(*this), indices, values, accumulate);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_put", "");
    return op.callUnboxed<Tensor, const Tensor &, TensorList, const Tensor &, bool>(const_cast<Tensor&>(*this), indices, values, accumulate);
#endif
}
// ---------------------------------------------------------------------------
// Generated Tensor method wrappers: inverse, isclose, and the boolean
// predicates (is_distributed / is_floating_point / is_complex / is_nonzero /
// is_same_size / is_signed). All use the TypeDefault static-dispatch path;
// the dynamic path resolves the "aten::<op>" schema once and calls it
// unboxed. The predicates return plain bool rather than Tensor.
// ---------------------------------------------------------------------------
inline Tensor Tensor::inverse() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::inverse(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::inverse", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Elementwise approximate-equality against `other` with relative/absolute
// tolerances; equal_nan treats NaN==NaN as true in the underlying op.
inline Tensor Tensor::isclose(const Tensor & other, double rtol, double atol, bool equal_nan) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::isclose(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::isclose", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, double, double, bool>(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
#endif
}
inline bool Tensor::is_distributed() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_distributed(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_distributed", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline bool Tensor::is_floating_point() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_floating_point(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_floating_point", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline bool Tensor::is_complex() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_complex(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_complex", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline bool Tensor::is_nonzero() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_nonzero(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_nonzero", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline bool Tensor::is_same_size(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_same_size(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_same_size", "");
    return op.callUnboxed<bool, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline bool Tensor::is_signed() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_signed(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_signed", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::kthvalue(const_cast<Tensor&>(*this), k, dim, keepdim);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::kthvalue", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, int64_t, bool>(const_cast<Tensor&>(*this), k, dim, keepdim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::kthvalue(const_cast<Tensor&>(*this), k, dim, keepdim);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::kthvalue", "dimname");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, Dimname, bool>(const_cast<Tensor&>(*this), k, dim, keepdim);
#endif
}
// NOTE(generated): logarithm-family stubs. Pattern: static dispatch calls the
// implementation directly; otherwise a cached OperatorHandle calls the
// "aten::<op>" schema unboxed. In-place variants (trailing underscore) return
// Tensor& to *this per the op schema.
inline Tensor Tensor::log() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::log_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::log10() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log10(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log10", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::log10_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log10_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log10_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::log1p() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log1p(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log1p", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// log1p_ has per-backend static-dispatch implementations (dense CPU and
// sparse CPU); unsupported backends raise AT_ERROR at runtime.
inline Tensor & Tensor::log1p_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::log1p_(const_cast<Tensor&>(*this));
            break;
        case Backend::SparseCPU:
            return SparseCPUType::log1p_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("log1p_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log1p_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::log2() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log2(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log2", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::log2_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log2_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log2_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::logdet() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logdet(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logdet", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// NOTE(generated): reduction/linear-algebra stubs. Overloads of the same op
// are disambiguated by the schema overload name passed to findSchemaOrThrow
// (e.g. "int" vs "Dimname", "dim" vs "names_dim").
inline Tensor Tensor::log_softmax(int64_t dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log_softmax(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log_softmax", "int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
inline Tensor Tensor::log_softmax(Dimname dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log_softmax(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log_softmax", "Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
inline Tensor Tensor::logsumexp(IntArrayRef dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logsumexp(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logsumexp", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::logsumexp(DimnameList dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::logsumexp(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::logsumexp", "names");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::matmul(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::matmul(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::matmul", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::matrix_power(int64_t n) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::matrix_power(const_cast<Tensor&>(*this), n);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::matrix_power", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), n);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::max(int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::max(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::max", "dim");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::max_values(IntArrayRef dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::max_values(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::max_values", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::max(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::max(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::max", "names_dim");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::max_values(DimnameList dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::max_values(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::max_values", "names");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
// NOTE(generated): mean/median/min reduction stubs. The mean overloads that
// reduce over all elements or int dims have per-backend static-dispatch
// implementations (dense CPU and quantized CPU); other variants fall through
// to TypeDefault. Non-static builds always go through the c10 dispatcher.
inline Tensor Tensor::mean(c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::mean(const_cast<Tensor&>(*this), dtype);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::mean(const_cast<Tensor&>(*this), dtype);
            break;
        default:
            AT_ERROR("mean not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mean", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dtype);
#endif
}
inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::mean(const_cast<Tensor&>(*this), dim, keepdim, dtype);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::mean(const_cast<Tensor&>(*this), dim, keepdim, dtype);
            break;
        default:
            AT_ERROR("mean not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mean", "dim");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#endif
}
inline Tensor Tensor::mean(DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mean(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mean", "names_dim");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::median(int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::median(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::median", "dim");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::median(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::median(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::median", "names_dim");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::min(int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::min(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::min", "dim");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::min_values(IntArrayRef dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::min_values(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::min_values", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::min(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::min(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::min", "names_dim");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::min_values(DimnameList dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::min_values(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::min_values", "names");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
// NOTE(generated): matrix-multiply / mode / multiply stubs. mm, mul(Tensor)
// and mul_(Tensor) have per-backend static-dispatch implementations (dense
// CPU and sparse CPU); the Scalar overloads of mul/mul_ go via TypeDefault.
inline Tensor Tensor::mm(const Tensor & mat2) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::mm(const_cast<Tensor&>(*this), mat2);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::mm(const_cast<Tensor&>(*this), mat2);
            break;
        default:
            AT_ERROR("mm not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mat2);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::mode(int64_t dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mode(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mode", "");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::mode(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mode(const_cast<Tensor&>(*this), dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mode", "dimname");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
inline Tensor Tensor::mul(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::mul(const_cast<Tensor&>(*this), other);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::mul(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("mul not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mul", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::mul_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::mul_(const_cast<Tensor&>(*this), other);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::mul_(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("mul_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mul_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::mul(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mul(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mul", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::mul_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mul_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mul_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// NOTE(generated): mv has a CPU-only static-dispatch implementation and
// narrow_copy supports dense CPU plus sparse CPU; the remaining stubs in this
// run dispatch through TypeDefault / the c10 dispatcher uniformly.
inline Tensor Tensor::mv(const Tensor & vec) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::mv(const_cast<Tensor&>(*this), vec);
            break;
        default:
            AT_ERROR("mv not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mv", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), vec);
#endif
}
inline Tensor Tensor::mvlgamma(int64_t p) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mvlgamma(const_cast<Tensor&>(*this), p);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mvlgamma", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), p);
#endif
}
inline Tensor & Tensor::mvlgamma_(int64_t p) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::mvlgamma_(const_cast<Tensor&>(*this), p);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::mvlgamma_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t>(const_cast<Tensor&>(*this), p);
#endif
}
inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::narrow_copy(const_cast<Tensor&>(*this), dim, start, length);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::narrow_copy(const_cast<Tensor&>(*this), dim, start, length);
            break;
        default:
            AT_ERROR("narrow_copy not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::narrow_copy", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, int64_t>(const_cast<Tensor&>(*this), dim, start, length);
#endif
}
inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::narrow(const_cast<Tensor&>(*this), dim, start, length);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::narrow", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, int64_t>(const_cast<Tensor&>(*this), dim, start, length);
#endif
}
inline Tensor Tensor::permute(IntArrayRef dims) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::permute(const_cast<Tensor&>(*this), dims);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::permute", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef>(const_cast<Tensor&>(*this), dims);
#endif
}
inline Tensor Tensor::numpy_T() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::numpy_T(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::numpy_T", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline bool Tensor::is_pinned() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::is_pinned(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_pinned", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::pin_memory() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::pin_memory(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::pin_memory", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::pinverse(double rcond) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::pinverse(const_cast<Tensor&>(*this), rcond);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::pinverse", "");
    return op.callUnboxed<Tensor, const Tensor &, double>(const_cast<Tensor&>(*this), rcond);
#endif
}
// NOTE(generated): elementwise/shape-op stubs (reciprocal, neg, repeat,
// reshape, round). All dispatch via TypeDefault under USE_STATIC_DISPATCH
// and via cached "aten::<op>" OperatorHandles otherwise; repeat_interleave
// overloads are disambiguated by schema name ("self_Tensor" / "self_int").
inline Tensor Tensor::reciprocal() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::reciprocal(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::reciprocal", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::reciprocal_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::reciprocal_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::reciprocal_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::neg() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::neg(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::neg", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::neg_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::neg_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::neg_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::repeat(IntArrayRef repeats) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::repeat(const_cast<Tensor&>(*this), repeats);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::repeat", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef>(const_cast<Tensor&>(*this), repeats);
#endif
}
inline Tensor Tensor::repeat_interleave(const Tensor & repeats, c10::optional<int64_t> dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::repeat_interleave(const_cast<Tensor&>(*this), repeats, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::repeat_interleave", "self_Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, c10::optional<int64_t>>(const_cast<Tensor&>(*this), repeats, dim);
#endif
}
inline Tensor Tensor::repeat_interleave(int64_t repeats, c10::optional<int64_t> dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::repeat_interleave(const_cast<Tensor&>(*this), repeats, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::repeat_interleave", "self_int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<int64_t>>(const_cast<Tensor&>(*this), repeats, dim);
#endif
}
inline Tensor Tensor::reshape(IntArrayRef shape) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::reshape(const_cast<Tensor&>(*this), shape);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::reshape", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef>(const_cast<Tensor&>(*this), shape);
#endif
}
inline Tensor Tensor::reshape_as(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::reshape_as(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::reshape_as", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::round() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::round(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::round", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::round_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::round_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::round_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// NOTE(generated): activation stubs. relu/relu_ have static-dispatch
// implementations for dense CPU and quantized CPU; prelu/prelu_backward are
// CPU-only. For the *_backward ops, `*this` is passed as the `self` argument
// after grad_output/grad_out, matching the op schema's argument order.
inline Tensor Tensor::relu() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::relu(const_cast<Tensor&>(*this));
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::relu(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("relu not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::relu", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::relu_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::relu_(const_cast<Tensor&>(*this));
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::relu_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("relu_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::relu_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::prelu(const Tensor & weight) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::prelu(const_cast<Tensor&>(*this), weight);
            break;
        default:
            AT_ERROR("prelu not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::prelu", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), weight);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::prelu_backward(const Tensor & grad_output, const Tensor & weight) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::prelu_backward(grad_output, const_cast<Tensor&>(*this), weight);
            break;
        default:
            AT_ERROR("prelu_backward not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::prelu_backward", "");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &>(grad_output, const_cast<Tensor&>(*this), weight);
#endif
}
inline Tensor Tensor::hardshrink(Scalar lambd) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::hardshrink(const_cast<Tensor&>(*this), lambd);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::hardshrink", "");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), lambd);
#endif
}
inline Tensor Tensor::hardshrink_backward(const Tensor & grad_out, Scalar lambd) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::hardshrink_backward(grad_out, const_cast<Tensor&>(*this), lambd);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::hardshrink_backward", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, Scalar>(grad_out, const_cast<Tensor&>(*this), lambd);
#endif
}
inline Tensor Tensor::rsqrt() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::rsqrt(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::rsqrt", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::rsqrt_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::rsqrt_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::rsqrt_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// NOTE(generated): select overloads ("Dimname" / "int" schema names) and
// sigmoid stubs. Under static dispatch, sigmoid supports dense CPU and
// quantized CPU while the in-place sigmoid_ is dense-CPU only.
inline Tensor Tensor::select(Dimname dim, int64_t index) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::select(const_cast<Tensor&>(*this), dim, index);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::select", "Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, int64_t>(const_cast<Tensor&>(*this), dim, index);
#endif
}
inline Tensor Tensor::select(int64_t dim, int64_t index) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::select(const_cast<Tensor&>(*this), dim, index);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::select", "int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), dim, index);
#endif
}
inline Tensor Tensor::sigmoid() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::sigmoid(const_cast<Tensor&>(*this));
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::sigmoid(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("sigmoid not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sigmoid", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::sigmoid_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::sigmoid_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("sigmoid_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sigmoid_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Auto-generated-looking Tensor method dispatch wrappers: under USE_STATIC_DISPATCH
// each call goes directly to TypeDefault::<op>; otherwise the operator is resolved
// once into a cached static c10::OperatorHandle and invoked unboxed. const_cast is
// generator boilerplate (kernels take Tensor& while these methods are const).
inline Tensor Tensor::sin() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sin(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sin", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::sin_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sin_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sin_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::sinh() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sinh(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sinh", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::sinh_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sinh_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sinh_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::detach() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::detach(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::detach", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::detach_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::detach_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::detach_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// size/stride-style queries return plain int64_t; the "int"/"Dimname" strings pick
// the schema overload for integer vs. named dimensions.
inline int64_t Tensor::size(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::size(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::size", "int");
    return op.callUnboxed<int64_t, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline int64_t Tensor::size(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::size(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::size", "Dimname");
    return op.callUnboxed<int64_t, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
inline Tensor Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::slice(const_cast<Tensor&>(*this), dim, start, end, step);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::slice", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, int64_t, int64_t>(const_cast<Tensor&>(*this), dim, start, end, step);
#endif
}
// slogdet returns a (sign, log|det|) pair as std::tuple<Tensor,Tensor>.
inline std::tuple<Tensor,Tensor> Tensor::slogdet() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::slogdet(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::slogdet", "");
    return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::smm(const Tensor & mat2) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::smm(const_cast<Tensor&>(*this), mat2);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::smm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mat2);
#endif
}
inline Tensor Tensor::softmax(int64_t dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::softmax(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::softmax", "int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
inline Tensor Tensor::softmax(Dimname dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::softmax(const_cast<Tensor&>(*this), dim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::softmax", "Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
// split/split_with_sizes return a vector of views over the input.
inline std::vector<Tensor> Tensor::split(int64_t split_size, int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::split(const_cast<Tensor&>(*this), split_size, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::split", "Tensor");
    return op.callUnboxed<std::vector<Tensor>, const Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), split_size, dim);
#endif
}
inline std::vector<Tensor> Tensor::split_with_sizes(IntArrayRef split_sizes, int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::split_with_sizes(const_cast<Tensor&>(*this), split_sizes, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::split_with_sizes", "");
    return op.callUnboxed<std::vector<Tensor>, const Tensor &, IntArrayRef, int64_t>(const_cast<Tensor&>(*this), split_sizes, dim);
#endif
}
// Auto-generated-looking Tensor method dispatch wrappers (static dispatch under
// USE_STATIC_DISPATCH, otherwise dynamic dispatch via a cached OperatorHandle).
// squeeze has three overloads -- all dims, one int dim, one named dim -- selected by
// the schema-overload string ("", "dim", "dimname"); squeeze_ mirrors them in place.
inline Tensor Tensor::squeeze() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::squeeze(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::squeeze", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::squeeze(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::squeeze(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::squeeze", "dim");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline Tensor Tensor::squeeze(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::squeeze(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::squeeze", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
inline Tensor & Tensor::squeeze_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::squeeze_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::squeeze_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::squeeze_(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::squeeze_(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::squeeze_", "dim");
    return op.callUnboxed<Tensor &, Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline Tensor & Tensor::squeeze_(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::squeeze_(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::squeeze_", "dimname");
    return op.callUnboxed<Tensor &, Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
inline Tensor Tensor::sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sspaddmm(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sspaddmm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
#endif
}
inline Tensor Tensor::stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const Tensor & window, bool normalized, bool onesided) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::stft(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, normalized, onesided);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::stft", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<int64_t>, c10::optional<int64_t>, const Tensor &, bool, bool>(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, normalized, onesided);
#endif
}
inline int64_t Tensor::stride(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::stride(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::stride", "int");
    return op.callUnboxed<int64_t, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline int64_t Tensor::stride(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::stride(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::stride", "Dimname");
    return op.callUnboxed<int64_t, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
// sum overloads: full reduction, reduce-over-int-dims, reduce-over-named-dims;
// dtype optionally overrides the accumulation/output type.
inline Tensor Tensor::sum(c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sum(const_cast<Tensor&>(*this), dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sum", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dtype);
#endif
}
inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sum(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sum", "dim_IntList");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#endif
}
inline Tensor Tensor::sum(DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sum(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sum", "dim_DimnameList");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#endif
}
inline Tensor Tensor::sum_to_size(IntArrayRef size) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sum_to_size(const_cast<Tensor&>(*this), size);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sum_to_size", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef>(const_cast<Tensor&>(*this), size);
#endif
}
// Auto-generated-looking Tensor method dispatch wrappers (static dispatch under
// USE_STATIC_DISPATCH, otherwise dynamic dispatch via a cached OperatorHandle).
inline Tensor Tensor::sqrt() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sqrt(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sqrt", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::sqrt_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sqrt_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sqrt_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::square() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::square(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::square", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::square_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::square_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::square_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// std overloads: full reduction, int-dim list, named-dim list ("", "dim", "names_dim").
inline Tensor Tensor::std(bool unbiased) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::std(const_cast<Tensor&>(*this), unbiased);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::std", "");
    return op.callUnboxed<Tensor, const Tensor &, bool>(const_cast<Tensor&>(*this), unbiased);
#endif
}
inline Tensor Tensor::std(IntArrayRef dim, bool unbiased, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::std(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::std", "dim");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool, bool>(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#endif
}
inline Tensor Tensor::std(DimnameList dim, bool unbiased, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::std(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::std", "names_dim");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool, bool>(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#endif
}
inline Tensor Tensor::prod(c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::prod(const_cast<Tensor&>(*this), dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::prod", "");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dtype);
#endif
}
inline Tensor Tensor::prod(int64_t dim, bool keepdim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::prod(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::prod", "dim_int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, bool, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#endif
}
inline Tensor Tensor::prod(Dimname dim, bool keepdim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::prod(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::prod", "dim_Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, bool, c10::optional<ScalarType>>(const_cast<Tensor&>(*this), dim, keepdim, dtype);
#endif
}
// t()/t_(): 2-D transpose convenience wrappers.
inline Tensor Tensor::t() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::t(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::t", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::t_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::t_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::t_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Auto-generated-looking Tensor method dispatch wrappers. Ops with a per-backend
// switch under USE_STATIC_DISPATCH (tan_, tanh, tanh_, flip, roll here) are wired up
// only for the listed backends; anything else hits the AT_ERROR default. The break
// after each return is unreachable generator boilerplate.
inline Tensor Tensor::tan() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::tan(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::tan", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::tan_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::tan_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("tan_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::tan_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::tanh() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::tanh(const_cast<Tensor&>(*this));
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::tanh(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("tanh not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::tanh", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::tanh_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::tanh_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("tanh_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::tanh_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::transpose(const_cast<Tensor&>(*this), dim0, dim1);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::transpose", "int");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), dim0, dim1);
#endif
}
inline Tensor Tensor::transpose(Dimname dim0, Dimname dim1) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::transpose(const_cast<Tensor&>(*this), dim0, dim1);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::transpose", "Dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, Dimname>(const_cast<Tensor&>(*this), dim0, dim1);
#endif
}
inline Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::transpose_(const_cast<Tensor&>(*this), dim0, dim1);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::transpose_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), dim0, dim1);
#endif
}
inline Tensor Tensor::flip(IntArrayRef dims) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::flip(const_cast<Tensor&>(*this), dims);
            break;
        default:
            AT_ERROR("flip not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::flip", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef>(const_cast<Tensor&>(*this), dims);
#endif
}
inline Tensor Tensor::roll(IntArrayRef shifts, IntArrayRef dims) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::roll(const_cast<Tensor&>(*this), shifts, dims);
            break;
        default:
            AT_ERROR("roll not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::roll", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, IntArrayRef>(const_cast<Tensor&>(*this), shifts, dims);
#endif
}
inline Tensor Tensor::rot90(int64_t k, IntArrayRef dims) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::rot90(const_cast<Tensor&>(*this), k, dims);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::rot90", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, IntArrayRef>(const_cast<Tensor&>(*this), k, dims);
#endif
}
// Auto-generated-looking Tensor method dispatch wrappers (static dispatch under
// USE_STATIC_DISPATCH, otherwise dynamic dispatch via a cached OperatorHandle).
inline Tensor Tensor::trunc() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::trunc(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::trunc", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::trunc_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::trunc_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::trunc_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::type_as(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::type_as(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::type_as", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::unsqueeze(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::unsqueeze(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unsqueeze", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
inline Tensor & Tensor::unsqueeze_(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::unsqueeze_(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unsqueeze_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
// var overloads mirror std: full reduction, int-dim list, named-dim list.
inline Tensor Tensor::var(bool unbiased) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::var(const_cast<Tensor&>(*this), unbiased);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::var", "");
    return op.callUnboxed<Tensor, const Tensor &, bool>(const_cast<Tensor&>(*this), unbiased);
#endif
}
inline Tensor Tensor::var(IntArrayRef dim, bool unbiased, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::var(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::var", "dim");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef, bool, bool>(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#endif
}
inline Tensor Tensor::var(DimnameList dim, bool unbiased, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::var(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::var", "names_dim");
    return op.callUnboxed<Tensor, const Tensor &, DimnameList, bool, bool>(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
#endif
}
inline Tensor Tensor::view_as(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::view_as(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::view_as", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Note the argument order: the schema is where(condition, self, other), so `*this`
// is passed second, not first.
inline Tensor Tensor::where(const Tensor & condition, const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::where(condition, const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::where", "self");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(condition, const_cast<Tensor&>(*this), other);
#endif
}
// Auto-generated-looking dispatch wrappers for the six aten::norm overloads; the
// schema-overload strings ("ScalarOpt_dtype", "Scalar", "ScalarOpt_dim_dtype",
// "ScalarOpt_dim", "names_ScalarOpt_dim_dtype", "names_ScalarOpt_dim") disambiguate
// optional-p / dim-list / named-dim / explicit-dtype combinations.
inline Tensor Tensor::norm(c10::optional<Scalar> p, ScalarType dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::norm(const_cast<Tensor&>(*this), p, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::norm", "ScalarOpt_dtype");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<Scalar>, ScalarType>(const_cast<Tensor&>(*this), p, dtype);
#endif
}
inline Tensor Tensor::norm(Scalar p) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::norm(const_cast<Tensor&>(*this), p);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::norm", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), p);
#endif
}
inline Tensor Tensor::norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::norm(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::norm", "ScalarOpt_dim_dtype");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<Scalar>, IntArrayRef, bool, ScalarType>(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
#endif
}
inline Tensor Tensor::norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::norm(const_cast<Tensor&>(*this), p, dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::norm", "ScalarOpt_dim");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<Scalar>, IntArrayRef, bool>(const_cast<Tensor&>(*this), p, dim, keepdim);
#endif
}
inline Tensor Tensor::norm(c10::optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::norm(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::norm", "names_ScalarOpt_dim_dtype");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<Scalar>, DimnameList, bool, ScalarType>(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
#endif
}
inline Tensor Tensor::norm(c10::optional<Scalar> p, DimnameList dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::norm(const_cast<Tensor&>(*this), p, dim, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::norm", "names_ScalarOpt_dim");
    return op.callUnboxed<Tensor, const Tensor &, c10::optional<Scalar>, DimnameList, bool>(const_cast<Tensor&>(*this), p, dim, keepdim);
#endif
}
inline Tensor Tensor::clone(c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::clone(const_cast<Tensor&>(*this), memory_format);
break;
case Backend::QuantizedCPU:
return QuantizedCPUType::clone(const_cast<Tensor&>(*this), memory_format);
break;
case Backend::SparseCPU:
return SparseCPUType::clone(const_cast<Tensor&>(*this), memory_format);
break;
default:
AT_ERROR("clone not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "");
return op.callUnboxed<Tensor, const Tensor &, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), memory_format);
#endif
}
// Generated dispatch stub for "aten::resize_as_" (in-place; returns *this).
// Static-dispatch builds call the shared TypeDefault kernel; other builds route
// through the c10 dispatcher. const_cast: kernels take a non-const Tensor&.
inline Tensor & Tensor::resize_as_(const Tensor & the_template, c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::resize_as_(const_cast<Tensor&>(*this), the_template, memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::resize_as_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), the_template, memory_format);
#endif
}
// Generated dispatch stub for "aten::pow" (overload "Tensor_Scalar"). Static-dispatch
// builds switch on the tensor's backend (CPU / SparseCPU); other builds route through
// the c10 dispatcher.
inline Tensor Tensor::pow(Scalar exponent) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::pow(const_cast<Tensor&>(*this), exponent);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::pow(const_cast<Tensor&>(*this), exponent);
            break;
        default:
            AT_ERROR("pow not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::pow", "Tensor_Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), exponent);
#endif
}
// Generated dispatch stub for "aten::zero_" (in-place; returns *this). Static-dispatch
// builds switch on the tensor's backend (CPU / SparseCPU); other builds route through
// the c10 dispatcher.
inline Tensor & Tensor::zero_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::zero_(const_cast<Tensor&>(*this));
            break;
        case Backend::SparseCPU:
            return SparseCPUType::zero_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("zero_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::zero_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::sub" (overload "Tensor"). Static-dispatch builds
// switch on the tensor's backend (CPU / SparseCPU); other builds route through the
// c10 dispatcher.
inline Tensor Tensor::sub(const Tensor & other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::sub(const_cast<Tensor&>(*this), other, alpha);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::sub(const_cast<Tensor&>(*this), other, alpha);
            break;
        default:
            AT_ERROR("sub not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sub", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// Generated dispatch stub for "aten::sub_" (overload "Tensor"; in-place, returns
// *this). Static-dispatch builds switch on the tensor's backend (CPU / SparseCPU);
// other builds route through the c10 dispatcher.
inline Tensor & Tensor::sub_(const Tensor & other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::sub_(const_cast<Tensor&>(*this), other, alpha);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::sub_(const_cast<Tensor&>(*this), other, alpha);
            break;
        default:
            AT_ERROR("sub_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sub_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// Generated dispatch stub for "aten::sub" (overload "Scalar"). Static-dispatch builds
// call the shared TypeDefault kernel; other builds route through the c10 dispatcher.
inline Tensor Tensor::sub(Scalar other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sub(const_cast<Tensor&>(*this), other, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sub", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// Generated dispatch stub for "aten::sub_" (overload "Scalar"; in-place, returns
// *this). Static-dispatch builds call the shared TypeDefault kernel; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::sub_(Scalar other, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::sub_(const_cast<Tensor&>(*this), other, alpha);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sub_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), other, alpha);
#endif
}
// Generated dispatch stub for "aten::addmm". Static-dispatch builds switch on the
// tensor's backend (CPU / SparseCPU); other builds route through the c10 dispatcher.
inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::addmm(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::addmm(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
            break;
        default:
            AT_ERROR("addmm not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addmm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
#endif
}
// Generated dispatch stub for "aten::addmm_" (in-place; returns *this).
// Static-dispatch builds switch on the tensor's backend (CPU / SparseCPU); other
// builds route through the c10 dispatcher.
inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::addmm_(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::addmm_(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
            break;
        default:
            AT_ERROR("addmm_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addmm_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
#endif
}
// Generated dispatch stub for "aten::sparse_resize_" (in-place; returns *this).
// Static-dispatch builds support only the SparseCPU backend; other builds route
// through the c10 dispatcher.
inline Tensor & Tensor::sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::sparse_resize_(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
            break;
        default:
            AT_ERROR("sparse_resize_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sparse_resize_", "");
    return op.callUnboxed<Tensor &, Tensor &, IntArrayRef, int64_t, int64_t>(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
#endif
}
// Generated dispatch stub for "aten::sparse_resize_and_clear_" (in-place; returns
// *this). Static-dispatch builds support only the SparseCPU backend; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::sparse_resize_and_clear_(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
            break;
        default:
            AT_ERROR("sparse_resize_and_clear_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sparse_resize_and_clear_", "");
    return op.callUnboxed<Tensor &, Tensor &, IntArrayRef, int64_t, int64_t>(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
#endif
}
// Generated dispatch stub for "aten::sparse_mask". Static-dispatch builds support
// only the SparseCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::sparse_mask(const Tensor & mask) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::sparse_mask(const_cast<Tensor&>(*this), mask);
            break;
        default:
            AT_ERROR("sparse_mask not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sparse_mask", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mask);
#endif
}
// Generated dispatch stub for "aten::to_dense". Static-dispatch builds support only
// the SparseCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::to_dense() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::to_dense(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("to_dense not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to_dense", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::sparse_dim". Static-dispatch builds support
// only the SparseCPU backend; other builds route through the c10 dispatcher.
inline int64_t Tensor::sparse_dim() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::sparse_dim(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("sparse_dim not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sparse_dim", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::_dimI" (internal/legacy name — see sparse_dim).
// Static-dispatch builds support only the SparseCPU backend; other builds route
// through the c10 dispatcher.
inline int64_t Tensor::_dimI() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::_dimI(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("_dimI not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_dimI", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::dense_dim". Static-dispatch builds support
// only the SparseCPU backend; other builds route through the c10 dispatcher.
inline int64_t Tensor::dense_dim() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::dense_dim(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("dense_dim not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::dense_dim", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::_dimV" (internal/legacy name — see dense_dim).
// Static-dispatch builds support only the SparseCPU backend; other builds route
// through the c10 dispatcher.
inline int64_t Tensor::_dimV() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::_dimV(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("_dimV not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_dimV", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::_nnz". Static-dispatch builds support only the
// SparseCPU backend; other builds route through the c10 dispatcher.
inline int64_t Tensor::_nnz() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::_nnz(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("_nnz not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_nnz", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::coalesce". Static-dispatch builds support only
// the SparseCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::coalesce() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::coalesce(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("coalesce not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::coalesce", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::is_coalesced". Static-dispatch builds support
// only the SparseCPU backend; other builds route through the c10 dispatcher.
inline bool Tensor::is_coalesced() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::is_coalesced(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("is_coalesced not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_coalesced", "");
    return op.callUnboxed<bool, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::_indices" (unchecked variant of indices()).
// Static-dispatch builds support only the SparseCPU backend; other builds route
// through the c10 dispatcher.
inline Tensor Tensor::_indices() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::_indices(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("_indices not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_indices", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::_values" (unchecked variant of values()).
// Static-dispatch builds support only the SparseCPU backend; other builds route
// through the c10 dispatcher.
inline Tensor Tensor::_values() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::_values(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("_values not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_values", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::_coalesced_" (in-place flag setter; returns
// *this). Static-dispatch builds support only the SparseCPU backend; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::_coalesced_(bool coalesced) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::_coalesced_(const_cast<Tensor&>(*this), coalesced);
            break;
        default:
            AT_ERROR("_coalesced_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::_coalesced_", "");
    return op.callUnboxed<Tensor &, Tensor &, bool>(const_cast<Tensor&>(*this), coalesced);
#endif
}
// Generated dispatch stub for "aten::indices". Static-dispatch builds support only
// the SparseCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::indices() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::indices(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("indices not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::indices", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::values". Static-dispatch builds support only
// the SparseCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::values() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::SparseCPU:
            return SparseCPUType::values(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("values not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::values", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::unbind" (overload "int"). Static-dispatch
// builds call the shared TypeDefault kernel; other builds route through the c10
// dispatcher.
inline std::vector<Tensor> Tensor::unbind(int64_t dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::unbind(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unbind", "int");
    return op.callUnboxed<std::vector<Tensor>, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
#endif
}
// Generated dispatch stub for "aten::unbind" (overload "Dimname"). Static-dispatch
// builds call the shared TypeDefault kernel; other builds route through the c10
// dispatcher.
inline std::vector<Tensor> Tensor::unbind(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::unbind(const_cast<Tensor&>(*this), dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unbind", "Dimname");
    return op.callUnboxed<std::vector<Tensor>, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
#endif
}
// Generated dispatch stub for "aten::to_sparse" (overload "sparse_dim").
// Static-dispatch builds support only the CPU backend; other builds route through
// the c10 dispatcher.
inline Tensor Tensor::to_sparse(int64_t sparse_dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::to_sparse(const_cast<Tensor&>(*this), sparse_dim);
            break;
        default:
            AT_ERROR("to_sparse not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to_sparse", "sparse_dim");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), sparse_dim);
#endif
}
// Generated dispatch stub for "aten::to_sparse" (no-argument overload).
// Static-dispatch builds support only the CPU backend; other builds route through
// the c10 dispatcher.
inline Tensor Tensor::to_sparse() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::to_sparse(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("to_sparse not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to_sparse", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::to_mkldnn". Static-dispatch builds support
// only the CPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::to_mkldnn() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::to_mkldnn(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("to_mkldnn not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to_mkldnn", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::dequantize". Static-dispatch builds support
// only the QuantizedCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::dequantize() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::dequantize(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("dequantize not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::dequantize", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::q_scale". Static-dispatch builds support only
// the QuantizedCPU backend; other builds route through the c10 dispatcher.
inline double Tensor::q_scale() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::q_scale(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("q_scale not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::q_scale", "");
    return op.callUnboxed<double, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::q_zero_point". Static-dispatch builds support
// only the QuantizedCPU backend; other builds route through the c10 dispatcher.
inline int64_t Tensor::q_zero_point() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::q_zero_point(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("q_zero_point not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::q_zero_point", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::q_per_channel_scales". Static-dispatch builds
// support only the QuantizedCPU backend; other builds route through the c10
// dispatcher.
inline Tensor Tensor::q_per_channel_scales() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::q_per_channel_scales(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("q_per_channel_scales not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::q_per_channel_scales", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::q_per_channel_zero_points". Static-dispatch
// builds support only the QuantizedCPU backend; other builds route through the c10
// dispatcher.
inline Tensor Tensor::q_per_channel_zero_points() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::q_per_channel_zero_points(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("q_per_channel_zero_points not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::q_per_channel_zero_points", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::q_per_channel_axis". Static-dispatch builds
// support only the QuantizedCPU backend; other builds route through the c10
// dispatcher.
inline int64_t Tensor::q_per_channel_axis() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::q_per_channel_axis(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("q_per_channel_axis not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::q_per_channel_axis", "");
    return op.callUnboxed<int64_t, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::int_repr". Static-dispatch builds support only
// the QuantizedCPU backend; other builds route through the c10 dispatcher.
inline Tensor Tensor::int_repr() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::int_repr(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("int_repr not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::int_repr", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::qscheme". Static-dispatch builds support only
// the QuantizedCPU backend; other builds route through the c10 dispatcher.
inline QScheme Tensor::qscheme() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::qscheme(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("qscheme not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::qscheme", "");
    return op.callUnboxed<QScheme, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::to" (overload "dtype_layout"). Static-dispatch
// builds call the shared TypeDefault kernel; other builds route through the c10
// dispatcher.
inline Tensor Tensor::to(const TensorOptions & options, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::to(const_cast<Tensor&>(*this), options, non_blocking, copy, memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to", "dtype_layout");
    return op.callUnboxed<Tensor, const Tensor &, const TensorOptions &, bool, bool, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), options, non_blocking, copy, memory_format);
#endif
}
// Generated dispatch stub for "aten::to" (overload "device"). Static-dispatch builds
// call the shared TypeDefault kernel; other builds route through the c10 dispatcher.
inline Tensor Tensor::to(Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::to(const_cast<Tensor&>(*this), device, dtype, non_blocking, copy, memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to", "device");
    return op.callUnboxed<Tensor, const Tensor &, Device, ScalarType, bool, bool, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), device, dtype, non_blocking, copy, memory_format);
#endif
}
// Generated dispatch stub for "aten::to" (overload "dtype"). Static-dispatch builds
// call the shared TypeDefault kernel; other builds route through the c10 dispatcher.
inline Tensor Tensor::to(ScalarType dtype, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::to(const_cast<Tensor&>(*this), dtype, non_blocking, copy, memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to", "dtype");
    return op.callUnboxed<Tensor, const Tensor &, ScalarType, bool, bool, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), dtype, non_blocking, copy, memory_format);
#endif
}
// Generated dispatch stub for "aten::to" (overload "other"). Static-dispatch builds
// call the shared TypeDefault kernel; other builds route through the c10 dispatcher.
inline Tensor Tensor::to(const Tensor & other, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::to(const_cast<Tensor&>(*this), other, non_blocking, copy, memory_format);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::to", "other");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, bool, bool, c10::optional<MemoryFormat>>(const_cast<Tensor&>(*this), other, non_blocking, copy, memory_format);
#endif
}
// Generated dispatch stub for "aten::item". Static-dispatch builds call the shared
// TypeDefault kernel; other builds route through the c10 dispatcher.
inline Scalar Tensor::item() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::item(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::item", "");
    return op.callUnboxed<Scalar, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::set_" (overload "source_Storage"; in-place,
// returns *this). Static-dispatch builds support only the CPU backend; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::set_(Storage source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::set_(const_cast<Tensor&>(*this), source);
            break;
        default:
            AT_ERROR("set_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::set_", "source_Storage");
    return op.callUnboxed<Tensor &, Tensor &, Storage>(const_cast<Tensor&>(*this), source);
#endif
}
// Generated dispatch stub for "aten::set_" (overload "source_Storage_storage_offset";
// in-place, returns *this). Static-dispatch builds switch on the tensor's backend
// (CPU / QuantizedCPU); other builds route through the c10 dispatcher.
inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::set_(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::set_(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
            break;
        default:
            AT_ERROR("set_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::set_", "source_Storage_storage_offset");
    return op.callUnboxed<Tensor &, Tensor &, Storage, int64_t, IntArrayRef, IntArrayRef>(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
#endif
}
// Generated dispatch stub for "aten::set_" (overload "source_Tensor"; in-place,
// returns *this). Static-dispatch builds support only the CPU backend; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::set_(const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::set_(const_cast<Tensor&>(*this), source);
            break;
        default:
            AT_ERROR("set_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::set_", "source_Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), source);
#endif
}
// Generated dispatch stub for "aten::set_" (no-argument overload; in-place, returns
// *this). Static-dispatch builds support only the CPU backend; other builds route
// through the c10 dispatcher.
inline Tensor & Tensor::set_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::set_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("set_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::set_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Generated dispatch stub for "aten::set_quantizer_" (in-place; returns *this).
// Static-dispatch builds support only the QuantizedCPU backend; other builds route
// through the c10 dispatcher.
inline Tensor & Tensor::set_quantizer_(ConstQuantizerPtr quantizer) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::QuantizedCPU:
            return QuantizedCPUType::set_quantizer_(const_cast<Tensor&>(*this), quantizer);
            break;
        default:
            AT_ERROR("set_quantizer_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::set_quantizer_", "");
    return op.callUnboxed<Tensor &, Tensor &, ConstQuantizerPtr>(const_cast<Tensor&>(*this), quantizer);
#endif
}
// Generated dispatch stub for "aten::is_set_to". Static-dispatch builds support
// only the CPU backend; other builds route through the c10 dispatcher.
inline bool Tensor::is_set_to(const Tensor & tensor) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::is_set_to(const_cast<Tensor&>(*this), tensor);
            break;
        default:
            AT_ERROR("is_set_to not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::is_set_to", "");
    return op.callUnboxed<bool, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), tensor);
#endif
}
// Generated dispatch stub for "aten::masked_fill_" (overload "Scalar"; in-place,
// returns *this). Static-dispatch builds support only the CPU backend; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::masked_fill_(const Tensor & mask, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::masked_fill_(const_cast<Tensor&>(*this), mask, value);
            break;
        default:
            AT_ERROR("masked_fill_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_fill_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), mask, value);
#endif
}
// Generated dispatch stub for "aten::masked_fill" (overload "Scalar"; out-of-place).
// Static-dispatch builds call the shared TypeDefault kernel; other builds route
// through the c10 dispatcher.
inline Tensor Tensor::masked_fill(const Tensor & mask, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::masked_fill(const_cast<Tensor&>(*this), mask, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_fill", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), mask, value);
#endif
}
// Generated dispatch stub for "aten::masked_fill_" (overload "Tensor"; in-place,
// returns *this). Static-dispatch builds support only the CPU backend; other builds
// route through the c10 dispatcher.
inline Tensor & Tensor::masked_fill_(const Tensor & mask, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::masked_fill_(const_cast<Tensor&>(*this), mask, value);
            break;
        default:
            AT_ERROR("masked_fill_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_fill_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mask, value);
#endif
}
inline Tensor Tensor::masked_fill(const Tensor & mask, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::masked_fill(const_cast<Tensor&>(*this), mask, value);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_fill", "Tensor");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mask, value);
#endif
}
// masked_scatter: copy elements from `source` into this tensor at positions
// where `mask` is true. In-place variant returns *this; out-of-place returns
// a new Tensor. Dispatch pattern matches the rest of this generated file:
// static backend switch (CPU only) vs. cached dynamic dispatcher lookup.
inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::masked_scatter_(const_cast<Tensor&>(*this), mask, source);
            break;
        default:
            AT_ERROR("masked_scatter_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_scatter_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mask, source);
#endif
}
// Out-of-place variant; backend-agnostic TypeDefault under static dispatch.
inline Tensor Tensor::masked_scatter(const Tensor & mask, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::masked_scatter(const_cast<Tensor&>(*this), mask, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_scatter", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mask, source);
#endif
}
// view: return a tensor sharing this tensor's storage with a new shape
// `size` (no copy). Unlike most ops in this file, the static-dispatch switch
// also handles Backend::QuantizedCPU in addition to CPU.
inline Tensor Tensor::view(IntArrayRef size) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::view(const_cast<Tensor&>(*this), size);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::view(const_cast<Tensor&>(*this), size);
            break;
        default:
            AT_ERROR("view not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::view", "");
    return op.callUnboxed<Tensor, const Tensor &, IntArrayRef>(const_cast<Tensor&>(*this), size);
#endif
}
// put_: in-place copy of `source` elements into this tensor at the flat
// (linear) positions given by `index`; when `accumulate` is true the values
// are added instead of overwritten. Returns *this.
inline Tensor & Tensor::put_(const Tensor & index, const Tensor & source, bool accumulate) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::put_(const_cast<Tensor&>(*this), index, source, accumulate);
            break;
        default:
            AT_ERROR("put_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::put_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, bool>(const_cast<Tensor&>(*this), index, source, accumulate);
#endif
}
// index_add family: accumulate rows/slices of `source` into this tensor
// along dimension `dim` at the positions in `index`. The in-place `_`
// variant mutates *this; the out-of-place variants return a new Tensor.
// The Dimname overload selects the dimension by name (schema overload
// "dimname") instead of by integer position.
inline Tensor & Tensor::index_add_(int64_t dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::index_add_(const_cast<Tensor&>(*this), dim, index, source);
            break;
        default:
            AT_ERROR("index_add_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_add_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
// Out-of-place, integer-dim variant.
inline Tensor Tensor::index_add(int64_t dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_add(const_cast<Tensor&>(*this), dim, index, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_add", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
// Out-of-place, named-dimension variant.
inline Tensor Tensor::index_add(Dimname dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_add(const_cast<Tensor&>(*this), dim, index, source);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_add", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
// index_fill family: along dimension `dim`, fill the slices selected by
// `index` with `value` (Scalar or single-element Tensor). Overload-name
// suffixes in the schema lookups encode the (dim-kind, value-kind) pair:
// "int_Scalar", "int_Tensor", "Dimname_Scalar", "Dimname_Tensor".
// `_` variants are in-place and return *this.

// In-place, integer dim, Scalar value.
inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::index_fill_(const_cast<Tensor&>(*this), dim, index, value);
            break;
        default:
            AT_ERROR("index_fill_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill_", "int_Scalar");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// Out-of-place, integer dim, Scalar value.
inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_fill(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill", "int_Scalar");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// In-place, integer dim, Tensor value.
inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::index_fill_(const_cast<Tensor&>(*this), dim, index, value);
            break;
        default:
            AT_ERROR("index_fill_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill_", "int_Tensor");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// Out-of-place, integer dim, Tensor value.
inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_fill(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill", "int_Tensor");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// In-place, named dim, Scalar value (no backend switch: TypeDefault only).
inline Tensor & Tensor::index_fill_(Dimname dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_fill_(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill_", "Dimname_Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Dimname, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// In-place, named dim, Tensor value.
inline Tensor & Tensor::index_fill_(Dimname dim, const Tensor & index, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_fill_(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill_", "Dimname_Tensor");
    return op.callUnboxed<Tensor &, Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// Out-of-place, named dim, Scalar value.
inline Tensor Tensor::index_fill(Dimname dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_fill(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill", "Dimname_Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// Out-of-place, named dim, Tensor value.
inline Tensor Tensor::index_fill(Dimname dim, const Tensor & index, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_fill(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_fill", "Dimname_Tensor");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// scatter family: write elements of `src` (or a Scalar `value`) into this
// tensor along `dim` at positions given by `index` (inverse of gather).
// Schema overload names: "src" / "value" for the integer-dim variants,
// "dimname_src" / "dimname_value" for the named-dimension variants.
// `_` variants are in-place and return *this.

// In-place, Tensor source.
inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::scatter_(const_cast<Tensor&>(*this), dim, index, src);
            break;
        default:
            AT_ERROR("scatter_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter_", "src");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
// Out-of-place, Tensor source.
inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::scatter(const_cast<Tensor&>(*this), dim, index, src);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter", "src");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
// In-place, Scalar value.
inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::scatter_(const_cast<Tensor&>(*this), dim, index, value);
            break;
        default:
            AT_ERROR("scatter_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter_", "value");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// Out-of-place, Scalar value.
inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::scatter(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter", "value");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// Out-of-place, named dim, Tensor source.
inline Tensor Tensor::scatter(Dimname dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::scatter(const_cast<Tensor&>(*this), dim, index, src);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter", "dimname_src");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
// Out-of-place, named dim, Scalar value.
inline Tensor Tensor::scatter(Dimname dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::scatter(const_cast<Tensor&>(*this), dim, index, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter", "dimname_value");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, Scalar>(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
// scatter_add family: like scatter, but ADDS `src` elements into this tensor
// along `dim` at the positions in `index` instead of overwriting.
// In-place `_` variant returns *this; Dimname overload selects the dimension
// by name (schema overload "dimname").
inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::scatter_add_(const_cast<Tensor&>(*this), dim, index, src);
            break;
        default:
            AT_ERROR("scatter_add_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter_add_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
// Out-of-place, integer-dim variant.
inline Tensor Tensor::scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::scatter_add(const_cast<Tensor&>(*this), dim, index, src);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter_add", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
// Out-of-place, named-dimension variant.
inline Tensor Tensor::scatter_add(Dimname dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::scatter_add(const_cast<Tensor&>(*this), dim, index, src);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::scatter_add", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
// In-place elementwise comparison operators (lt_, gt_, le_, ge_, eq_, ne_):
// compare this tensor against `other` (Scalar or broadcastable Tensor) and
// overwrite *this with the boolean result; return *this. All six pairs share
// the same shape: TypeDefault under static dispatch, otherwise a cached
// dispatcher lookup with overload name "Scalar" or "Tensor".

// this < other (Scalar).
inline Tensor & Tensor::lt_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::lt_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lt_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this < other (Tensor).
inline Tensor & Tensor::lt_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::lt_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lt_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this > other (Scalar).
inline Tensor & Tensor::gt_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::gt_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::gt_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this > other (Tensor).
inline Tensor & Tensor::gt_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::gt_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::gt_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this <= other (Scalar).
inline Tensor & Tensor::le_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::le_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::le_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this <= other (Tensor).
inline Tensor & Tensor::le_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::le_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::le_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this >= other (Scalar).
inline Tensor & Tensor::ge_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ge_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ge_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this >= other (Tensor).
inline Tensor & Tensor::ge_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ge_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ge_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this == other (Scalar).
inline Tensor & Tensor::eq_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::eq_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::eq_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this == other (Tensor).
inline Tensor & Tensor::eq_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::eq_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::eq_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this != other (Scalar).
inline Tensor & Tensor::ne_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ne_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ne_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this != other (Tensor).
inline Tensor & Tensor::ne_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::ne_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ne_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Bitwise AND group: bitwise_and / bitwise_and_ are the named aten ops;
// __and__ / __iand__ back the Python `&` / `&=` operators. Each comes in a
// Scalar and a broadcastable-Tensor overload. `_`/`__i...__` variants are
// in-place and return *this. All dispatch through TypeDefault (static) or a
// cached dispatcher handle (dynamic).

// Out-of-place AND with a Scalar.
inline Tensor Tensor::bitwise_and(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_and(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_and", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Out-of-place AND with a Tensor.
inline Tensor Tensor::bitwise_and(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_and(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_and", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place AND with a Scalar.
inline Tensor & Tensor::bitwise_and_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_and_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_and_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place AND with a Tensor.
inline Tensor & Tensor::bitwise_and_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_and_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_and_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this & other (Scalar).
inline Tensor Tensor::__and__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__and__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__and__", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this & other (Tensor).
inline Tensor Tensor::__and__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__and__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__and__", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this &= other (Scalar), in place.
inline Tensor & Tensor::__iand__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__iand__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__iand__", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this &= other (Tensor), in place.
inline Tensor & Tensor::__iand__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__iand__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__iand__", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Bitwise OR group: bitwise_or / bitwise_or_ are the named aten ops;
// __or__ / __ior__ back the Python `|` / `|=` operators. Structure mirrors
// the bitwise AND group: Scalar and Tensor overloads, in-place variants
// return *this, TypeDefault under static dispatch.

// Out-of-place OR with a Scalar.
inline Tensor Tensor::bitwise_or(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_or(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_or", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Out-of-place OR with a Tensor.
inline Tensor Tensor::bitwise_or(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_or(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_or", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place OR with a Scalar.
inline Tensor & Tensor::bitwise_or_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_or_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_or_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place OR with a Tensor.
inline Tensor & Tensor::bitwise_or_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_or_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_or_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this | other (Scalar).
inline Tensor Tensor::__or__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__or__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__or__", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this | other (Tensor).
inline Tensor Tensor::__or__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__or__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__or__", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this |= other (Scalar), in place.
inline Tensor & Tensor::__ior__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__ior__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__ior__", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this |= other (Tensor), in place.
inline Tensor & Tensor::__ior__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__ior__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__ior__", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Bitwise XOR group: bitwise_xor / bitwise_xor_ are the named aten ops;
// __xor__ / __ixor__ back the Python `^` / `^=` operators. Same layout as
// the AND/OR groups above.

// Out-of-place XOR with a Scalar.
inline Tensor Tensor::bitwise_xor(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_xor(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_xor", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Out-of-place XOR with a Tensor.
inline Tensor Tensor::bitwise_xor(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_xor(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_xor", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place XOR with a Scalar.
inline Tensor & Tensor::bitwise_xor_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_xor_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_xor_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// In-place XOR with a Tensor.
inline Tensor & Tensor::bitwise_xor_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::bitwise_xor_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::bitwise_xor_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this ^ other (Scalar).
inline Tensor Tensor::__xor__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__xor__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__xor__", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this ^ other (Tensor).
inline Tensor Tensor::__xor__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__xor__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__xor__", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this ^= other (Scalar), in place.
inline Tensor & Tensor::__ixor__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__ixor__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__ixor__", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// Operator form: this ^= other (Tensor), in place.
inline Tensor & Tensor::__ixor__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::__ixor__(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__ixor__", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Left-shift group: __lshift__ backs the Python `<<` operator and
// __ilshift__ the in-place `<<=`. Unlike the bitwise AND/OR/XOR groups,
// static dispatch here goes through a CPU-only backend switch (no
// TypeDefault path); other backends raise AT_ERROR.

// this << other (Scalar).
inline Tensor Tensor::__lshift__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__lshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__lshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__lshift__", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this << other (Tensor).
inline Tensor Tensor::__lshift__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__lshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__lshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__lshift__", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this <<= other (Scalar), in place; returns *this.
inline Tensor & Tensor::__ilshift__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__ilshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__ilshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__ilshift__", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this <<= other (Tensor), in place; returns *this.
inline Tensor & Tensor::__ilshift__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__ilshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__ilshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__ilshift__", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Right-shift group: __rshift__ backs the Python `>>` operator and
// __irshift__ the in-place `>>=`. Same CPU-only static-dispatch switch as
// the left-shift group. (The Tensor overload of __irshift__ continues
// below this chunk.)

// this >> other (Scalar).
inline Tensor Tensor::__rshift__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__rshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__rshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__rshift__", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
// this >> other (Tensor).
inline Tensor Tensor::__rshift__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__rshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__rshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__rshift__", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// this >>= other (Scalar), in place; returns *this.
inline Tensor & Tensor::__irshift__(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::__irshift__(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("__irshift__ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__irshift__", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::__irshift__(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::__irshift__(const_cast<Tensor&>(*this), other);
break;
default:
AT_ERROR("__irshift__ not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::__irshift__", "Tensor");
return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// ---------------------------------------------------------------------------
// In-place elementwise math and triangular-mask mutators.
// NOTE(review): generated dispatch stubs — same template per op, so prefer
// regenerating over hand-edits. Two variants appear below:
//   * backend-switched (lgamma_, tril_, triu_): static dispatch resolves to
//     CPUType only, AT_ERROR otherwise;
//   * TypeDefault-routed (atan2_, digamma_, polygamma_): static dispatch goes
//     straight to the backend-agnostic default kernel.
// The dynamic branch always routes through a cached c10 OperatorHandle.
// ---------------------------------------------------------------------------
inline Tensor & Tensor::lgamma_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::lgamma_(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("lgamma_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lgamma_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::atan2_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::atan2_(const_cast<Tensor&>(*this), other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::atan2_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::tril_(int64_t diagonal) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::tril_(const_cast<Tensor&>(*this), diagonal);
            break;
        default:
            AT_ERROR("tril_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::tril_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t>(const_cast<Tensor&>(*this), diagonal);
#endif
}
inline Tensor & Tensor::triu_(int64_t diagonal) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::triu_(const_cast<Tensor&>(*this), diagonal);
            break;
        default:
            AT_ERROR("triu_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::triu_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t>(const_cast<Tensor&>(*this), diagonal);
#endif
}
inline Tensor & Tensor::digamma_() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::digamma_(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::digamma_", "");
    return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::polygamma_(int64_t n) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::polygamma_(const_cast<Tensor&>(*this), n);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::polygamma_", "");
    return op.callUnboxed<Tensor &, Tensor &, int64_t>(const_cast<Tensor&>(*this), n);
#endif
}
// ---------------------------------------------------------------------------
// In-place binary/ternary arithmetic (renorm_, pow_, lerp_, fmod_,
// remainder_, addbmm_/addbmm, addcdiv_).
// NOTE(review): generated dispatch stubs; prefer regenerating over hand-edits.
// Overloaded ops (pow_, lerp_, fmod_, remainder_) distinguish their schema by
// the overload-name string passed to findSchemaOrThrow ("Scalar" vs "Tensor");
// the overload string must match the argument types in callUnboxed<...>.
// Static dispatch covers CPU only, except addcdiv_ which routes to
// TypeDefault directly.
// ---------------------------------------------------------------------------
inline Tensor & Tensor::renorm_(Scalar p, int64_t dim, Scalar maxnorm) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::renorm_(const_cast<Tensor&>(*this), p, dim, maxnorm);
            break;
        default:
            AT_ERROR("renorm_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::renorm_", "");
    return op.callUnboxed<Tensor &, Tensor &, Scalar, int64_t, Scalar>(const_cast<Tensor&>(*this), p, dim, maxnorm);
#endif
}
inline Tensor & Tensor::pow_(Scalar exponent) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::pow_(const_cast<Tensor&>(*this), exponent);
            break;
        default:
            AT_ERROR("pow_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::pow_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), exponent);
#endif
}
inline Tensor & Tensor::pow_(const Tensor & exponent) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::pow_(const_cast<Tensor&>(*this), exponent);
            break;
        default:
            AT_ERROR("pow_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::pow_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), exponent);
#endif
}
inline Tensor & Tensor::lerp_(const Tensor & end, Scalar weight) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::lerp_(const_cast<Tensor&>(*this), end, weight);
            break;
        default:
            AT_ERROR("lerp_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lerp_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), end, weight);
#endif
}
inline Tensor & Tensor::lerp_(const Tensor & end, const Tensor & weight) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::lerp_(const_cast<Tensor&>(*this), end, weight);
            break;
        default:
            AT_ERROR("lerp_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lerp_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), end, weight);
#endif
}
inline Tensor & Tensor::fmod_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::fmod_(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("fmod_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fmod_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::fmod_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::fmod_(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("fmod_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fmod_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::remainder_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::remainder_(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("remainder_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::remainder_", "Scalar");
    return op.callUnboxed<Tensor &, Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::remainder_(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::remainder_(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("remainder_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::remainder_", "Tensor");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor & Tensor::addbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::addbmm_(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
            break;
        default:
            AT_ERROR("addbmm_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addbmm_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
#endif
}
inline Tensor Tensor::addbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::addbmm(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
            break;
        default:
            AT_ERROR("addbmm not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addbmm", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar>(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
#endif
}
inline Tensor & Tensor::addcdiv_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::addcdiv_(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addcdiv_", "");
    return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#endif
}
// ---------------------------------------------------------------------------
// In-place random fills (random_, uniform_, cauchy_, log_normal_,
// exponential_, geometric_), each taking an optional Generator*.
// NOTE(review): generated dispatch stubs; prefer regenerating over hand-edits.
// random_ has three schemas selected by overload name: "from" (both bounds),
// "to" (upper bound only), and "" (no bounds). The overload string must stay
// in sync with the argument list passed to callUnboxed.
// ---------------------------------------------------------------------------
inline Tensor & Tensor::random_(int64_t from, int64_t to, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::random_(const_cast<Tensor&>(*this), from, to, generator);
            break;
        default:
            AT_ERROR("random_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::random_", "from");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, int64_t, Generator *>(const_cast<Tensor&>(*this), from, to, generator);
#endif
}
inline Tensor & Tensor::random_(int64_t to, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::random_(const_cast<Tensor&>(*this), to, generator);
            break;
        default:
            AT_ERROR("random_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::random_", "to");
    return op.callUnboxed<Tensor &, Tensor &, int64_t, Generator *>(const_cast<Tensor&>(*this), to, generator);
#endif
}
inline Tensor & Tensor::random_(Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::random_(const_cast<Tensor&>(*this), generator);
            break;
        default:
            AT_ERROR("random_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::random_", "");
    return op.callUnboxed<Tensor &, Tensor &, Generator *>(const_cast<Tensor&>(*this), generator);
#endif
}
inline Tensor & Tensor::uniform_(double from, double to, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::uniform_(const_cast<Tensor&>(*this), from, to, generator);
            break;
        default:
            AT_ERROR("uniform_ not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::uniform_", "");
    return op.callUnboxed<Tensor &, Tensor &, double, double, Generator *>(const_cast<Tensor&>(*this), from, to, generator);
#endif
}
inline Tensor & Tensor::cauchy_(double median, double sigma, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cauchy_(const_cast<Tensor&>(*this), median, sigma, generator);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cauchy_", "");
    return op.callUnboxed<Tensor &, Tensor &, double, double, Generator *>(const_cast<Tensor&>(*this), median, sigma, generator);
#endif
}
inline Tensor & Tensor::log_normal_(double mean, double std, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::log_normal_(const_cast<Tensor&>(*this), mean, std, generator);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::log_normal_", "");
    return op.callUnboxed<Tensor &, Tensor &, double, double, Generator *>(const_cast<Tensor&>(*this), mean, std, generator);
#endif
}
inline Tensor & Tensor::exponential_(double lambd, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::exponential_(const_cast<Tensor&>(*this), lambd, generator);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::exponential_", "");
    return op.callUnboxed<Tensor &, Tensor &, double, Generator *>(const_cast<Tensor&>(*this), lambd, generator);
#endif
}
inline Tensor & Tensor::geometric_(double p, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::geometric_(const_cast<Tensor&>(*this), p, generator);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::geometric_", "");
    return op.callUnboxed<Tensor &, Tensor &, double, Generator *>(const_cast<Tensor&>(*this), p, generator);
#endif
}
// ---------------------------------------------------------------------------
// Matrix helpers returning new tensors: diag, cross, triu, tril, trace.
// NOTE(review): generated dispatch stubs; prefer regenerating over hand-edits.
// diag and trace resolve to CPUType under static dispatch; cross/triu/tril
// go to TypeDefault. The dynamic branch uses a cached c10 OperatorHandle.
// ---------------------------------------------------------------------------
inline Tensor Tensor::diag(int64_t diagonal) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::diag(const_cast<Tensor&>(*this), diagonal);
            break;
        default:
            AT_ERROR("diag not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::diag", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), diagonal);
#endif
}
inline Tensor Tensor::cross(const Tensor & other, c10::optional<int64_t> dim) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::cross(const_cast<Tensor&>(*this), other, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cross", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, c10::optional<int64_t>>(const_cast<Tensor&>(*this), other, dim);
#endif
}
inline Tensor Tensor::triu(int64_t diagonal) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::triu(const_cast<Tensor&>(*this), diagonal);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::triu", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), diagonal);
#endif
}
inline Tensor Tensor::tril(int64_t diagonal) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::tril(const_cast<Tensor&>(*this), diagonal);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::tril", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t>(const_cast<Tensor&>(*this), diagonal);
#endif
}
inline Tensor Tensor::trace() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::trace(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("trace not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::trace", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// ---------------------------------------------------------------------------
// Elementwise comparisons ne/eq/ge/le/gt/lt, each with Scalar and Tensor
// overloads (schema overload strings "Scalar" / "Tensor").
// NOTE(review): generated dispatch stubs; prefer regenerating over hand-edits.
// Unlike the arithmetic ops above, static dispatch here also handles
// Backend::QuantizedCPU in addition to Backend::CPU.
// ---------------------------------------------------------------------------
inline Tensor Tensor::ne(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::ne(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::ne(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("ne not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ne", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::ne(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::ne(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::ne(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("ne not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ne", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::eq(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::eq(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::eq(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("eq not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::eq", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::eq(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::eq(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::eq(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("eq not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::eq", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::ge(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::ge(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::ge(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("ge not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ge", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::ge(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::ge(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::ge(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("ge not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ge", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::le(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::le(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::le(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("le not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::le", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::le(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::le(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::le(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("le not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::le", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::gt(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::gt(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::gt(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("gt not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::gt", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::gt(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::gt(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::gt(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("gt not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::gt", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::lt(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::lt(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::lt(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("lt not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lt", "Scalar");
    return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::lt(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::lt(const_cast<Tensor&>(*this), other);
            break;
        case Backend::QuantizedCPU:
            return QuantizedCPUType::lt(const_cast<Tensor&>(*this), other);
            break;
        default:
            AT_ERROR("lt not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lt", "Tensor");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// ---------------------------------------------------------------------------
// Selection / gathering ops: take, index_select (int64 dim + Dimname dim),
// masked_select, nonzero, nonzero_numpy, gather (int64 dim + Dimname dim),
// addcmul.
// NOTE(review): generated dispatch stubs; prefer regenerating over hand-edits.
// index_select (int64 overload) is the only op in this run with a SparseCPU
// case under static dispatch. The Dimname overloads route to TypeDefault and
// use the "dimname" schema overload string in the dispatcher branch.
// ---------------------------------------------------------------------------
inline Tensor Tensor::take(const Tensor & index) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::take(const_cast<Tensor&>(*this), index);
            break;
        default:
            AT_ERROR("take not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::take", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), index);
#endif
}
inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::index_select(const_cast<Tensor&>(*this), dim, index);
            break;
        case Backend::SparseCPU:
            return SparseCPUType::index_select(const_cast<Tensor&>(*this), dim, index);
            break;
        default:
            AT_ERROR("index_select not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_select", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &>(const_cast<Tensor&>(*this), dim, index);
#endif
}
inline Tensor Tensor::index_select(Dimname dim, const Tensor & index) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::index_select(const_cast<Tensor&>(*this), dim, index);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::index_select", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &>(const_cast<Tensor&>(*this), dim, index);
#endif
}
inline Tensor Tensor::masked_select(const Tensor & mask) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::masked_select(const_cast<Tensor&>(*this), mask);
            break;
        default:
            AT_ERROR("masked_select not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::masked_select", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), mask);
#endif
}
inline Tensor Tensor::nonzero() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::nonzero(const_cast<Tensor&>(*this));
            break;
        default:
            AT_ERROR("nonzero not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::nonzero", "");
    return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline std::vector<Tensor> Tensor::nonzero_numpy() const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::nonzero_numpy(const_cast<Tensor&>(*this));
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::nonzero_numpy", "");
    return op.callUnboxed<std::vector<Tensor>, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
        case Backend::CPU:
            return CPUType::gather(const_cast<Tensor&>(*this), dim, index, sparse_grad);
            break;
        default:
            AT_ERROR("gather not implemented for ", at::toString(key_set()));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::gather", "");
    return op.callUnboxed<Tensor, const Tensor &, int64_t, const Tensor &, bool>(const_cast<Tensor&>(*this), dim, index, sparse_grad);
#endif
}
inline Tensor Tensor::gather(Dimname dim, const Tensor & index, bool sparse_grad) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::gather(const_cast<Tensor&>(*this), dim, index, sparse_grad);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::gather", "dimname");
    return op.callUnboxed<Tensor, const Tensor &, Dimname, const Tensor &, bool>(const_cast<Tensor&>(*this), dim, index, sparse_grad);
#endif
}
inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
    at::AutoNonVariableTypeMode _var_guard(true);
    return TypeDefault::addcmul(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addcmul", "");
    return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#endif
}
inline Tensor & Tensor::addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::addcmul_(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addcmul_", "");
return op.callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#endif
}
inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::addcdiv(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::addcdiv", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), tensor1, tensor2, value);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::lstsq(const Tensor & A) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::lstsq(const_cast<Tensor&>(*this), A);
break;
default:
AT_ERROR("lstsq not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lstsq", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), A);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::triangular_solve(const Tensor & A, bool upper, bool transpose, bool unitriangular) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::triangular_solve(const_cast<Tensor&>(*this), A, upper, transpose, unitriangular);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::triangular_solve", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, bool, bool, bool>(const_cast<Tensor&>(*this), A, upper, transpose, unitriangular);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::symeig(bool eigenvectors, bool upper) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::symeig(const_cast<Tensor&>(*this), eigenvectors, upper);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::symeig", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, bool, bool>(const_cast<Tensor&>(*this), eigenvectors, upper);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::eig(bool eigenvectors) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::eig(const_cast<Tensor&>(*this), eigenvectors);
break;
default:
AT_ERROR("eig not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::eig", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, bool>(const_cast<Tensor&>(*this), eigenvectors);
#endif
}
inline std::tuple<Tensor,Tensor,Tensor> Tensor::svd(bool some, bool compute_uv) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::svd(const_cast<Tensor&>(*this), some, compute_uv);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::svd", "");
return op.callUnboxed<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, bool, bool>(const_cast<Tensor&>(*this), some, compute_uv);
#endif
}
inline Tensor Tensor::cholesky(bool upper) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::cholesky(const_cast<Tensor&>(*this), upper);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cholesky", "");
return op.callUnboxed<Tensor, const Tensor &, bool>(const_cast<Tensor&>(*this), upper);
#endif
}
inline Tensor Tensor::cholesky_solve(const Tensor & input2, bool upper) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::cholesky_solve(const_cast<Tensor&>(*this), input2, upper);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cholesky_solve", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, bool>(const_cast<Tensor&>(*this), input2, upper);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::solve(const Tensor & A) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::solve(const_cast<Tensor&>(*this), A);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::solve", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), A);
#endif
}
inline Tensor Tensor::cholesky_inverse(bool upper) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::cholesky_inverse(const_cast<Tensor&>(*this), upper);
break;
default:
AT_ERROR("cholesky_inverse not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::cholesky_inverse", "");
return op.callUnboxed<Tensor, const Tensor &, bool>(const_cast<Tensor&>(*this), upper);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::qr(bool some) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::qr(const_cast<Tensor&>(*this), some);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::qr", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, bool>(const_cast<Tensor&>(*this), some);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::geqrf() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::geqrf(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("geqrf not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::geqrf", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::orgqr(const Tensor & input2) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::orgqr(const_cast<Tensor&>(*this), input2);
break;
default:
AT_ERROR("orgqr not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::orgqr", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), input2);
#endif
}
inline Tensor Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool left, bool transpose) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::ormqr(const_cast<Tensor&>(*this), input2, input3, left, transpose);
break;
default:
AT_ERROR("ormqr not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::ormqr", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, bool, bool>(const_cast<Tensor&>(*this), input2, input3, left, transpose);
#endif
}
inline Tensor Tensor::lu_solve(const Tensor & LU_data, const Tensor & LU_pivots) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::lu_solve(const_cast<Tensor&>(*this), LU_data, LU_pivots);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lu_solve", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), LU_data, LU_pivots);
#endif
}
inline Tensor Tensor::multinomial(int64_t num_samples, bool replacement, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::multinomial(const_cast<Tensor&>(*this), num_samples, replacement, generator);
break;
default:
AT_ERROR("multinomial not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::multinomial", "");
return op.callUnboxed<Tensor, const Tensor &, int64_t, bool, Generator *>(const_cast<Tensor&>(*this), num_samples, replacement, generator);
#endif
}
inline Tensor Tensor::lgamma() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::lgamma(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("lgamma not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lgamma", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::digamma() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::digamma(const_cast<Tensor&>(*this));
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::digamma", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Elementwise polygamma of order `n` evaluated on *this.
// Note the schema argument order: in "aten::polygamma" the integer `n`
// precedes the tensor, so both dispatch paths below pass (n, *this) rather
// than the usual (*this, ...) ordering used by the other methods here.
inline Tensor Tensor::polygamma(int64_t n) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::polygamma(n, const_cast<Tensor&>(*this));
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::polygamma", "");
return op.callUnboxed<Tensor, int64_t, const Tensor &>(n, const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::erfinv() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::erfinv(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("erfinv not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::erfinv", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// In-place (trailing-underscore) variant of erfinv, returning a reference to
// *this.  The method is const-qualified even though the operation writes the
// tensor's data: `const` applies to the Tensor handle, not the underlying
// storage, which is why the generated code const_casts *this before
// dispatching with a mutable `Tensor &` signature.
inline Tensor & Tensor::erfinv_() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::erfinv_(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("erfinv_ not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::erfinv_", "");
return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::sign() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::sign(const_cast<Tensor&>(*this));
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sign", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor & Tensor::sign_() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::sign_(const_cast<Tensor&>(*this));
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sign_", "");
return op.callUnboxed<Tensor &, Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::dist(const Tensor & other, Scalar p) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::dist(const_cast<Tensor&>(*this), other, p);
break;
default:
AT_ERROR("dist not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::dist", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other, p);
#endif
}
inline Tensor Tensor::atan2(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::atan2(const_cast<Tensor&>(*this), other);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::atan2", "");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::lerp(const Tensor & end, Scalar weight) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::lerp(const_cast<Tensor&>(*this), end, weight);
break;
default:
AT_ERROR("lerp not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lerp", "Scalar");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, Scalar>(const_cast<Tensor&>(*this), end, weight);
#endif
}
inline Tensor Tensor::lerp(const Tensor & end, const Tensor & weight) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::lerp(const_cast<Tensor&>(*this), end, weight);
break;
default:
AT_ERROR("lerp not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::lerp", "Tensor");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), end, weight);
#endif
}
inline Tensor Tensor::histc(int64_t bins, Scalar min, Scalar max) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::histc(const_cast<Tensor&>(*this), bins, min, max);
break;
default:
AT_ERROR("histc not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::histc", "");
return op.callUnboxed<Tensor, const Tensor &, int64_t, Scalar, Scalar>(const_cast<Tensor&>(*this), bins, min, max);
#endif
}
inline Tensor Tensor::fmod(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::fmod(const_cast<Tensor&>(*this), other);
break;
default:
AT_ERROR("fmod not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fmod", "Scalar");
return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::fmod(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::fmod(const_cast<Tensor&>(*this), other);
break;
default:
AT_ERROR("fmod not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::fmod", "Tensor");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::remainder(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::remainder(const_cast<Tensor&>(*this), other);
break;
default:
AT_ERROR("remainder not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::remainder", "Scalar");
return op.callUnboxed<Tensor, const Tensor &, Scalar>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::remainder(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::remainder(const_cast<Tensor&>(*this), other);
break;
default:
AT_ERROR("remainder not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::remainder", "Tensor");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::min(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::min(const_cast<Tensor&>(*this), other);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::min", "other");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
// Overload-free min(): full reduction over all elements, dispatched through
// the "aten::min" schema with an empty overload name (the binary form above
// uses the "other" overload instead).  Static dispatch supports both the CPU
// and QuantizedCPU backends; any other dispatch key is a hard error.
inline Tensor Tensor::min() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::min(const_cast<Tensor&>(*this));
break;
case Backend::QuantizedCPU:
return QuantizedCPUType::min(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("min not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::min", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::max(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::max(const_cast<Tensor&>(*this), other);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::max", "other");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::max() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::max(const_cast<Tensor&>(*this));
break;
case Backend::QuantizedCPU:
return QuantizedCPUType::max(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("max not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::max", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::median() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::median(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("median not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::median", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::sort(int64_t dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::sort(const_cast<Tensor&>(*this), dim, descending);
break;
case Backend::QuantizedCPU:
return QuantizedCPUType::sort(const_cast<Tensor&>(*this), dim, descending);
break;
default:
AT_ERROR("sort not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sort", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, descending);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::sort(Dimname dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::sort(const_cast<Tensor&>(*this), dim, descending);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::sort", "dimname");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, descending);
#endif
}
inline Tensor Tensor::argsort(int64_t dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::argsort(const_cast<Tensor&>(*this), dim, descending);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::argsort", "");
return op.callUnboxed<Tensor, const Tensor &, int64_t, bool>(const_cast<Tensor&>(*this), dim, descending);
#endif
}
inline Tensor Tensor::argsort(Dimname dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::argsort(const_cast<Tensor&>(*this), dim, descending);
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::argsort", "dimname");
return op.callUnboxed<Tensor, const Tensor &, Dimname, bool>(const_cast<Tensor&>(*this), dim, descending);
#endif
}
inline std::tuple<Tensor,Tensor> Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::topk(const_cast<Tensor&>(*this), k, dim, largest, sorted);
break;
case Backend::QuantizedCPU:
return QuantizedCPUType::topk(const_cast<Tensor&>(*this), k, dim, largest, sorted);
break;
default:
AT_ERROR("topk not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::topk", "");
return op.callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, int64_t, bool, bool>(const_cast<Tensor&>(*this), k, dim, largest, sorted);
#endif
}
inline Tensor Tensor::all() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::all(const_cast<Tensor&>(*this));
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::all", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::any() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::any(const_cast<Tensor&>(*this));
break;
case Backend::SparseCPU:
return SparseCPUType::any(const_cast<Tensor&>(*this));
break;
default:
AT_ERROR("any not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::any", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
inline Tensor Tensor::renorm(Scalar p, int64_t dim, Scalar maxnorm) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::renorm(const_cast<Tensor&>(*this), p, dim, maxnorm);
break;
default:
AT_ERROR("renorm not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::renorm", "");
return op.callUnboxed<Tensor, const Tensor &, Scalar, int64_t, Scalar>(const_cast<Tensor&>(*this), p, dim, maxnorm);
#endif
}
inline Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::unfold(const_cast<Tensor&>(*this), dimension, size, step);
break;
default:
AT_ERROR("unfold not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::unfold", "");
return op.callUnboxed<Tensor, const Tensor &, int64_t, int64_t, int64_t>(const_cast<Tensor&>(*this), dimension, size, step);
#endif
}
inline bool Tensor::equal(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::equal(const_cast<Tensor&>(*this), other);
break;
case Backend::QuantizedCPU:
return QuantizedCPUType::equal(const_cast<Tensor&>(*this), other);
break;
default:
AT_ERROR("equal not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::equal", "");
return op.callUnboxed<bool, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), other);
#endif
}
inline Tensor Tensor::pow(const Tensor & exponent) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::pow(const_cast<Tensor&>(*this), exponent);
break;
default:
AT_ERROR("pow not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::pow", "Tensor_Tensor");
return op.callUnboxed<Tensor, const Tensor &, const Tensor &>(const_cast<Tensor&>(*this), exponent);
#endif
}
// In-place fill with samples from a normal distribution parameterized by
// `mean` and `std`; `generator` supplies the RNG state (semantics of a null
// generator are defined by the kernel — not visible here).  As with all
// in-place ops in this generated header, the method is const-qualified yet
// mutates the data: `const` refers to the handle, hence the const_cast and
// the mutable `Tensor &` dispatch signature.  Only CPU is statically
// dispatched.
inline Tensor & Tensor::normal_(double mean, double std, Generator * generator) const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
switch(dispatchKeyToBackend(c10::impl::dispatchTypeId(key_set(), c10::DispatchKeySet(c10::DispatchKeySet::FULL)))) {
case Backend::CPU:
return CPUType::normal_(const_cast<Tensor&>(*this), mean, std, generator);
break;
default:
AT_ERROR("normal_ not implemented for ", at::toString(key_set()));
}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::normal_", "");
return op.callUnboxed<Tensor &, Tensor &, double, double, Generator *>(const_cast<Tensor&>(*this), mean, std, generator);
#endif
}
inline Tensor Tensor::alias() const {
#ifdef USE_STATIC_DISPATCH
at::AutoNonVariableTypeMode _var_guard(true);
return TypeDefault::alias(const_cast<Tensor&>(*this));
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::alias", "");
return op.callUnboxed<Tensor, const Tensor &>(const_cast<Tensor&>(*this));
#endif
}
// Lightweight metadata accessors: these read straight off the TensorImpl
// rather than going through the dispatcher, avoiding per-call dispatch
// overhead (same rationale as the "NB" notes on the accessors below).
// Element type (as a caffe2::TypeMeta) of this tensor.
inline caffe2::TypeMeta Tensor::dtype() const noexcept {
return impl_->dtype();
}
// Memory layout (e.g. strided vs. sparse) of this tensor.
inline Layout Tensor::layout() const noexcept {
return impl_->layout();
}
// Device the tensor's storage lives on.
inline Device Tensor::device() const {
return impl_->device();
}
// Ordinal of the device holding this tensor's storage.
inline int64_t Tensor::get_device() const {
// NB: this is not a native function to avoid dispatching overhead.
return impl_->get_device();
}
// Free-function form; forwards to the member.  It takes the Tensor by value —
// NOTE(review): presumably that copies the handle (refcount traffic); a
// const& parameter would avoid it, but the signature is kept as generated.
inline int64_t get_device(Tensor self) {
return self.get_device();
}
// True iff the tensor lives on a CUDA device.
inline bool Tensor::is_cuda() const {
// NB: this is not a native function to avoid dispatching overhead.
return impl_->is_cuda();
}
// Accessors for the named-tensor metadata attached to the TensorImpl.  The
// impl stores the metadata behind an interface pointer; these helpers
// downcast it to the concrete NamedTensorMeta type.
// NOTE(review): presumably returns nullptr when no names are attached (see
// has_names() below, which checks has_named_tensor_meta() first) — confirm
// against TensorImpl::named_tensor_meta().
inline NamedTensorMeta* Tensor::get_named_tensor_meta() {
return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
}
inline const NamedTensorMeta* Tensor::get_named_tensor_meta() const {
// Cast directly to pointer-to-const: the const overload should never hold a
// mutable view of the metadata, even transiently.  (Previously this cast to
// NamedTensorMeta* and relied on the implicit conversion to const.)
return static_cast<const NamedTensorMeta*>(impl_->named_tensor_meta());
}
// Whether this tensor carries dimension names (named-tensor support).
inline bool Tensor::has_names() const {
// If a user is using unnamed tensors, then we can short-circuit right here.
// Otherwise, impl::has_names attempts to retrieve names.
if (!impl_->has_named_tensor_meta()) {
return false;
}
return impl::has_names(unsafeGetTensorImpl());
}
// Cheap backend predicates.  Each member reads the flag straight off the
// TensorImpl (no dispatcher round-trip); each is paired with a free-function
// form that forwards to the member.  The free functions take the Tensor by
// value — NOTE(review): presumably a handle copy (refcount traffic); kept
// as generated.
inline bool is_cuda(Tensor self) {
return self.is_cuda();
}
// True iff the tensor lives on a HIP (ROCm) device.
inline bool Tensor::is_hip() const {
// NB: this is not a native function to avoid dispatching overhead.
return impl_->is_hip();
}
inline bool is_hip(Tensor self) {
return self.is_hip();
}
// True iff the tensor uses the sparse (COO) layout.
inline bool Tensor::is_sparse() const {
// NB: this is not a native function to avoid dispatching overhead.
return impl_->is_sparse();
}
inline bool is_sparse(Tensor self) {
return self.is_sparse();
}
// True iff the tensor is backed by an MKL-DNN (oneDNN) representation.
inline bool Tensor::is_mkldnn() const {
// NB: this is not a native function to avoid dispatching overhead.
return impl_->is_mkldnn();
}
inline bool is_mkldnn(Tensor self) {
return self.is_mkldnn();
}
// True iff the tensor holds quantized data.
inline bool Tensor::is_quantized() const {
// NB: this is not a native function to avoid dispatching overhead.
return impl_->is_quantized();
}
inline bool is_quantized(Tensor self) {
return self.is_quantized();
}
// DEFINE_CAST generates the typed data_ptr<T>() specializations: each one
// verifies that the tensor's scalar type matches the requested C++ type
// (TORCH_CHECK throws otherwise) and then returns the raw storage pointer.
#define DEFINE_CAST(T, name) \
template <> \
inline T* Tensor::data_ptr() const { \
TORCH_CHECK( \
scalar_type() == ScalarType::name, \
"expected scalar type ", \
#name, \
" but found ", \
c10::toString(scalar_type())); \
return static_cast<T*>(this->unsafeGetTensorImpl()->data()); \
}
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(DEFINE_CAST)
AT_FORALL_QINT_TYPES(DEFINE_CAST)
#undef DEFINE_CAST
// DEFINE_ITEM generates the typed item<T>() specializations by converting
// the Scalar returned by the untyped item() via its to<name>() accessor.
#define DEFINE_ITEM(T, name) \
template <> \
inline T Tensor::item() const { \
return item().to##name(); \
}
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(DEFINE_ITEM)
#undef DEFINE_ITEM
// Gradient Node and Edges
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Overload selected when the user's hook returns void: wrap it so the
// registered callback always yields a Tensor (an empty one), which is the
// form _register_hook expects.
template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> {
  std::function<void(Tensor)> wrapped(hook);
  auto adapter = [wrapped](const Tensor& grad) -> Tensor {
    wrapped(grad);
    return Tensor();
  };
  return _register_hook(adapter);
}
// Overload selected when the hook already returns a Tensor: it can be
// registered directly without adaptation.
template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t<T> {
  return _register_hook(hook);
}
} //namespace at
| [
"somashekhar@submit2.chtc.wisc.edu"
] | somashekhar@submit2.chtc.wisc.edu |
3ec0931a3983e352576a86997932857a7faa6624 | b21d7b3a901f751aa9f500dc5b310fc273f869f3 | /Nexus/Source/Nexus/Graphics/GLMessage.cpp | b7d0f2ecfff7c25209158c93e8b84bcd634fc550 | [] | no_license | jkstpierre/Nexus | 0fc8d5af7c622e6ee7e7a526ad917bfbe290245f | 9fc38141ebfb2a284fb9168d4acda2115539454d | refs/heads/master | 2021-05-18T06:05:35.225543 | 2020-04-23T21:50:04 | 2020-04-23T21:50:04 | 251,149,197 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,197 | cpp | /// File: Source\Nexus\Graphics\GLMessage.cpp.
///
/// Summary: Implements the gl message class.
#include <cstring>
#include <Nexus\Graphics\GLMessage.hpp>
namespace Nexus::Graphics
{
GLMessage::GLMessage(const GLMessageSource& source, const GLMessageType& type, const GLMessageSeverity& severity,
const unsigned int& glID, const char* message, const size_t& messageLength) noexcept :
mSource(source), mType(type), mSeverity(severity), mMessage(message, messageLength), GLObject(glID)
{
}
bool GLMessage::operator==(const GLMessage& msg) const noexcept
{
return (mSource == msg.GetSource()) && (mType == msg.GetType()) && (mSeverity == msg.GetSeverity()) && (mGLID == msg.GetGLID());
}
bool GLMessage::operator!=(const GLMessage& msg) const noexcept
{
return !(*this == msg);
}
  // Read-only accessor: origin of the debug message.
  const GLMessageSource& GLMessage::GetSource() const noexcept
  {
    return mSource;
  }
  // Read-only accessor: category of the debug message.
  const GLMessageType& GLMessage::GetType() const noexcept
  {
    return mType;
  }
  // Read-only accessor: severity level of the debug message.
  const GLMessageSeverity& GLMessage::GetSeverity() const noexcept
  {
    return mSeverity;
  }
  // Returns the message text as a NUL-terminated C string.  The pointer is
  // owned by this object and remains valid only while the GLMessage lives.
  const char* GLMessage::ReadMessage() const noexcept
  {
    return mMessage.c_str();
  }
}
// End of Source\Nexus\Graphics\GLMessage.cpp
| [
"jkstpierre@wpi.edu"
] | jkstpierre@wpi.edu |
c418f39f1f85bc15613ae1d74bb3cad62469daed | 72852e07bb30adbee608275d6048b2121a5b9d82 | /algorithms/problem_1320/other2.cpp | 32fa01f5ddb28379091e56f9e90b9bdc47caf69e | [] | no_license | drlongle/leetcode | e172ae29ea63911ccc3afb815f6dbff041609939 | 8e61ddf06fb3a4fb4a4e3d8466f3367ee1f27e13 | refs/heads/master | 2023-01-08T16:26:12.370098 | 2023-01-03T09:08:24 | 2023-01-03T09:08:24 | 81,335,609 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,361 | cpp | /*
In our dynamic programming, dp[a] means that,
if our left finger ends at character a,
the maximum we can save is dp[a].
Now our right finger tapped all letters, and left finger did nothing.
We iterate through the whole string one by one
and select some letter to tap with the left finger.
By doing this, we want to find out the maximum distance that we can save from the tapping with one finger.
Assume that our left finger is at a now,
our right finger is at b,
and we the right finger will tap c next.
Instead of moving right finger from b to c with distance d(b, c),
we try moving left finger from a to c with distance d(a, c).
Hopely this will save d(b, c) - d(a, c).
And finaly, we have one fingers at b and the other at c now.
The finger at b will be new left finger, and the other will be the right.
*/
class Solution {
public:
    // Minimum total finger travel to type `word` with two fingers on the
    // 6-column A..Z keyboard (initial placements are free).
    // saved[a] = the largest total distance we can avoid, given that the
    // idle finger currently rests on letter a while the other finger has
    // typed every character so far.  The answer is the one-finger cost of
    // typing the whole word minus the best achievable saving.
    int minimumDistance(std::string word) {
        std::vector<int> saved(26, 0);
        int oneFingerCost = 0;
        int bestSaving = 0;
        const int len = word.size();
        for (int i = 0; i + 1 < len; ++i) {
            const int cur = word[i] - 'A';
            const int nxt = word[i + 1] - 'A';
            // Try moving the idle finger (previously parked on `idle`) to
            // type `nxt` instead of moving the active finger from `cur`.
            for (int idle = 0; idle < 26; ++idle) {
                saved[cur] = std::max(saved[cur],
                                      saved[idle] + d(cur, nxt) - d(idle, nxt));
            }
            bestSaving = std::max(bestSaving, saved[cur]);
            oneFingerCost += d(cur, nxt);
        }
        return oneFingerCost - bestSaving;
    }
    // Manhattan distance between letters a and b on the 6-wide grid.
    int d(int a, int b) {
        return std::abs(a / 6 - b / 6) + std::abs(a % 6 - b % 6);
    }
};
| [
"drlongle@gmail.com"
] | drlongle@gmail.com |
aeed529d373d38b1067ac18d73ec77e35a7f575b | 326ba98eabaa3a05ceb5e314dd1cc8de301fa19a | /ffscript/StaticContext.h | 860d9f91fed9ee0021adf635df4cade970142aad | [
"MIT"
] | permissive | heruix/ffscript | 4823e9afd3ce8eba104ec65d6e0ff2fbba74d8d5 | 1ee86fdaae8e188e4c99ab4cb0c878be919d7d47 | refs/heads/master | 2020-08-01T12:23:26.006286 | 2019-09-24T10:15:27 | 2019-09-24T10:15:27 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,164 | h | /******************************************************************
* File: StaticContext.h
* Description: declare StaticContext class. A type of context, but
* the memory of its is global shared memory and can be
* accessed any where, any time in the program.
* Author: Vincent Pham
*
* Copyright (c) 2018 VincentPT.
** Distributed under the MIT License (http://opensource.org/licenses/MIT)
**
*
**********************************************************************/
#pragma once
#include "Context.h"
#include <list>
#include <memory>
namespace ffscript {
	// A Context whose memory is globally shared: commands registered here can
	// be executed at any point in the program's lifetime (see file header).
	class StaticContext :
		public Context
	{
	protected:
		// Commands executed by run(), in registration order.
		std::list<CommandPointer> _globalCommands;
		// Cleanup commands executed by runDestructorCommands().
		std::list<CommandPointer> _destructorCommands;
		// Executes every command of the given list in sequence.
		void runCommands(const std::list<CommandPointer>& commands);
	public:
		// Wraps an externally owned buffer of bufferSize bytes.
		StaticContext(unsigned char* threadData, int bufferSize);
		// Allocates an internal buffer of bufferSize bytes.
		StaticContext(int bufferSize);
		virtual ~StaticContext();
		// Appends a command to be executed by run().
		void addCommand(CommandPointer command);
		// Appends a cleanup command to be executed by runDestructorCommands().
		void addDestructorCommand(CommandPointer command);
		virtual void run();
		virtual void runDestructorCommands();
	};
} | [
"minhpta@outlook.com"
] | minhpta@outlook.com |
ed5e039effcb86da8179069736626b794ce028c5 | a9b03f4730534da6e25e8a52ea0cd870db7c28b4 | /modules/perception/lib/test/lib_registerer_test.cc | 717da1f30cff1152f2b4d2bd28dfec849b2e77ac | [
"Apache-2.0"
] | permissive | zhuangli1987/apollo | 6bcf2ddebb23377e701ec9bf2b56c1ea4632bab9 | 2210bef1ef5aadfeccb48949c07b31de4e581b25 | refs/heads/master | 2020-03-28T00:31:50.529228 | 2018-09-25T01:29:51 | 2018-09-25T01:31:39 | 147,425,981 | 0 | 0 | null | 2018-09-04T22:11:18 | 2018-09-04T22:11:18 | null | UTF-8 | C++ | false | false | 2,491 | cc | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include "modules/perception/lib/registerer/registerer.h"
namespace apollo {
namespace perception {
namespace lib {
// Minimal polymorphic base used to exercise the registerer machinery.
class BaseClass {
 public:
  BaseClass() = default;
  // Virtual destructor: the registerer hands out BaseClass* pointing at
  // derived instances, so destruction through the base pointer must reach
  // the derived destructor.
  virtual ~BaseClass() = default;
  virtual std::string Name() const { return "BaseClass1"; }
};
PERCEPTION_REGISTER_REGISTERER(BaseClass);
#define PERCEPTION_REGISTER_TEST(name) \
PERCEPTION_REGISTER_CLASS(BaseClass, name)
class DerivedClass1 : BaseClass {
public:
DerivedClass1() = default;
~DerivedClass1() = default;
virtual std::string Name() const { return "DerivedClass1"; }
};
PERCEPTION_REGISTER_TEST(DerivedClass1);
TEST(RegistererTest, Test) {
  BaseClass* ptr = nullptr;
  // A registered name yields an instance of the derived type.
  ptr = BaseClassRegisterer::GetInstanceByName("DerivedClass1");
  ASSERT_TRUE(ptr != nullptr);
  EXPECT_EQ(ptr->Name(), "DerivedClass1");
  // An unregistered name yields null rather than throwing.
  ptr = BaseClassRegisterer::GetInstanceByName("NotExists");
  ASSERT_TRUE(ptr == nullptr);
  EXPECT_TRUE(BaseClassRegisterer::IsValid("DerivedClass1"));
  EXPECT_FALSE(BaseClassRegisterer::IsValid("NotExists"));
  // With exactly one registered class, the "uniq" accessors resolve to it.
  EXPECT_EQ(BaseClassRegisterer::GetUniqInstanceName(), "DerivedClass1");
  BaseClass* ptr1 = BaseClassRegisterer::GetUniqInstance();
  EXPECT_FALSE(ptr1 == nullptr);
  // Enumerating registered classes works for known bases and fails cleanly
  // for unknown ones.
  std::vector<std::string> derived_classes;
  EXPECT_TRUE(GetRegisteredClasses("BaseClass", &derived_classes));
  EXPECT_FALSE(GetRegisteredClasses("BaseClass2", &derived_classes));
  EXPECT_EQ(derived_classes.size(), 1u);
  EXPECT_EQ(derived_classes[0], "DerivedClass1");
  // Exercise the generated factory and the Any default state directly.
  ObjectFactoryDerivedClass1 obj_factory_drived1;
  obj_factory_drived1.NewInstance();
  Any any;
  EXPECT_EQ(any.content_, nullptr);
}
} // namespace lib
} // namespace perception
} // namespace apollo
| [
"ycool@users.noreply.github.com"
] | ycool@users.noreply.github.com |
5bc23f7119e94f001780f044911d776db2633250 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoMTD/DetLayers/src/MTDDiskSectorBuilderFromDet.cc | da8bc0caadb5fbf31fc1d8f3922017c7ae73107f | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | C++ | false | false | 3,824 | cc | //#define EDM_ML_DEBUG
#include "MTDDiskSectorBuilderFromDet.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "DataFormats/GeometryVector/interface/VectorUtil.h"
#include "DataFormats/GeometrySurface/interface/BoundingBox.h"
#include <iomanip>
using namespace std;
namespace {
  // Computes the bounding DiskSectorBounds for a group of dets together with
  // the sector's reference position (mid r/phi/z of the enclosing box).
  // The caller owns the returned DiskSectorBounds pointer.
  pair<DiskSectorBounds*, GlobalVector> computeBounds(const vector<const GeomDet*>& dets) {
    // go over all corners and compute maximum deviations
    float rmin(dets.front()->surface().position().perp());
    float rmax(rmin);
    float zmin(dets.front()->surface().position().z());
    float zmax(zmin);
    float phimin(dets.front()->surface().position().phi());
    float phimax(phimin);
    for (auto const& idet : dets) {
      vector<GlobalPoint> corners = BoundingBox().corners(idet->specificSurface());
      for (auto const& i : corners) {
        float r = i.perp();
        float z = i.z();
        float phi = i.phi();
        rmin = min(rmin, r);
        rmax = max(rmax, r);
        zmin = min(zmin, z);
        zmax = max(zmax, z);
        // phi extremes are tracked with the circular comparison phiLess so
        // sectors spanning the +/-pi boundary are handled.
        if (Geom::phiLess(phi, phimin))
          phimin = phi;
        if (Geom::phiLess(phimax, phi))
          phimax = phi;
      }
    }
    if (!Geom::phiLess(phimin, phimax))
      edm::LogError("MTDDetLayers") << " MTDDiskSectorBuilderFromDet : "
                                    << "Something went wrong with Phi Sorting !";
    float zPos = (zmax + zmin) / 2.;
    float phiWin = phimax - phimin;
    float phiPos = (phimax + phimin) / 2.;
    float rmed = (rmin + rmax) / 2.;
    // A negative window means the sector wraps around +/-pi: shift the
    // window and its center by a full turn / half turn respectively.
    if (phiWin < 0.) {
      if ((phimin < Geom::pi() / 2.) || (phimax > -Geom::pi() / 2.)) {
        edm::LogError("MTDDetLayers") << " something strange going on, please check " << phimin << " " << phimax << " "
                                      << phiWin;
      }
      phiWin += 2. * Geom::pi();
      phiPos += Geom::pi();
    }
    GlobalVector pos(rmed * cos(phiPos), rmed * sin(phiPos), zPos);
    LogTrace("MTDDetLayers") << "MTDDiskSectorBuilderFromDet::computeBounds sector at: " << std::fixed << pos << "\n"
                             << "zmin : " << std::setw(14) << zmin << "\n"
                             << "zmax : " << std::setw(14) << zmax << "\n"
                             << "rmin : " << std::setw(14) << rmin << "\n"
                             << "rmax : " << std::setw(14) << rmax << "\n"
                             << "phi ref : " << std::setw(14) << phiPos << "\n"
                             << "phi win : " << std::setw(14) << phiWin;
    // z limits are expressed relative to the sector's reference z.
    return make_pair(new DiskSectorBounds(rmin, rmax, zmin - zPos, zmax - zPos, phiWin), pos);
  }
Surface::RotationType computeRotation(const vector<const GeomDet*>& dets, const Surface::PositionType pos) {
GlobalVector yAxis = (GlobalVector(pos.x(), pos.y(), 0.)).unit();
GlobalVector zAxis(0., 0., 1.);
GlobalVector xAxis = yAxis.cross(zAxis);
return Surface::RotationType(xAxis, yAxis);
}
} // namespace
// Builds a BoundDiskSector that encloses all the given dets.  Logs an error
// (but still builds) if the dets are not at approximately the same z.
// The caller owns the returned pointer.
BoundDiskSector* MTDDiskSectorBuilderFromDet::operator()(const vector<const GeomDet*>& dets) const {
  // check that the dets are all at about the same z
  float zcheck = dets.front()->surface().position().z();
  constexpr double tol(0.5);  // minimal safety check on z position of modules within a sector, width ~ 10 mm
  for (auto const& idet : dets) {
    float zdiff = zcheck - (*idet).surface().position().z();
    if (std::abs(zdiff) > tol) {
      edm::LogError("MTDDetLayers")
          << " MTDDiskSectorBuilderFromDet: Trying to build sector from Dets at different z positions !! Delta_z = "
          << zdiff;
    }
  }
  // Bounds (and the sector reference position) come from the det corners.
  auto bo = computeBounds(dets);
  Surface::PositionType pos(bo.second.x(), bo.second.y(), bo.second.z());
  Surface::RotationType rot = computeRotation(dets, pos);
  return new BoundDiskSector(pos, rot, bo.first);
}
| [
"fabio.cossutti@ts.infn.it"
] | fabio.cossutti@ts.infn.it |
e41caa7f92cb18e9c4c002f838bc8ebf6e24e10d | 3edc478db837a27dbf8df7eded45df909dfe3cf9 | /CodeForces/455a.cpp | 93ea09ddcf467464c856ca79758f508c8c1caeca | [] | no_license | ronistone/Maratonas | b60ebeb9e7e9298399652df88faa83389bd94542 | 8bd0bedd476645081a09b19152a007ca1497fe20 | refs/heads/master | 2021-01-12T10:06:04.016208 | 2018-10-19T02:40:54 | 2018-10-19T02:40:54 | 76,360,276 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 514 | cpp | #include <bits/stdc++.h>
using namespace std;
typedef long long int ll;
int n;
ll dp[100010];
ll A[100010];
// Memoized "delete and earn" recurrence over values 0..current:
// best score = max(skip this value, take it and skip the adjacent value).
// dp[] must be initialized to -1 (unvisited) before the first call.
ll solve(int value){
    if (value < 0)
        return 0;
    ll &memo = dp[value];
    if (memo != -1)
        return memo;
    memo = max(solve(value - 1), solve(value - 2) + A[value]);
    return memo;
}
// Reads n values, buckets their sums by value (A[v] accumulates every
// occurrence of v), then runs the memoized recurrence up to the largest
// value seen.  Fix: `main` must have an explicit `int` return type —
// implicit int is ill-formed in C++.
int main(){
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    int aux, maior = -1;
    cin >> n;
    for (int i = 0; i < n; i++) {
        cin >> aux;
        A[aux] += aux;          // total score available from value `aux`
        maior = max(maior, aux);
    }
    memset(dp, -1, sizeof dp);  // mark all memo entries unvisited
    cout << solve(maior) << endl;
    return 0;
}
| [
"ronistonejunior@gmail.com"
] | ronistonejunior@gmail.com |
dc7c28353660e12f28d792e09d754839a3a6f30b | 62e1b92bdca09eb6e61bf5463ec4952c76b46f35 | /src/rpcmining.cpp | 5c3b0b9ef178f1f623d5dac51479eb62d7754b30 | [
"MIT"
] | permissive | zealdeal/zeal | 9406e63b73cb0460581bd07c168bfca8d83ea23a | 9e2049dc506d97d9d184826c8364206b0360f960 | refs/heads/master | 2020-04-02T15:17:25.854093 | 2018-10-29T22:44:27 | 2018-10-29T22:44:27 | 154,561,705 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 25,341 | cpp | // Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "rpcserver.h"
#include "chainparams.h"
#include "main.h"
#include "db.h"
#include "txdb.h"
#include "init.h"
#include "miner.h"
#include "kernel.h"
#include <boost/assign/list_of.hpp>
using namespace json_spirit;
using namespace std;
using namespace boost::assign;
// Key used by getwork/getblocktemplate miners.
// Allocated in InitRPCMining, free'd in ShutdownRPCMining
static CReserveKey* pMiningKey = NULL;
// Allocates the reserve key used to receive getwork/getblocktemplate
// rewards.  No-op when the wallet has not been created.
void InitRPCMining()
{
    if (!pwalletMain)
        return;
    // getwork/getblocktemplate mining rewards paid here:
    pMiningKey = new CReserveKey(pwalletMain);
}
// Releases the mining reserve key allocated by InitRPCMining (safe to call
// when it was never allocated).
void ShutdownRPCMining()
{
    if (!pMiningKey)
        return;
    delete pMiningKey; pMiningKey = NULL;
}
// RPC: getsubsidy [nTarget]
// Returns the proof-of-work block reward.  Note: the optional nTarget
// argument is accepted but not used — the reward is computed with fees = 0.
Value getsubsidy(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "getsubsidy [nTarget]\n"
            "Returns proof-of-work subsidy value for the specified value of target.");
    return (uint64_t)GetProofOfWorkReward(0);
}
// RPC: getstakesubsidy <hex string>
// Decodes a serialized coinstake transaction, computes its coin age against
// the current best block, and returns the proof-of-stake reward (fees = 0).
Value getstakesubsidy(const Array& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "getstakesubsidy <hex string>\n"
            "Returns proof-of-stake subsidy value for the specified coinstake.");
    RPCTypeCheck(params, list_of(str_type));
    vector<unsigned char> txData(ParseHex(params[0].get_str()));
    CDataStream ssData(txData, SER_NETWORK, PROTOCOL_VERSION);
    CTransaction tx;
    try {
        ssData >> tx;
    }
    catch (std::exception &e) {
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
    }
    uint64_t nCoinAge;
    CTxDB txdb("r");
    if (!tx.GetCoinAge(txdb, pindexBest, nCoinAge))
        throw JSONRPCError(RPC_MISC_ERROR, "GetCoinAge failed");
    return (uint64_t)GetProofOfStakeReward(pindexBest, nCoinAge, 0);
}
// RPC: getmininginfo
// Reports chain height, last block stats, PoW/PoS difficulty, network hash
// and stake rates, mempool size, and this wallet's stake weight.
Value getmininginfo(const Array& params, bool fHelp)
{
    if (fHelp || params.size() != 0)
        throw runtime_error(
            "getmininginfo\n"
            "Returns an object containing mining-related information.");
    uint64_t nWeight = 0;
    if (pwalletMain)
        nWeight = pwalletMain->GetStakeWeight();
    Object obj, diff, weight;
    obj.push_back(Pair("blocks", (int)nBestHeight));
    obj.push_back(Pair("currentblocksize",(uint64_t)nLastBlockSize));
    obj.push_back(Pair("currentblocktx",(uint64_t)nLastBlockTx));
    diff.push_back(Pair("proof-of-work", GetDifficulty()));
    diff.push_back(Pair("proof-of-stake", GetDifficulty(GetLastBlockIndex(pindexBest, true))));
    diff.push_back(Pair("search-interval", (int)nLastCoinStakeSearchInterval));
    obj.push_back(Pair("difficulty", diff));
    obj.push_back(Pair("blockvalue", (uint64_t)GetProofOfWorkReward(0)));
    obj.push_back(Pair("netmhashps", GetPoWMHashPS()));
    obj.push_back(Pair("netstakeweight", GetPoSKernelPS()));
    obj.push_back(Pair("errors", GetWarnings("statusbar")));
    obj.push_back(Pair("pooledtx", (uint64_t)mempool.size()));
    // "maximum" is reported as 0; min and combined both reflect the
    // wallet's current stake weight.
    weight.push_back(Pair("minimum", (uint64_t)nWeight));
    weight.push_back(Pair("maximum", (uint64_t)0));
    weight.push_back(Pair("combined", (uint64_t)nWeight));
    obj.push_back(Pair("stakeweight", weight));
    obj.push_back(Pair("stakeinterest", (uint64_t)COIN_YEAR_REWARD));
    obj.push_back(Pair("testnet", TestNet()));
    return obj;
}
// RPC: getstakinginfo
// Reports whether staking is enabled/active, the wallet and network stake
// weights, and the expected time to find the next stake at current rates.
Value getstakinginfo(const Array& params, bool fHelp)
{
    if (fHelp || params.size() != 0)
        throw runtime_error(
            "getstakinginfo\n"
            "Returns an object containing staking-related information.");
    uint64_t nWeight = 0;
    if (pwalletMain)
        nWeight = pwalletMain->GetStakeWeight();
    uint64_t nNetworkWeight = GetPoSKernelPS();
    // Actively staking only when a kernel search has run and we have weight.
    bool staking = nLastCoinStakeSearchInterval && nWeight;
    uint64_t nExpectedTime = staking ? (GetTargetSpacing(nBestHeight) * nNetworkWeight / nWeight) : 0;
    Object obj;
    obj.push_back(Pair("enabled", GetBoolArg("-staking", true)));
    obj.push_back(Pair("staking", staking));
    obj.push_back(Pair("errors", GetWarnings("statusbar")));
    obj.push_back(Pair("currentblocksize", (uint64_t)nLastBlockSize));
    obj.push_back(Pair("currentblocktx", (uint64_t)nLastBlockTx));
    obj.push_back(Pair("pooledtx", (uint64_t)mempool.size()));
    obj.push_back(Pair("difficulty", GetDifficulty(GetLastBlockIndex(pindexBest, true))));
    obj.push_back(Pair("search-interval", (int)nLastCoinStakeSearchInterval));
    obj.push_back(Pair("weight", (uint64_t)nWeight));
    obj.push_back(Pair("netstakeweight", (uint64_t)nNetworkWeight));
    obj.push_back(Pair("expectedtime", nExpectedTime));
    return obj;
}
// RPC: checkkernel [{"txid":txid,"vout":n},...] [createblocktemplate=false]
// Tests whether any of the given outpoints satisfies the current
// proof-of-stake kernel at the masked current time.  If one does and
// createblocktemplate is true, also returns a serialized block template
// (and its signing pubkey) built on the current tip.
Value checkkernel(const Array& params, bool fHelp)
{
    if (fHelp || params.size() < 1 || params.size() > 2)
        throw runtime_error(
            "checkkernel [{\"txid\":txid,\"vout\":n},...] [createblocktemplate=false]\n"
            "Check if one of given inputs is a kernel input at the moment.\n"
        );
    RPCTypeCheck(params, list_of(array_type)(bool_type));
    Array inputs = params[0].get_array();
    bool fCreateBlockTemplate = params.size() > 1 ? params[1].get_bool() : false;
    if (vNodes.empty())
        throw JSONRPCError(-9, "Zeal is not connected!");
    if (IsInitialBlockDownload())
        throw JSONRPCError(-10, "Zeal is downloading blocks...");
    COutPoint kernel;
    CBlockIndex* pindexPrev = pindexBest;
    unsigned int nBits = GetNextTargetRequired(pindexPrev, true);
    int64_t nTime = GetAdjustedTime();
    // Stake timestamps are granular: mask off the low bits before checking.
    nTime &= ~STAKE_TIMESTAMP_MASK;
    // Scan the supplied outpoints; stop at the first that passes CheckKernel.
    BOOST_FOREACH(Value& input, inputs)
    {
        const Object& o = input.get_obj();
        const Value& txid_v = find_value(o, "txid");
        if (txid_v.type() != str_type)
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing txid key");
        string txid = txid_v.get_str();
        if (!IsHex(txid))
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, expected hex txid");
        const Value& vout_v = find_value(o, "vout");
        if (vout_v.type() != int_type)
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing vout key");
        int nOutput = vout_v.get_int();
        if (nOutput < 0)
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, vout must be positive");
        COutPoint cInput(uint256(txid), nOutput);
        if (CheckKernel(pindexPrev, nBits, nTime, cInput))
        {
            kernel = cInput;
            break;
        }
    }
    Object result;
    result.push_back(Pair("found", !kernel.IsNull()));
    if (kernel.IsNull())
        return result;
    Object oKernel;
    oKernel.push_back(Pair("txid", kernel.hash.GetHex()));
    oKernel.push_back(Pair("vout", (int64_t)kernel.n));
    oKernel.push_back(Pair("time", nTime));
    result.push_back(Pair("kernel", oKernel));
    if (!fCreateBlockTemplate)
        return result;
    // Build a proof-of-stake block template on the tip, stamped with the
    // same masked time the kernel was checked against.
    int64_t nFees;
    auto_ptr<CBlock> pblock(CreateNewBlock(*pMiningKey, true, &nFees));
    pblock->nTime = pblock->vtx[0].nTime = nTime;
    CDataStream ss(SER_DISK, PROTOCOL_VERSION);
    ss << *pblock;
    result.push_back(Pair("blocktemplate", HexStr(ss.begin(), ss.end())));
    result.push_back(Pair("blocktemplatefees", nFees));
    CPubKey pubkey;
    if (!pMiningKey->GetReservedKey(pubkey))
        throw JSONRPCError(RPC_MISC_ERROR, "GetReservedKey failed");
    result.push_back(Pair("blocktemplatesignkey", HexStr(pubkey)));
    return result;
}
// RPC: getworkex [data, coinbase]
// Extended getwork: with no arguments, returns hash data plus the coinbase
// transaction and merkle branch so the miner can rebuild the coinbase;
// with [data, coinbase], checks the submitted solution.  Uses function-local
// static state to cache the current block template between calls.
Value getworkex(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 2)
        throw runtime_error(
            "getworkex [data, coinbase]\n"
            "If [data, coinbase] is not specified, returns extended work data.\n"
        );
    if (vNodes.empty())
        throw JSONRPCError(-9, "Zeal is not connected!");
    if (IsInitialBlockDownload())
        throw JSONRPCError(-10, "Zeal is downloading blocks...");
    if (pindexBest->nHeight >= Params().LastPOWBlock())
        throw JSONRPCError(RPC_MISC_ERROR, "No more PoW blocks");
    typedef map<uint256, pair<CBlock*, CScript> > mapNewBlock_t;
    static mapNewBlock_t mapNewBlock;
    static vector<CBlock*> vNewBlock;
    if (params.size() == 0)
    {
        // Update block
        static unsigned int nTransactionsUpdatedLast;
        static CBlockIndex* pindexPrev;
        static int64_t nStart;
        static CBlock* pblock;
        // Rebuild the template when the tip changed, or when the mempool
        // changed and the current template is older than 60 seconds.
        if (pindexPrev != pindexBest ||
            (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 60))
        {
            if (pindexPrev != pindexBest)
            {
                // Deallocate old blocks since they're obsolete now
                mapNewBlock.clear();
                BOOST_FOREACH(CBlock* pblock, vNewBlock)
                    delete pblock;
                vNewBlock.clear();
            }
            nTransactionsUpdatedLast = mempool.GetTransactionsUpdated();
            pindexPrev = pindexBest;
            nStart = GetTime();
            // Create new block
            pblock = CreateNewBlock(*pMiningKey);
            if (!pblock)
                throw JSONRPCError(-7, "Out of memory");
            vNewBlock.push_back(pblock);
        }
        // Update nTime
        pblock->nTime = max(pindexPrev->GetPastTimeLimit()+1, GetAdjustedTime());
        pblock->nNonce = 0;
        // Update nExtraNonce
        static unsigned int nExtraNonce = 0;
        IncrementExtraNonce(pblock, pindexPrev, nExtraNonce);
        // Save
        mapNewBlock[pblock->hashMerkleRoot] = make_pair(pblock, pblock->vtx[0].vin[0].scriptSig);
        // Prebuild hash buffers
        char pmidstate[32];
        char pdata[128];
        char phash1[64];
        FormatHashBuffers(pblock, pmidstate, pdata, phash1);
        uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
        CTransaction coinbaseTx = pblock->vtx[0];
        std::vector<uint256> merkle = pblock->GetMerkleBranch(0);
        Object result;
        result.push_back(Pair("data", HexStr(BEGIN(pdata), END(pdata))));
        result.push_back(Pair("target", HexStr(BEGIN(hashTarget), END(hashTarget))));
        CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
        ssTx << coinbaseTx;
        result.push_back(Pair("coinbase", HexStr(ssTx.begin(), ssTx.end())));
        Array merkle_arr;
        BOOST_FOREACH(uint256 merkleh, merkle) {
            merkle_arr.push_back(HexStr(BEGIN(merkleh), END(merkleh)));
        }
        result.push_back(Pair("merkle", merkle_arr));
        return result;
    }
    else
    {
        // Parse parameters
        vector<unsigned char> vchData = ParseHex(params[0].get_str());
        vector<unsigned char> coinbase;
        if(params.size() == 2)
            coinbase = ParseHex(params[1].get_str());
        if (vchData.size() != 128)
            throw JSONRPCError(-8, "Invalid parameter");
        CBlock* pdata = (CBlock*)&vchData[0];
        // Byte reverse
        for (int i = 0; i < 128/4; i++)
            ((unsigned int*)pdata)[i] = ByteReverse(((unsigned int*)pdata)[i]);
        // Get saved block
        if (!mapNewBlock.count(pdata->hashMerkleRoot))
            return false;
        CBlock* pblock = mapNewBlock[pdata->hashMerkleRoot].first;
        pblock->nTime = pdata->nTime;
        pblock->nNonce = pdata->nNonce;
        // With no coinbase supplied, restore the scriptSig saved when the
        // work was handed out; otherwise substitute the miner's coinbase.
        if(coinbase.size() == 0)
            pblock->vtx[0].vin[0].scriptSig = mapNewBlock[pdata->hashMerkleRoot].second;
        else
            CDataStream(coinbase, SER_NETWORK, PROTOCOL_VERSION) >> pblock->vtx[0]; // FIXME - HACK!
        pblock->hashMerkleRoot = pblock->BuildMerkleTree();
        assert(pwalletMain != NULL);
        return CheckWork(pblock, *pwalletMain, *pMiningKey);
    }
}
// RPC: getwork [data]
// Classic getwork protocol: with no arguments, hands out midstate/data/
// hash1/target for the current block template; with [data], checks the
// submitted solution against the cached template.  Uses function-local
// static state to cache templates between calls (not thread safe, see
// FIXME below).
Value getwork(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "getwork [data]\n"
            "If [data] is not specified, returns formatted hash data to work on:\n"
            "  \"midstate\" : precomputed hash state after hashing the first half of the data (DEPRECATED)\n" // deprecated
            "  \"data\" : block data\n"
            "  \"hash1\" : formatted hash buffer for second hash (DEPRECATED)\n" // deprecated
            "  \"target\" : little endian hash target\n"
            "If [data] is specified, tries to solve the block and returns true if it was successful.");
    if (vNodes.empty())
        throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Zeal is not connected!");
    if (IsInitialBlockDownload())
        throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Zeal is downloading blocks...");
    if (pindexBest->nHeight >= Params().LastPOWBlock())
        throw JSONRPCError(RPC_MISC_ERROR, "No more PoW blocks");
    typedef map<uint256, pair<CBlock*, CScript> > mapNewBlock_t;
    static mapNewBlock_t mapNewBlock;    // FIXME: thread safety
    static vector<CBlock*> vNewBlock;
    if (params.size() == 0)
    {
        // Update block
        static unsigned int nTransactionsUpdatedLast;
        static CBlockIndex* pindexPrev;
        static int64_t nStart;
        static CBlock* pblock;
        // Rebuild the template when the tip changed, or when the mempool
        // changed and the current template is older than 60 seconds.
        if (pindexPrev != pindexBest ||
            (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 60))
        {
            if (pindexPrev != pindexBest)
            {
                // Deallocate old blocks since they're obsolete now
                mapNewBlock.clear();
                BOOST_FOREACH(CBlock* pblock, vNewBlock)
                    delete pblock;
                vNewBlock.clear();
            }
            // Clear pindexPrev so future getworks make a new block, despite any failures from here on
            pindexPrev = NULL;
            // Store the pindexBest used before CreateNewBlock, to avoid races
            nTransactionsUpdatedLast = mempool.GetTransactionsUpdated();
            CBlockIndex* pindexPrevNew = pindexBest;
            nStart = GetTime();
            // Create new block
            pblock = CreateNewBlock(*pMiningKey);
            if (!pblock)
                throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
            vNewBlock.push_back(pblock);
            // Need to update only after we know CreateNewBlock succeeded
            pindexPrev = pindexPrevNew;
        }
        // Update nTime
        pblock->UpdateTime(pindexPrev);
        pblock->nNonce = 0;
        // Update nExtraNonce
        static unsigned int nExtraNonce = 0;
        IncrementExtraNonce(pblock, pindexPrev, nExtraNonce);
        // Save
        mapNewBlock[pblock->hashMerkleRoot] = make_pair(pblock, pblock->vtx[0].vin[0].scriptSig);
        // Pre-build hash buffers
        char pmidstate[32];
        char pdata[128];
        char phash1[64];
        FormatHashBuffers(pblock, pmidstate, pdata, phash1);
        uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
        Object result;
        result.push_back(Pair("midstate", HexStr(BEGIN(pmidstate), END(pmidstate)))); // deprecated
        result.push_back(Pair("data",     HexStr(BEGIN(pdata), END(pdata))));
        result.push_back(Pair("hash1",    HexStr(BEGIN(phash1), END(phash1)))); // deprecated
        result.push_back(Pair("target",   HexStr(BEGIN(hashTarget), END(hashTarget))));
        return result;
    }
    else
    {
        // Parse parameters
        vector<unsigned char> vchData = ParseHex(params[0].get_str());
        if (vchData.size() != 128)
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter");
        CBlock* pdata = (CBlock*)&vchData[0];
        // Byte reverse
        for (int i = 0; i < 128/4; i++)
            ((unsigned int*)pdata)[i] = ByteReverse(((unsigned int*)pdata)[i]);
        // Get saved block: the merkle root identifies which handed-out
        // template this solution belongs to.
        if (!mapNewBlock.count(pdata->hashMerkleRoot))
            return false;
        CBlock* pblock = mapNewBlock[pdata->hashMerkleRoot].first;
        pblock->nTime = pdata->nTime;
        pblock->nNonce = pdata->nNonce;
        pblock->vtx[0].vin[0].scriptSig = mapNewBlock[pdata->hashMerkleRoot].second;
        pblock->hashMerkleRoot = pblock->BuildMerkleTree();
        assert(pwalletMain != NULL);
        return CheckWork(pblock, *pwalletMain, *pMiningKey);
    }
}
// RPC: getblocktemplate [params]
// BIP 22 block-template server: returns everything an external miner needs
// to assemble the next proof-of-work block (transactions with fee/sigop/
// dependency info, coinbase constraints, target, time bounds).  Only the
// "template" mode is supported.  Caches the template in function-local
// statics and rebuilds when the tip or the mempool changes (5 s staleness).
Value getblocktemplate(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "getblocktemplate [params]\n"
            "Returns data needed to construct a block to work on:\n"
            "  \"version\" : block version\n"
            "  \"previousblockhash\" : hash of current highest block\n"
            "  \"transactions\" : contents of non-coinbase transactions that should be included in the next block\n"
            "  \"coinbaseaux\" : data that should be included in coinbase\n"
            "  \"coinbasevalue\" : maximum allowable input to coinbase transaction, including the generation award and transaction fees\n"
            "  \"target\" : hash target\n"
            "  \"mintime\" : minimum timestamp appropriate for next block\n"
            "  \"curtime\" : current timestamp\n"
            "  \"mutable\" : list of ways the block template may be changed\n"
            "  \"noncerange\" : range of valid nonces\n"
            "  \"sigoplimit\" : limit of sigops in blocks\n"
            "  \"sizelimit\" : limit of block size\n"
            "  \"bits\" : compressed target of next block\n"
            "  \"height\" : height of the next block\n"
            "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.");
    std::string strMode = "template";
    if (params.size() > 0)
    {
        const Object& oparam = params[0].get_obj();
        const Value& modeval = find_value(oparam, "mode");
        if (modeval.type() == str_type)
            strMode = modeval.get_str();
        else if (modeval.type() == null_type)
        {
            /* Do nothing */
        }
        else
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode");
    }
    if (strMode != "template")
        throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode");
    if (vNodes.empty())
        throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Zeal is not connected!");
    if (IsInitialBlockDownload())
        throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Zeal is downloading blocks...");
    if (pindexBest->nHeight >= Params().LastPOWBlock())
        throw JSONRPCError(RPC_MISC_ERROR, "No more PoW blocks");
    // Update block
    static unsigned int nTransactionsUpdatedLast;
    static CBlockIndex* pindexPrev;
    static int64_t nStart;
    static CBlock* pblock;
    if (pindexPrev != pindexBest ||
        (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5))
    {
        // Clear pindexPrev so future calls make a new block, despite any failures from here on
        pindexPrev = NULL;
        // Store the pindexBest used before CreateNewBlock, to avoid races
        nTransactionsUpdatedLast = mempool.GetTransactionsUpdated();
        CBlockIndex* pindexPrevNew = pindexBest;
        nStart = GetTime();
        // Create new block
        if(pblock)
        {
            delete pblock;
            pblock = NULL;
        }
        pblock = CreateNewBlock(*pMiningKey);
        if (!pblock)
            throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
        // Need to update only after we know CreateNewBlock succeeded
        pindexPrev = pindexPrevNew;
    }
    // Update nTime
    pblock->UpdateTime(pindexPrev);
    pblock->nNonce = 0;
    // Serialize every non-coinbase/non-coinstake transaction, recording its
    // fee, intra-template dependencies, and sigop count.
    Array transactions;
    map<uint256, int64_t> setTxIndex;
    int i = 0;
    CTxDB txdb("r");
    BOOST_FOREACH (CTransaction& tx, pblock->vtx)
    {
        uint256 txHash = tx.GetHash();
        setTxIndex[txHash] = i++;
        if (tx.IsCoinBase() || tx.IsCoinStake())
            continue;
        Object entry;
        CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
        ssTx << tx;
        entry.push_back(Pair("data", HexStr(ssTx.begin(), ssTx.end())));
        entry.push_back(Pair("hash", txHash.GetHex()));
        MapPrevTx mapInputs;
        map<uint256, CTxIndex> mapUnused;
        bool fInvalid = false;
        if (tx.FetchInputs(txdb, mapUnused, false, false, mapInputs, fInvalid))
        {
            entry.push_back(Pair("fee", (int64_t)(tx.GetValueIn(mapInputs) - tx.GetValueOut())));
            // "depends" lists the template indices of in-template parents.
            Array deps;
            BOOST_FOREACH (MapPrevTx::value_type& inp, mapInputs)
            {
                if (setTxIndex.count(inp.first))
                    deps.push_back(setTxIndex[inp.first]);
            }
            entry.push_back(Pair("depends", deps));
            int64_t nSigOps = GetLegacySigOpCount(tx);
            nSigOps += GetP2SHSigOpCount(tx, mapInputs);
            entry.push_back(Pair("sigops", nSigOps));
        }
        transactions.push_back(entry);
    }
    Object aux;
    aux.push_back(Pair("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end())));
    uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
    static Array aMutable;
    if (aMutable.empty())
    {
        aMutable.push_back("time");
        aMutable.push_back("transactions");
        aMutable.push_back("prevblock");
    }
    Object result;
    result.push_back(Pair("version", pblock->nVersion));
    result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex()));
    result.push_back(Pair("transactions", transactions));
    result.push_back(Pair("coinbaseaux", aux));
    result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0].vout[0].nValue));
    result.push_back(Pair("target", hashTarget.GetHex()));
    result.push_back(Pair("mintime", (int64_t)pindexPrev->GetPastTimeLimit()+1));
    result.push_back(Pair("mutable", aMutable));
    result.push_back(Pair("noncerange", "00000000ffffffff"));
    result.push_back(Pair("sigoplimit", (int64_t)MAX_BLOCK_SIGOPS));
    result.push_back(Pair("sizelimit", (int64_t)MAX_BLOCK_SIZE));
    result.push_back(Pair("curtime", (int64_t)pblock->nTime));
    result.push_back(Pair("bits", strprintf("%08x", pblock->nBits)));
    result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1)));
    return result;
}
// RPC handler: decode a hex-serialized block and try to accept it into the
// chain (BIP 22 "submitblock").  Returns JSON null on success or the string
// "rejected" when ProcessBlock refuses the block.
Value submitblock(const Array& params, bool fHelp)
{
    if (fHelp || params.size() < 1 || params.size() > 2)
        throw runtime_error(
            "submitblock <hex data> [optional-params-obj]\n"
            "[optional-params-obj] parameter is currently ignored.\n"
            "Attempts to submit new block to network.\n"
            "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.");
    // Deserialize the block from the hex-encoded first parameter.
    vector<unsigned char> blockData(ParseHex(params[0].get_str()));
    CDataStream ssBlock(blockData, SER_NETWORK, PROTOCOL_VERSION);
    CBlock block;
    try {
        ssBlock >> block;
    }
    catch (std::exception &e) {
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed");
    }
    // Non-standard proof-of-stake extension (contrary to the help text, the
    // second parameter is NOT ignored here): it may carry a hex-encoded
    // coinstake transaction, which is spliced in as vtx[1]; the merkle root
    // is then rebuilt and the block re-signed with the wallet's mining key.
    if (params.size() > 1)
    {
        const Object& oparam = params[1].get_obj();
        const Value& coinstake_v = find_value(oparam, "coinstake");
        if (coinstake_v.type() == str_type)
        {
            vector<unsigned char> txData(ParseHex(coinstake_v.get_str()));
            CDataStream ssTx(txData, SER_NETWORK, PROTOCOL_VERSION);
            CTransaction txCoinStake;
            try {
                ssTx >> txCoinStake;
            }
            catch (std::exception &e) {
                throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Coinstake decode failed");
            }
            // Coinstake conventionally sits right after the coinbase (vtx[0]).
            block.vtx.insert(block.vtx.begin() + 1, txCoinStake);
            block.hashMerkleRoot = block.BuildMerkleTree();
            CPubKey pubkey;
            if (!pMiningKey->GetReservedKey(pubkey))
                throw JSONRPCError(RPC_MISC_ERROR, "GetReservedKey failed");
            CKey key;
            if (!pwalletMain->GetKey(pubkey.GetID(), key))
                throw JSONRPCError(RPC_MISC_ERROR, "GetKey failed");
            // Sign the updated block hash; vchBlockSig is the PoS block signature.
            if (!key.Sign(block.GetHash(), block.vchBlockSig))
                throw JSONRPCError(RPC_MISC_ERROR, "Sign failed");
        }
    }
    // Hand the block to the main validation path; NULL = not received from a peer.
    bool fAccepted = ProcessBlock(NULL, &block);
    if (!fAccepted)
        return "rejected";
    return Value::null;
}
// RPC handler: switch built-in mining ("generation") on or off and optionally
// cap the number of mining processors.  Always returns JSON null.
Value setgenerate(const Array& params, bool fHelp)
{
    if (fHelp || params.size() < 1 || params.size() > 2)
        throw runtime_error(
            "setgenerate <generate> [genproclimit]\n"
            "<generate> is true or false to turn generation on or off.\n"
            "Generation is limited to [genproclimit] processors, -1 is unlimited.");

    bool fGenerate = true;
    int nGenProcLimit = 1;

    if (!params.empty())
        fGenerate = params[0].get_bool();

    if (params.size() >= 2)
    {
        nGenProcLimit = params[1].get_int();
        mapArgs["-genproclimit"] = itostr(nGenProcLimit);
        // A processor limit of zero is equivalent to turning mining off.
        if (nGenProcLimit == 0)
            fGenerate = false;
    }

    // Persist the switch in the in-memory argument map, then (re)start or
    // stop the miner threads accordingly.
    mapArgs["-gen"] = fGenerate ? "1" : "0";
    GenerateBitcoins(fGenerate, pwalletMain, nGenProcLimit);
    return Value::null;
}
| [
"root@Razvoj-Rasa.localdomain"
] | root@Razvoj-Rasa.localdomain |
b4de7ff718a6b765d7eaad0c35c99b6e8ca2200f | a6b46ceba9753a519a5db131df26ca41b5f31ac3 | /No3/No3/main.cpp | ca6f9d855d511e74f23ebc3e25cd18f544f653ab | [] | no_license | YugoTakagi/yukicoder | 697d1414a6b2a26aebedabcdfa6846720e1e0232 | a8563806b4d22747d0caf8f34220f70b6a748e70 | refs/heads/master | 2020-06-08T06:27:44.214856 | 2019-06-22T01:29:03 | 2019-06-22T01:29:03 | 193,177,419 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,256 | cpp | //
// main.cpp
// No3
//
// Created by 高城友豪 on 2019/06/21.
// Copyright © 2019 高城友豪. All rights reserved.
//
#include <bitset>
#include <iostream>
#include <queue>
#include <vector>
// yukicoder No.3: breadth-first search over squares 1..N.  From square v you
// may move to v + popcount(v) or v - popcount(v) while staying inside [1, N].
// Prints the minimum number of visited squares (counting both square 1 and
// square N) needed to reach N, or -1 when N is unreachable.
//
// Fix over the previous version: square 1 was both pushed onto the queue and
// used directly as the first processed state without popping, so it was
// expanded twice; the BFS is now a conventional pop-then-expand loop.

// BFS helper: returns the 1-based path length from square 1 to square n,
// or -1 if n cannot be reached.  steps[v] == 0 marks "not yet visited".
static int shortestRouteLength(int n) {
    std::vector<int> steps(n + 1, 0);
    std::queue<int> pending;
    steps[1] = 1;
    pending.push(1);
    while (!pending.empty()) {
        const int cur = pending.front();
        pending.pop();
        if (cur == n) {
            return steps[cur];
        }
        // The number of set bits in the current square decides the step size.
        const int stride = static_cast<int>(std::bitset<32>(cur).count());
        const int forward = cur + stride;
        const int backward = cur - stride;
        if (forward <= n && steps[forward] == 0) {
            steps[forward] = steps[cur] + 1;
            pending.push(forward);
        }
        if (backward >= 1 && steps[backward] == 0) {
            steps[backward] = steps[cur] + 1;
            pending.push(backward);
        }
    }
    return -1;  // queue exhausted without ever reaching square n
}

int main(int argc, const char * argv[]) {
    int N;
    std::cin >> N;
    std::cout << shortestRouteLength(N) << std::endl;
    return 0;
}
// std::bitset<8>bit = std::bitset<8>(i);
// std::cout << std::bitset<8>(8).count() << std::endl;
// std::cout << std::bitset<8>(i) << std::endl;
// std::cout << bit.count() << std::endl;
// state = q.front();
| [
"a217048@ns.kogakuin.ac.jp"
] | a217048@ns.kogakuin.ac.jp |
58970eff345c450ea0f0fca5bbec7fb5a1fb68f6 | 2591ae43a809173d223b78207c7638d80d839962 | /GFG/searching/1_linear.cpp | 9eec098dd26b44436e3b2e4bcebd7068e3e745a7 | [] | no_license | DeveshDutt2710/Competitive_Programming | 82fc97efcae65b23738d7f4e4ecc880e43bf975c | 78a6251e74c8b261c94ebf79408e7c36f9331838 | refs/heads/master | 2023-07-30T16:05:54.659186 | 2021-09-22T17:44:08 | 2021-09-22T17:44:08 | 300,497,546 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,404 | cpp | #include <iostream>
#include <bits/stdc++.h>
using namespace std;
// Classic linear scan: return the index of the first occurrence of x in
// arr[0..n-1], or -1 when x is absent.
int search(int arr[], int n, int x)
{
    for (int idx = 0; idx < n; ++idx)
    {
        if (arr[idx] == x)
            return idx;
    }
    return -1;
}
// Linear search scanning from both ends at once, so the loop runs at most
// ceil(n/2) iterations.  Prints where (and in how many "attempts") the
// element was found and returns its index, or -1 when it is absent.
//
// Fixes over the previous version:
//  * the middle element of an odd-length vector was never examined
//    (the loop ran only while left < right), so e.g. 3 in {1,2,3,4,5}
//    was reported as not found;
//  * the function was declared to return int but had no return statement
//    on any path, which is undefined behaviour in C++.
int searchOptimised(std::vector<int> arr, int search_Element)
{
    const int length = static_cast<int>(arr.size());
    int position = -1;
    int left = 0;
    int right = length - 1;
    while (left <= right)
    {
        if (arr[left] == search_Element)
        {
            position = left;
            std::cout << search_Element << " found at : " << position + 1
                      << " in " << left + 1 << " Attempt" << std::endl;
        }
        // Guard right != left so the meeting element is not reported twice.
        if (right != left && arr[right] == search_Element)
        {
            position = right;
            std::cout << search_Element << " found at position : " << position + 1
                      << " in " << length - right << " Attempt" << std::endl;
        }
        left++;
        right--;
    }
    if (position == -1)
    {
        std::cout << search_Element << " not found in the array" << std::endl;
    }
    return position;
}
// Demo driver: run both linear-search variants over the same data set and
// report where the element was found.
int main()
{
    const int search_element = 5;

    int arr1[] = { 1, 2, 3, 4, 5 };
    const int n = sizeof(arr1) / sizeof(arr1[0]);
    const int result = search(arr1, n, search_element);
    if (result == -1)
        cout << "Element is not present in array" << endl;
    else
        cout << "Element is found at position " << result + 1
             << " in " << result + 1 << "th Attempt" << endl;

    vector<int> arr{ 1, 2, 3, 4, 5 };
    searchOptimised(arr, search_element);
    return 0;
}
"dd123.ues2017@gmail.com"
] | dd123.ues2017@gmail.com |
da18895ca675ce4517c85436f3ca7a98e20de1d2 | c181f915220d2f182e237cebddceffc20052930c | /Classes/MapSprite/MapSprite10.h | a306b51e55da4d027519315a1cd570c01ae6222f | [] | no_license | sharezer/HC | cf46003b19f497c259198002fe319aca0101728c | bb03a5c8458eed03c82942a9ec7a1d2dfc4a3dc8 | refs/heads/master | 2021-01-21T01:49:32.989854 | 2019-10-23T08:07:29 | 2019-10-23T08:07:29 | 15,626,010 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 146 | h | #pragma once
#include "MapSprite.h"
// Map sprite variant #10: a concrete CMapSprite subclass.
// Declarations only; the behaviour lives in the matching .cpp file.
class CMapSprite10 : public CMapSprite
{
public:
	// Plays this sprite's initial action (presumably its entry
	// animation/behaviour - confirm against the .cpp implementation).
	void firstAction();
public:
	// Two-phase initialisation hook; assumed to return false when setup
	// fails and to override/extend a CMapSprite virtual - confirm in base.
	virtual bool initSprite();
};
"liangshaoze@sina.cn"
] | liangshaoze@sina.cn |
5fb7eecdea4c86ecaf93e503fd22ffacb30f94c5 | c301c81f7560125e130a9eb67f5231b3d08a9d67 | /lc/lc/2021_target/companies/amazon/lc_347_top_k_frequent_elements.cpp | 8038df016945bf7d029bb55ad5925f83014fff73 | [] | no_license | vikashkumarjha/missionpeace | f55f593b52754c9681e6c32d46337e5e4b2d5f8b | 7d5db52486c55b48fe761e0616d550439584f199 | refs/heads/master | 2021-07-11T07:34:08.789819 | 2021-07-06T04:25:18 | 2021-07-06T04:25:18 | 241,745,271 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,823 | cpp | /*
Given a non-empty array of integers, return the k most frequent elements.
Example 1:
Input: nums = [1,1,1,2,2,3], k = 2
Output: [1,2]
Example 2:
Input: nums = [1], k = 1
Output: [1]
*/
#include "header.hpp"
// LeetCode 347 - Top K Frequent Elements (max-heap variant).
// Returns the k most frequent values in nums: occurrences are counted with a
// hash map, then a max-heap keyed on (frequency, value) is drained k times.
// Time: O(n log n) worst case; space: O(n).
class Solution {
public:
    std::vector<int> topKFrequent(std::vector<int>& nums, int k) {
        std::unordered_map<int, int> freq;
        for (const int value : nums)
            ++freq[value];

        // Pair ordering compares frequency first, so the most frequent
        // (ties broken by larger value) pops first.
        std::priority_queue<std::pair<int, int>> heap;
        for (const auto& entry : freq)
            heap.push(std::make_pair(entry.second, entry.first));

        std::vector<int> result;
        result.reserve(k);
        while (!heap.empty() && static_cast<int>(result.size()) < k) {
            result.push_back(heap.top().second);
            heap.pop();
        }
        return result;
    }
};
// LeetCode 347 - Top K Frequent Elements (bucket-sort variant).
// Counts occurrences, then drops each distinct value into bucket[frequency];
// walking the buckets from the highest frequency down yields the answer in
// O(n) time and O(n) space.
//
// Fix: the parameter was declared `in k` (typo), which does not compile.
// NOTE(review): this file defines a second `class Solution` above with the
// same method - only one of the two may be kept in a translation unit.
class Solution
{
public:
    std::vector<int> topKFrequent(std::vector<int>& nums, int k)
    {
        std::unordered_map<int, int> frequency;
        for (int n : nums)
            frequency[n]++;

        // bucket[f] lists every value occurring exactly f times; a value can
        // appear at most nums.size() times, hence size() + 1 buckets.
        std::vector<std::list<int>> bucket(nums.size() + 1, std::list<int>{});
        for (auto it = frequency.begin(); it != frequency.end(); ++it)
            bucket[it->second].push_back(it->first);

        int count = 0;
        std::vector<int> result;
        for (int j = static_cast<int>(bucket.size()) - 1; j >= 0 && count < k; j--)
        {
            for (auto it = bucket[j].begin(); it != bucket[j].end(); ++it)
            {
                ++count;
                result.push_back(*it);
                if (count == k)
                    break;
            }
        }
        return result;
    }
};
"vjha2100@gmail.com"
] | vjha2100@gmail.com |
fb3489c6263909cf91ccb78abc9a8e927d6a1ca5 | eec2eb8d09c05ae3ef92ff1ff7c43d8e0ee67784 | /rfc/threads/KThread.h | f4167f898b63e0e1fa583328672a0c43e6ef5d30 | [
"MIT"
] | permissive | camark/RFC-Framework | 6dc9c6668f2d01c6648f99c1b86913c2b257ad51 | e82b51be20750f47ab73bb2176bd0344d3f32b57 | refs/heads/master | 2020-03-24T16:48:20.644669 | 2018-07-19T03:05:26 | 2018-07-19T03:05:26 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,308 | h |
/*
RFC - KThread.h
Copyright (C) 2013-2018 CrownSoft
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef _RFC_KTHREAD_H_
#define _RFC_KTHREAD_H_
#include "../config.h"
#include <windows.h>
#include "../containers/KLeakDetector.h"
/**
	Encapsulates a Win32 worker thread.

	Subclasses derive from KThread and implement the Run() method, in which they
	do their business. The thread can then be started with the StartThread() method
	and controlled with various other methods.

	A Run() method implementation might look like this:
	@code
		virtual void Run()
		{
			while(!threadShouldStop)
			{
				// your code goes here...
			}
			isThreadRunning=false;
		}
	@endcode
*/
class RFC_API KThread
{
protected:
	HANDLE handle;                    // underlying Win32 thread handle
	volatile bool isThreadRunning;    // set to false by Run() when it exits (see class example)
	volatile bool threadShouldStop;   // cooperative stop flag polled by Run()

public:
	KThread();

	/**
		Sets the Win32 thread handle.
	*/
	virtual void SetHandle(HANDLE handle);

	/**
		Returns the Win32 handle of the thread.
	*/
	virtual HANDLE GetHandle();

	/**
		Thread entry point. Override this method in your subclass: poll
		threadShouldStop and set isThreadRunning to false before returning.
	*/
	virtual void Run();

	/**
		Starts the thread.
		@returns success flag (assumed: false when the thread could not be
		created - confirm in the implementation file).
	*/
	virtual bool StartThread();

	/**
		Another thread calls this to request a cooperative stop; Run() is
		expected to honour the threadShouldStop flag.
	*/
	virtual void ThreadShouldStop();

	/**
		@returns true if the thread is still running.
	*/
	virtual bool IsThreadRunning();

	/**
		Blocks the calling thread until this thread finishes.
	*/
	virtual void WaitUntilThreadFinish();

	/**
		Suspends the calling thread for the given number of microseconds.
	*/
	static void uSleep(int waitTime);

	virtual ~KThread();

private:
	RFC_LEAK_DETECTOR(KThread)
};
#endif | [
"ruchira66@gmail.com"
] | ruchira66@gmail.com |
8d1e847d5ce2fa845d6a3c65d5ebe4f93e0d7c78 | 814fd0bea5bc063a4e34ebdd0a5597c9ff67532b | /components/password_manager/core/browser/password_manager_url_collection_experiment.h | 3ba8f4171e55264f8f3f75a722a133a0e39a6ef1 | [
"BSD-3-Clause"
] | permissive | rzr/chromium-crosswalk | 1b22208ff556d69c009ad292bc17dca3fe15c493 | d391344809adf7b4f39764ac0e15c378169b805f | refs/heads/master | 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 | NOASSERTION | 2019-08-07T21:59:20 | 2015-07-10T15:35:50 | C++ | UTF-8 | C++ | false | false | 2,293 | h | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_PASSWORD_MANAGER_CORE_BROWSER_PASSWORD_MANAGER_URL_COLLECTION_EXPERIMENT_H_
#define COMPONENTS_PASSWORD_MANAGER_CORE_BROWSER_PASSWORD_MANAGER_URL_COLLECTION_EXPERIMENT_H_
#include "base/time/time.h"
namespace user_prefs {
class PrefRegistrySyncable;
}

class PrefService;

// These functions implement the algorithms according to which the "Allow to
// collect URL?" bubble is shown to the user.
namespace password_manager {

namespace urls_collection_experiment {

// Registers the preferences used by this experiment.
void RegisterPrefs(user_prefs::PrefRegistrySyncable* registry);

// Implements an algorithm determining when the period starts, in which "Allow
// to collect URL?" bubble can be shown.
base::Time DetermineStartOfActivityPeriod(PrefService* prefs,
int experiment_length_in_days);

// Based on |prefs| and experiment settings, decides whether to show the
// "Allow to collect URL?" bubble and should be called before showing it.
// The default value is false.
bool ShouldShowBubble(PrefService* prefs);

// Should be called when the "Allow to collect URL?" bubble was shown.
// It stores the fact that the bubble was shown in |prefs|.
void RecordBubbleOpened(PrefService* prefs);

// The name of the finch experiment controlling the algorithm.
extern const char kExperimentName[];

// The name of the experiment parameter whose value determines how long the
// experiment is active.
extern const char kParamExperimentLengthInDays[];

// The bubble is shown only once and only within a certain period. The length
// of the period is the value of the experiment parameter
// |kParamActivePeriodInDays|.
extern const char kParamActivePeriodInDays[];

// The name of the experiment parameter whose value defines whether
// the bubble should appear or not.
extern const char kParamBubbleStatus[];

// The value of the experiment parameter when the bubble should appear.
extern const char kParamBubbleStatusValueWhenShouldShow[];

} // namespace urls_collection_experiment

} // namespace password_manager
#endif // COMPONENTS_PASSWORD_MANAGER_CORE_BROWSER_PASSWORD_MANAGER_URL_COLLECTION_EXPERIMENT_H_
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.