| repo (string, 1–152 chars, nullable ⌀) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
| null | pytorch-main/caffe2/utils/threadpool/ThreadPoolCommon.h |
#ifndef CAFFE2_UTILS_THREADPOOL_COMMON_H_
#define CAFFE2_UTILS_THREADPOOL_COMMON_H_
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
// caffe2 depends upon NNPACK, which depends upon this threadpool, so
// unfortunately we can't reference core/common.h here
// This is copied from core/common.h's definition of C10_MOBILE
// Defined when building for iOS or Android devices
#if defined(__ANDROID__)
#define C10_ANDROID 1
#elif (defined(__APPLE__) && \
(TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE))
#define C10_IOS 1
#endif // ANDROID / IOS
#endif // CAFFE2_UTILS_THREADPOOL_COMMON_H_
| 677 | 31.285714 | 76 | h |
| null | pytorch-main/caffe2/utils/threadpool/WorkersPool.h |
#pragma once
#include <atomic>
#include <condition_variable>
#include <thread>
#include "c10/util/thread_name.h"
#include <c10/util/irange.h>
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#if defined(_MSC_VER)
#include <intrin.h>
#endif
namespace caffe2 {
// Uses code derived from gemmlowp,
// https://github.com/google/gemmlowp/blob/6c91e1ed0c2eff1182d804310b92911fe9c18019/internal/multi_thread_gemm.h
// Changes:
// - allocation-free execute()
// - Use RAII where possible.
// - Run the first task on the main thread (since that is the largest task).
// - removed custom allocator.
// - Removed some ifdef's
// - cache-line align Worker.
// - use std::atomic instead of volatile and custom barriers.
// - use std::mutex/std::condition_variable instead of raw pthreads.
constexpr size_t kGEMMLOWPCacheLineSize = 64;
template <typename T>
struct AllocAligned {
// Allocate a T aligned at an `align` byte address
template <typename... Args>
static T* alloc(Args&&... args) {
void* p = nullptr;
#if defined(__ANDROID__)
p = memalign(kGEMMLOWPCacheLineSize, sizeof(T));
#elif defined(_MSC_VER)
p = _aligned_malloc(sizeof(T), kGEMMLOWPCacheLineSize);
#else
posix_memalign((void**)&p, kGEMMLOWPCacheLineSize, sizeof(T));
#endif
if (p) {
return new (p) T(std::forward<Args>(args)...);
}
return nullptr;
}
// Free a T previously allocated via AllocAligned<T>::alloc()
static void release(T* p) {
if (p) {
p->~T();
#if defined(_MSC_VER)
_aligned_free((void*)p);
#else
free((void*)p);
#endif
}
}
};
// Deleter object for unique_ptr for an aligned object
template <typename T>
struct AlignedDeleter {
void operator()(T* p) const { AllocAligned<T>::release(p); }
};
// make_unique that guarantees alignment
template <typename T>
struct MakeAligned {
template <typename... Args>
static std::unique_ptr<T, AlignedDeleter<T>> make(Args&&... args) {
return std::unique_ptr<T, AlignedDeleter<T>>(
AllocAligned<T>::alloc(std::forward<Args>(args)...));
}
};
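// Usage sketch (editorial note, not part of the original header): MakeAligned
// pairs the aligned allocation with the matching deleter, so a cache-line
// aligned object can be held in a unique_ptr. BlockingCounter and Worker are
// declared later in this file.
//
//   BlockingCounter counter;
//   auto worker = MakeAligned<Worker>::make(&counter);  // 64-byte aligned
//   // worker is a std::unique_ptr<Worker, AlignedDeleter<Worker>>; on reset
//   // it runs ~Worker() and the platform-specific aligned free.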
const int kMaxBusyWaitNOPs = 32 * 1000 * 1000;
#if defined(_MSC_VER)
#define GEMMLOWP_NOP __nop();
#else
#define GEMMLOWP_NOP "nop\n"
#endif
#define GEMMLOWP_STRING_CONCAT_4(X) X X X X
#define GEMMLOWP_NOP4 GEMMLOWP_STRING_CONCAT_4(GEMMLOWP_NOP)
#define GEMMLOWP_NOP16 GEMMLOWP_STRING_CONCAT_4(GEMMLOWP_NOP4)
#define GEMMLOWP_NOP64 GEMMLOWP_STRING_CONCAT_4(GEMMLOWP_NOP16)
inline int Do256NOPs() {
#if defined(_MSC_VER)
GEMMLOWP_NOP64;
#else
asm volatile(GEMMLOWP_NOP64);
#endif
return 64;
}
#undef GEMMLOWP_STRING_CONCAT_4
#undef GEMMLOWP_NOP256
#undef GEMMLOWP_NOP64
#undef GEMMLOWP_NOP16
#undef GEMMLOWP_NOP4
#undef GEMMLOWP_NOP
// Waits until *var != initial_value.
//
// Returns the new value of *var. The guarantee here is that
// the return value is different from initial_value, and that that
// new value has been taken by *var at some point during the
// execution of this function. There is no guarantee that this is
// still the value of *var when this function returns, since *var is
// not assumed to be guarded by any lock.
//
// First does some busy-waiting for a fixed number of no-op cycles,
// then falls back to passive waiting for the given condvar, guarded
// by the given mutex.
//
// The idea of doing some initial busy-waiting is to help get
// better and more consistent multithreading benefits for small GEMM sizes.
// Busy-waiting helps ensure that if we need to wake up soon after having
// started waiting, then we can wake up quickly (as opposed to, say,
// having to wait to be scheduled again by the OS). On the other hand,
// we must still eventually revert to passive waiting for longer waits
// (e.g. worker threads having finished a GEMM and waiting until the next GEMM)
// so as to avoid permanently spinning.
//
template <typename T>
T WaitForVariableChange(std::atomic<T>* var,
T initial_value,
std::condition_variable* cond,
std::mutex* mutex) {
// If we are on a platform that supports it, spin for some time.
{
int nops = 0;
// First, trivial case where the variable already changed value.
T new_value = var->load(std::memory_order_relaxed);
if (new_value != initial_value) {
std::atomic_thread_fence(std::memory_order_acquire);
return new_value;
}
// Then try busy-waiting.
while (nops < kMaxBusyWaitNOPs) {
nops += Do256NOPs();
new_value = var->load(std::memory_order_relaxed);
if (new_value != initial_value) {
std::atomic_thread_fence(std::memory_order_acquire);
return new_value;
}
}
}
// Finally, do real passive waiting.
{
std::unique_lock<std::mutex> g(*mutex);
T new_value = var->load(std::memory_order_relaxed);
// Handle spurious wakeups.
cond->wait(g, [&]() {
new_value = var->load(std::memory_order_relaxed);
return new_value != initial_value;
});
TORCH_DCHECK_NE(static_cast<size_t>(new_value), static_cast<size_t>(initial_value));
return new_value;
}
}
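// Usage sketch (editorial note, illustrative only): one thread flips a flag,
// another hybrid-waits on it. All names below are local to the example.
//
//   std::atomic<int> flag{0};
//   std::condition_variable cv;
//   std::mutex m;
//   std::thread setter([&] {
//     { std::lock_guard<std::mutex> g(m); flag.store(1, std::memory_order_release); }
//     cv.notify_one();
//   });
//   int seen = WaitForVariableChange(&flag, 0, &cv, &m);  // returns 1
//   setter.join();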
// A BlockingCounter lets one thread wait for N events to occur.
// This is how the master thread waits for all the worker threads
// to have finished working.
class BlockingCounter {
public:
// Sets/resets the counter; initial_count is the number of
// decrementing events that the Wait() call will be waiting for.
void Reset(std::size_t initial_count) {
std::lock_guard<std::mutex> g(mutex_);
TORCH_DCHECK_EQ(count_, 0);
count_ = initial_count;
}
// Decrements the counter; if the counter hits zero, signals
// the thread that was waiting for that, and returns true.
// Otherwise (if the decremented count is still nonzero),
// returns false.
bool DecrementCount() {
const auto count_value = count_.fetch_sub(1, std::memory_order_relaxed) - 1;
TORCH_DCHECK_GE(count_value, 0);
if (count_value == 0) {
std::lock_guard<std::mutex> g(mutex_);
cond_.notify_one();
}
bool retval = count_value == 0;
return retval;
}
// Waits for the N other threads (N having been set by Reset())
// to hit the BlockingCounter.
void Wait() {
while (size_t count_value = count_.load(std::memory_order_relaxed)) {
WaitForVariableChange(&count_, count_value, &cond_, &mutex_);
}
}
private:
std::condition_variable cond_;
std::mutex mutex_;
std::atomic<std::size_t> count_{0};
};
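// Usage sketch (editorial note, illustrative only): the master resets the
// counter to the number of pending events, and each worker decrements it once.
//
//   BlockingCounter done;
//   done.Reset(2);
//   std::thread t1([&] { done.DecrementCount(); });
//   std::thread t2([&] { done.DecrementCount(); });
//   done.Wait();  // returns once both decrements have landed
//   t1.join();
//   t2.join();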
// A workload for a worker.
struct Task {
Task() = default;
virtual ~Task() = default;
virtual void Run() = 0;
};
// A worker thread.
class alignas(kGEMMLOWPCacheLineSize) Worker {
public:
enum class State : uint8_t {
ThreadStartup, // The initial state before the thread main loop runs.
Ready, // Is not working, has not yet received new work to do.
HasWork, // Has work to do.
ExitAsSoonAsPossible // Should exit at earliest convenience.
};
explicit Worker(BlockingCounter* counter_to_decrement_when_ready)
: task_(nullptr),
state_(State::ThreadStartup),
counter_to_decrement_when_ready_(counter_to_decrement_when_ready) {
thread_ = std::make_unique<std::thread>([this]() { this->ThreadFunc(); });
}
~Worker() {
ChangeState(State::ExitAsSoonAsPossible);
thread_->join();
}
// Changes State; may be called from either the worker thread
// or the master thread; however, not all state transitions are legal,
// which is guarded by assertions.
void ChangeState(State new_state) {
std::lock_guard<std::mutex> g(state_mutex_);
DCHECK(new_state != state_.load(std::memory_order_relaxed));
switch (state_.load(std::memory_order_relaxed)) {
case State::ThreadStartup:
DCHECK(new_state == State::Ready);
break;
case State::Ready:
DCHECK(new_state == State::HasWork || new_state == State::ExitAsSoonAsPossible);
break;
case State::HasWork:
DCHECK(new_state == State::Ready || new_state == State::ExitAsSoonAsPossible);
break;
default:
abort();
}
state_.store(new_state, std::memory_order_relaxed);
state_cond_.notify_one();
if (new_state == State::Ready) {
counter_to_decrement_when_ready_->DecrementCount();
}
}
// Thread entry point.
void ThreadFunc() {
c10::setThreadName("CaffeWorkersPool");
ChangeState(State::Ready);
// Thread main loop
while (true) {
// Get a state to act on
// In the 'Ready' state, we have nothing to do but to wait until
// we switch to another state.
State state_to_act_upon =
WaitForVariableChange(&state_, State::Ready, &state_cond_, &state_mutex_);
// We now have a state to act on, so act.
switch (state_to_act_upon) {
case State::HasWork:
// Got work to do! So do it, and then revert to 'Ready' state.
DCHECK(task_.load());
(*task_).Run();
task_ = nullptr;
ChangeState(State::Ready);
break;
case State::ExitAsSoonAsPossible:
return;
default:
abort();
}
}
}
static void* ThreadFunc(void* arg) {
static_cast<Worker*>(arg)->ThreadFunc();
return nullptr;
}
// Called by the master thread to give this worker work to do.
// It is only legal to call this if the worker is currently in the 'Ready' state.
void StartWork(Task* task) {
DCHECK(!task_.load());
task_ = task;
DCHECK(state_.load(std::memory_order_acquire) == State::Ready);
ChangeState(State::HasWork);
}
private:
// The underlying thread.
std::unique_ptr<std::thread> thread_;
// The task to be worked on.
std::atomic<Task*> task_;
// The condition variable and mutex guarding state changes.
std::condition_variable state_cond_;
std::mutex state_mutex_;
// The state enum tells if we're currently working, waiting for work, etc.
std::atomic<State> state_;
// pointer to the master's thread BlockingCounter object, to notify the
// master thread of when this worker switches to the 'Ready' state.
BlockingCounter* const counter_to_decrement_when_ready_;
};
class WorkersPool {
public:
WorkersPool() = default;
void Execute(const std::vector<std::shared_ptr<Task>>& tasks) {
CAFFE_ENFORCE_GE(tasks.size(), 1);
// One of the tasks will be run on the current thread.
int workers_count = tasks.size() - 1;
CreateWorkers(workers_count);
TORCH_DCHECK_LE(workers_count, (int)workers_.size());
counter_to_decrement_when_ready_.Reset(workers_count);
for (const auto task : c10::irange(1, tasks.size())) {
workers_[task - 1]->StartWork(tasks[task].get());
}
// Execute the remaining workload immediately on the current thread.
auto& task = tasks.front();
task->Run();
// Wait for the workers submitted above to finish.
counter_to_decrement_when_ready_.Wait();
}
private:
// Ensures that the pool has at least the given count of workers.
// If any new worker has to be created, this function waits for it to
// be ready.
void CreateWorkers(std::size_t workers_count) {
if (workers_.size() >= workers_count) {
return;
}
counter_to_decrement_when_ready_.Reset(workers_count - workers_.size());
while (workers_.size() < workers_count) {
workers_.push_back(MakeAligned<Worker>::make(&counter_to_decrement_when_ready_));
}
counter_to_decrement_when_ready_.Wait();
}
C10_DISABLE_COPY_AND_ASSIGN(WorkersPool);
std::vector<std::unique_ptr<Worker, AlignedDeleter<Worker>>> workers_;
// The BlockingCounter used to wait for the workers.
BlockingCounter counter_to_decrement_when_ready_;
};
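// Usage sketch (editorial note, illustrative only): Execute() runs the first
// task on the calling thread and the rest on pool workers.
//
//   struct Fill : Task {
//     explicit Fill(int* slot) : slot_(slot) {}
//     void Run() override { *slot_ = 1; }
//     int* slot_;
//   };
//   int out[3] = {0, 0, 0};
//   WorkersPool pool;
//   pool.Execute({std::make_shared<Fill>(&out[0]),
//                 std::make_shared<Fill>(&out[1]),
//                 std::make_shared<Fill>(&out[2])});
//   // out is now {1, 1, 1}; tasks[0] ran on the calling thread.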
} // namespace caffe2
| 11,631 | 30.101604 | 112 | h |
| null | pytorch-main/caffe2/utils/threadpool/pthreadpool-cpp.h |
#pragma once
#ifdef USE_PTHREADPOOL
#ifdef USE_INTERNAL_PTHREADPOOL_IMPL
#include <caffe2/utils/threadpool/pthreadpool.h>
#else
#include <pthreadpool.h>
#endif
#include <functional>
#include <memory>
#include <mutex>
namespace caffe2 {
class PThreadPool final {
public:
explicit PThreadPool(size_t thread_count);
~PThreadPool() = default;
PThreadPool(const PThreadPool&) = delete;
PThreadPool& operator=(const PThreadPool&) = delete;
PThreadPool(PThreadPool&&) = delete;
PThreadPool& operator=(PThreadPool&&) = delete;
size_t get_thread_count() const;
void set_thread_count(size_t thread_count);
// Run, in parallel, function fn(task_id) over task_id in range [0, range).
// This function is blocking. All input is processed by the time it returns.
void run(const std::function<void(size_t)>& fn, size_t range);
private:
friend pthreadpool_t pthreadpool_();
private:
mutable std::mutex mutex_;
std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool_;
};
// Return a singleton instance of PThreadPool for ATen/TH multithreading.
PThreadPool* pthreadpool();
// Exposes the underlying implementation of PThreadPool.
// Only for use in external libraries so as to unify threading across
// internal (i.e. ATen, etc.) and external (e.g. NNPACK, QNNPACK, XNNPACK)
// use cases.
pthreadpool_t pthreadpool_();
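// Usage sketch (editorial note, illustrative only): parallelize an
// element-wise update over an existing buffer.
//
//   std::vector<float> v(1024, 1.0f);
//   caffe2::pthreadpool()->run([&](size_t i) { v[i] *= 2.0f; }, v.size());
//   // run() blocks; every index in [0, v.size()) has been processed on return.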
} // namespace caffe2
#endif /* USE_PTHREADPOOL */
| 1,417 | 24.781818 | 79 | h |
| null | pytorch-main/caffe2/utils/threadpool/pthreadpool.h |
// pthreadpool header from https://github.com/Maratyszcza/pthreadpool
// for NNPACK
#ifndef CAFFE2_UTILS_PTHREADPOOL_H_
#define CAFFE2_UTILS_PTHREADPOOL_H_
#include "ThreadPoolCommon.h"
#include <stddef.h> // for size_t
#include <stdint.h> // for uint32_t
#if defined(USE_PTHREADPOOL)
// This is a hack, introduced mainly because:
// 1. NNPACK can be compiled to use the internal legacy threadpool implementation, because much of C2 depends on that.
// 2. If we then want to use NNPACK in PyTorch, which uses the new pthreadpool, we will supply the new pthreadpool
//    pointer to NNPACK. This will not work if NNPACK is compiled with the internal legacy threadpool. This guard,
//    along with changes in pthreadpool_impl.cc, allows us to override that behavior.
// It enables us to use NNPACK from PyTorch via `caffe2::pthreadpool_()`.
namespace caffe2 {
class WithCastToNewThreadPool {
public:
explicit WithCastToNewThreadPool(bool use_new_threadpool);
~WithCastToNewThreadPool();
private:
bool use_new_threadpool_;
};
}
#endif
typedef struct pthreadpool* legacy_pthreadpool_t;
typedef void (*legacy_pthreadpool_function_1d_t)(void*, size_t);
typedef void (*legacy_pthreadpool_function_1d_tiled_t)(void*, size_t, size_t);
typedef void (*legacy_pthreadpool_function_2d_t)(void*, size_t, size_t);
typedef void (*legacy_pthreadpool_function_2d_tiled_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*legacy_pthreadpool_function_3d_tiled_t)(
void*,
size_t,
size_t,
size_t,
size_t,
size_t,
size_t);
typedef void (*legacy_pthreadpool_function_4d_tiled_t)(
void*,
size_t,
size_t,
size_t,
size_t,
size_t,
size_t,
size_t,
size_t);
#ifdef __cplusplus
extern "C" {
#endif
/**
* Creates a thread pool with the specified number of threads.
*
* @param[in] threads_count The number of threads in the thread pool.
* A value of 0 has special interpretation: it creates a thread for each
* processor core available in the system.
*
* @returns A pointer to an opaque thread pool object.
* On error the function returns NULL and sets errno accordingly.
*/
// Returns internal threadpool impl.
legacy_pthreadpool_t legacy_pthreadpool_create(size_t threads_count);
/**
* Queries the number of threads in a thread pool.
*
* @param[in] threadpool The thread pool to query.
*
* @returns The number of threads in the thread pool.
*/
size_t legacy_pthreadpool_get_threads_count(legacy_pthreadpool_t threadpool);
/**
* Processes items in parallel using threads from a thread pool.
*
* When the call returns, all items have been processed and the thread pool is
* ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param[in] threadpool The thread pool to use for parallelisation.
* @param[in] function The function to call for each item.
* @param[in] argument The first argument passed to the @a function.
* @param[in] items The number of items to process. The @a function
* will be called once for each item.
*/
void legacy_pthreadpool_compute_1d(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_1d_t function,
void* argument,
size_t range);
void legacy_pthreadpool_parallelize_1d(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_1d_t function,
void* argument,
size_t range,
uint32_t flags);
void legacy_pthreadpool_compute_1d_tiled(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_1d_tiled_t function,
void* argument,
size_t range,
size_t tile);
void legacy_pthreadpool_compute_2d(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_2d_t function,
void* argument,
size_t range_i,
size_t range_j);
void legacy_pthreadpool_compute_2d_tiled(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_2d_tiled_t function,
void* argument,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j);
void legacy_pthreadpool_compute_3d_tiled(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_3d_tiled_t function,
void* argument,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_i,
size_t tile_j,
size_t tile_k);
void legacy_pthreadpool_compute_4d_tiled(
legacy_pthreadpool_t threadpool,
legacy_pthreadpool_function_4d_tiled_t function,
void* argument,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_i,
size_t tile_j,
size_t tile_k,
size_t tile_l);
/**
* Terminates threads in the thread pool and releases associated resources.
*
* @warning Accessing the thread pool after a call to this function constitutes
* undefined behaviour and may cause data corruption.
*
* @param[in,out] threadpool The thread pool to destroy.
*/
void legacy_pthreadpool_destroy(legacy_pthreadpool_t threadpool);
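/*
 * Usage sketch (editorial note, illustrative only): double each element of a
 * buffer with the legacy C API declared above.
 *
 *   static void double_item(void* ctx, size_t i) {
 *     ((float*) ctx)[i] *= 2.0f;
 *   }
 *
 *   legacy_pthreadpool_t pool = legacy_pthreadpool_create(0);  // one thread per core
 *   float data[256] = {0};
 *   legacy_pthreadpool_compute_1d(pool, double_item, data, 256);
 *   legacy_pthreadpool_destroy(pool);
 */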
#ifdef USE_INTERNAL_PTHREADPOOL_IMPL
#define pthreadpool_t legacy_pthreadpool_t
#define pthreadpool_function_1d_t legacy_pthreadpool_function_1d_t
#define pthreadpool_function_1d_tiled_t legacy_pthreadpool_function_1d_tiled_t
#define pthreadpool_function_2d_t legacy_pthreadpool_function_2d_t
#define pthreadpool_function_2d_tiled_t legacy_pthreadpool_function_2d_tiled_t
#define pthreadpool_function_3d_tiled_t legacy_pthreadpool_function_3d_tiled_t
#define pthreadpool_function_4d_tiled_t legacy_pthreadpool_function_4d_tiled_t
#define pthreadpool_create legacy_pthreadpool_create
#define pthreadpool_destroy legacy_pthreadpool_destroy
#define pthreadpool_get_threads_count legacy_pthreadpool_get_threads_count
#define pthreadpool_compute_1d legacy_pthreadpool_compute_1d
#define pthreadpool_parallelize_1d legacy_pthreadpool_parallelize_1d
#define pthreadpool_compute_1d_tiled legacy_pthreadpool_compute_1d_tiled
#define pthreadpool_compute_2d legacy_pthreadpool_compute_2d
#define pthreadpool_compute_2d_tiled legacy_pthreadpool_compute_2d_tiled
#define pthreadpool_compute_3d_tiled legacy_pthreadpool_compute_3d_tiled
#define pthreadpool_compute_4d_tiled legacy_pthreadpool_compute_4d_tiled
#endif /* USE_INTERNAL_PTHREADPOOL_IMPL */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif // CAFFE2_UTILS_PTHREADPOOL_H_
| 6,344 | 31.706186 | 119 | h |
| null | pytorch-main/caffe2/utils/threadpool/thread_pool_guard.h |
#pragma once
#include <c10/macros/Macros.h>
namespace caffe2 {
// A RAII, thread local (!) guard that disables use of the pthreadpool upon
// construction, and restores the previous setting upon destruction.
struct TORCH_API _NoPThreadPoolGuard {
static bool is_enabled();
static void set_enabled(bool enabled);
_NoPThreadPoolGuard(): prev_mode_(_NoPThreadPoolGuard::is_enabled()) {
_NoPThreadPoolGuard::set_enabled(true);
}
~_NoPThreadPoolGuard() {
_NoPThreadPoolGuard::set_enabled(prev_mode_);
}
private:
bool prev_mode_;
};
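// Usage sketch (editorial note, illustrative only):
//
//   {
//     caffe2::_NoPThreadPoolGuard guard;
//     // pthreadpool-backed parallelism is disabled on this thread here
//   }
//   // previous setting restored when the guard goes out of scope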
}
| 567 | 22.666667 | 73 | h |
| null | pytorch-main/caffe2/video/optical_flow.h |
#ifndef CAFFE2_VIDEO_OPTICAL_FLOW_H_
#define CAFFE2_VIDEO_OPTICAL_FLOW_H_
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
#include <caffe2/core/logging.h>
namespace caffe2 {
// Four different types of optical flow algorithms are supported;
// BroxOpticalFlow doesn't have a CPU version;
// DensePyrLKOpticalFlow only has a sparse CPU version;
enum FLowAlgType {
FarnebackOpticalFlow = 0,
DensePyrLKOpticalFlow = 1,
BroxOpticalFlow = 2,
OpticalFlowDual_TVL1 = 3,
};
// Defines the different types of optical flow data:
// 0: original two channel optical flow
// 1: three channel optical flow with magnitude as the third channel
// 2: two channel optical flow + one channel gray
// 3: two channel optical flow + three channel rgb
enum FlowDataType {
Flow2C = 0,
Flow3C = 1,
FlowWithGray = 2,
FlowWithRGB = 3,
};
void OpticalFlowExtractor(
const cv::Mat& prev_gray,
const cv::Mat& curr_gray,
const int optical_flow_alg_type,
cv::Mat& flow);
void MergeOpticalFlow(cv::Mat& prev_flow, const cv::Mat& curr_flow);
void MultiFrameOpticalFlowExtractor(
const std::vector<cv::Mat>& grays,
const int optical_flow_alg_type,
cv::Mat& flow);
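// Usage sketch (editorial note, illustrative only): prev_gray and curr_gray
// are assumed to be preexisting single-channel 8-bit grayscale cv::Mat frames.
//
//   cv::Mat flow;
//   OpticalFlowExtractor(prev_gray, curr_gray, FLowAlgType::FarnebackOpticalFlow, flow);
//   // flow now holds the two-channel (dx, dy) field between the two frames.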
} // namespace caffe2
#endif // CAFFE2_VIDEO_OPTICAL_FLOW_H_
| 1,306 | 24.627451 | 68 | h |
| null | pytorch-main/caffe2/video/video_decoder.h |
#ifndef CAFFE2_VIDEO_VIDEO_DECODER_H_
#define CAFFE2_VIDEO_VIDEO_DECODER_H_
#include <caffe2/core/logging.h>
#include <stdio.h>
#include <memory>
#include <string>
#include <vector>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/log.h>
#include <libavutil/motion_vector.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}
namespace caffe2 {
#define VIO_BUFFER_SZ 32768
#define MAX_DECODING_FRAMES 10000
// enum to specify 3 special fps sampling behaviors:
// 0: disable fps sampling, no frame sampled at all
// -1: unlimited fps sampling, will sample at native video fps
// -2: disable fps sampling, but will get the frame at specific timestamp
enum SpecialFps {
SAMPLE_NO_FRAME = 0,
SAMPLE_ALL_FRAMES = -1,
SAMPLE_TIMESTAMP_ONLY = -2,
};
// three different types of resolution when decoding the video
// 0: resize to width x height and ignore the aspect ratio;
// 1: resize to short_edge and keep the aspect ratio;
// 2: using the original resolution of the video; if resolution
// is smaller than crop_size x crop_size, resize to crop_size
// and keep the aspect ratio;
// 3: for xray video service
enum VideoResType {
USE_WIDTH_HEIGHT = 0,
USE_SHORT_EDGE = 1,
ORIGINAL_RES = 2,
};
// three different types of decoding behavior are supported
// 0: do temporal jittering to sample a random clip from the video
// 1: uniformly sample multiple clips from the video;
// 2: sample a clip from a given starting frame
// 3: for xray video service
enum DecodeType {
DO_TMP_JITTER = 0,
DO_UNIFORM_SMP = 1,
USE_START_FRM = 2,
};
// sampling interval for fps starting at the specified timestamp
// use enum SpecialFps to set special fps decoding behavior
// note that the sampled fps will not always accurately follow the target fps,
// because each sampled frame has to snap to an actual frame timestamp:
// e.g. with video fps = 25 (one frame every 0.04s), sample fps = 4 asks for a
// frame every 0.25s, which falls between frame timestamps, so sampling snaps
// up to the next frame at 0.28s; sample fps = 5 should hit 0.20s exactly, but
// 1 / 5.0 is slightly above 0.2 in floating point, so it snaps up to 0.24s
struct SampleInterval {
double timestamp;
double fps;
SampleInterval() : timestamp(-1), fps(SpecialFps::SAMPLE_ALL_FRAMES) {}
SampleInterval(double ts, double f) : timestamp(ts), fps(f) {}
bool operator<(const SampleInterval& itvl) const {
return (timestamp < itvl.timestamp);
}
};
class Params {
public:
// return all key-frames regardless of specified fps
bool keyFrames_ = false;
// return audio data while decoding the video
bool getAudio_ = false;
// for sampling audio data
int outrate_ = 22000;
int outfmt_ = AV_SAMPLE_FMT_FLT;
int64_t outlayout_ = AV_CH_LAYOUT_MONO;
// Output image pixel format
AVPixelFormat pixelFormat_ = AVPixelFormat::AV_PIX_FMT_RGB24;
// Index of stream to decode.
// -1 will automatically decode the first video stream.
int streamIndex_ = -1;
// How many frames to output at most from the video
// -1 no limit
int maximumOutputFrames_ = -1;
// params for video resolution
int video_res_type_ = VideoResType::USE_WIDTH_HEIGHT;
int crop_size_ = -1;
int short_edge_ = -1;
// Output video size, -1 to preserve original dimension
int outputWidth_ = -1;
int outputHeight_ = -1;
// max output dimension, -1 to preserve original size
// the larger dimension of the video will be scaled to this size,
// and the second dimension will be scaled to preserve aspect ratio
int maxOutputDimension_ = -1;
// params for decoding behavior
int decode_type_ = DecodeType::DO_TMP_JITTER;
int num_of_required_frame_ = -1;
// intervals_ control variable sampling fps between different timestamps
// intervals_ must be ordered strictly ascending by timestamps
// the first interval must have a timestamp of zero
// fps must be either the 3 special fps defined in SpecialFps, or > 0
std::vector<SampleInterval> intervals_ = {{0, SpecialFps::SAMPLE_ALL_FRAMES}};
Params() {}
/**
* FPS of output frames
* Setting this resets intervals_ and forces decoding at the target FPS.
* This can be used if the user just wants to decode at a steady fps.
*/
Params& fps(float v) {
intervals_.clear();
intervals_.emplace_back(0, v);
return *this;
}
/**
* Sample output frames at a specified list of timestamps
* Timestamps must be in increasing order, and timestamps past the end of the
* video will be ignored
* Setting here will reset intervals_
*/
Params& setSampleTimestamps(const std::vector<double>& timestamps) {
intervals_.clear();
// insert an interval per desired frame.
for (auto& timestamp : timestamps) {
intervals_.emplace_back(timestamp, SpecialFps::SAMPLE_TIMESTAMP_ONLY);
}
return *this;
}
/**
* Pixel format of output buffer, default PIX_FMT_RGB24
*/
Params& pixelFormat(AVPixelFormat pixelFormat) {
pixelFormat_ = pixelFormat;
return *this;
}
/**
* Return all key-frames
*/
Params& keyFrames(bool keyFrames) {
keyFrames_ = keyFrames;
return *this;
}
/**
* Index of video stream to process, defaults to the first video stream
*/
Params& streamIndex(int index) {
streamIndex_ = index;
return *this;
}
/**
* Only output this many frames, default to no limit
*/
Params& maxOutputFrames(int count) {
maximumOutputFrames_ = count;
return *this;
}
/**
* Output frame width, default to video width
*/
Params& outputWidth(int width) {
outputWidth_ = width;
return *this;
}
/**
* Output frame height, default to video height
*/
Params& outputHeight(int height) {
outputHeight_ = height;
return *this;
}
/**
* Max dimension of either width or height; if either is bigger,
* it will be scaled down to this, and the second dimension
* will be scaled down to maintain the aspect ratio.
*/
Params& maxOutputDimension(int size) {
maxOutputDimension_ = size;
return *this;
}
};
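// Usage sketch (editorial note, illustrative only): the setters return
// Params& so decoding options can be chained.
//
//   Params params;
//   params.fps(4)                 // steady 4 fps sampling
//       .maxOutputFrames(64)
//       .outputWidth(171)
//       .outputHeight(128);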
// data structure for storing decoded video frames
class DecodedFrame {
public:
struct avDeleter {
void operator()(unsigned char* p) const {
av_free(p);
}
};
using AvDataPtr = std::unique_ptr<uint8_t, avDeleter>;
// decoded data buffer
AvDataPtr data_;
// size in bytes
int size_ = 0;
// frame dimensions
int width_ = 0;
int height_ = 0;
// timestamp in seconds since beginning of video
double timestamp_ = 0;
// true if this is a key frame.
bool keyFrame_ = false;
// index of frame in video
int index_ = -1;
// Sequential number of outputted frame
int outputFrameIndex_ = -1;
};
// data structure for storing decoded audio data
struct DecodedAudio {
int dataSize_;
int outSampleSize_;
std::unique_ptr<float[]> audio_data_;
explicit DecodedAudio(
int dataSize = 0,
int outSampleSize = 0,
std::unique_ptr<float[]> audio_data = nullptr)
: dataSize_(dataSize),
outSampleSize_(outSampleSize),
audio_data_(std::move(audio_data)) {}
};
class VideoIOContext {
public:
explicit VideoIOContext(const std::string& fname)
: workBuffersize_(VIO_BUFFER_SZ),
workBuffer_((uint8_t*)av_malloc(workBuffersize_)),
inputFile_(nullptr),
inputBuffer_(nullptr),
inputBufferSize_(0) {
inputFile_ = fopen(fname.c_str(), "rb");
if (inputFile_ == nullptr) {
LOG(ERROR) << "Error opening video file " << fname;
return;
}
ctx_ = avio_alloc_context(
static_cast<unsigned char*>(workBuffer_.get()),
workBuffersize_,
0,
this,
&VideoIOContext::readFile,
nullptr, // no write function
&VideoIOContext::seekFile);
}
explicit VideoIOContext(const char* buffer, int size)
: workBuffersize_(VIO_BUFFER_SZ),
workBuffer_((uint8_t*)av_malloc(workBuffersize_)),
inputFile_(nullptr),
inputBuffer_(buffer),
inputBufferSize_(size) {
ctx_ = avio_alloc_context(
static_cast<unsigned char*>(workBuffer_.get()),
workBuffersize_,
0,
this,
&VideoIOContext::readMemory,
nullptr, // no write function
&VideoIOContext::seekMemory);
}
~VideoIOContext() {
av_free(ctx_);
if (inputFile_) {
fclose(inputFile_);
}
}
int read(unsigned char* buf, int buf_size) {
if (inputBuffer_) {
return readMemory(this, buf, buf_size);
} else if (inputFile_) {
return readFile(this, buf, buf_size);
} else {
return -1;
}
}
int64_t seek(int64_t offset, int whence) {
if (inputBuffer_) {
return seekMemory(this, offset, whence);
} else if (inputFile_) {
return seekFile(this, offset, whence);
} else {
return -1;
}
}
static int readFile(void* opaque, unsigned char* buf, int buf_size) {
VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
if (feof(h->inputFile_)) {
return AVERROR_EOF;
}
size_t ret = fread(buf, 1, buf_size, h->inputFile_);
if (ret < buf_size) {
if (ferror(h->inputFile_)) {
return -1;
}
}
return ret;
}
static int64_t seekFile(void* opaque, int64_t offset, int whence) {
VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
switch (whence) {
case SEEK_CUR: // from current position
case SEEK_END: // from eof
case SEEK_SET: // from beginning of file
return fseek(h->inputFile_, static_cast<long>(offset), whence);
break;
case AVSEEK_SIZE:
int64_t cur = ftell(h->inputFile_);
fseek(h->inputFile_, 0L, SEEK_END);
int64_t size = ftell(h->inputFile_);
fseek(h->inputFile_, cur, SEEK_SET);
return size;
}
return -1;
}
static int readMemory(void* opaque, unsigned char* buf, int buf_size) {
VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
if (buf_size < 0) {
return -1;
}
int reminder = h->inputBufferSize_ - h->offset_;
int r = buf_size < reminder ? buf_size : reminder;
if (r < 0) {
return AVERROR_EOF;
}
memcpy(buf, h->inputBuffer_ + h->offset_, r);
h->offset_ += r;
return r;
}
static int64_t seekMemory(void* opaque, int64_t offset, int whence) {
VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
switch (whence) {
case SEEK_CUR: // from current position
h->offset_ += offset;
break;
case SEEK_END: // from eof
h->offset_ = h->inputBufferSize_ + offset;
break;
case SEEK_SET: // from beginning of file
h->offset_ = offset;
break;
case AVSEEK_SIZE:
return h->inputBufferSize_;
}
return h->offset_;
}
AVIOContext* get_avio() {
return ctx_;
}
private:
int workBuffersize_;
DecodedFrame::AvDataPtr workBuffer_;
// for file mode
FILE* inputFile_;
// for memory mode
const char* inputBuffer_;
int inputBufferSize_;
int offset_ = 0;
AVIOContext* ctx_;
};
struct VideoMeta {
double fps;
int width;
int height;
enum AVMediaType codec_type;
AVPixelFormat pixFormat;
VideoMeta()
: fps(-1),
width(-1),
height(-1),
codec_type(AVMEDIA_TYPE_VIDEO),
pixFormat(AVPixelFormat::AV_PIX_FMT_RGB24) {}
};
class Callback {
public:
virtual void frameDecoded(std::unique_ptr<DecodedFrame> img) = 0;
virtual void audioDecoded(
std::unique_ptr<DecodedAudio> /*decoded audio data*/) {}
virtual void videoDecodingStarted(const VideoMeta& /*videoMeta*/) {}
virtual void videoDecodingEnded(double /*lastFrameTimestamp*/) {}
virtual ~Callback() {}
};
class VideoDecoder {
public:
VideoDecoder();
void decodeFile(
const std::string& filename,
const Params& params,
const int start_frm,
Callback& callback);
void decodeMemory(
const std::string& filename,
const char* buffer,
const int size,
const Params& params,
const int start_frm,
Callback& callback);
private:
std::string ffmpegErrorStr(int result);
void ResizeAndKeepAspectRatio(
const int origWidth,
const int origHeight,
const int short_edge,
const int long_edge,
int& outWidth,
int& outHeight);
void getAudioSample(
AVPacket& packet,
AVCodecContext* audioCodecContext_,
AVFrame* audioStreamFrame_,
SwrContext* convertCtx_,
Callback& callback,
const Params& params);
void decodeLoop(
const std::string& videoName,
VideoIOContext& ioctx,
const Params& params,
const int start_frm,
Callback& callback);
};
TORCH_API void FreeDecodedData(
std::vector<std::unique_ptr<DecodedFrame>>& sampledFrames,
std::vector<std::unique_ptr<DecodedAudio>>& sampledAudio);
TORCH_API bool DecodeMultipleClipsFromVideo(
const char* video_buffer,
const std::string& video_filename,
const int encoded_size,
const Params& params,
const int start_frm,
const int clip_per_video,
const std::vector<int>& clip_start_positions,
const bool use_local_file,
int& height,
int& width,
std::vector<unsigned char*>& buffer_rgb);
class CallbackImpl : public Callback {
public:
std::vector<std::unique_ptr<DecodedFrame>> frames;
std::vector<std::unique_ptr<DecodedAudio>> audio_samples;
explicit CallbackImpl() {
clear();
}
void clear() {
FreeDecodedData(frames, audio_samples);
}
void frameDecoded(std::unique_ptr<DecodedFrame> frame) override {
frames.push_back(std::move(frame));
}
void audioDecoded(std::unique_ptr<DecodedAudio> audio_sample) override {
audio_samples.push_back(std::move(audio_sample));
}
void videoDecodingStarted(const VideoMeta& /*videoMeta*/) override {
clear();
}
};
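// Usage sketch (editorial note, illustrative only): decode a file through the
// CallbackImpl defined above. The file path is hypothetical.
//
//   VideoDecoder decoder;
//   CallbackImpl cb;
//   Params params;
//   params.fps(SpecialFps::SAMPLE_ALL_FRAMES);
//   decoder.decodeFile("/tmp/example.mp4", params, 0 /* start_frm */, cb);
//   // cb.frames now owns the decoded frames, in output order.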
} // namespace caffe2
#endif // CAFFE2_VIDEO_VIDEO_DECODER_H_
| 13,882 | 25.393536 | 80 | h |
| null | pytorch-main/caffe2/video/video_io.h |
#ifndef CAFFE2_VIDEO_VIDEO_IO_H_
#define CAFFE2_VIDEO_VIDEO_IO_H_
#include <caffe2/core/common.h>
#include <caffe2/video/optical_flow.h>
#include <caffe2/video/video_decoder.h>
#include <opencv2/opencv.hpp>
#include <random>
#include <istream>
#include <ostream>
namespace caffe2 {
TORCH_API void ClipTransformRGB(
const unsigned char* buffer_rgb,
const int crop_size,
const int length_rgb,
const int channels_rgb,
const int sampling_rate_rgb,
const int height,
const int width,
const int h_off,
const int w_off,
const bool mirror_me,
const std::vector<float>& mean_rgb,
const std::vector<float>& inv_std_rgb,
float* transformed_clip);
TORCH_API void ClipTransformOpticalFlow(
const unsigned char* buffer_rgb,
const int crop_size,
const int length_of,
const int channels_of,
const int sampling_rate_of,
const int height,
const int width,
const cv::Rect& rect,
const int channels_rgb,
const bool mirror_me,
const int flow_alg_type,
const int flow_data_type,
const int frame_gap_of,
const bool do_flow_aggregation,
const std::vector<float>& mean_of,
const std::vector<float>& inv_std_of,
float* transformed_clip);
} // namespace caffe2
#endif // CAFFE2_VIDEO_VIDEO_IO_H_
| 1,296 | 23.942308 | 42 | h |
| null | pytorch-main/functorch/csrc/dim/arena.h |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/ATen.h>
#include "minpybind.h"
#ifdef _WIN32
#include <intrin.h>
// https://stackoverflow.com/questions/355967/how-to-use-msvc-intrinsics-to-get-the-equivalent-of-this-gcc-code
inline unsigned int __builtin_clz(unsigned int x) {
unsigned long r = 0;
_BitScanReverse(&r, x);
return (31 - r);
}
#endif
inline int round2min8(int num) {
int nzeros = __builtin_clz((num - 1)|4);
return 1 << (32 - nzeros);
}
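// Worked examples (editorial note): round2min8 rounds up to the next power of
// two, with a floor of 8. round2min8(1): (0|4)=4, clz=29, 1<<3 = 8.
// round2min8(9): (8|4)=12, clz=28, 1<<4 = 16. round2min8(16): (15|4)=15,
// clz=28, 1<<4 = 16, so exact powers of two are preserved.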
struct Arena;
template<typename T>
struct OwnedSlice;
template<typename T>
struct Slice {
Slice()
: begin_(nullptr), size_(0), capacity_(0) {}
template<typename... Args>
Slice(Arena& arena, Args&&... args);
T* begin() const {
return begin_;
}
T* end() const {
return begin_ + size_;
}
int size() const {
return size_;
}
int capacity() const {
return capacity_;
}
T& back(int i=-1) {
return begin_[size_ + i];
}
T& operator[](int i) const {
return begin_[i];
}
c10::optional<int> index(const T& value) {
for (int i : enumerate()) {
if (begin_[i] == value) {
return i;
}
}
return c10::nullopt;
}
bool contains(const T& value) {
return index(value).has_value();
}
void insert(Arena& arena, Slice where, Slice to_insert);
void insert(Arena& arena, Slice where, T v) {
return insert(arena, where, Slice(&v, &v + 1));
}
void insert(Arena& arena, int where, T v) {
return insert(arena, slice(where, where), v);
}
void append(Arena& arena, T value);
void extend(Arena& arena, Slice to_insert);
void extend(Arena& arena, const T* begin, const T* end) {
return extend(arena, Slice<T>((T*)begin, (T*)end));
}
bool remove(Arena& A, T value) {
auto idx = index(value);
if (idx) {
insert(A, slice(*idx, *idx + 1), Slice());
}
return idx.has_value();
}
Slice slice(int begin) {
return slice(begin, size_);
}
Slice slice(int begin, int end) {
if (begin < 0) {
begin += size_;
}
if (end < 0) {
end += size_;
}
Slice result;
result.begin_ = begin_ + begin;
result.size_ = end - begin;
result.capacity_ = result.size_;
return result;
}
bool inside(Slice where) {
return begin() <= where.begin() && where.end() <= end();
}
irange enumerate() const {
return irange(size_);
}
irange reversed_enumerate() const {
return irange(size_ - 1, -1, -1);
}
bool operator==(const Slice<T>& rhs) const {
if (size() != rhs.size()) {
return false;
}
return std::equal(begin(), end(), rhs.begin());
}
Slice(T* begin, T* end)
: begin_(begin), size_(end - begin), capacity_(size_) {}
protected:
static int _length(const T& t) {
return 1;
}
static int _length(Slice t) {
return t.size_;
}
static T* _insert(T*& dst, T t) {
*dst = std::move(t);
return ++dst;
}
static T* _insert(T*& dst, Slice t) {
std::memcpy(dst, t.begin_, sizeof(T)*t.size_);
dst += t.size_;
return dst;
}
T* begin_;
int size_;
int capacity_;
friend struct OwnedSlice<T>;
};
template<typename T>
struct OwnedSlice {
typedef void (*deleter_t)(Slice<T>);
static void _no_delete(Slice<T>) {}
OwnedSlice()
: deleter_(_no_delete) {}
OwnedSlice(const OwnedSlice&) = delete;
OwnedSlice& operator=(const OwnedSlice&) = delete;
~OwnedSlice() {
deleter_(slice_);
if (slice_.size_ > 8) {
delete [] slice_.begin_;
}
}
void set(Slice<T> to_own, deleter_t deleter = _no_delete) {
slice_.size_ = slice_.capacity_ = to_own.size();
slice_.begin_ = (slice_.size_ > 8) ? new T[slice_.size_] : &small_buf[0];
std::memcpy(slice_.begin_, to_own.begin(), slice_.size_ * sizeof(T));
deleter_ = deleter;
}
Slice<T> slice() const {
return slice_;
}
private:
Slice<T> slice_;
deleter_t deleter_;
T small_buf[8];
};
template<typename T>
inline std::ostream& operator<<(std::ostream& s, const Slice<T>& v) {
s << "[";
for (int i : v.enumerate()) {
if (i > 0) {
s << ", ";
}
s << v[i];
}
s << "]";
return s;
}
struct TensorRef {
TensorRef()
: impl_(nullptr){}
TensorRef(const at::Tensor& t)
: impl_(t.unsafeGetTensorImpl()) {}
const at::Tensor& operator*() const {
return *(at::Tensor*)this;
}
at::Tensor* operator->() const {
return (at::Tensor*)this;
}
operator bool() const {
return impl_ != nullptr;
}
private:
at::TensorImpl* impl_;
};
constexpr int ARENA_MAX_SIZE = 4096;
constexpr int ALIGNMENT = 8;
struct Arena {
Arena()
: allocated_(0) {}
template<typename T>
T* allocate(int n) {
if (!n) {
return nullptr;
}
int to_allocate = sizeof(T)*n;
int to_allocate_rounded = ALIGNMENT * ((to_allocate - 1) / ALIGNMENT + 1);
auto prev_allocated = allocated_;
allocated_ += to_allocate_rounded;
if (C10_UNLIKELY_OR_CONST(allocated_ > ARENA_MAX_SIZE)) {
overflow_.emplace_back(new char[to_allocate]);
return (T*) &overflow_.back()[0];
}
return (T*) (buffer_ + prev_allocated);
}
TensorRef autorelease(at::Tensor s) {
auto ref = TensorRef(s);
s.unsafeReleaseTensorImpl();
ar_tensors_.append(*this, ref);
return ref;
}
mpy::handle autorelease(mpy::object obj) {
ar_objects_.append(*this, obj);
obj.release();
return ar_objects_.back();
}
~Arena() {
for(TensorRef t: ar_tensors_) {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(t->unsafeGetTensorImpl());
}
for(mpy::handle h: ar_objects_) {
mpy::object::steal(h);
}
}
private:
int64_t allocated_;
char buffer_[ARENA_MAX_SIZE];
Slice<TensorRef> ar_tensors_;
Slice<mpy::handle> ar_objects_;
std::vector<std::unique_ptr<char[]>> overflow_;
};
template<typename T>
inline void Slice<T>::insert(Arena& arena, Slice where, Slice to_insert) {
AT_ASSERT(inside(where));
Slice result = *this;
/// b------sb---se-----e, 0----n
T* body_dest = where.begin();
if (where.size() != to_insert.size()) {
int new_size = size() - where.size() + to_insert.size();
T* tail_dest = where.begin() + to_insert.size();
if (new_size >= capacity_) {
int new_capacity = new_size ? round2min8(new_size) : 0;
result.capacity_ = new_capacity;
result.begin_ = arena.allocate<T>(new_capacity);
body_dest = result.begin_ + (where.begin() - begin());
tail_dest = body_dest + to_insert.size();
//std::memcpy(result.begin_, begin_, sizeof(T)*(where.begin() - begin()));
std::copy(begin_, begin_ + (where.begin() - begin()), result.begin_);
}
std::memmove(tail_dest, where.end(), sizeof(T)*(end() - where.end()));
result.size_ = new_size;
}
//std::memcpy(body_dest, to_insert.begin(), sizeof(T)*to_insert.size());
std::copy(to_insert.begin(), to_insert.end(), body_dest);
*this = result;
}
template<typename T>
inline void Slice<T>::append(Arena& arena, T value) {
Slice result = *this;
if (size_ == capacity_) {
int new_size = size_ ? round2min8(size_)*2 : 8;
T* n = arena.allocate<T>(new_size);
//memcpy(n, begin_, size_*sizeof(T));
std::copy(begin_, begin_ + size_, n);
result.begin_ = n;
result.capacity_ = new_size;
}
result[result.size_++] = std::move(value);
*this = result;
}
template<typename T>
inline void Slice<T>::extend(Arena& arena, Slice<T> rhs) {
Slice result = *this;
result.size_ = size_ + rhs.size();
if (result.size_ > capacity_) {
int new_size = round2min8(result.size_);
T* n = arena.allocate<T>(new_size);
//memcpy(n, begin_, size_*sizeof(T));
std::copy(begin_, begin_+size_, n);
result.begin_ = n;
result.capacity_ = new_size;
}
//memcpy(result.begin_ + size_, rhs.begin(), sizeof(T)*rhs.size());
std::copy(rhs.begin(), rhs.end(), result.begin_ + size_);
*this = result;
}
template<typename T>
template<typename... Args>
Slice<T>::Slice(Arena& arena, Args&&... args) {
int lens[] = {_length(args)...};
size_ = 0;
for (auto i : lens) {
size_ += i;
}
capacity_ = size_ ? round2min8(size_) : 0;
begin_ = arena.allocate<T>(capacity_);
T* dst_ = begin_;
T* unused[] = {_insert(dst_, args)...};
(void) unused;
}
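// Usage sketch (editorial note, illustrative only): slices borrow their
// storage from an Arena, so they are cheap to copy and grow.
//
//   Arena a;
//   Slice<int> s(a, 1, 2, 3);   // capacity rounded up to 8 by round2min8
//   s.append(a, 4);
//   int sum = 0;
//   for (int v : s) { sum += v; }   // sum == 10
//   Slice<int> tail = s.slice(1);   // view of {2, 3, 4}, shares storage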
| 9,186 | 26.588589 | 111 | h |
| null | pytorch-main/functorch/csrc/dim/python_variable_simple.h |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
// note: pytorch's python variable header includes pybind, which conflicts with minpybind,
// so this file just reproduces the minimal API needed to extract Tensors from python objects.
#include <torch/csrc/python_headers.h>
#include <ATen/core/Tensor.h>
#include <torch/csrc/Export.h>
// Python object that backs torch.autograd.Variable
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPVariable {
PyObject_HEAD;
// Payload
c10::MaybeOwned<at::Tensor> cdata;
// Hooks to be run on backwards pass (corresponds to Python attr
// '_backwards_hooks', set by 'register_hook')
PyObject* backward_hooks = nullptr;
};
TORCH_PYTHON_API extern PyObject *THPVariableClass;
TORCH_PYTHON_API extern PyObject *ParameterClass;
TORCH_PYTHON_API PyObject * THPVariable_Wrap(at::TensorBase var);
inline bool THPVariable_Check(PyObject *obj)
{
if (!THPVariableClass)
return false;
const auto result = PyObject_IsInstance(obj, THPVariableClass);
AT_ASSERT(result != -1);
return result;
}
inline const at::Tensor& THPVariable_Unpack(THPVariable* var) {
return *var->cdata;
}
inline const at::Tensor& THPVariable_Unpack(PyObject* obj) {
return THPVariable_Unpack(reinterpret_cast<THPVariable*>(obj));
}
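// Usage sketch (editorial note, illustrative only): `obj` is assumed to be a
// borrowed PyObject* supplied by the caller.
//
//   if (THPVariable_Check(obj)) {
//     const at::Tensor& t = THPVariable_Unpack(obj);
//     int64_t n = t.numel();  // operate on the unpacked tensor
//   }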
TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter();
| 1,526 | 29.54 | 95 | h |
| null | pytorch-main/modules/detectron/group_spatial_softmax_op.h |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GROUP_SPATIAL_SOFTMAX_OP_H_
#define GROUP_SPATIAL_SOFTMAX_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class GroupSpatialSoftmaxOp final : public Operator<Context> {
public:
GroupSpatialSoftmaxOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
num_classes_(this->template GetSingleArgument<int>("num_classes", 81)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
int num_classes_;
StorageOrder order_;
};
template <typename T, class Context>
class GroupSpatialSoftmaxGradientOp final : public Operator<Context> {
public:
GroupSpatialSoftmaxGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
num_classes_(this->template GetSingleArgument<int>("num_classes", 81)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
int num_classes_;
StorageOrder order_;
Tensor sum_probs_;
};
} // namespace caffe2
#endif // GROUP_SPATIAL_SOFTMAX_OP_H_
| 2,354 | 29.584416 | 79 | h |
| null | pytorch-main/modules/detectron/ps_roi_pool_op.h |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PS_ROI_POOL_OP_H_
#define PS_ROI_POOL_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class PSRoIPoolOp final : public Operator<Context> {
public:
PSRoIPoolOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
spatial_scale_(this->template GetSingleArgument<float>(
"spatial_scale", 1.)),
group_size_(this->template GetSingleArgument<int>("group_size", 1)),
output_dim_(this->template GetSingleArgument<int>("output_dim", 1)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(group_size_, 0);
pooled_height_ = group_size_;
pooled_width_ = group_size_;
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int group_size_;
int output_dim_;
int pooled_height_;
int pooled_width_;
int channels_;
int height_;
int width_;
};
template <typename T, class Context>
class PSRoIPoolGradientOp final : public Operator<Context> {
public:
PSRoIPoolGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
spatial_scale_(this->template GetSingleArgument<float>(
"spatial_scale", 1.)),
group_size_(this->template GetSingleArgument<int>("group_size", 1)),
output_dim_(this->template GetSingleArgument<int>("output_dim", 1)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(group_size_, 0);
pooled_height_ = group_size_;
pooled_width_ = group_size_;
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int group_size_;
int output_dim_;
int pooled_height_;
int pooled_width_;
int channels_;
int height_;
int width_;
};
} // namespace caffe2
#endif // PS_ROI_POOL_OP_H_
| 2,702 | 27.755319 | 77 | h |
| null | pytorch-main/modules/detectron/roi_pool_f_op.h |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ROI_POOL_F_OP_H_
#define ROI_POOL_F_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class RoIPoolFOp final : public Operator<Context> {
public:
RoIPoolFOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
spatial_scale_(this->template GetSingleArgument<float>(
"spatial_scale", 1.)),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(pooled_height_, 0);
TORCH_DCHECK_GT(pooled_width_, 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int pooled_height_;
int pooled_width_;
};
template <typename T, class Context>
class RoIPoolFGradientOp final : public Operator<Context> {
public:
RoIPoolFGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
spatial_scale_(this->template GetSingleArgument<float>(
"spatial_scale", 1.)),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(pooled_height_, 0);
TORCH_DCHECK_GT(pooled_width_, 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int pooled_height_;
int pooled_width_;
};
} // namespace caffe2
#endif // ROI_POOL_F_OP_H_
| 2,470 | 29.134146 | 77 | h |
| null | pytorch-main/modules/detectron/sample_as_op.h |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SAMPLE_AS_OP_H_
#define SAMPLE_AS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SampleAsOp final : public Operator<Context> {
public:
SampleAsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
};
template <typename T, class Context>
class SampleAsGradientOp final : public Operator<Context> {
public:
SampleAsGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
};
} // namespace caffe2
#endif // SAMPLE_AS_OP_H_
| 1,551 | 26.714286 | 75 | h |
| null | pytorch-main/modules/detectron/select_smooth_l1_loss_op.h |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SELECT_SMOOTH_L1_LOSS_OP_H_
#define SELECT_SMOOTH_L1_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SelectSmoothL1LossOp final : public Operator<Context> {
public:
SelectSmoothL1LossOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
beta_(this->template GetSingleArgument<float>("beta", 1.)),
scale_(this->template GetSingleArgument<float>("scale", 1.)) {
CAFFE_ENFORCE(beta_ > 0);
CAFFE_ENFORCE(scale_ >= 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
int dim_; // dimension for 1 anchor prediction
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
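// For reference (editorial note, following the standard Detectron definition
// rather than anything stated in this header): with d = prediction - target,
//   smooth_l1(d) = 0.5 * d^2 / beta   if |d| < beta
//                  |d| - 0.5 * beta   otherwise
// so beta_ is the |d| value where the quadratic region hands off to linear.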
template <typename T, class Context>
class SelectSmoothL1LossGradientOp final : public Operator<Context> {
public:
SelectSmoothL1LossGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
beta_(this->template GetSingleArgument<float>("beta", 1.)),
scale_(this->template GetSingleArgument<float>("scale", 1.)) {
CAFFE_ENFORCE(beta_ > 0);
CAFFE_ENFORCE(scale_ >= 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
int dim_; // dimension for 1 anchor prediction
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
} // namespace caffe2
#endif // SELECT_SMOOTH_L1_LOSS_OP_H_
| 2,515 | 31.25641 | 80 | h |
| null | pytorch-main/modules/detectron/sigmoid_cross_entropy_loss_op.h |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SIGMOID_CROSS_ENTROPY_LOSS_OP_H_
#define SIGMOID_CROSS_ENTROPY_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SigmoidCrossEntropyLossOp final : public Operator<Context> {
public:
SigmoidCrossEntropyLossOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
normalize_(this->template GetSingleArgument<int>("normalize", 1)) {
CAFFE_ENFORCE(scale_ >= 0);
CAFFE_ENFORCE(normalize_ == 0 || normalize_ == 1);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float scale_;
int normalize_;
Tensor losses_{Context::GetDeviceType()};
Tensor counts_{Context::GetDeviceType()};
Tensor normalizer_;
};
template <typename T, class Context>
class SigmoidCrossEntropyLossGradientOp final : public Operator<Context> {
public:
SigmoidCrossEntropyLossGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
normalize_(this->template GetSingleArgument<int>("normalize", 1)) {
CAFFE_ENFORCE(scale_ >= 0);
CAFFE_ENFORCE(normalize_ == 0 || normalize_ == 1);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float scale_;
int normalize_;
Tensor counts_{Context::GetDeviceType()};
Tensor normalizer_;
};
} // namespace caffe2
#endif // SIGMOID_CROSS_ENTROPY_LOSS_OP_H_
| 2,402
| 29.417722
| 75
|
h
|
null |
pytorch-main/modules/detectron/sigmoid_focal_loss_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SIGMOID_FOCAL_LOSS_OP_H_
#define SIGMOID_FOCAL_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SigmoidFocalLossOp final : public Operator<Context> {
public:
SigmoidFocalLossOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
num_classes_(this->template GetSingleArgument<int>("num_classes", 80)),
gamma_(this->template GetSingleArgument<float>("gamma", 1.)),
alpha_(this->template GetSingleArgument<float>("alpha", 0.25)) {
CAFFE_ENFORCE(scale_ >= 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float scale_;
int num_classes_;
float gamma_;
float alpha_;
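  // For reference (from the focal-loss paper's formulation, which these
  // arguments control; not spelled out in this header):
  //   FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t)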
Tensor losses_{Context::GetDeviceType()};
Tensor counts_{Context::GetDeviceType()};
};
template <typename T, class Context>
class SigmoidFocalLossGradientOp final : public Operator<Context> {
public:
SigmoidFocalLossGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
num_classes_(this->template GetSingleArgument<int>("num_classes", 80)),
gamma_(this->template GetSingleArgument<float>("gamma", 1.)),
alpha_(this->template GetSingleArgument<float>("alpha", 0.25)) {
CAFFE_ENFORCE(scale_ >= 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float scale_;
int num_classes_;
float gamma_;
float alpha_;
Tensor counts_{Context::GetDeviceType()};
Tensor weights_{Context::GetDeviceType()}; // unignored weights
};
} // namespace caffe2
#endif // SIGMOID_FOCAL_LOSS_OP_H_
| 2,624
| 30.25
| 79
|
h
|
null |
pytorch-main/modules/detectron/smooth_l1_loss_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SMOOTH_L1_LOSS_OP_H_
#define SMOOTH_L1_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SmoothL1LossOp final : public Operator<Context> {
public:
SmoothL1LossOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
beta_(this->template GetSingleArgument<float>("beta", 1.)),
scale_(this->template GetSingleArgument<float>("scale", 1.)) {
CAFFE_ENFORCE(beta_ > 0);
CAFFE_ENFORCE(scale_ >= 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
template <typename T, class Context>
class SmoothL1LossGradientOp final : public Operator<Context> {
public:
SmoothL1LossGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
beta_(this->template GetSingleArgument<float>("beta", 1.)),
scale_(this->template GetSingleArgument<float>("scale", 1.)) {
CAFFE_ENFORCE(beta_ > 0);
CAFFE_ENFORCE(scale_ >= 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
} // namespace caffe2
#endif // SMOOTH_L1_LOSS_OP_H_
| 2,372
| 30.223684
| 80
|
h
|
null |
pytorch-main/modules/detectron/softmax_focal_loss_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SOFTMAX_FOCAL_LOSS_OP_H_
#define SOFTMAX_FOCAL_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SoftmaxFocalLossOp final : public Operator<Context> {
public:
SoftmaxFocalLossOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
gamma_(this->template GetSingleArgument<float>("gamma", 1.)),
alpha_(this->template GetSingleArgument<float>("alpha", 0.25)),
num_classes_(this->template GetSingleArgument<int>("num_classes", 81)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE(scale_ >= 0);
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float scale_;
float gamma_;
float alpha_;
int num_classes_;
StorageOrder order_;
Tensor losses_;
};
template <typename T, class Context>
class SoftmaxFocalLossGradientOp final : public Operator<Context> {
public:
SoftmaxFocalLossGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
gamma_(this->template GetSingleArgument<float>("gamma", 1.)),
alpha_(this->template GetSingleArgument<float>("alpha", 0.25)),
num_classes_(this->template GetSingleArgument<int>("num_classes", 81)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE(scale_ >= 0);
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
float scale_;
float gamma_;
float alpha_;
int num_classes_;
StorageOrder order_;
Tensor buff_;
};
} // namespace caffe2
#endif // SOFTMAX_FOCAL_LOSS_OP_H_
| 2,930
| 30.858696
| 79
|
h
|
null |
pytorch-main/modules/detectron/spatial_narrow_as_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SPATIAL_NARROW_AS_OP_H_
#define SPATIAL_NARROW_AS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class SpatialNarrowAsOp final : public Operator<Context> {
public:
SpatialNarrowAsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
template <typename T>
bool DoRunWithType();
};
template <class Context>
class SpatialNarrowAsGradientOp final : public Operator<Context> {
public:
SpatialNarrowAsGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
template <typename T>
bool DoRunWithType();
};
} // namespace caffe2
#endif // SPATIAL_NARROW_AS_OP_H_
| 1,745
| 26.28125
| 75
|
h
|
null |
pytorch-main/modules/detectron/upsample_nearest_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef UPSAMPLE_NEAREST_OP_H_
#define UPSAMPLE_NEAREST_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class UpsampleNearestOp final : public Operator<Context> {
public:
UpsampleNearestOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
scale_(this->template GetSingleArgument<int>("scale", 2)) {
TORCH_DCHECK_GE(scale_, 1);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& X = Input(0);
auto out_shape = X.sizes().vec();
out_shape[X.dim() - 1] *= scale_;
out_shape[X.dim() - 2] *= scale_;
auto* Y = Output(0, out_shape, at::dtype<T>());
int d1;
int d2;
int d3;
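    // Collapse the leading batch/channel dims into d1 so the copy loop
    // below can treat the output as d1 planes of d2 x d3 pixels.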
if (X.dim() == 3) {
d1 = Y->dim32(0);
d2 = Y->dim32(1);
d3 = Y->dim32(2);
} else {
d1 = Y->dim32(0) * Y->dim32(1);
d2 = Y->dim32(2);
d3 = Y->dim32(3);
}
const T *input_data = X.template data<T>();
T *output_data = Y->template mutable_data<T>();
int scaled_d2 = d2 / scale_;
int scaled_d3 = d3 / scale_;
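    // Nearest-neighbor mapping: output pixel (j, u) in each plane copies
    // input pixel (j / scale_, u / scale_).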
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < d1; ++i) {
for (int j = 0; j < d2; ++j) {
for (int u = 0; u < d3; ++u) {
int ii = (i * d2 + j) * d3 + u;
int scaled_u = u / scale_;
int scaled_j = j / scale_;
int ipidx = ((i * scaled_d2) + scaled_j) * scaled_d3 + scaled_u;
output_data[ii] = input_data[ipidx];
}
}
}
return true;
}
protected:
int scale_;
};
template <typename T, class Context>
class UpsampleNearestGradientOp final : public Operator<Context> {
public:
UpsampleNearestGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
scale_(this->template GetSingleArgument<int>("scale", 2)) {
TORCH_DCHECK_GE(scale_, 1);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// No CPU implementation for now
CAFFE_NOT_IMPLEMENTED;
}
protected:
int scale_;
};
} // namespace caffe2
#endif // UPSAMPLE_NEAREST_OP_H_
| 2,829
| 25.448598
| 75
|
h
|
null |
pytorch-main/modules/observers/net_observer_reporter.h
|
#pragma once
#include <map>
#include "caffe2/core/common.h"
#include "caffe2/core/net.h"
#include "observers/macros.h"
namespace caffe2 {
struct PerformanceInformation {
// Analytic
int64_t flops = 0;
int64_t bytes_written = 0;
int64_t bytes_read = 0;
std::vector<TensorShape> tensor_shapes = {};
std::vector<Argument> args = {};
std::string engine = ""; // the engine used
std::string type = ""; // the type of the operator
// Measured
double latency = 0;
double cpuMilliseconds = 0;
};
class CAFFE2_OBSERVER_API NetObserverReporter {
public:
virtual ~NetObserverReporter() = default;
/*
  Report the delay metrics collected by the observer.
  The delays are saved in a map: the key is an identifier associated
  with the reported delay, and the value is the PerformanceInformation
  holding the measured delay.
*/
virtual void report(
NetBase* net,
std::map<std::string, PerformanceInformation>&) = 0;
};
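/*
A minimal reporter sketch (illustrative only; `LogReporter` is a
hypothetical name, not part of this header):

  class LogReporter : public NetObserverReporter {
   public:
    void report(
        NetBase* net,
        std::map<std::string, PerformanceInformation>& info) override {
      for (const auto& kv : info) {
        LOG(INFO) << kv.first << " latency: " << kv.second.latency;
      }
    }
  };
*/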
} // namespace caffe2
| 927
| 22.794872
| 70
|
h
|
null |
pytorch-main/modules/observers/observer_config.h
|
#pragma once
#include "observers/macros.h"
#include "observers/net_observer_reporter.h"
#include "caffe2/core/common.h"
namespace caffe2 {
/*
netInitSampleRate_ == 1 && operatorNetSampleRatio_ == 1 :
Log operator metrics in every iteration
netInitSampleRate_ == 1 && operatorNetSampleRatio_ == 0 :
    Log net metrics in every iteration
netInitSampleRate_ == n && netFollowupSampleRate_ == m &&
netFollowupSampleCount == c && operatorNetSampleRatio_ == 1 :
Log operator metrics first at odds of 1 / n. Once first logged,
the following c logs are at odds of 1 / min(n, m). Then repeat
netInitSampleRate_ == n && netFollowupSampleRate_ == m &&
netFollowupSampleCount == c && operatorNetSampleRatio_ == 0 :
Log net metrics first at odds of 1 / n. Once first logged,
the following c logs are at odds of 1 / min(n, m). Then repeat
netInitSampleRate_ == n && netFollowupSampleRate_ == m &&
netFollowupSampleCount == c && operatorNetSampleRatio_ == o :
Log net metrics first at odds of 1 / n. Once first logged,
    the following c logs are at odds of 1 / min(n, m); if the random number
    is a multiple of o, log operator metrics instead. Then repeat
skipIters_ == n: skip the first n iterations of the net.
*/
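/*
A minimal usage sketch (the rates below are illustrative, not defaults):

  // Log net metrics at odds of 1/1000; once sampled, take the next 10
  // logs at odds of 1/100; never log operator metrics; skip 5 iterations.
  ObserverConfig::initSampleRate(
      /+ netInitSampleRate +/ 1000,
      /+ netFollowupSampleRate +/ 100,
      /+ netFollowupSampleCount +/ 10,
      /+ operatorNetSampleRatio +/ 0,
      /+ skipIters +/ 5);
(argument-name markers written with `+` to keep this comment block valid)
*/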
class CAFFE2_OBSERVER_API ObserverConfig {
public:
static void initSampleRate(
int netInitSampleRate,
int netFollowupSampleRate,
int netFollowupSampleCount,
int operatorNetSampleRatio,
int skipIters) {
CAFFE_ENFORCE(netFollowupSampleRate <= netInitSampleRate);
CAFFE_ENFORCE(netFollowupSampleRate >= 1 || netInitSampleRate == 0);
netInitSampleRate_ = netInitSampleRate;
netFollowupSampleRate_ = netFollowupSampleRate;
netFollowupSampleCount_ = netFollowupSampleCount;
operatorNetSampleRatio_ = operatorNetSampleRatio;
skipIters_ = skipIters;
}
static int getNetInitSampleRate() {
return netInitSampleRate_;
}
static int getNetFollowupSampleRate() {
return netFollowupSampleRate_;
}
static int getNetFollowupSampleCount() {
return netFollowupSampleCount_;
}
  static int getOperatorNetSampleRatio() {
return operatorNetSampleRatio_;
}
static int getSkipIters() {
return skipIters_;
}
static void setReporter(unique_ptr<NetObserverReporter> reporter) {
reporter_ = std::move(reporter);
}
static NetObserverReporter* getReporter() {
CAFFE_ENFORCE(reporter_);
return reporter_.get();
}
static void setMarker(int marker) {
marker_ = marker;
}
static int getMarker() {
return marker_;
}
private:
  /* The odds of logging the net metric initially or immediately after a
     reset */
static int netInitSampleRate_;
  /* The odds of logging the net metric again once it has been logged after a
     start or reset */
static int netFollowupSampleRate_;
/* The number of follow up logs to be collected for odds of
netFollowupSampleRate_ */
static int netFollowupSampleCount_;
/* The odds to log the operator metric instead of the net metric.
When the operator is logged the net is not logged. */
static int operatorNetSampleRatio_;
/* skip the first few iterations */
static int skipIters_;
static unique_ptr<NetObserverReporter> reporter_;
/* marker used in identifying the metrics in certain reporters */
static int marker_;
};
} // namespace caffe2
| 3,351
| 32.52
| 77
|
h
|
null |
pytorch-main/modules/observers/perf_observer.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/core/net.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/timer.h"
#include "observers/macros.h"
#include <unordered_map>
namespace caffe2 {
double getClockTimeMilliseconds();
class CAFFE2_OBSERVER_API PerfNetObserver : public NetObserver {
public:
explicit PerfNetObserver(NetBase* subject_);
virtual ~PerfNetObserver();
private:
void Start() override;
void Stop() override;
caffe2::string getObserverName(const OperatorBase* op, int idx) const;
private:
enum LogType {
NONE,
OPERATOR_DELAY,
NET_DELAY,
};
LogType logType_;
unsigned int numRuns_;
std::unordered_map<const OperatorBase*, const ObserverBase<OperatorBase>*>
observerMap_;
double wallMilliseconds_;
double cpuMilliseconds_;
};
class PerfOperatorObserver : public ObserverBase<OperatorBase> {
public:
PerfOperatorObserver(OperatorBase* op, PerfNetObserver* netObserver);
virtual ~PerfOperatorObserver();
double getWallMilliseconds() const;
double getCpuMilliseconds() const;
std::vector<TensorShape> getTensorShapes() const;
private:
void Start() override;
void Stop() override;
private:
  // Observer of the net that owns the corresponding op. We make sure the net
  // is never destructed while an operator observer is still alive: the
  // operator observer gets destructed first, then the op, then the net and
  // its observer. We do this trick in order to get access to the net's name
  // and other fields without storing them inside the operator observer. Each
  // field is memory-costly here, and a raw pointer is the cheapest solution.
PerfNetObserver* netObserver_;
double wallMilliseconds_;
double cpuMilliseconds_;
std::vector<TensorShape> tensor_shapes_;
};
} // namespace caffe2
| 1,784
| 25.641791
| 79
|
h
|
null |
pytorch-main/test/cpp/api/support.h
|
#pragma once
#include <test/cpp/common/support.h>
#include <gtest/gtest.h>
#include <ATen/TensorIndexing.h>
#include <c10/util/Exception.h>
#include <torch/nn/cloneable.h>
#include <torch/types.h>
#include <torch/utils.h>
#include <string>
#include <utility>
namespace torch {
namespace test {
// Lets you use a container without making a new class,
// for experimental implementations
class SimpleContainer : public nn::Cloneable<SimpleContainer> {
public:
void reset() override {}
template <typename ModuleHolder>
ModuleHolder add(
ModuleHolder module_holder,
std::string name = std::string()) {
return Module::register_module(std::move(name), module_holder);
}
};
struct SeedingFixture : public ::testing::Test {
SeedingFixture() {
torch::manual_seed(0);
}
};
struct WarningCapture : public WarningHandler {
WarningCapture() : prev_(WarningUtils::get_warning_handler()) {
WarningUtils::set_warning_handler(this);
}
~WarningCapture() {
WarningUtils::set_warning_handler(prev_);
}
const std::vector<std::string>& messages() {
return messages_;
}
std::string str() {
return c10::Join("\n", messages_);
}
void process(const c10::Warning& warning) override {
messages_.push_back(warning.msg());
}
private:
WarningHandler* prev_;
std::vector<std::string> messages_;
};
inline bool pointer_equal(at::Tensor first, at::Tensor second) {
return first.data_ptr() == second.data_ptr();
}
// This mirrors the `isinstance(x, torch.Tensor) and isinstance(y,
// torch.Tensor)` branch in `TestCase.assertEqual` in
// torch/testing/_internal/common_utils.py
inline void assert_tensor_equal(
at::Tensor a,
at::Tensor b,
bool allow_inf = false) {
ASSERT_TRUE(a.sizes() == b.sizes());
if (a.numel() > 0) {
if (a.device().type() == torch::kCPU &&
(a.scalar_type() == torch::kFloat16 ||
a.scalar_type() == torch::kBFloat16)) {
// CPU half and bfloat16 tensors don't have the methods we need below
a = a.to(torch::kFloat32);
}
if (a.device().type() == torch::kCUDA &&
a.scalar_type() == torch::kBFloat16) {
// CUDA bfloat16 tensors don't have the methods we need below
a = a.to(torch::kFloat32);
}
b = b.to(a);
if ((a.scalar_type() == torch::kBool) !=
(b.scalar_type() == torch::kBool)) {
TORCH_CHECK(false, "Was expecting both tensors to be bool type.");
} else {
if (a.scalar_type() == torch::kBool && b.scalar_type() == torch::kBool) {
// we want to respect precision but as bool doesn't support subtraction,
// boolean tensor has to be converted to int
a = a.to(torch::kInt);
b = b.to(torch::kInt);
}
auto diff = a - b;
if (a.is_floating_point()) {
// check that NaNs are in the same locations
auto nan_mask = torch::isnan(a);
ASSERT_TRUE(torch::equal(nan_mask, torch::isnan(b)));
diff.index_put_({nan_mask}, 0);
// inf check if allow_inf=true
if (allow_inf) {
auto inf_mask = torch::isinf(a);
auto inf_sign = inf_mask.sign();
ASSERT_TRUE(torch::equal(inf_sign, torch::isinf(b).sign()));
diff.index_put_({inf_mask}, 0);
}
}
// TODO: implement abs on CharTensor (int8)
if (diff.is_signed() && diff.scalar_type() != torch::kInt8) {
diff = diff.abs();
}
auto max_err = diff.max().item<double>();
ASSERT_LE(max_err, 1e-5);
}
}
}
// This mirrors the `isinstance(x, torch.Tensor) and isinstance(y,
// torch.Tensor)` branch in `TestCase.assertNotEqual` in
// torch/testing/_internal/common_utils.py
inline void assert_tensor_not_equal(at::Tensor x, at::Tensor y) {
if (x.sizes() != y.sizes()) {
return;
}
ASSERT_GT(x.numel(), 0);
y = y.type_as(x);
y = x.is_cuda() ? y.to({torch::kCUDA, x.get_device()}) : y.cpu();
auto nan_mask = x != x;
if (torch::equal(nan_mask, y != y)) {
auto diff = x - y;
if (diff.is_signed()) {
diff = diff.abs();
}
diff.index_put_({nan_mask}, 0);
// Use `item()` to work around:
// https://github.com/pytorch/pytorch/issues/22301
auto max_err = diff.max().item<double>();
ASSERT_GE(max_err, 1e-5);
}
}
inline int count_substr_occurrences(
const std::string& str,
const std::string& substr) {
int count = 0;
size_t pos = str.find(substr);
while (pos != std::string::npos) {
count++;
pos = str.find(substr, pos + substr.size());
}
return count;
}
// A RAII, thread local (!) guard that changes default dtype upon
// construction, and sets it back to the original dtype upon destruction.
//
// Usage of this guard is synchronized across threads, so that at any given
// time, only one guard can take effect.
struct AutoDefaultDtypeMode {
static std::mutex default_dtype_mutex;
AutoDefaultDtypeMode(c10::ScalarType default_dtype)
: prev_default_dtype(
torch::typeMetaToScalarType(torch::get_default_dtype())) {
default_dtype_mutex.lock();
torch::set_default_dtype(torch::scalarTypeToTypeMeta(default_dtype));
}
~AutoDefaultDtypeMode() {
default_dtype_mutex.unlock();
torch::set_default_dtype(torch::scalarTypeToTypeMeta(prev_default_dtype));
}
c10::ScalarType prev_default_dtype;
};
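// A minimal usage sketch (illustrative):
//   {
//     AutoDefaultDtypeMode dtype_guard(torch::kFloat64);
//     auto t = torch::ones({2, 2}); // created with double as default dtype
//   } // previous default dtype restored on scope exit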
inline void assert_tensor_creation_meta(
torch::Tensor& x,
torch::autograd::CreationMeta creation_meta) {
auto autograd_meta = x.unsafeGetTensorImpl()->autograd_meta();
TORCH_CHECK(autograd_meta);
auto view_meta =
static_cast<torch::autograd::DifferentiableViewMeta*>(autograd_meta);
TORCH_CHECK(view_meta->has_bw_view());
ASSERT_EQ(view_meta->get_creation_meta(), creation_meta);
}
} // namespace test
} // namespace torch
| 5,772
| 28.304569
| 80
|
h
|
null |
pytorch-main/test/cpp/common/support.h
|
#pragma once
#include <c10/util/Exception.h>
#include <gtest/gtest.h>
#include <stdexcept>
#include <string>
namespace torch {
namespace test {
#define ASSERT_THROWS_WITH(statement, substring) \
{ \
std::string assert_throws_with_error_message; \
try { \
(void)statement; \
FAIL() << "Expected statement `" #statement \
"` to throw an exception, but it did not"; \
} catch (const c10::Error& e) { \
assert_throws_with_error_message = e.what_without_backtrace(); \
} catch (const std::exception& e) { \
assert_throws_with_error_message = e.what(); \
} \
if (assert_throws_with_error_message.find(substring) == \
std::string::npos) { \
FAIL() << "Error message \"" << assert_throws_with_error_message \
<< "\" did not contain expected substring \"" << substring \
<< "\""; \
} \
}
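// A minimal usage sketch (illustrative):
//   ASSERT_THROWS_WITH(
//       []() { throw std::runtime_error("bad size"); }(), "bad size");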
} // namespace test
} // namespace torch
| 1,526
| 43.911765
| 73
|
h
|
null |
pytorch-main/test/cpp/jit/test_custom_class_registrations.h
|
#include <torch/custom_class.h>
#include <torch/script.h>
namespace torch {
namespace jit {
struct ScalarTypeClass : public torch::CustomClassHolder {
ScalarTypeClass(at::ScalarType s) : scalar_type_(s) {}
at::ScalarType scalar_type_;
};
template <class T>
struct MyStackClass : torch::CustomClassHolder {
std::vector<T> stack_;
MyStackClass(std::vector<T> init) : stack_(init.begin(), init.end()) {}
void push(T x) {
stack_.push_back(x);
}
T pop() {
auto val = stack_.back();
stack_.pop_back();
return val;
}
c10::intrusive_ptr<MyStackClass> clone() const {
return c10::make_intrusive<MyStackClass>(stack_);
}
void merge(const c10::intrusive_ptr<MyStackClass>& c) {
for (auto& elem : c->stack_) {
push(elem);
}
}
std::tuple<double, int64_t> return_a_tuple() const {
return std::make_tuple(1337.0f, 123);
}
};
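// A minimal usage sketch (illustrative):
//   auto s = c10::make_intrusive<MyStackClass<std::string>>(
//       std::vector<std::string>{"a"});
//   s->push("b");
//   auto top = s->pop(); // "b"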
} // namespace jit
} // namespace torch
| 923
| 21
| 73
|
h
|
null |
pytorch-main/test/cpp/jit/test_utils.h
|
#pragma once
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/runtime/autodiff.h>
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/testing/file_check.h>
namespace {
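// Normalizes a string for fuzzy matching below: strips leading/trailing
// whitespace, removes newlines, and collapses runs of spaces to one.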
static inline void trim(std::string& s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](unsigned char ch) {
return !std::isspace(ch);
}));
s.erase(
std::find_if(
s.rbegin(),
s.rend(),
[](unsigned char ch) { return !std::isspace(ch); })
.base(),
s.end());
for (size_t i = 0; i < s.size(); ++i) {
while (i < s.size() && s[i] == '\n') {
s.erase(i, 1);
}
}
for (size_t i = 0; i < s.size(); ++i) {
if (s[i] == ' ') {
while (i + 1 < s.size() && s[i + 1] == ' ') {
s.erase(i + 1, 1);
}
}
}
}
} // namespace
#define ASSERT_THROWS_WITH_MESSAGE(statement, substring) \
try { \
(void)statement; \
FAIL(); \
} catch (const std::exception& e) { \
std::string substring_s(substring); \
trim(substring_s); \
auto exception_string = std::string(e.what()); \
trim(exception_string); \
ASSERT_NE(exception_string.find(substring_s), std::string::npos) \
<< " Error was: \n" \
<< exception_string; \
}
namespace torch {
namespace jit {
using tensor_list = std::vector<at::Tensor>;
using namespace torch::autograd;
// Work around the fact that variable_tensor_list doesn't duplicate all
// of std::vector's constructors. Most constructors are never used in the
// implementation, just in our tests.
Stack createStack(std::vector<at::Tensor>&& list);
void assertAllClose(const tensor_list& a, const tensor_list& b);
std::vector<at::Tensor> run(
InterpreterState& interp,
const std::vector<at::Tensor>& inputs);
std::pair<tensor_list, tensor_list> runGradient(
Gradient& grad_spec,
tensor_list& tensors_in,
tensor_list& tensor_grads_in);
std::shared_ptr<Graph> build_lstm();
std::shared_ptr<Graph> build_mobile_export_analysis_graph();
std::shared_ptr<Graph> build_mobile_export_with_out();
std::shared_ptr<Graph> build_mobile_export_analysis_graph_with_vararg();
std::shared_ptr<Graph> build_mobile_export_analysis_graph_nested();
std::shared_ptr<Graph> build_mobile_export_analysis_graph_non_const();
at::Tensor t_use(at::Tensor x);
at::Tensor t_def(at::Tensor x);
// given the difference of output vs expected tensor, check whether the
// difference is within a relative tolerance range. This is a standard way of
// matching tensor values up to certain precision
bool checkRtol(const at::Tensor& diff, const std::vector<at::Tensor> inputs);
bool almostEqual(const at::Tensor& a, const at::Tensor& b);
bool exactlyEqual(const at::Tensor& a, const at::Tensor& b);
bool exactlyEqual(
const std::vector<at::Tensor>& a,
const std::vector<at::Tensor>& b);
std::vector<at::Tensor> runGraph(
std::shared_ptr<Graph> graph,
const std::vector<at::Tensor>& inputs);
std::pair<at::Tensor, at::Tensor> lstm(
at::Tensor input,
at::Tensor hx,
at::Tensor cx,
at::Tensor w_ih,
at::Tensor w_hh);
} // namespace jit
} // namespace torch
| 3,586
| 33.161905
| 77
|
h
|
null |
pytorch-main/test/cpp/lazy/test_lazy_ops_util.h
|
#pragma once
#include <gtest/gtest.h>
#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/core/debug_util.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/torch.h>
#include <cmath>
#include <functional>
#include <string>
#include <unordered_set>
namespace torch {
namespace lazy {
const std::unordered_set<std::string>* GetIgnoredCounters();
// Converts an at::Tensor(device=torch::kLazy) to at::Tensor(device=torch::kCPU).
// This at::Tensor can be a torch::Tensor, which is a Variable, or an at::Tensor
// that knows nothing about autograd. If the input tensor is already a CPU
// tensor, it will be returned as-is. Needed because EqualValues and AllClose
// require CPU tensors on both sides.
at::Tensor ToCpuTensor(const at::Tensor& tensor);
// Helper function to copy a tensor to device.
torch::Tensor CopyToDevice(
const torch::Tensor& tensor,
const torch::Device& device);
bool EqualValues(at::Tensor tensor1, at::Tensor tensor2);
bool EqualValuesNoElementTypeCheck(at::Tensor tensor1, at::Tensor tensor2);
bool CloseValues(
at::Tensor tensor1,
at::Tensor tensor2,
double rtol = 1e-5,
double atol = 1e-8);
static inline void AllClose(
at::Tensor tensor,
at::Tensor xla_tensor,
double rtol = 1e-5,
double atol = 1e-8) {
EXPECT_TRUE(CloseValues(tensor, xla_tensor, rtol, atol));
}
static inline void AllClose(
at::Tensor tensor,
torch::lazy::LazyTensor& xla_tensor,
double rtol = 1e-5,
double atol = 1e-8) {
EXPECT_TRUE(
CloseValues(tensor, xla_tensor.ToTensor(/*detached=*/false), rtol, atol));
}
static inline void AllEqual(at::Tensor tensor, at::Tensor xla_tensor) {
EXPECT_TRUE(EqualValues(tensor, xla_tensor));
}
void ForEachDevice(const std::function<void(const torch::Device&)>& devfn);
std::string GetTensorTextGraph(at::Tensor tensor);
std::string GetTensorDotGraph(at::Tensor tensor);
std::string GetTensorHloGraph(at::Tensor tensor);
void TestBackward(
const std::vector<torch::Tensor>& inputs,
const torch::Device& device,
const std::function<torch::Tensor(const std::vector<torch::Tensor>&)>&
testfn,
double rtol = 1e-5,
double atol = 1e-8,
int derivative_level = 1);
} // namespace lazy
} // namespace torch
| 2,306
| 27.134146
| 80
|
h
|
null |
pytorch-main/test/cpp/rpc/e2e_test_base.h
|
#include <gtest/gtest.h>
#include <torch/csrc/distributed/autograd/context/container.h>
#include <torch/csrc/distributed/autograd/context/context.h>
#include <torch/csrc/distributed/autograd/engine/dist_engine.h>
#include <torch/csrc/distributed/autograd/utils.h>
#include <torch/csrc/distributed/c10d/TCPStore.hpp>
#include <torch/csrc/distributed/rpc/rref_context.h>
#include <torch/csrc/distributed/rpc/script_call.h>
#include <torch/csrc/distributed/rpc/script_remote_call.h>
#include <torch/csrc/distributed/rpc/script_resp.h>
#include <torch/csrc/distributed/rpc/utils.h>
#include <torch/csrc/jit/runtime/operator.h>
namespace torch {
namespace distributed {
namespace rpc {
using torch::distributed::autograd::DistAutogradContainer;
using torch::distributed::autograd::DistAutogradContext;
DistAutogradContainer* getDistAutogradContainer();
class TestE2EBase : public ::testing::Test {
protected:
void SetUp() override {
// Setup distributed autograd.
autogradContainer = getDistAutogradContainer();
// Setup server store.
c10d::TCPStoreOptions opts{
/* port */ 0,
/* isServer */ true,
numWorkers,
/* waitWorkers */ true,
/* timeout */ std::chrono::seconds(10)};
store = c10::make_intrusive<c10d::TCPStore>(serverAddress, opts);
buildRpcAgent();
rpcAgentPostProcessing();
}
void rpcAgentPostProcessing() {
RpcAgent::setCurrentRpcAgent(rpcAgent);
std::shared_ptr<TypeResolver> typeResolver =
std::make_shared<TypeResolver>([&](const c10::QualifiedName& qn) {
          // For the Dict type that is used for the device map.
auto pos = qn.name().find("Dict");
if (pos != std::string::npos) {
return c10::StrongTypePtr(
nullptr,
c10::DictType::create(
c10::StringType::get(), c10::StringType::get()));
}
return c10::StrongTypePtr(
nullptr, c10::TensorType::create(at::Tensor()));
});
rpcAgent->setTypeResolver(typeResolver);
rpcAgent->start();
}
void TearDown() override {
rpcAgent->join();
rpcAgent->shutdown();
RpcAgent::setCurrentRpcAgent(nullptr);
}
c10::intrusive_ptr<OwnerRRef> createRemoteRRef(
at::Tensor t1,
at::Tensor t2,
std::shared_ptr<torch::jit::Operator> op) {
auto& ctx = RRefContext::getInstance();
auto ownerRRef = ctx.createOwnerRRef(c10::TensorType::create(t1));
    // prevent this owner RRef from being deleted due to other forks
ctx.addSelfAsFork(ownerRRef);
ScriptRemoteCall scriptRemoteCall(
op, {t1, t2, 1}, ownerRRef->rrefId(), ownerRRef->rrefId());
auto jitFuture = autograd::sendMessageWithAutograd(
*rpcAgent,
rpcAgent->getWorkerInfo("worker"),
std::move(scriptRemoteCall).toMessage(),
false);
ownerRRef->registerOwnerCreationFuture(jitFuture);
    // Builtin operators do not return py::object, and hence do not require
    // the GIL for destructing the potentially deleted OwnerRRef.
jitFuture->addCallback(
[ownerRRefId = ownerRRef->rrefId()](JitFuture& jitFuture) {
callback::finishCreatingOwnerRRef(jitFuture, ownerRRefId);
});
return ownerRRef;
}
at::Tensor remoteAdd(
at::Tensor t1,
at::Tensor t2,
std::shared_ptr<torch::jit::Operator> op) {
ScriptCall scriptCall(op, {t1, t2, /* alpha */ 1});
// Send the RPC and return result.
auto response = autograd::sendMessageWithAutograd(
*rpcAgent,
rpcAgent->getWorkerInfo("worker"),
std::move(scriptCall).toMessage());
response->waitAndThrow();
MessageType messageType = MessageType::FORWARD_AUTOGRAD_RESP;
auto wrappedResponse = deserializeResponse(
std::move(*response->value().toCustomClass<Message>()), messageType);
return static_cast<ScriptResp&>(*wrappedResponse).value().toTensor();
}
virtual void buildRpcAgent() = 0;
class AutogradContextGuard {
public:
explicit AutogradContextGuard()
: context(DistAutogradContainer::getInstance().newContext()) {}
~AutogradContextGuard() {
DistAutogradContainer::getInstance().releaseContext(context->contextId());
}
private:
std::shared_ptr<DistAutogradContext> context;
};
void runTrainingLoop() {
auto options = at::TensorOptions().requires_grad(true);
auto t1 = torch::ones({3, 3}, options);
auto t2 = torch::ones({3, 3}, options);
c10::OperatorName full_name("aten::add", "Tensor");
auto matchedOp = torch::jit::findOperatorFor(full_name);
ASSERT_TRUE(matchedOp);
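    // Each iteration: chain several remote adds for the forward pass,
    // materialize an RRef on the owner, then run distributed backward.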
for (size_t i = 0; i < numIters; i++) {
// Create the autograd context guard.
AutogradContextGuard guard;
// Multiple RPCs within one autograd context for the forward pass.
auto result = remoteAdd(t1, t2, matchedOp);
for (size_t j = 0; j < 5; j++) {
result = remoteAdd(t1, result, matchedOp);
}
auto rref = createRemoteRRef(t1, result, matchedOp);
result = rref->getValue().toTensor();
// Run backward pass now.
autograd::DistEngine::getInstance().execute(
DistAutogradContainer::currentContextId(),
{torch::sum(result)},
/* retainGraph */ false);
}
}
DistAutogradContainer* autogradContainer;
std::shared_ptr<RpcAgent> rpcAgent;
static const size_t numIters;
static const size_t numWorkers;
c10::intrusive_ptr<c10d::Store> store;
static const char* serverAddress;
};
} // namespace rpc
} // namespace distributed
} // namespace torch
| 5,578
| 31.248555
| 80
|
h
|
null |
pytorch-main/test/cpp/tensorexpr/gtest_assert_float_eq.h
|
#pragma once
#include <cmath>
// Copyright 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// The Google C++ Testing and Mocking Framework (Google Test)
//
// This header file declares functions and macros used internally by
// Google Test. They are subject to change without notice.
using Bits = uint32_t;
// this avoids the "dereferencing type-punned pointer
// will break strict-aliasing rules" error
union Float {
float float_;
Bits bits_;
};
// # of bits in a number.
static const size_t kBitCount = 8 * sizeof(Bits);
// The mask for the sign bit.
static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
// GOOGLETEST_CM0001 DO NOT DELETE
// Converts an integer from the sign-and-magnitude representation to
// the biased representation. More precisely, let N be 2 to the
// power of (kBitCount - 1), an integer x is represented by the
// unsigned number x + N.
//
// For instance,
//
// -N + 1 (the most negative number representable using
// sign-and-magnitude) is represented by 1;
// 0 is represented by N; and
// N - 1 (the biggest number representable using
// sign-and-magnitude) is represented by 2N - 1.
//
// Read http://en.wikipedia.org/wiki/Signed_number_representations
// for more details on signed number representations.
static Bits SignAndMagnitudeToBiased(const Bits& sam) {
if (kSignBitMask & sam) {
// sam represents a negative number.
return ~sam + 1;
} else {
// sam represents a positive number.
return kSignBitMask | sam;
}
}
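// Worked example (illustrative, using a hypothetical 4-bit Bits type with
// kSignBitMask == 0b1000): +1 (0b0001) biases to 0b1001 and -1 (0b1001)
// biases to 0b0111, so the two values sit 2 apart on the biased number
// line, matching their true distance.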
// Given two numbers in the sign-and-magnitude representation,
// returns the distance between them as an unsigned number.
static Bits DistanceBetweenSignAndMagnitudeNumbers(
const Bits& sam1,
const Bits& sam2) {
const Bits biased1 = SignAndMagnitudeToBiased(sam1);
const Bits biased2 = SignAndMagnitudeToBiased(sam2);
return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
}
// How many ULP's (Units in the Last Place) we want to tolerate when
// comparing two numbers. The larger the value, the more error we
// allow. A 0 value means that two numbers must be exactly the same
// to be considered equal.
//
// The maximum error of a single floating-point operation is 0.5
// units in the last place. On Intel CPU's, all floating-point
// calculations are done with 80-bit precision, while double has 64
// bits. Therefore, 4 should be enough for ordinary use.
//
// See the following article for more details on ULP:
// http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
static const size_t kMaxUlps = 4;
// Returns true if and only if this number is at most kMaxUlps ULP's away
// from rhs. In particular, this function:
//
// - returns false if either number is (or both are) NAN.
// - treats really large numbers as almost equal to infinity.
// - thinks +0.0 and -0.0 are 0 DLP's apart.
inline bool AlmostEquals(float lhs, float rhs) {
// The IEEE standard says that any comparison operation involving
// a NAN must return false.
if (std::isnan(lhs) || std::isnan(rhs))
return false;
Float l = {lhs};
Float r = {rhs};
return DistanceBetweenSignAndMagnitudeNumbers(l.bits_, r.bits_) <= kMaxUlps;
}
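// A minimal usage sketch (illustrative):
//   AlmostEquals(1.0f, std::nextafter(1.0f, 2.0f)); // true, 1 ULP apart
//   AlmostEquals(1.0f, 1.0f + 1e-3f); // false, thousands of ULPs apart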
| 4,741
| 38.516667
| 93
|
h
|
null |
pytorch-main/test/cpp/tensorexpr/padded_buffer.h
|
#pragma once
#include <string>
#include <vector>
#include <c10/util/irange.h>
#include "torch/csrc/jit/tensorexpr/eval.h"
namespace torch {
namespace jit {
namespace tensorexpr {
template <typename T>
struct DefaultPaddedValue;
template <>
struct DefaultPaddedValue<int> {
static const int kValue = static_cast<int>(0xDEADBEEF);
};
template <>
struct DefaultPaddedValue<int8_t> {
static const int8_t kValue = static_cast<int8_t>(0xBE);
};
template <>
struct DefaultPaddedValue<uint8_t> {
static const uint8_t kValue = static_cast<uint8_t>(0xBE);
};
template <>
struct DefaultPaddedValue<int16_t> {
static const int16_t kValue = static_cast<int16_t>(0xBEEF);
};
template <>
struct DefaultPaddedValue<int64_t> {
static const int64_t kValue = static_cast<int64_t>(0xDEADBEEF);
};
template <>
struct DefaultPaddedValue<float> {
static constexpr float kValue = 0.1357;
};
template <>
struct DefaultPaddedValue<at::Half> {
// at::Half ctor isn't constexpr, so just fill it with bits.
static constexpr uint16_t kValue = 1357;
};
template <>
struct DefaultPaddedValue<double> {
static constexpr double kValue = 0.1357;
};
// A concrete base to be used in PaddedBase.
class PaddedBufferBase {
public:
const std::string& name() const {
return name_;
}
int size() const {
return total_size_;
}
int raw_size() const {
return total_size_ + 2 * kPaddingSize;
}
virtual ~PaddedBufferBase() {}
protected:
explicit PaddedBufferBase(
const std::vector<int>& dims,
const std::string& name);
int Index(const std::vector<int>& indices) const;
std::vector<int> dims_;
std::string name_;
std::vector<int> strides_;
  int total_size_; // total number of useful elements; does not include the
                   // padding
static constexpr int kPaddingSize = 64;
};
// A padded buffer with watermarks for testing.
// The buffer carries padded watermarks on both sides to catch potential
// out-of-bounds writes. For read-only data that are not supposed to change, it
// can also make a backup and be compared later.
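//
// A minimal usage sketch (illustrative):
//   PaddedBuffer<float> buf(16, "x");
//   buf.Backup(); // snapshot read-only data
//   // ... run the kernel under test against buf.data() ...
//   buf.ValidateWatermark(); // catches out-of-bounds writes
//   buf.CheckBackup(); // catches unexpected mutation of read-only data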
template <typename T>
class PaddedBuffer : public PaddedBufferBase {
public:
PaddedBuffer(int d0, const std::string& name = "")
: PaddedBuffer(std::vector<int>({d0}), name) {}
PaddedBuffer(int d0, int d1, const std::string& name = "")
: PaddedBuffer(std::vector<int>({d0, d1}), name) {}
PaddedBuffer(int d0, int d1, int d2, const std::string& name = "")
: PaddedBuffer(std::vector<int>({d0, d1, d2}), name) {}
PaddedBuffer(int d0, int d1, int d2, int d3, const std::string& name = "")
: PaddedBuffer(std::vector<int>({d0, d1, d2, d3}), name) {}
PaddedBuffer(const std::vector<int>& dims, const std::string& name = "")
: PaddedBufferBase(dims, name) {
data_.resize(total_size_ + 2 * kPaddingSize, kPaddingValue);
}
PaddedBuffer(const PaddedBuffer& other, const std::string& name)
: PaddedBuffer(other) {
this->name_ = name;
}
T* data() {
return data_.data() + kPaddingSize;
}
const T* data() const {
return const_cast<PaddedBuffer*>(this)->data();
}
T* raw_data() {
return data_.data();
}
const T* raw_data() const {
return const_cast<PaddedBuffer*>(this)->raw_data();
}
T& operator()(int i0) {
    // There is a slight performance impact from forming a vector here. But
    // this data structure is for testing only, and not performance critical.
return this->operator()(std::vector<int>({i0}));
}
const T& operator()(int i0) const {
return const_cast<PaddedBuffer*>(this)->operator()(i0);
}
T& operator()(int i0, int i1) {
return this->operator()(std::vector<int>({i0, i1}));
}
const T& operator()(int i0, int i1) const {
return const_cast<PaddedBuffer*>(this)->operator()(i0, i1);
}
T& operator()(int i0, int i1, int i2) {
return this->operator()(std::vector<int>({i0, i1, i2}));
}
const T& operator()(int i0, int i1, int i2) const {
return const_cast<PaddedBuffer*>(this)->operator()(i0, i1, i2);
}
T& operator()(int i0, int i1, int i2, int i3) {
return this->operator()(std::vector<int>({i0, i1, i2, i3}));
}
const T& operator()(int i0, int i1, int i2, int i3) const {
return const_cast<PaddedBuffer*>(this)->operator()(i0, i1, i2, i3);
}
T& operator()(const std::vector<int>& indices) {
return data_[kPaddingSize + Index(indices)];
}
const T& operator()(const std::vector<int>& indices) const {
return const_cast<PaddedBuffer*>(this)->operator()(indices);
}
template <typename U>
friend void ExpectAllNear(
const PaddedBuffer<U>& v1,
const PaddedBuffer<U>& v2,
float abs_error);
template <typename U>
friend void ExpectAllEqual(
const PaddedBuffer<U>& v1,
const PaddedBuffer<U>& v2);
void Backup() {
backup_data_ = data_;
}
// Verify the watermarks in the paddings are intact.
void ValidateWatermark() const {
for (const auto i : c10::irange(kPaddingSize)) {
ASSERT_EQ(data_[i], kPaddingValue);
ASSERT_EQ(data_[i + total_size_ + kPaddingSize], kPaddingValue);
}
}
void CheckBackup() const {
ValidateWatermark();
DCHECK(backup_data_.size() == data_.size())
<< "Please make sure you have call Backup() before calling CheckBackup()";
for (const auto i : c10::irange(total_size_)) {
ASSERT_EQ(data_[i + kPaddingSize], backup_data_[i + kPaddingSize]);
}
}
private:
std::vector<T> data_;
std::vector<T> backup_data_;
T kPaddingValue = DefaultPaddedValue<T>::kValue;
};
template <typename T>
inline CodeGen::CallArg::CallArg(const PaddedBuffer<T>& buffer)
: data_(const_cast<T*>(buffer.data())) {}
template <typename T>
std::string CompareErrorMsg(
const PaddedBuffer<T>& v1,
const PaddedBuffer<T>& v2,
int index) {
std::ostringstream oss;
oss << "index: " << index << ", v1: (" << v1.name() << ", " << v1(index)
<< ")"
<< ", v2: (" << v2.name() << ", " << v2(index) << ")";
return oss.str();
}
template <typename T>
void ExpectAllEqual(const PaddedBuffer<T>& f1, const PaddedBuffer<T>& f2) {
const std::vector<T>& v1 = f1.data_;
const std::vector<T>& v2 = f2.data_;
const int kPaddingSize = f1.kPaddingSize;
const int total_size = f1.total_size_;
ASSERT_EQ(v1.size(), v2.size());
f1.ValidateWatermark();
f2.ValidateWatermark();
for (const auto i : c10::irange(total_size)) {
ASSERT_EQ(v1[kPaddingSize + i], v2[kPaddingSize + i]);
}
}
template <typename T>
void ExpectAllNear(
const PaddedBuffer<T>& f1,
const PaddedBuffer<T>& f2,
float abs_error) {
const std::vector<T>& v1 = f1.data_;
const std::vector<T>& v2 = f2.data_;
const int kPaddingSize = f1.kPaddingSize;
const int total_size = f1.total_size_;
ASSERT_EQ(v1.size(), v2.size());
f1.ValidateWatermark();
f2.ValidateWatermark();
for (const auto i : c10::irange(total_size)) {
ASSERT_NEAR(v1[kPaddingSize + i], v2[kPaddingSize + i], abs_error);
}
}
} // namespace tensorexpr
} // namespace jit
} // namespace torch
| 7,034
| 27.950617
| 82
|
h
|
null |
pytorch-main/test/cpp/tensorexpr/test_base.h
|
#pragma once
#if defined(USE_GTEST)
#include <gtest/gtest.h>
#include <test/cpp/common/support.h>
#else
#include <cmath>
#include "c10/util/Exception.h"
#include "test/cpp/tensorexpr/gtest_assert_float_eq.h"
#define ASSERT_EQ(x, y, ...) TORCH_INTERNAL_ASSERT((x) == (y), __VA_ARGS__)
#define ASSERT_FLOAT_EQ(x, y, ...) \
TORCH_INTERNAL_ASSERT(AlmostEquals((x), (y)), __VA_ARGS__)
#define ASSERT_NE(x, y, ...) TORCH_INTERNAL_ASSERT((x) != (y), __VA_ARGS__)
#define ASSERT_GT(x, y, ...) TORCH_INTERNAL_ASSERT((x) > (y), __VA_ARGS__)
#define ASSERT_GE(x, y, ...) TORCH_INTERNAL_ASSERT((x) >= (y), __VA_ARGS__)
#define ASSERT_LT(x, y, ...) TORCH_INTERNAL_ASSERT((x) < (y), __VA_ARGS__)
#define ASSERT_LE(x, y, ...) TORCH_INTERNAL_ASSERT((x) <= (y), __VA_ARGS__)
#define ASSERT_NEAR(x, y, a, ...) \
TORCH_INTERNAL_ASSERT(std::fabs((x) - (y)) < (a), __VA_ARGS__)
#define ASSERT_TRUE TORCH_INTERNAL_ASSERT
#define ASSERT_FALSE(x) ASSERT_TRUE(!(x))
#define ASSERT_THROWS_WITH(statement, substring) \
try { \
(void)statement; \
ASSERT_TRUE(false); \
} catch (const std::exception& e) { \
ASSERT_NE(std::string(e.what()).find(substring), std::string::npos); \
}
#define ASSERT_ANY_THROW(statement) \
{ \
bool threw = false; \
try { \
(void)statement; \
} catch (const std::exception& e) { \
threw = true; \
} \
ASSERT_TRUE(threw); \
}
#endif // defined(USE_GTEST)
namespace torch {
namespace jit {
namespace tensorexpr {
template <typename U, typename V>
void ExpectAllNear(
const std::vector<U>& v1,
const std::vector<U>& v2,
V threshold,
const std::string& name = "") {
ASSERT_EQ(v1.size(), v2.size());
for (size_t i = 0; i < v1.size(); i++) {
ASSERT_NEAR(v1[i], v2[i], threshold);
}
}
template <typename U, typename V>
void ExpectAllNear(
const std::vector<U>& vec,
const U& val,
V threshold,
const std::string& name = "") {
for (size_t i = 0; i < vec.size(); i++) {
ASSERT_NEAR(vec[i], val, threshold);
}
}
template <typename T>
static void assertAllEqual(const std::vector<T>& vec, const T& val) {
for (auto const& elt : vec) {
ASSERT_EQ(elt, val);
}
}
template <typename T>
static void assertAllEqual(const std::vector<T>& v1, const std::vector<T>& v2) {
ASSERT_EQ(v1.size(), v2.size());
for (size_t i = 0; i < v1.size(); ++i) {
ASSERT_EQ(v1[i], v2[i]);
}
}
} // namespace tensorexpr
} // namespace jit
} // namespace torch
| 2,853
| 31.431818
| 80
|
h
|
null |
pytorch-main/test/cpp/tensorexpr/test_utils.h
|
#pragma once
#include <memory>
#include <vector>
#include <test/cpp/tensorexpr/test_base.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/testing/file_check.h>
namespace torch {
namespace jit {
using namespace torch::jit::tensorexpr;
#define IS_NODE(T, node) \
{ \
auto node_ = to<T>(node); \
ASSERT_NE(nullptr, node_); \
}
#define IS_NODE_WITH_NAME(T, node, name) \
auto name = to<T>(node); \
ASSERT_NE(nullptr, name);
#define IS_NODE_WITH_NAME_AND_CAST(T, node, name, Type) \
NodePtr<T> name = nullptr; \
{ \
auto node_ = to<Cast>(node); \
ASSERT_NE(nullptr, node_); \
ASSERT_EQ(node_->dtype().scalar_type(), ScalarType::Type); \
name = to<T>(node_->src_value()); \
} \
ASSERT_NE(nullptr, name);
#define IS_IMM_WITH_VAL(T, node, val) \
{ \
auto node_ = to<T##Imm>(node); \
ASSERT_NE(nullptr, node_); \
ASSERT_EQ(node_->value(), val); \
}
#define IS_VAR_WITH_NAME(node, name) \
{ \
auto node_ = to<Var>(node); \
ASSERT_NE(nullptr, node_); \
ASSERT_EQ(node_->name_hint(), name); \
}
#define IS_BINOP_W_VARS(T, node, name, v1, v2) \
NodePtr<T> name = nullptr; \
{ \
name = to<T>(node); \
ASSERT_NE(nullptr, name); \
IS_VAR_WITH_NAME(name->lhs(), v1); \
IS_VAR_WITH_NAME(name->rhs(), v2); \
}
#define IS_BINOP_W_CONST(T, node, name, v, c) \
NodePtr<T> name = nullptr; \
{ \
name = to<T>(node); \
ASSERT_NE(nullptr, name); \
IS_VAR_WITH_NAME(name->lhs(), v); \
IS_IMM_WITH_VAL(Int, name->rhs(), c); \
}
#define IS_RAND(node) \
{ \
auto node_ = to<Intrinsics>(node); \
ASSERT_NE(nullptr, node_); \
ASSERT_EQ(node_->op_type(), kRand); \
}
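// A minimal usage sketch (illustrative; assumes `stmt` simplifies to a
// single For loop over a variable named "i"):
//   IS_NODE_WITH_NAME(For, stmt, loop); // binds `loop` as NodePtr<For>
//   IS_VAR_WITH_NAME(loop->var(), "i");
//   IS_IMM_WITH_VAL(Int, loop->stop(), 16);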
void checkIR(StmtPtr s, const std::string& pattern);
void checkExprIR(ExprPtr e, const std::string& pattern);
void checkExprIR(const ExprHandle& e, const std::string& pattern);
} // namespace jit
} // namespace torch
| 2,614
| 32.101266
| 66
|
h
|
null |
pytorch-main/test/custom_backend/custom_backend.h
|
#include <torch/csrc/jit/backends/backend.h>
#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/csrc/jit/api/module.h>
namespace torch {
namespace custom_backend {
// This custom JIT backend is intended to do the minimal amount of work
// necessary to test that the JIT backend registration endpoints and
// code generation are working correctly. It is not intended to
// produce numerically correct results.
class CustomBackend : public torch::jit::PyTorchBackendInterface {
public:
// Constructor.
explicit CustomBackend() {}
virtual ~CustomBackend() = default;
bool is_available() override {
return true;
}
c10::impl::GenericDict compile(
c10::IValue processed,
c10::impl::GenericDict method_compile_spec) override {
auto spec =
c10::impl::toTypedDict<std::string, at::IValue>(method_compile_spec);
// Return the same string as a value for every key in method_compile_spec.
auto handles = c10::Dict<std::string, std::string>();
for (auto it = spec.begin(), end = spec.end(); it != end; ++it) {
handles.insert(it->key(), it->key());
}
return c10::impl::toGenericDict(handles);
}
c10::impl::GenericList execute(
c10::IValue handle,
c10::impl::GenericList inputs) override {
TORCH_INTERNAL_ASSERT(handle.isString());
TORCH_INTERNAL_ASSERT(inputs.size() > 0);
c10::List<at::Tensor> output_list;
    // Implement a simple accumulating op and a subtracting ("negative
    // accumulator") op. Return one or both of the results depending on the
    // handle to make sure multiple outputs are handled.
c10::IValue value = inputs[0];
at::Tensor accum = value.toTensor();
accum = accum.clone();
at::Tensor sub_accum = value.toTensor();
sub_accum = sub_accum.clone();
for (size_t i = 1, e = inputs.size(); i < e; ++i) {
value = inputs[i];
accum.add_(value.toTensor(), 1.0);
sub_accum.sub_(value.toTensor(), 1.0);
}
if (handle.toStringRef() == "accum") {
output_list.emplace_back(accum);
} else if (handle.toStringRef() == "sub_accum") {
output_list.emplace_back(sub_accum);
} else if (handle.toStringRef() == "forward") {
output_list.emplace_back(accum);
output_list.emplace_back(sub_accum);
}
return c10::impl::toList(output_list);
}
};
c10::IValue preprocess(
const torch::jit::Module& mod,
const c10::Dict<c10::IValue, c10::IValue>& method_compile_spec,
const torch::jit::BackendDebugHandleGenerator& generate_debug_handles) {
return mod._ivalue();
}
// clang-format off
# if defined(_WIN32)
# if defined(custom_ops_EXPORTS)
# define CUSTOM_BACKEND_API __declspec(dllexport)
# else
# define CUSTOM_BACKEND_API __declspec(dllimport)
# endif
# else
# define CUSTOM_BACKEND_API
# endif
// clang-format on
CUSTOM_BACKEND_API std::string getBackendName();
} // namespace custom_backend
} // namespace torch
| 2,927
| 30.826087
| 80
|
h
|
null |
pytorch-main/test/custom_operator/op.h
|
#include <torch/script.h>
#include <cstddef>
#include <vector>
#include <string>
// clang-format off
# if defined(_WIN32)
# if defined(custom_ops_EXPORTS)
# define CUSTOM_OP_API __declspec(dllexport)
# else
# define CUSTOM_OP_API __declspec(dllimport)
# endif
# else
# define CUSTOM_OP_API
# endif
// clang-format on
CUSTOM_OP_API torch::List<torch::Tensor> custom_op(
torch::Tensor tensor,
double scalar,
int64_t repeat);
CUSTOM_OP_API int64_t custom_op2(std::string s1, std::string s2);
| 530
| 20.24
| 65
|
h
|
null |
pytorch-main/test/edge/Evalue.h
|
#pragma once
#include <ATen/ATen.h>
/**
* WARNING: EValue is a class used by Executorch, for its boxed operators. It
* contains similar logic as `IValue` in PyTorch, by providing APIs to convert
* boxed values to unboxed values.
*
* It's mirroring a fbcode internal source file
* [`EValue.h`](https://www.internalfb.com/code/fbsource/xplat/executorch/core/values/Evalue.h).
*
 * The reason we are mirroring this class is to make sure we have CI job
* coverage on torchgen logic, given that torchgen is used for both Executorch
* and PyTorch.
*
* If any of the logic here needs to be changed, please update fbcode version of
* `Evalue.h` as well. These two versions will be merged as soon as Executorch
* is in OSS (hopefully by Q2 2023).
*/
namespace torch {
namespace executor {
#define ET_CHECK_MSG TORCH_CHECK_MSG
#define EXECUTORCH_FORALL_TAGS(_) \
_(None) \
_(Tensor) \
_(String) \
_(Double) \
_(Int) \
_(Bool) \
_(ListBool) \
_(ListDouble) \
_(ListInt) \
_(ListTensor) \
_(ListScalar) \
_(ListOptionalTensor)
enum class Tag : uint32_t {
#define DEFINE_TAG(x) x,
EXECUTORCH_FORALL_TAGS(DEFINE_TAG)
#undef DEFINE_TAG
};
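// The X-macro above stamps out one enumerator per tag, so the enum expands
// to, in declaration order: None, Tensor, String, Double, Int, Bool,
// ListBool, ListDouble, ListInt, ListTensor, ListScalar, ListOptionalTensor.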
struct EValue;
template <typename T>
struct evalue_to_const_ref_overload_return {
using type = T;
};
template <>
struct evalue_to_const_ref_overload_return<at::Tensor> {
using type = const at::Tensor&;
};
template <typename T>
struct evalue_to_ref_overload_return {
using type = T;
};
template <>
struct evalue_to_ref_overload_return<at::Tensor> {
using type = at::Tensor&;
};
/*
* Helper class used to correlate EValues in the executor table, with the
* unwrapped list of the proper type. Because values in the runtime's values
 * table can change during execution, we cannot statically allocate a list of
* objects at deserialization. Imagine the serialized list says index 0 in the
* value table is element 2 in the list, but during execution the value in
* element 2 changes (in the case of tensor this means the TensorImpl* stored in
* the tensor changes). To solve this instead they must be created dynamically
* whenever they are used.
*/
template <typename T>
class EValObjectList {
public:
EValObjectList() = default;
/*
 * wrapped_vals is a list of pointers into the runtime's values table whose
 * destinations correlate with the elements of the list; unwrapped_vals is a
 * container of the same size that serves as memory to construct the
 * unwrapped vals.
*/
EValObjectList(EValue** wrapped_vals, T* unwrapped_vals, int size)
: wrapped_vals_(wrapped_vals, size), unwrapped_vals_(unwrapped_vals) {}
/*
* Constructs and returns the list of T specified by the EValue pointers
*/
at::ArrayRef<T> get() const;
private:
// Source of truth for the list
at::ArrayRef<EValue*> wrapped_vals_;
// Same size as wrapped_vals
mutable T* unwrapped_vals_;
};
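/*
 * For illustration only -- a minimal usage sketch. The names below
 * (`values`, `scratch`) are hypothetical and not part of the runtime API:
 *
 *   EValue* slots[2] = {&values[3], &values[7]}; // pointers into values table
 *   at::Tensor scratch[2];                       // backing memory for get()
 *   EValObjectList<at::Tensor> list(slots, scratch, 2);
 *   at::ArrayRef<at::Tensor> tensors = list.get(); // unwraps current values
 */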
// Aggregate typing system similar to IValue, only slimmed down with less
// functionality, no dependency on <atomic>, and fewer supported types to
// better suit embedded systems (i.e. no intrusive_ptr)
struct EValue {
union Payload {
    // When in ATen mode, at::Tensor is not trivially copyable; this nested
    // union lets us handle tensor as a special case while leaving the rest of
    // the fields in a simple state instead of requiring a switch on tag
    // everywhere.
union TriviallyCopyablePayload {
TriviallyCopyablePayload() : as_int(0) {}
// Scalar supported through these 3 types
int64_t as_int;
double as_double;
bool as_bool;
// TODO(jakeszwe): convert back to pointers to optimize size of this
// struct
at::ArrayRef<char> as_string;
at::ArrayRef<int64_t> as_int_list;
at::ArrayRef<double> as_double_list;
at::ArrayRef<bool> as_bool_list;
EValObjectList<at::Tensor> as_tensor_list;
EValObjectList<at::optional<at::Tensor>> as_list_optional_tensor;
} copyable_union;
    // Since a Tensor just holds a TensorImpl*, there's no value in using
    // Tensor* here.
at::Tensor as_tensor;
Payload() {}
~Payload() {}
};
// Data storage and type tag
Payload payload;
Tag tag;
// Basic ctors and assignments
EValue(const EValue& rhs) : EValue(rhs.payload, rhs.tag) {}
EValue(EValue&& rhs) noexcept : tag(rhs.tag) {
moveFrom(std::move(rhs));
}
EValue& operator=(EValue&& rhs) & noexcept {
if (&rhs == this) {
return *this;
}
destroy();
moveFrom(std::move(rhs));
return *this;
}
EValue& operator=(EValue const& rhs) & {
// Define copy assignment through copy ctor and move assignment
*this = EValue(rhs);
return *this;
}
~EValue() {
destroy();
}
/****** None Type ******/
EValue() : tag(Tag::None) {
payload.copyable_union.as_int = 0;
}
bool isNone() const {
return tag == Tag::None;
}
/****** Int Type ******/
/*implicit*/ EValue(int64_t i) : tag(Tag::Int) {
payload.copyable_union.as_int = i;
}
bool isInt() const {
return tag == Tag::Int;
}
int64_t toInt() const {
ET_CHECK_MSG(isInt(), "EValue is not an int.");
return payload.copyable_union.as_int;
}
/****** Double Type ******/
/*implicit*/ EValue(double d) : tag(Tag::Double) {
payload.copyable_union.as_double = d;
}
bool isDouble() const {
return tag == Tag::Double;
}
double toDouble() const {
ET_CHECK_MSG(isDouble(), "EValue is not a Double.");
return payload.copyable_union.as_double;
}
/****** Bool Type ******/
/*implicit*/ EValue(bool b) : tag(Tag::Bool) {
payload.copyable_union.as_bool = b;
}
bool isBool() const {
return tag == Tag::Bool;
}
bool toBool() const {
ET_CHECK_MSG(isBool(), "EValue is not a Bool.");
return payload.copyable_union.as_bool;
}
/****** Scalar Type ******/
/// Construct an EValue using the implicit value of a Scalar.
/*implicit*/ EValue(at::Scalar s) {
if (s.isIntegral(false)) {
tag = Tag::Int;
payload.copyable_union.as_int = s.to<int64_t>();
} else if (s.isFloatingPoint()) {
tag = Tag::Double;
payload.copyable_union.as_double = s.to<double>();
} else if (s.isBoolean()) {
tag = Tag::Bool;
payload.copyable_union.as_bool = s.to<bool>();
} else {
ET_CHECK_MSG(false, "Scalar passed to EValue is not initialized.");
}
}
bool isScalar() const {
return tag == Tag::Int || tag == Tag::Double || tag == Tag::Bool;
}
at::Scalar toScalar() const {
// Convert from implicit value to Scalar using implicit constructors.
if (isDouble()) {
return toDouble();
} else if (isInt()) {
return toInt();
} else if (isBool()) {
return toBool();
} else {
ET_CHECK_MSG(false, "EValue is not a Scalar.");
return c10::Scalar();
}
}
/****** Tensor Type ******/
/*implicit*/ EValue(at::Tensor t) : tag(Tag::Tensor) {
    // When built in ATen mode, at::Tensor has a non-trivial constructor and
    // destructor, so regular assignment to a union field is UB. Instead we
    // must go through placement new (which causes a refcount bump).
new (&payload.as_tensor) at::Tensor(t);
}
bool isTensor() const {
return tag == Tag::Tensor;
}
at::Tensor toTensor() && {
ET_CHECK_MSG(isTensor(), "EValue is not a Tensor.");
return std::move(payload.as_tensor);
}
at::Tensor& toTensor() & {
ET_CHECK_MSG(isTensor(), "EValue is not a Tensor.");
return payload.as_tensor;
}
const at::Tensor& toTensor() const& {
ET_CHECK_MSG(isTensor(), "EValue is not a Tensor.");
return payload.as_tensor;
}
/****** String Type ******/
/*implicit*/ EValue(const char* s, size_t size) : tag(Tag::String) {
payload.copyable_union.as_string = at::ArrayRef<char>(s, size);
}
bool isString() const {
return tag == Tag::String;
}
at::string_view toString() const {
ET_CHECK_MSG(isString(), "EValue is not a String.");
return at::string_view(
payload.copyable_union.as_string.data(),
payload.copyable_union.as_string.size());
}
/****** Int List Type ******/
/*implicit*/ EValue(at::ArrayRef<int64_t> i) : tag(Tag::ListInt) {
payload.copyable_union.as_int_list = i;
}
bool isIntList() const {
return tag == Tag::ListInt;
}
at::ArrayRef<int64_t> toIntList() const {
ET_CHECK_MSG(isIntList(), "EValue is not an Int List.");
return payload.copyable_union.as_int_list;
}
/****** Bool List Type ******/
/*implicit*/ EValue(at::ArrayRef<bool> b) : tag(Tag::ListBool) {
payload.copyable_union.as_bool_list = b;
}
bool isBoolList() const {
return tag == Tag::ListBool;
}
at::ArrayRef<bool> toBoolList() const {
ET_CHECK_MSG(isBoolList(), "EValue is not a Bool List.");
return payload.copyable_union.as_bool_list;
}
/****** Double List Type ******/
/*implicit*/ EValue(at::ArrayRef<double> d) : tag(Tag::ListDouble) {
payload.copyable_union.as_double_list = d;
}
bool isDoubleList() const {
return tag == Tag::ListDouble;
}
at::ArrayRef<double> toDoubleList() const {
ET_CHECK_MSG(isDoubleList(), "EValue is not a Double List.");
return payload.copyable_union.as_double_list;
}
/****** Tensor List Type ******/
/*implicit*/ EValue(EValObjectList<at::Tensor> t) : tag(Tag::ListTensor) {
payload.copyable_union.as_tensor_list = t;
}
bool isTensorList() const {
return tag == Tag::ListTensor;
}
at::ArrayRef<at::Tensor> toTensorList() const {
ET_CHECK_MSG(isTensorList(), "EValue is not a Tensor List.");
return payload.copyable_union.as_tensor_list.get();
}
/****** List Optional Tensor Type ******/
/*implicit*/ EValue(EValObjectList<at::optional<at::Tensor>> t)
: tag(Tag::ListOptionalTensor) {
payload.copyable_union.as_list_optional_tensor = t;
}
bool isListOptionalTensor() const {
return tag == Tag::ListOptionalTensor;
}
  at::ArrayRef<at::optional<at::Tensor>> toListOptionalTensor() {
    ET_CHECK_MSG(
        isListOptionalTensor(), "EValue is not a List of Optional Tensors.");
    return payload.copyable_union.as_list_optional_tensor.get();
  }
/****** ScalarType Type ******/
at::ScalarType toScalarType() const {
ET_CHECK_MSG(isInt(), "EValue is not a ScalarType.");
return static_cast<at::ScalarType>(payload.copyable_union.as_int);
}
/****** MemoryFormat Type ******/
at::MemoryFormat toMemoryFormat() const {
ET_CHECK_MSG(isInt(), "EValue is not a MemoryFormat.");
return static_cast<at::MemoryFormat>(payload.copyable_union.as_int);
}
template <typename T>
T to() &&;
template <typename T>
typename evalue_to_ref_overload_return<T>::type to() &;
/**
* Converts the EValue to an optional object that can represent both T and
* an uninitialized state.
*/
template <typename T>
inline at::optional<T> toOptional() {
if (this->isNone()) {
return at::nullopt;
}
return this->to<T>();
}
private:
  // Precondition: the payload value has had its destructor called
void clearToNone() noexcept {
payload.copyable_union.as_int = 0;
tag = Tag::None;
}
// Shared move logic
void moveFrom(EValue&& rhs) noexcept {
if (rhs.isTensor()) {
new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor));
rhs.payload.as_tensor.~Tensor();
} else {
payload.copyable_union = rhs.payload.copyable_union;
}
tag = rhs.tag;
rhs.clearToNone();
}
// Destructs stored tensor if there is one
void destroy() {
    // Necessary in ATen mode so the Tensor destructor decrements the refcount
    // on the TensorImpl intrusive_ptr that was incremented when we placed it
    // in the EValue. This is a no-op for an Executorch tensor; guarding it
    // with an #ifdef could buy a minor performance bump at a code
    // maintainability cost.
if (isTensor()) {
payload.as_tensor.~Tensor();
} else if (isTensorList()) {
for (auto& tensor : toTensorList()) {
tensor.~Tensor();
}
} else if (isListOptionalTensor()) {
for (auto& optional_tensor : toListOptionalTensor()) {
optional_tensor.~optional();
}
}
}
EValue(const Payload& p, Tag t) : tag(t) {
if (isTensor()) {
new (&payload.as_tensor) at::Tensor(p.as_tensor);
} else {
payload.copyable_union = p.copyable_union;
}
}
};
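// For illustration only -- a minimal usage sketch of the tagged union
// (the particular values below are hypothetical):
//
//   EValue i((int64_t)42);
//   ET_CHECK_MSG(i.isInt(), "expected an int");
//   int64_t x = i.toInt();                      // 42
//
//   EValue t(at::ones({2, 2}));
//   at::Tensor& ref = t.toTensor();             // borrows the stored tensor
//
//   EValue none;                                // Tag::None
//   at::optional<int64_t> opt = none.toOptional<int64_t>(); // at::nullopt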
#define EVALUE_DEFINE_TO(T, method_name) \
template <> \
inline evalue_to_ref_overload_return<T>::type EValue::to<T>()& { \
return static_cast<T>(this->method_name()); \
}
template <>
inline at::Tensor& EValue::to<at::Tensor>() & {
return this->toTensor();
}
EVALUE_DEFINE_TO(at::Scalar, toScalar)
EVALUE_DEFINE_TO(int64_t, toInt)
EVALUE_DEFINE_TO(bool, toBool)
EVALUE_DEFINE_TO(double, toDouble)
EVALUE_DEFINE_TO(at::string_view, toString)
EVALUE_DEFINE_TO(at::ScalarType, toScalarType)
EVALUE_DEFINE_TO(at::MemoryFormat, toMemoryFormat)
EVALUE_DEFINE_TO(at::optional<at::Tensor>, toOptional<at::Tensor>)
EVALUE_DEFINE_TO(at::ArrayRef<int64_t>, toIntList)
EVALUE_DEFINE_TO(
at::optional<at::ArrayRef<int64_t>>,
toOptional<at::ArrayRef<int64_t>>)
EVALUE_DEFINE_TO(
at::optional<at::ArrayRef<double>>,
toOptional<at::ArrayRef<double>>)
EVALUE_DEFINE_TO(at::ArrayRef<at::optional<at::Tensor>>, toListOptionalTensor)
EVALUE_DEFINE_TO(at::ArrayRef<double>, toDoubleList)
#undef EVALUE_DEFINE_TO
template <typename T>
at::ArrayRef<T> EValObjectList<T>::get() const {
for (size_t i = 0; i < wrapped_vals_.size(); i++) {
unwrapped_vals_[i] = wrapped_vals_[i]->template to<T>();
}
return at::ArrayRef<T>{unwrapped_vals_, wrapped_vals_.size()};
}
} // namespace executor
} // namespace torch
| 13,932
| 28.027083
| 96
|
h
|
null |
pytorch-main/test/edge/kernel_runtime_context.h
|
#pragma once
namespace torch {
namespace executor {
/**
* Bucket type abstraction that contains many elements of runtime state that
* a kernel author may want available, but would otherwise be unable to access.
*
* Forwarded along to all operators when running in lean mode. NOTE: Will not be
* forwarded to operators if running in ATen mode as those operators do not
* expect to receive a KernelRuntimeContext and would not use it.
*
 * This includes things like setting an error state, a scratch allocator for
 * operators that need more than constant space, and a TensorResizer for
 * dynamic-shape tensors, allowing programs to be more flexible with Tensor
 * shape.
*/
class KernelRuntimeContext {};
} // namespace executor
} // namespace torch
| 757
| 33.454545
| 80
|
h
|
null |
pytorch-main/test/edge/templates/Functions.h
|
// clang-format off
#pragma once
#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
// ${generated_comment}
${static_dispatch_extra_headers}
namespace torch {
namespace executor {
${Functions_declarations}
} // namespace executor
} // namespace torch
| 574
| 21.115385
| 35
|
h
|
null |
pytorch-main/test/edge/templates/NativeFunctions.h
|
#pragma once
// ${generated_comment}
#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>
${nativeFunctions_declarations}
| 1,131
| 34.375
| 83
|
h
|
null |
pytorch-main/third_party/miniz-2.1.0/examples/example1.c
|
// example1.c - Demonstrates miniz.c's compress() and uncompress() functions (same as zlib's).
// Public domain, May 15 2011, Rich Geldreich, richgel99@gmail.com. See "unlicense" statement at the end of tinfl.c.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "miniz.h"
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint;
// The string to compress.
static const char *s_pStr = "Good morning Dr. Chandra. This is Hal. I am ready for my first lesson." \
"Good morning Dr. Chandra. This is Hal. I am ready for my first lesson." \
"Good morning Dr. Chandra. This is Hal. I am ready for my first lesson." \
"Good morning Dr. Chandra. This is Hal. I am ready for my first lesson." \
"Good morning Dr. Chandra. This is Hal. I am ready for my first lesson." \
"Good morning Dr. Chandra. This is Hal. I am ready for my first lesson." \
"Good morning Dr. Chandra. This is Hal. I am ready for my first lesson.";
int main(int argc, char *argv[])
{
uint step = 0;
int cmp_status;
uLong src_len = (uLong)strlen(s_pStr);
uLong cmp_len = compressBound(src_len);
uLong uncomp_len = src_len;
uint8 *pCmp, *pUncomp;
uint total_succeeded = 0;
(void)argc, (void)argv;
printf("miniz.c version: %s\n", MZ_VERSION);
do
{
// Allocate buffers to hold compressed and uncompressed data.
pCmp = (mz_uint8 *)malloc((size_t)cmp_len);
pUncomp = (mz_uint8 *)malloc((size_t)src_len);
if ((!pCmp) || (!pUncomp))
{
printf("Out of memory!\n");
return EXIT_FAILURE;
}
// Compress the string.
cmp_status = compress(pCmp, &cmp_len, (const unsigned char *)s_pStr, src_len);
if (cmp_status != Z_OK)
{
printf("compress() failed!\n");
free(pCmp);
free(pUncomp);
return EXIT_FAILURE;
}
printf("Compressed from %u to %u bytes\n", (mz_uint32)src_len, (mz_uint32)cmp_len);
if (step)
{
// Purposely corrupt the compressed data if fuzzy testing (this is a very crude fuzzy test).
uint n = 1 + (rand() % 3);
while (n--)
{
uint i = rand() % cmp_len;
pCmp[i] ^= (rand() & 0xFF);
}
}
// Decompress.
cmp_status = uncompress(pUncomp, &uncomp_len, pCmp, cmp_len);
total_succeeded += (cmp_status == Z_OK);
if (step)
{
printf("Simple fuzzy test: step %u total_succeeded: %u\n", step, total_succeeded);
}
else
{
if (cmp_status != Z_OK)
{
printf("uncompress failed!\n");
free(pCmp);
free(pUncomp);
return EXIT_FAILURE;
}
printf("Decompressed from %u to %u bytes\n", (mz_uint32)cmp_len, (mz_uint32)uncomp_len);
// Ensure uncompress() returned the expected data.
if ((uncomp_len != src_len) || (memcmp(pUncomp, s_pStr, (size_t)src_len)))
{
printf("Decompression failed!\n");
free(pCmp);
free(pUncomp);
return EXIT_FAILURE;
}
}
free(pCmp);
free(pUncomp);
step++;
// Keep on fuzzy testing if there's a non-empty command line.
} while (argc >= 2);
printf("Success.\n");
return EXIT_SUCCESS;
}
| 3,109
| 28.339623
| 116
|
c
|
null |
pytorch-main/third_party/miniz-2.1.0/examples/example3.c
|
// example3.c - Demonstrates how to use miniz.c's deflate() and inflate() functions for simple file compression.
// Public domain, May 15 2011, Rich Geldreich, richgel99@gmail.com. See "unlicense" statement at the end of tinfl.c.
// For simplicity, this example is limited to files smaller than 4GB, but this is not a limitation of miniz.c.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "miniz.h"
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint;
#define my_max(a,b) (((a) > (b)) ? (a) : (b))
#define my_min(a,b) (((a) < (b)) ? (a) : (b))
#define BUF_SIZE (1024 * 1024)
static uint8 s_inbuf[BUF_SIZE];
static uint8 s_outbuf[BUF_SIZE];
int main(int argc, char *argv[])
{
const char *pMode;
FILE *pInfile, *pOutfile;
uint infile_size;
int level = Z_BEST_COMPRESSION;
z_stream stream;
int p = 1;
const char *pSrc_filename;
const char *pDst_filename;
long file_loc;
printf("miniz.c version: %s\n", MZ_VERSION);
if (argc < 4)
{
printf("Usage: example3 [options] [mode:c or d] infile outfile\n");
printf("\nModes:\n");
printf("c - Compresses file infile to a zlib stream in file outfile\n");
printf("d - Decompress zlib stream in file infile to file outfile\n");
printf("\nOptions:\n");
printf("-l[0-10] - Compression level, higher values are slower.\n");
return EXIT_FAILURE;
}
while ((p < argc) && (argv[p][0] == '-'))
{
switch (argv[p][1])
{
case 'l':
{
        level = atoi(&argv[p][2]); /* read the level from the current option, not argv[1] */
if ((level < 0) || (level > 10))
{
printf("Invalid level!\n");
return EXIT_FAILURE;
}
break;
}
default:
{
printf("Invalid option: %s\n", argv[p]);
return EXIT_FAILURE;
}
}
p++;
}
if ((argc - p) < 3)
{
printf("Must specify mode, input filename, and output filename after options!\n");
return EXIT_FAILURE;
}
else if ((argc - p) > 3)
{
printf("Too many filenames!\n");
return EXIT_FAILURE;
}
pMode = argv[p++];
if (!strchr("cCdD", pMode[0]))
{
printf("Invalid mode!\n");
return EXIT_FAILURE;
}
pSrc_filename = argv[p++];
pDst_filename = argv[p++];
printf("Mode: %c, Level: %u\nInput File: \"%s\"\nOutput File: \"%s\"\n", pMode[0], level, pSrc_filename, pDst_filename);
// Open input file.
pInfile = fopen(pSrc_filename, "rb");
if (!pInfile)
{
printf("Failed opening input file!\n");
return EXIT_FAILURE;
}
// Determine input file's size.
fseek(pInfile, 0, SEEK_END);
file_loc = ftell(pInfile);
fseek(pInfile, 0, SEEK_SET);
if ((file_loc < 0) || (file_loc > INT_MAX))
{
// This is not a limitation of miniz or tinfl, but this example.
printf("File is too large to be processed by this example.\n");
return EXIT_FAILURE;
}
infile_size = (uint)file_loc;
// Open output file.
pOutfile = fopen(pDst_filename, "wb");
if (!pOutfile)
{
printf("Failed opening output file!\n");
return EXIT_FAILURE;
}
printf("Input file size: %u\n", infile_size);
// Init the z_stream
memset(&stream, 0, sizeof(stream));
stream.next_in = s_inbuf;
stream.avail_in = 0;
stream.next_out = s_outbuf;
stream.avail_out = BUF_SIZE;
if ((pMode[0] == 'c') || (pMode[0] == 'C'))
{
// Compression.
uint infile_remaining = infile_size;
if (deflateInit(&stream, level) != Z_OK)
{
printf("deflateInit() failed!\n");
return EXIT_FAILURE;
}
for ( ; ; )
{
int status;
if (!stream.avail_in)
{
// Input buffer is empty, so read more bytes from input file.
uint n = my_min(BUF_SIZE, infile_remaining);
if (fread(s_inbuf, 1, n, pInfile) != n)
{
printf("Failed reading from input file!\n");
return EXIT_FAILURE;
}
stream.next_in = s_inbuf;
stream.avail_in = n;
infile_remaining -= n;
//printf("Input bytes remaining: %u\n", infile_remaining);
}
status = deflate(&stream, infile_remaining ? Z_NO_FLUSH : Z_FINISH);
if ((status == Z_STREAM_END) || (!stream.avail_out))
{
// Output buffer is full, or compression is done, so write buffer to output file.
uint n = BUF_SIZE - stream.avail_out;
if (fwrite(s_outbuf, 1, n, pOutfile) != n)
{
printf("Failed writing to output file!\n");
return EXIT_FAILURE;
}
stream.next_out = s_outbuf;
stream.avail_out = BUF_SIZE;
}
if (status == Z_STREAM_END)
break;
else if (status != Z_OK)
{
printf("deflate() failed with status %i!\n", status);
return EXIT_FAILURE;
}
}
if (deflateEnd(&stream) != Z_OK)
{
printf("deflateEnd() failed!\n");
return EXIT_FAILURE;
}
}
else if ((pMode[0] == 'd') || (pMode[0] == 'D'))
{
// Decompression.
uint infile_remaining = infile_size;
if (inflateInit(&stream))
{
printf("inflateInit() failed!\n");
return EXIT_FAILURE;
}
for ( ; ; )
{
int status;
if (!stream.avail_in)
{
// Input buffer is empty, so read more bytes from input file.
uint n = my_min(BUF_SIZE, infile_remaining);
if (fread(s_inbuf, 1, n, pInfile) != n)
{
printf("Failed reading from input file!\n");
return EXIT_FAILURE;
}
stream.next_in = s_inbuf;
stream.avail_in = n;
infile_remaining -= n;
}
status = inflate(&stream, Z_SYNC_FLUSH);
if ((status == Z_STREAM_END) || (!stream.avail_out))
{
// Output buffer is full, or decompression is done, so write buffer to output file.
uint n = BUF_SIZE - stream.avail_out;
if (fwrite(s_outbuf, 1, n, pOutfile) != n)
{
printf("Failed writing to output file!\n");
return EXIT_FAILURE;
}
stream.next_out = s_outbuf;
stream.avail_out = BUF_SIZE;
}
if (status == Z_STREAM_END)
break;
else if (status != Z_OK)
{
printf("inflate() failed with status %i!\n", status);
return EXIT_FAILURE;
}
}
if (inflateEnd(&stream) != Z_OK)
{
printf("inflateEnd() failed!\n");
return EXIT_FAILURE;
}
}
else
{
printf("Invalid mode!\n");
return EXIT_FAILURE;
}
fclose(pInfile);
if (EOF == fclose(pOutfile))
{
printf("Failed writing to output file!\n");
return EXIT_FAILURE;
}
printf("Total input bytes: %u\n", (mz_uint32)stream.total_in);
printf("Total output bytes: %u\n", (mz_uint32)stream.total_out);
printf("Success.\n");
return EXIT_SUCCESS;
}
| 6,726
| 23.914815
| 122
|
c
|
null |
pytorch-main/third_party/miniz-2.1.0/examples/example4.c
|
// example4.c - Uses tinfl.c to decompress a zlib stream in memory to an output file
// Public domain, May 15 2011, Rich Geldreich, richgel99@gmail.com. See "unlicense" statement at the end of tinfl.c.
#include "miniz_tinfl.h"
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint;
#define my_max(a,b) (((a) > (b)) ? (a) : (b))
#define my_min(a,b) (((a) < (b)) ? (a) : (b))
static int tinfl_put_buf_func(const void* pBuf, int len, void *pUser)
{
return len == (int)fwrite(pBuf, 1, len, (FILE*)pUser);
}
int main(int argc, char *argv[])
{
int status;
FILE *pInfile, *pOutfile;
uint infile_size, outfile_size;
size_t in_buf_size;
uint8 *pCmp_data;
long file_loc;
if (argc != 3)
{
printf("Usage: example4 infile outfile\n");
printf("Decompresses zlib stream in file infile to file outfile.\n");
printf("Input file must be able to fit entirely in memory.\n");
printf("example3 can be used to create compressed zlib streams.\n");
return EXIT_FAILURE;
}
// Open input file.
pInfile = fopen(argv[1], "rb");
if (!pInfile)
{
printf("Failed opening input file!\n");
return EXIT_FAILURE;
}
// Determine input file's size.
fseek(pInfile, 0, SEEK_END);
file_loc = ftell(pInfile);
fseek(pInfile, 0, SEEK_SET);
if ((file_loc < 0) || (file_loc > INT_MAX))
{
// This is not a limitation of miniz or tinfl, but this example.
printf("File is too large to be processed by this example.\n");
return EXIT_FAILURE;
}
infile_size = (uint)file_loc;
pCmp_data = (uint8 *)malloc(infile_size);
if (!pCmp_data)
{
printf("Out of memory!\n");
return EXIT_FAILURE;
}
if (fread(pCmp_data, 1, infile_size, pInfile) != infile_size)
{
printf("Failed reading input file!\n");
return EXIT_FAILURE;
}
// Open output file.
pOutfile = fopen(argv[2], "wb");
if (!pOutfile)
{
printf("Failed opening output file!\n");
return EXIT_FAILURE;
}
printf("Input file size: %u\n", infile_size);
in_buf_size = infile_size;
status = tinfl_decompress_mem_to_callback(pCmp_data, &in_buf_size, tinfl_put_buf_func, pOutfile, TINFL_FLAG_PARSE_ZLIB_HEADER);
if (!status)
{
printf("tinfl_decompress_mem_to_callback() failed with status %i!\n", status);
return EXIT_FAILURE;
}
outfile_size = ftell(pOutfile);
fclose(pInfile);
if (EOF == fclose(pOutfile))
{
printf("Failed writing to output file!\n");
return EXIT_FAILURE;
}
printf("Total input bytes: %u\n", (uint)in_buf_size);
printf("Total output bytes: %u\n", outfile_size);
printf("Success.\n");
return EXIT_SUCCESS;
}
| 2,681
| 25.038835
| 129
|
c
|
null |
pytorch-main/third_party/nvfuser/benchmark/utils.h
|
#pragma once
#include <torch/csrc/jit/codegen/cuda/executor.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
#include <torch/csrc/jit/codegen/cuda/ir_all_nodes.h>
#include <torch/csrc/jit/codegen/cuda/ir_utils.h>
#include <torch/csrc/jit/codegen/cuda/kernel_cache.h>
#include <torch/csrc/jit/codegen/cuda/lower2device.h>
#include <torch/csrc/jit/codegen/cuda/ops/all_ops.h>
#include <torch/csrc/jit/codegen/cuda/scheduler/all_schedulers.h>
#include <benchmark/benchmark.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/torch.h>
#include <cuda_runtime.h>
using namespace torch::jit::fuser::cuda;
// Make a tensor of dimensionality=ndims that is known to be non-contiguous,
// with unknown sizes
TensorView* makeSymbolicTensor(size_t ndims, DataType dtype = DataType::Float);
// Make a tensor of dimensionality=ndims that is known to be fully contiguous,
// with unknown sizes. Taken from test_gpu.cpp
TensorView* makeContigTensor(size_t ndims, DataType dtype = DataType::Float);
// Make a non-contiguous tensor of compile-time known sizes
TensorView* makeConcreteTensor(
std::vector<int64_t> shape,
DataType dtype = DataType::Float);
// Make a contiguous tensor of compile-time known sizes
TensorView* makeContigConcreteTensor(
std::vector<int64_t> shape,
DataType dtype = DataType::Float);
std::string toString(const ReductionParams& rparams);
std::string toString(const PointwiseParams& params);
std::string toString(const TransposeParams& params);
std::string toString(const std::shared_ptr<HeuristicParams>& params);
std::string toString(LaunchParams lparams);
// Run benchmark iterations with the provided inputs. If not segmented, report
// kernel time from the runtime, as well as heuristic parameters. If
// segmented, use timers. Make sure to clear L2 between iterations.
void runBenchmarkIterations(
benchmark::State& benchmark_state,
FusionExecutorCache* fusion_executor_cache,
std::vector<c10::IValue>& aten_inputs);
void clearL2Cache();
class CudaKernelTimer {
public:
CudaKernelTimer() {
// Setup
C10_CUDA_CHECK(cudaEventCreate(&start_event));
C10_CUDA_CHECK(cudaEventCreate(&finish_event));
C10_CUDA_CHECK(cudaEventRecord(start_event));
}
~CudaKernelTimer() {
C10_CUDA_IGNORE_ERROR(cudaEventDestroy(start_event));
C10_CUDA_IGNORE_ERROR(cudaEventDestroy(finish_event));
}
void restart() {
C10_CUDA_CHECK(cudaEventRecord(start_event));
}
float elapsed() {
// Record
C10_CUDA_CHECK(cudaEventRecord(finish_event));
C10_CUDA_CHECK(cudaEventSynchronize(start_event));
C10_CUDA_CHECK(cudaEventSynchronize(finish_event));
C10_CUDA_CHECK(
cudaEventElapsedTime(&kernel_time_ms_, start_event, finish_event));
return kernel_time_ms_;
}
private:
// Create
float kernel_time_ms_ = 0;
cudaEvent_t start_event = {};
cudaEvent_t finish_event = {};
};
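// Usage sketch (illustrative; `launch_kernel` stands in for whatever work is
// being timed and is not a real function):
//
//   CudaKernelTimer timer;       // constructor records the start event
//   launch_kernel();
//   float ms = timer.elapsed();  // records the finish event, syncs, returns ms
//   timer.restart();             // re-arm the start event for another run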
namespace executorCache {
using ExecutorPtr = std::unique_ptr<FusionExecutorCache>;
using ExecutorMap = std::unordered_map<std::string, ExecutorPtr>;
ExecutorMap& getGlobalMap();
} // namespace executorCache
//! Utility to manage FusionExecutorCache instances for
//! all defined benchmarks
class BenchmarkGraph : public benchmark::Fixture {
public:
using SetupFusionFunction = std::function<void(Fusion*)>;
using SetupFusionMap = std::unordered_map<std::string, SetupFusionFunction>;
virtual std::string graphName() = 0;
virtual SetupFusionFunction setupFusion() = 0;
FusionExecutorCache* getExecutorCache() {
auto& executor_ = getExecutorCacheMap()[graphName()];
TORCH_INTERNAL_ASSERT(executor_);
return executor_.get();
}
void SetUp(const ::benchmark::State& state) {
auto& executor_ = getExecutorCacheMap()[graphName()];
    // Make sure the same graph hasn't been compiled before
    if (!executor_) {
      auto fusion_ptr = std::make_unique<Fusion>();
      // Use a named guard; an unnamed temporary would be destroyed
      // immediately and never cover setupFusion() below.
      FusionGuard fg(fusion_ptr.get());
setupFusion()(fusion_ptr.get());
getExecutorCacheMap()[graphName()] =
std::make_unique<FusionExecutorCache>(std::move(fusion_ptr));
}
}
void TearDown(const ::benchmark::State& state) {}
protected:
static executorCache::ExecutorMap& getExecutorCacheMap() {
return executorCache::getGlobalMap();
}
};
#define NVFUSER_TO_STRING_HELPER(n) std::string(#n)
#define NVFUSER_TO_STRING(n) NVFUSER_TO_STRING_HELPER(n)
//! NVFUSER_BENCHMARK_RUN utility usage:
//! This utility helps create and manage FusionExecutorCaches and tries to use
//! the caching
//! mechanism in NVFuser to avoid re-compilation.
//!
//! There are two macros in this utility: NVFUSER_BENCHMARK_DEFINE, and
//! NVFUSER_BENCHMARK_RUN,
//! and the user needs to supply two functions SETUP_FUSION and RUN_FUSION,
//! with the following signatures:
//!
//! SETUP_FUSION(Fusion* , args...);
//! RUN_FUSION(benchmark::State&, FusionExecutorCache* , args...);
//!
//! where args... are additional arguments, and they need to be the same for
//! SETUP_FUSION and RUN_FUSION.
//!
//! SETUP_FUSION is called once in each definition of benchmark to build the
//! fusionIR graph
//!
//! RUN_FUSION is just like the normal benchmark instance, except that a
//! FusionExecutorCache
//! will be provided for scheduling, running and timing the fusion runs. It is
//! called once in each benchmark instance. For example:
//! NVFUSER_BENCHMARK_RUN(my_benchmark)
//! ->RangeMultiplier(2)
//!   ->Ranges({{1, 4}})
//! Calls RUN_FUSION 3 times.
//!
//! To register a benchmark, the API is:
//!
//! NVFUSER_BENCHMARK_DEFINE(my_benchmark,SETUP_FUSION,RUN_FUSION,args...);
//!
//! where my_benchmark is any unique name given for this benchmark,
//! SETUP_FUSION, RUN_FUSION as described above,
//! args... is the arg list supplied to both setup_fusion and run_fusion
//!
//! each NVFUSER_BENCHMARK_DEFINE registers a benchmark with a single
//! FusionExecutorCache, i.e. a single fusion graph, and multiple benchmark
//! data points can be registered like:
//!
//! NVFUSER_BENCHMARK_RUN(my_benchmark)
//! ->Ranges({{1,2}});
//!
//! NVFUSER_BENCHMARK_RUN(my_benchmark)
//! ->Ranges({{3,4}});
//!
//! All datapoints will use the same FusionExecutorCache so recompilation is
//! avoided as much as possible.
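//!
//! A minimal end-to-end sketch (setup_my_fusion / run_my_fusion are
//! hypothetical user-supplied functions, not part of this utility):
//!
//!   static void setup_my_fusion(Fusion* fusion, DataType dtype) {
//!     FusionGuard fg(fusion);
//!     auto tv0 = makeContigTensor(2, dtype);
//!     fusion->addInput(tv0);
//!     fusion->addOutput(relu(tv0));
//!   }
//!   static void run_my_fusion(
//!       benchmark::State& state,
//!       FusionExecutorCache* fec,
//!       DataType dtype) {
//!     at::Tensor t0 = at::randn(
//!         {state.range(0), 1024},
//!         at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0));
//!     std::vector<c10::IValue> inputs{t0};
//!     runBenchmarkIterations(state, fec, inputs);
//!   }
//!   NVFUSER_BENCHMARK_DEFINE(
//!       my_benchmark, setup_my_fusion, run_my_fusion, DataType::Float);
//!   NVFUSER_BENCHMARK_RUN(my_benchmark)->Ranges({{128, 1024}});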
#define NVFUSER_BENCHMARK_DEFINE( \
BENCHMARK_NAME, SETUP_FUSION, RUN_FUSION, ...) \
class BENCHMARK_NAME##___GRAPH : public BenchmarkGraph { \
public: \
std::string graphName() { \
return NVFUSER_TO_STRING(BENCHMARK_NAME##___GRAPH); \
} \
SetupFusionFunction setupFusion() { \
return [](Fusion* fusion) { SETUP_FUSION(fusion, __VA_ARGS__); }; \
} \
}; \
BENCHMARK_DEFINE_F(BENCHMARK_NAME##___GRAPH, BENCHMARK_NAME) \
(benchmark::State & benchmark_state) { \
RUN_FUSION( \
benchmark_state, \
BENCHMARK_NAME##___GRAPH::getExecutorCache(), \
__VA_ARGS__); \
}
#define NVFUSER_BENCHMARK_RUN(BENCHMARK_NAME) \
BENCHMARK_REGISTER_F(BENCHMARK_NAME##___GRAPH, BENCHMARK_NAME)
| 7,615
| 36.15122
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/arith.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_interface_nodes.h>
#include <type.h>
#include <type_promotion.h>
class Val;
/*
 * The operations defined in this header are intended as user-facing
 * functions. Generally, users should not directly instantiate temporary
 * TensorViews; they should instead use the functions below, which
 * automatically create IR nodes and return a resulting TensorView with
 * correctly tracked shapes.
*/
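/*
 * An illustrative sketch (assumes an active Fusion/FusionGuard and a
 * registered fusion input tv0; not a complete program):
 *
 *   TensorView* tv1 = relu(tv0);     // unary op, creates a UnaryOp IR node
 *   TensorView* tv2 = add(tv1, tv1); // pointwise binary op
 *   TensorView* tv3 = sum(tv2, {0}); // reduction over axis 0
 */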
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Insertion of casting op to dtype, returns new resulting val
TORCH_CUDA_CU_API Val* castOp(DataType dtype, Val* v1);
TORCH_CUDA_CU_API TensorView* castOp(DataType dtype, TensorView* v1);
TORCH_CUDA_CU_API Val* bitCastOp(DataType dtype, Val* v1);
TORCH_CUDA_CU_API TensorView* bitCastOp(DataType dtype, TensorView* v1);
// Perform unary op type and return the output
TORCH_CUDA_CU_API Val* unaryOp(UnaryOpType type, Val* v1);
TORCH_CUDA_CU_API TensorView* unaryOp(UnaryOpType type, TensorView* v1);
TORCH_CUDA_CU_API Val* unaryIsOp(UnaryOpType type, Val* v1);
TORCH_CUDA_CU_API TensorView* unaryIsOp(UnaryOpType type, TensorView* v1);
TORCH_CUDA_CU_API Val* unaryOp(
UnaryOpType type,
Val* v1,
const TypePromotionConfig& config);
TORCH_CUDA_CU_API TensorView* unaryOp(
UnaryOpType type,
TensorView* v1,
const TypePromotionConfig& config);
// Perform binary op type on v1 and v2 and return a type promoted output.
// Mod, CeilDiv, and LT are considered Int only output operations for now.
TORCH_CUDA_CU_API Val* binaryOp(
BinaryOpType type,
Val* v1,
Val* v2,
DataType out_dtype = DataType::Null);
TORCH_CUDA_CU_API TensorView* binaryOp(
BinaryOpType type,
TensorView* v1,
Val* v2,
DataType out_dtype = DataType::Null);
TORCH_CUDA_CU_API TensorView* binaryOp(
BinaryOpType type,
Val* v1,
TensorView* v2,
DataType out_dtype = DataType::Null);
TORCH_CUDA_CU_API TensorView* binaryOp(
BinaryOpType type,
TensorView* v1,
TensorView* v2,
DataType out_dtype = DataType::Null);
TORCH_CUDA_CU_API Val* binaryOp(
BinaryOpType type,
Val* v1,
Val* v2,
const TypePromotionConfig& config);
TORCH_CUDA_CU_API TensorView* binaryOp(
BinaryOpType type,
TensorView* v1,
Val* v2,
const TypePromotionConfig& config);
TORCH_CUDA_CU_API TensorView* binaryOp(
BinaryOpType type,
Val* v1,
TensorView* v2,
const TypePromotionConfig& config);
TORCH_CUDA_CU_API TensorView* binaryOp(
BinaryOpType type,
TensorView* v1,
TensorView* v2,
const TypePromotionConfig& config);
// Perform a reduction operation on v1, initial value for reduction is init,
// reduces across axes, and reduction operation defined by BinaryOp.
TORCH_CUDA_CU_API TensorView* reductionOp(
BinaryOpType reduction_op_type,
const std::vector<int>& axes,
Val* init,
TensorView* v1,
bool keep_dim = false,
DataType dtype = DataType::Null);
//! Auxiliary struct holding the result of
//! a single Welford op on a TensorView
class TORCH_CUDA_CU_API WelfordResult {
public:
TensorView* avg;
TensorView* var_sum;
TensorView* n;
explicit WelfordResult(
TensorView* in_avg,
TensorView* in_var_sum,
TensorView* in_n);
};
//! Welford operator on specified axes. This is currently the only scan op with
//! multiple outputs that is supported. May consider generalization if more scan
//! ops are added.
TORCH_CUDA_CU_API WelfordResult Welford(
TensorView* tv,
const std::vector<int>& axes,
TensorView* init_avg = nullptr,
TensorView* init_var = nullptr,
// Initializes to 0 in function definition, doing this so we don't have to
// import IrBuilder just for this one interface.
Int* init_N = nullptr);
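// For example (a sketch): for a 2D tv,
//   WelfordResult res = Welford(tv, {1});
// reduces over axis 1, producing res.avg (mean), res.var_sum (sum of squared
// deviations), and res.n (count) per row.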
// RNG OPERATIONS
TORCH_CUDA_CU_API TensorView* rand(
const std::vector<Val*>& shape,
DataType dtype);
TORCH_CUDA_CU_API Val* rand_like(Val*);
TORCH_CUDA_CU_API TensorView* rand_like(TensorView*);
TORCH_CUDA_CU_API TensorView* uniform(
const std::vector<Val*>& shape,
Val* low,
Val* high,
DataType dtype);
// TENSOR FACTORIES
TORCH_CUDA_CU_API TensorView* full(
const std::vector<Val*>& shape,
Val* fill_value,
DataType dtype);
TORCH_CUDA_CU_API TensorView* full_like(TensorView* tv, Val* fill_value);
TORCH_CUDA_CU_API Val* full_like(Val* tv, Val* fill_value);
TORCH_CUDA_CU_API TensorView* zeros(
const std::vector<Val*>& shape,
DataType dtype);
TORCH_CUDA_CU_API TensorView* zeros_like(TensorView*);
TORCH_CUDA_CU_API Val* zeros_like(Val*);
TORCH_CUDA_CU_API TensorView* ones(
const std::vector<Val*>& shape,
DataType dtype);
TORCH_CUDA_CU_API TensorView* ones_like(TensorView*);
TORCH_CUDA_CU_API Val* ones_like(Val*);
//! WARNING: giving invalid combinations of the start, end and step
//! arguments can result in undefined behavior. Specifically, the
//! signs of `end - start` and step must be the same.
TORCH_CUDA_CU_API TensorView* arange(Val* end, DataType dtype = DataType::Int);
TORCH_CUDA_CU_API TensorView* arange(
Val* start,
Val* end,
DataType dtype = DataType::Int);
TORCH_CUDA_CU_API TensorView* arange(
Val* start,
Val* end,
Val* step,
DataType dtype = DataType::Int);
TORCH_CUDA_CU_API TensorView* eye(Val* size, DataType dtype);
TORCH_CUDA_CU_API TensorView* eye(Val* rows, Val* cols, DataType dtype);
// UNARY OPERATIONS
// abs
TORCH_CUDA_CU_API Val* abs(Val*);
TORCH_CUDA_CU_API TensorView* abs(TensorView*);
// acos
TORCH_CUDA_CU_API Val* acos(Val*);
TORCH_CUDA_CU_API TensorView* acos(TensorView*);
// asin
TORCH_CUDA_CU_API Val* asin(Val*);
TORCH_CUDA_CU_API TensorView* asin(TensorView*);
// atan
TORCH_CUDA_CU_API Val* atan(Val*);
TORCH_CUDA_CU_API TensorView* atan(TensorView*);
// atanh
TORCH_CUDA_CU_API Val* atanh(Val*);
TORCH_CUDA_CU_API TensorView* atanh(TensorView*);
// ceil
TORCH_CUDA_CU_API Val* ceil(Val*);
TORCH_CUDA_CU_API TensorView* ceil(TensorView*);
// cos
TORCH_CUDA_CU_API Val* cos(Val*);
TORCH_CUDA_CU_API TensorView* cos(TensorView*);
// cosh
TORCH_CUDA_CU_API Val* cosh(Val*);
TORCH_CUDA_CU_API TensorView* cosh(TensorView*);
// exp
TORCH_CUDA_CU_API Val* exp(Val*);
TORCH_CUDA_CU_API TensorView* exp(TensorView*);
// expm1
TORCH_CUDA_CU_API Val* expm1(Val*);
TORCH_CUDA_CU_API TensorView* expm1(TensorView*);
// erf
TORCH_CUDA_CU_API Val* erf(Val*);
TORCH_CUDA_CU_API TensorView* erf(TensorView*);
// erfc
TORCH_CUDA_CU_API Val* erfc(Val*);
TORCH_CUDA_CU_API TensorView* erfc(TensorView*);
// floor
TORCH_CUDA_CU_API Val* floor(Val*);
TORCH_CUDA_CU_API TensorView* floor(TensorView*);
// frac
TORCH_CUDA_CU_API Val* frac(Val*);
TORCH_CUDA_CU_API TensorView* frac(TensorView*);
// silu
TORCH_CUDA_CU_API Val* silu(Val*);
TORCH_CUDA_CU_API TensorView* silu(TensorView*);
// lgamma
TORCH_CUDA_CU_API Val* lgamma(Val*);
TORCH_CUDA_CU_API TensorView* lgamma(TensorView*);
// log
TORCH_CUDA_CU_API Val* log(Val*);
TORCH_CUDA_CU_API TensorView* log(TensorView*);
// log10
TORCH_CUDA_CU_API Val* log10(Val*);
TORCH_CUDA_CU_API TensorView* log10(TensorView*);
// log1p
TORCH_CUDA_CU_API Val* log1p(Val*);
TORCH_CUDA_CU_API TensorView* log1p(TensorView*);
// log2
TORCH_CUDA_CU_API Val* log2(Val*);
TORCH_CUDA_CU_API TensorView* log2(TensorView*);
// neg
TORCH_CUDA_CU_API Val* neg(Val*);
TORCH_CUDA_CU_API TensorView* neg(TensorView*);
// real
TORCH_CUDA_CU_API Val* real(Val*);
TORCH_CUDA_CU_API TensorView* real(TensorView*);
// reciprocal
TORCH_CUDA_CU_API Val* reciprocal(Val*);
TORCH_CUDA_CU_API TensorView* reciprocal(TensorView*);
// relu
TORCH_CUDA_CU_API Val* relu(Val*);
TORCH_CUDA_CU_API TensorView* relu(TensorView*);
// rsqrt
TORCH_CUDA_CU_API Val* rsqrt(Val*);
TORCH_CUDA_CU_API TensorView* rsqrt(TensorView*);
// round
TORCH_CUDA_CU_API Val* round(Val*);
TORCH_CUDA_CU_API TensorView* round(TensorView*);
// set
TORCH_CUDA_CU_API Val* set(Val*);
TORCH_CUDA_CU_API TensorView* set(TensorView*);
// sigmoid
TORCH_CUDA_CU_API Val* sigmoid(Val*);
TORCH_CUDA_CU_API TensorView* sigmoid(TensorView*);
// sin
TORCH_CUDA_CU_API Val* sin(Val*);
TORCH_CUDA_CU_API TensorView* sin(TensorView*);
// sinh
TORCH_CUDA_CU_API Val* sinh(Val*);
TORCH_CUDA_CU_API TensorView* sinh(TensorView*);
// sqrt
TORCH_CUDA_CU_API Val* sqrt(Val*);
TORCH_CUDA_CU_API TensorView* sqrt(TensorView*);
// tan
TORCH_CUDA_CU_API Val* tan(Val*);
TORCH_CUDA_CU_API TensorView* tan(TensorView*);
// tanh
TORCH_CUDA_CU_API Val* tanh(Val*);
TORCH_CUDA_CU_API TensorView* tanh(TensorView*);
// trunc
TORCH_CUDA_CU_API Val* trunc(Val*);
TORCH_CUDA_CU_API TensorView* trunc(TensorView*);
// bitwise_not
TORCH_CUDA_CU_API Val* bitwise_not(Val*);
TORCH_CUDA_CU_API TensorView* bitwise_not(TensorView*);
// imag
TORCH_CUDA_CU_API Val* imag(Val*);
TORCH_CUDA_CU_API TensorView* imag(TensorView*);
// isfinite
TORCH_CUDA_CU_API Val* isfinite(Val*);
TORCH_CUDA_CU_API TensorView* isfinite(TensorView*);
// isinf
TORCH_CUDA_CU_API Val* isinf(Val*);
TORCH_CUDA_CU_API TensorView* isinf(TensorView*);
// isnan
TORCH_CUDA_CU_API Val* isnan(Val*);
TORCH_CUDA_CU_API TensorView* isnan(TensorView*);
// isneginf
TORCH_CUDA_CU_API Val* isneginf(Val*);
TORCH_CUDA_CU_API TensorView* isneginf(TensorView*);
// isposinf
TORCH_CUDA_CU_API Val* isposinf(Val*);
TORCH_CUDA_CU_API TensorView* isposinf(TensorView*);
// isreal
TORCH_CUDA_CU_API Val* isreal(Val*);
TORCH_CUDA_CU_API TensorView* isreal(TensorView*);
// print
TORCH_CUDA_CU_API Val* print(Val*);
TORCH_CUDA_CU_API TensorView* print(TensorView*);
// Broadcasts inp based on a bool vector. The size of the bool vector should be
// the number of dims desired in the broadcasted tensor. An entry should be
// true if the output dim should be a broadcast dim, and false if it is not.
// The number of false entries must match the number of input dims.
TORCH_CUDA_CU_API TensorView* broadcast(
TensorView* inp,
const std::vector<bool>& is_broadcast_dim);
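// For example (a sketch): given a 2D input t0 with domains [I0, I1],
//   broadcast(t0, {false, true, false, true})
// produces a 4D output [I0, B, I1, B], inserting a broadcast domain wherever
// the vector holds true.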
// Expands the input based on the provided sizes. expand_sizes should be at
// least as large as the input's root domain (really rfactor) and will
// broadcast on inner dimensions. expand_sizes should be -1 for any dimension
// that should remain a symbolic size. Dimensions that should remain broadcast
// after the expand should be set to 1. Any dimension being expanded must be
// marked as a broadcast in the input and will be expanded to the provided
// constant size. Any dimension that's symbolic in the input but specified as
// a non -1 value will be set to that constant value.
TORCH_CUDA_CU_API TensorView* expand(
TensorView* inp,
const std::vector<Val*>& expanded_sizes);
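// For example (a sketch): given t0 with domains [I0, B1], expanding with
// sizes {-1, 8} (passed as Val*) keeps I0 at its symbolic size and expands
// the broadcast domain B1 to extent 8.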
// Expands inp based on other. For dimensions in inp that are broadcast with a
// matching entry in other that's either a broadcast with an expanded extent or
// a non-broadcast iter domain, inp will be expanded to other's size.
TORCH_CUDA_CU_API TensorView* expand_as(TensorView* inp, TensorView* other);
// BINARY OPERATIONS
// add
TORCH_CUDA_CU_API Val* add(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* add(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* add(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* add(TensorView* v1, TensorView* v2);
// atan2
TORCH_CUDA_CU_API Val* atan2(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* atan2(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* atan2(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* atan2(TensorView* v1, TensorView* v2);
// div
TORCH_CUDA_CU_API Val* div(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* div(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* div(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* div(TensorView* v1, TensorView* v2);
// fmod
TORCH_CUDA_CU_API Val* fmod(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* fmod(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* fmod(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* fmod(TensorView* v1, TensorView* v2);
// mul
TORCH_CUDA_CU_API Val* mul(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* mul(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* mul(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* mul(TensorView* v1, TensorView* v2);
// pow
TORCH_CUDA_CU_API Val* pow(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* pow(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* pow(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* pow(TensorView* v1, TensorView* v2);
// remainder
TORCH_CUDA_CU_API Val* remainder(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* remainder(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* remainder(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* remainder(TensorView* v1, TensorView* v2);
// sub
TORCH_CUDA_CU_API Val* sub(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* sub(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* sub(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* sub(TensorView* v1, TensorView* v2);
// Integer binary ops
// mod
TORCH_CUDA_CU_API Val* mod(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* mod(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* mod(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* mod(TensorView* v1, TensorView* v2);
// ceilDiv
TORCH_CUDA_CU_API Val* ceilDiv(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* ceilDiv(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* ceilDiv(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* ceilDiv(TensorView* v1, TensorView* v2);
// Bitwise binary ops
// bitwise_and
TORCH_CUDA_CU_API Val* bitwise_and(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_and(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_and(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* bitwise_and(TensorView* v1, TensorView* v2);
// bitwise_left_shift
TORCH_CUDA_CU_API Val* bitwise_left_shift(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_left_shift(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_left_shift(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* bitwise_left_shift(
TensorView* v1,
TensorView* v2);
// bitwise_right_shift
TORCH_CUDA_CU_API Val* bitwise_right_shift(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_right_shift(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_right_shift(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* bitwise_right_shift(
TensorView* v1,
TensorView* v2);
// bitwise_or
TORCH_CUDA_CU_API Val* bitwise_or(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_or(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_or(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* bitwise_or(TensorView* v1, TensorView* v2);
// bitwise_xor
TORCH_CUDA_CU_API Val* bitwise_xor(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_xor(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* bitwise_xor(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* bitwise_xor(TensorView* v1, TensorView* v2);
// Logical binary ops
// eq
TORCH_CUDA_CU_API Val* eq(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* eq(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* eq(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* eq(TensorView* v1, TensorView* v2);
// ge
TORCH_CUDA_CU_API Val* ge(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* ge(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* ge(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* ge(TensorView* v1, TensorView* v2);
// gt
TORCH_CUDA_CU_API Val* gt(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* gt(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* gt(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* gt(TensorView* v1, TensorView* v2);
// le
TORCH_CUDA_CU_API Val* le(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* le(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* le(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* le(TensorView* v1, TensorView* v2);
// lt
TORCH_CUDA_CU_API Val* lt(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* lt(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* lt(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* lt(TensorView* v1, TensorView* v2);
// ne
TORCH_CUDA_CU_API Val* ne(Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* ne(TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* ne(Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* ne(TensorView* v1, TensorView* v2);
// REDUCTION OPERATIONS
TORCH_CUDA_CU_API TensorView* sum(
TensorView* v1,
const std::vector<int>& reduction_axes,
bool keep_dim = false,
DataType dtype = DataType::Null);
TORCH_CUDA_CU_API TensorView* max(
TensorView* v1,
const std::vector<int>& reduction_axes,
bool keep_dim = false,
DataType dtype = DataType::Null);
TORCH_CUDA_CU_API TensorView* min(
TensorView* v1,
const std::vector<int>& reduction_axes,
bool keep_dim = false,
DataType dtype = DataType::Null);
// COMPOUND OPERATIONS
// add_alpha
TORCH_CUDA_CU_API Val* add_alpha(Val* v1, Val* v2, Val* s);
TORCH_CUDA_CU_API TensorView* add_alpha(TensorView* v1, Val* v2, Val* s);
TORCH_CUDA_CU_API TensorView* add_alpha(Val* v1, TensorView* v2, Val* s);
TORCH_CUDA_CU_API TensorView* add_alpha(TensorView* v1, TensorView* v2, Val* s);
// sub_alpha
TORCH_CUDA_CU_API Val* sub_alpha(Val* v1, Val* v2, Val* s);
TORCH_CUDA_CU_API TensorView* sub_alpha(TensorView* v1, Val* v2, Val* s);
TORCH_CUDA_CU_API TensorView* sub_alpha(Val* v1, TensorView* v2, Val* s);
TORCH_CUDA_CU_API TensorView* sub_alpha(TensorView* v1, TensorView* v2, Val* s);
// lerp
TORCH_CUDA_CU_API Val* lerp(Val* start, Val* end, Val* weight);
TORCH_CUDA_CU_API TensorView* lerp(TensorView* start, Val* end, Val* weight);
TORCH_CUDA_CU_API TensorView* lerp(Val* start, TensorView* end, Val* weight);
TORCH_CUDA_CU_API TensorView* lerp(Val* start, Val* end, TensorView* weight);
TORCH_CUDA_CU_API TensorView* lerp(
TensorView* start,
TensorView* end,
Val* weight);
TORCH_CUDA_CU_API TensorView* lerp(
TensorView* start,
Val* end,
TensorView* weight);
TORCH_CUDA_CU_API TensorView* lerp(
Val* start,
TensorView* end,
TensorView* weight);
TORCH_CUDA_CU_API TensorView* lerp(
TensorView* start,
TensorView* end,
TensorView* weight);
// addcmul
TORCH_CUDA_CU_API Val* addcmul(Val* v1, Val* v2, Val* v3, Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(TensorView* v1, Val* v2, Val* v3, Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(Val* v1, TensorView* v2, Val* v3, Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(Val* v1, Val* v2, TensorView* v3, Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(
TensorView* v1,
TensorView* v2,
Val* v3,
Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(
TensorView* v1,
Val* v2,
TensorView* v3,
Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(
Val* v1,
TensorView* v2,
TensorView* v3,
Val* s);
TORCH_CUDA_CU_API TensorView* addcmul(
TensorView* v1,
TensorView* v2,
TensorView* v3,
Val* s);
// TERNARY OPERATIONS
// where
TORCH_CUDA_CU_API Val* where(Val* c, Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* where(TensorView* c, Val* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* where(Val* c, TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* where(Val* c, Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* where(TensorView* c, TensorView* v1, Val* v2);
TORCH_CUDA_CU_API TensorView* where(TensorView* c, Val* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* where(Val* c, TensorView* v1, TensorView* v2);
TORCH_CUDA_CU_API TensorView* where(
TensorView* c,
TensorView* v1,
TensorView* v2);
// threshold
TORCH_CUDA_CU_API Val* threshold(Val* in, Val* thresh, Val* value);
TORCH_CUDA_CU_API TensorView* threshold(
TensorView* in,
Val* thresh,
Val* value);
// clamp
TORCH_CUDA_CU_API Val* clamp(Val* in, Val* min_val, Val* max_val);
TORCH_CUDA_CU_API TensorView* clamp(TensorView* in, Val* min_val, Val* max_val);
//! Internal operator for supporting backward graphs
//!
//! example:
//! v1 = T1 [I0(10),I1(20),I2(30),I3(40)]
//! v2 = sum_to(v1,{30,1}) ------> v2 = T2[I2,R3 (keep_dim)]
//!
//! This operator will return v1 directly if the sizes of v1's root domain
//! already match shape.
//!
//! The name sum_to differs from the usual NVFuser naming;
//! it aligns with the operator name of at::sum_to.
TORCH_CUDA_CU_API TensorView* sum_to(
TensorView* v1,
const std::vector<Int*>& sum_to_size);
TORCH_CUDA_CU_API TensorView* sum_to(
TensorView* v1,
const std::vector<int64_t>& sum_to_size);
//! Shift a tensor to a direction specified by offsets.
//!
//! Example:
//! t0: 2D tensor of size N by M
//! t1 = shift(t0, {1, -1});
//!
//! then:
//! t1[i, j] = t0[i-1, j+1] for 1 <= i < N and 0 <= j < M-1.
//! t1[i, j] = 0, otherwise
//!
//! The pad option controls how out-of-boundary accesses are
//! handled. It specifies how many zeros are logically padded. If no
//! pad option is given, it automatically pads the input tensor so
//! that the output tensor has the same extent for each axis.
//!
//! When a padding value is smaller than the absolute value of a shift
//! offset, the output axis still has the same extent but its start or
//! stop offset is moved inward to signify those outside of the offset
//! are invalid.
//!
//! It is not allowed to use padding values that are larger than shift
//! offsets, which would mean output extents would be larger than
//! input extents.
TORCH_CUDA_CU_API TensorView* shift(
TensorView* inp,
const std::vector<int>& offsets,
const std::vector<int>& pad_width = {});
TORCH_CUDA_CU_API TensorView* shift(
TensorView* inp,
const std::vector<int>& offsets,
bool pad);
//! Gather a window of nearby elements for each element.
//!
//! Each window of size window_shape is stored as an additional
//! innermost domain, meaning that the number of dimensions of the
//! output tensor doubles. The pad_width parameter specifies the
//! padding width of each side of each axis. The strides parameter
//! specifies striding of the operation. Non-unit striding is
//! implemented with strided split, whose outer output domain becomes
//! the root domain for subsequent consumers. The inner output domain
//! becomes a Stride domain, which is ignored by subsequent consumers.
//! Only valid input ranges are fed into strided splits.
//!
//! When trim_out_of_bounds is true, the values at the first and last
//! ends that are outside of the start and stop offsets are
//! effectively trimmed by a partial split by 1.
//!
//! Example 1:
//! t0: 2D tensor of [N, M]
//! t1 = gather(t0, {1, 3}, {{0, 0}, {1, 1}});
//!
//! then:
//! t1: [N, M, 1, 3]
//! t1[i, j, k, l] = The value at the window position of [k, l]
//! for t0[i, j]
//!
//! Example 2.1 (without trimming):
//! t0: 2D tensor of [N, M]
//! t1 = gather(t0, {2, 2}, {{0, 0}, {0, 0}});
//!
//! then:
//! t1: [N (stop offset: 1), M (stop offset: 1, 2, 2)]
//!
//! Example 2.1 (with trimming)
//! t0: 2D tensor of [N, M]
//! t1 = gather(t0, {2, 2}, {{0, 0}, {0, 0}}, true);
//!
//! then:
//! t1: [ceilDiv(N - 1, 1), ceilDiv(M - 1, 1), 2, 2]
//!
//! Example 3:
//! t0: 2D tensor of [N, M]
//! t1 = gather(t0, {3, 3}, {{0, 0}, {0, 0}}, {3, 3});
//!
//! then:
//! t1: [ceilDiv(N - 2, 3), ceilDiv(M - 2, 3), 2, 2]
//!
TORCH_CUDA_CU_API TensorView* gather(
TensorView* inp,
const std::vector<int>& window_shape,
const std::vector<std::vector<int>>& pad_width,
const std::vector<int>& strides = {},
bool trim_out_of_bounds = false);
// Append a new IterDomain to the end of a TensorView to allow
// iterating on a vector type. The input tensor must have
// a vector dtype.
TORCH_CUDA_CU_API TensorView* viewAsScalar(TensorView* inp);
//! A fused pointwise multiply and sum
//! operator that instantiates the following
//! fused pattern:
//! c = mul(tv_a, tv_b);
//! return sum(c, axes)
//!
//! \param tv_a first multiply operand
//! \param tv_b second multiply operand
//! \param axes axes to sum over
//! \param init sum initial value
//!
//! Note & TODO:
//! This interface currently only supports lowering to an mma op
//! and only supports fp16 inputs.
//! Converting back to multiply and reduce will be supported in
//! a follow-up.
TORCH_CUDA_CU_API TensorView* fusedMultiplySum(
TensorView* tv_a,
TensorView* tv_b,
const std::vector<int>& axes,
Val* init = nullptr);
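// For example (a sketch, assuming fp16 2D inputs a and b):
//   TensorView* out = fusedMultiplySum(a, b, {-1});
// expresses sum(mul(a, b), {-1}) as a single fused op.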
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 24,505
| 35.197932
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/compute_at.h
|
#pragma once
#include <inlining.h>
#include <root_domain_map.h>
#include <transform_replay.h>
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
#include <deque>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class TensorDomain;
class TensorView;
struct ComputeAt {
public:
// Runs the compute at pass making producer look like consumer, computing
// producer relative to consumer
static void runAt(
TensorView* producer,
TensorView* consumer,
int64_t consumer_position,
ComputeAtMode mode = ComputeAtMode::Standard);
// Runs the compute with pass making consumer look like producer, computing
// producer relative to consumer
static void runWith(
TensorView* producer,
TensorView* consumer,
int64_t producer_position,
ComputeAtMode mode = ComputeAtMode::Standard);
};
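// An illustrative sketch (assumes tv0/tv1/tv2 come from an active fusion,
// e.g. tv1 = relu(tv0) and tv2 = sum(tv1, {1})):
//
//   ComputeAt::runAt(tv1, tv2, /*consumer_position=*/1);
//
// computes tv1 inside the first loop of tv2, making the producer's loop
// structure follow the consumer's.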
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,021
| 21.217391
| 77
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/compute_at_map.h
|
#pragma once
#include <disjoint_set.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <lower_trivial_reductions.h>
#include <deque>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// There are three modes of these iter domain mappings, all uniquely important
// in the lowering process.
//
// For EXACT/PERMISSIVE mode consider:
//
// consumer[i0, b1] = producer[i0]
// consumer->merge(0) (consumer will now be [i0 * b1])
// When producer is replayed as consumer (the direction we use for mapping)
// with BestEffortReplay forward_bcast_mismatch = True the producer to
// consumer map will have both a mapping of consumer(i0) to producer(i0) as
// well as consumer(i0*b1) to producer(i0). This latter mapping is important
// for loop nest mappings as the consumer will generate a loop based on i0*b1
// and the producer may be computeAt inside this loop nest. However, for
// indexing we do not want these two maps as producer may be indexed as i0*i1
// depending on the loop nest structure and how it was built. Therefore we
// really need to carry (at least) two sets of maps around for lowering.
//
// LOOP mode is important if we have something like:
// consumer[i0o, threadIdx.x{i0i}] = producer[i0o, threadIdx.y{i0i}](computeAt
// = 1) which can easily happen when using shared memory. We want to make sure
// that the iteration domain used for loop construction (concreteId) has the
// proper parallelization strategy. In parallel mode we do typical iteration
// domain mapping, however we remove from it any iteration domains outside the
// computeAt of producer when mapping. This guarantees we won't map
// IterDomains that could have different parallelization strategies. We also
// propagate the parallel strategy in parallel mode so all mapped IDs that
// must have the same parallel type, do.
//
// IdMappingMode::LOOP
// Only maps leaf axes to left of compute at
// Forward broadcast axes in replay
// IdMappingMode::PERMISSIVE
// Forward broadcast axes in replay
// Map all iteration domains
// Always contain root mappings (otherwise they could have been forwarded in
// broadcast)
// IdMappingMode::EXACT
// Don't map any broadcast axes to non-broadcast axes
// Do not forward through any broadcast IDs
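// A rough recap of the example above (not normative): for
//   consumer[i0, b1] = producer[i0]; consumer->merge(0)
//   EXACT:      consumer(i0) <-> producer(i0) only; b1 and i0*b1 have no
//               producer mapping.
//   PERMISSIVE: additionally consumer(i0*b1) <-> producer(i0), since the
//               broadcast is forwarded.
//   LOOP:       like PERMISSIVE, but restricted to leaf axes left of the
//               computeAt position, with parallel types propagated.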
class TORCH_CUDA_CU_API IterDomainGraph {
public:
IterDomainGraph(Fusion* fusion, bool allow_self_mapping = false);
const DisjointSets<IterDomain*>& permissiveNodes() const {
return permissive_nodes_;
}
const DisjointSets<IterDomain*>& exactNodes() const {
return exact_nodes_;
}
const DisjointSets<IterDomain*>& loopNodes() const {
return loop_nodes_;
}
// Consumers and producers are not symmetric like the other sets
const std::unordered_map<IterDomain*, VectorOfUniqueEntries<IterDomain*>>&
consumers() const {
return consumers_;
}
const std::unordered_map<IterDomain*, VectorOfUniqueEntries<IterDomain*>>&
producers() const {
return producers_;
}
const DisjointSets<IterDomain*>& siblings() const {
return sibling_sets_;
}
const VectorOfUniqueEntries<IterDomain*>& allIds() const {
return all_ids_;
}
const std::unordered_set<IterDomain*>& viewRfactorIds() const {
return view_rfactor_ids_;
}
// Returns if first and second are expressions through which the provided
// id_map have matching inputs (if forward), or outputs (if not forward).
// Returning true means the expressions are "the same", in the sense that
// they modify matching original extents by the same amount.
static bool exprsMap(
Expr* first,
Expr* second,
bool forward,
const DisjointSets<IterDomain*>& id_map);
bool hasSelfMapping() const {
return self_mapping_info_.has_value();
}
private:
void build(Fusion* fusion);
void initializeId(IterDomain* id, bool is_view_rfactor_id, bool is_leaf_id);
// Checks if exprsMap then if forward will map outputs else inputs in exact
// and permissive map.
void mapThroughExpr(Expr* first, Expr* second, bool forward);
DisjointSets<IterDomain*> permissive_nodes_;
DisjointSets<IterDomain*> exact_nodes_;
DisjointSets<IterDomain*> loop_nodes_;
// Consumers and producers are not symmetric like the other sets
std::unordered_map<IterDomain*, VectorOfUniqueEntries<IterDomain*>>
consumers_;
std::unordered_map<IterDomain*, VectorOfUniqueEntries<IterDomain*>>
producers_;
DisjointSets<IterDomain*> sibling_sets_;
VectorOfUniqueEntries<IterDomain*> all_ids_;
std::unordered_set<IterDomain*> view_rfactor_ids_;
c10::optional<std::tuple<TensorView*, IterDomain*, IterDomain*, std::string>>
self_mapping_info_ = c10::nullopt;
};
class TrivialReductionInfo;
using DoubleBufferIndices = std::unordered_map<DoubleBufferLoopStage, Int*>;
class TORCH_CUDA_CU_API ComputeAtMap {
public:
ComputeAtMap() = delete;
ComputeAtMap(const ComputeAtMap&) = delete;
ComputeAtMap& operator=(const ComputeAtMap&) = delete;
ComputeAtMap(ComputeAtMap&&) = default;
ComputeAtMap& operator=(ComputeAtMap&&) = default;
ComputeAtMap(Fusion* fusion);
//! Run through disjoint sets in the LOOP map, make sure there's only one
//! non-serial parallel type in each disjoint set, set the parallel type of
//! all IterDomains in the disjoint set to that PType.
void validateAndPropagatePType();
//! Run through disjoint sets in the LOOP map and allocate the index
//! variable for the associated for loop that will be generated
//! for each disjoint set in the loop map. This pre-allocation makes
//! 2 key assumptions about the computeAt map that are very likely
//! long-term invariants:
//! 1. Every kir::ForLoop created in the lowering pass should belong
//! to one of the disjoint sets in the loop map.
//! 2. The lowering pass will *never* create a loop nest with 2
//! different nesting levels mapped together, i.e. the case below
//! never occurs:
//! for i in IterDomain1
//! for j in IterDomain2
//! ...
//! With loop_map.areMapped(IterDomain1, IterDomain2) == true.
//! Under this condition, we can pre-allocate all required index
//! variable integers before creating any kir::forloop, and this
//! would help optimizing the generated integer math for indexing.
void allocateIndexVariables();
//! Returns if id0 and id1 are mapped to each other with provided IdMappingMode
bool areMapped(IterDomain* id0, IterDomain* id1, IdMappingMode mode) const;
//! Returns an iter domain that has the maximum expanded size of all iter
//! domains the one provided maps to. Useful for opening loops to the correct
//! iteration size. Not guaranteed to return the same ID every call, but is
//! guaranteed to return iter domains in the same disjoint set.
IterDomain* getConcreteMappedID(IterDomain* id, IdMappingMode mode) const;
// Prints mapping information, forwards to an internal IterDomainGraph
std::string toString() const;
// Returns if the provided ID is a view-like rfactor id
bool isViewRfactor(IterDomain* ref_id) const;
// Returns all rfactor domains in rfactor_concrete_count_reset_domains_ that
// are in the disjoint set of the provided IterDomain. This will be every
// view-like rfactor ID the provided ID "depends" on in the map.
std::vector<IterDomain*> getViewRfactorDomainsOfIdGroup(
IterDomain* ref_id,
IdMappingMode mode) const;
const IterDomainGraph& idGraph() const {
return id_graph_;
}
//! Get the ID sets for a provided IdMappingMode
const DisjointSets<IterDomain*>& getIdSets(IdMappingMode mode) const;
// Returns if the ID actually has a disjoint set meaning it has been processed
// in the creation of the compute at map.
bool idExistsInMap(IterDomain* id) const;
//! Returns the pre-allocated index variable integer used in
//! the kir::ForLoop corresponding to the given IterDomain.
//! This interface is only valid if the ID has a loop mapping;
//! ca_map will throw an exception if the given IterDomain doesn't
//! have a loop map entry.
Val* getIndexVariable(
IterDomain* id,
DoubleBufferLoopStage double_buffer_loop_stage =
DoubleBufferLoopStage::NotApplicable) const;
private:
// Build id_graph_
void build(Fusion* fusion);
// Build a single entry in concrete_id_cache_
IterDomain* computeConcreteId(IterDomain* id, IdMappingMode mode);
// Build concrete_id_cache_
void buildConcreteIds();
// Produce the disjoint set containing provided id with mapping mode.
const std::shared_ptr<VectorOfUniqueEntries<IterDomain*>>& disjointSetOf(
IterDomain* id,
IdMappingMode mode) const;
// Should be built once and never modified again.
IterDomainGraph id_graph_;
TrivialReductionInfo trivial_reduction_info_;
// Prevent needing to recompute concrete_id's in compute at map.
// VectorOfUniqueEntries is unique across mapping modes, so we don't need to
// use the mapping mode directly in this cache. The const
// VectorOfUniqueEntries<IterDomain*>& returned by
// ComputeAtMap::disjointSetOf can be used directly.
std::unordered_map<
std::shared_ptr<VectorOfUniqueEntries<IterDomain*>>,
IterDomain*>
concrete_id_cache_;
//! Allocated Loop index variable through the CA map.
//! only valid for disjoint sets on the loop ca map.
std::unordered_map<const VectorOfUniqueEntries<IterDomain*>*, Val*>
loop_index_variable_map_;
//! Allocated loop indices for double buffer loop.
//! only valid for disjoint sets on the loop ca map
//! that have double-buffered iterdomains.
using DoubleBufferIndicesPtr = std::unique_ptr<DoubleBufferIndices>;
std::unordered_map<
const VectorOfUniqueEntries<IterDomain*>*,
DoubleBufferIndicesPtr>
double_buffered_loop_index_variable_map_;
// Shortcut to access the fusion this computeAt map was
// built from.
Fusion* fusion_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 10,018
| 36.807547
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/contiguity.h
|
#pragma once
#include <c10/macros/Export.h>
#include <compute_at_map.h>
#include <disjoint_set.h>
#include <ir_all_nodes.h>
#include <lower_shift.h>
#include <lower_trivial_broadcast.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Goes through the transformations associated with a series of ids and root
// ids. Checks the ordering of the iteration domains through these operations to
// pick out which operations are consistently ordered. For example:
// [i0, i1, i2]
// ->split(0, 4)->merge(1)->merge(1)->merge(0)
// are consistently ordered from largest to smallest extents, but
// ->split(0, 4)->merge(1)->merge(0, 2)->merge(0) is not consistently ordered
// with the roots.
//
// This property is important to understand the contiguity of dimensions through
// complex transformations.
class OrderedIdInformation : public OptInDispatch {
public:
OrderedIdInformation() = delete;
OrderedIdInformation(
const std::vector<IterDomain*>& ids,
const std::vector<IterDomain*>& root_domain,
std::shared_ptr<const ConcretizedBroadcastDomains> concrete_info);
const std::unordered_map<IterDomain*, VectorOfUniqueEntries<IterDomain*>>&
idToRootIds() const {
return id_to_root_ids_;
}
bool isConsistentlyOrdered(IterDomain* id) const {
return consistently_ordered_ids_.find(id) !=
consistently_ordered_ids_.end();
}
bool exclusivelyConsumesRoots(IterDomain* id) const {
return exclusively_consumes_roots_.find(id) !=
exclusively_consumes_roots_.end();
}
private:
// Returns if the id in active_ids should be in exclusively_consumes_roots_
bool checkExclusivelyConsumesRoots(IterDomain* id);
void handle(Split*) override;
void handle(Merge* merge) override;
void handle(Swizzle2D* swizzle) override;
// Track which root ids were used to generate each iter domain
std::unordered_map<IterDomain*, VectorOfUniqueEntries<IterDomain*>>
id_to_root_ids_;
// Track all IterDomains that have correct ordered transforms for contiguity.
// i.e. if we have:
//
// root = [i0, i1, i2]
// i3 = merge(i0, i2)
// would not be a consistently ordered transform
//
// root = [i0, i1, i2]
// i4, i5 = split(merge(merge(i0, i1), i2), 4)
// would be consistently ordered transforms
//
// root = [i0, i1, i2, i3]
// i4 = merge(i1, i2) would also be a consistently ordered transform
std::unordered_set<IterDomain*> consistently_ordered_ids_;
// Active series of IterDomains that are updated while we're processing the
// domain. Helps us identify which ids are consistently_ordered_ids_. Used
// for intermediate storage, not to return.
std::vector<IterDomain*> active_ids_;
// IterDomains in this set exclusively consume all the uses of their roots.
// For example:
// [i0, i1] split(0, f)->merge(1)
// [ceilDiv(i0, f), f*i1]
// neither iter domain exclusively consumes the roots. With another:
// merge(0) -> [ceilDiv(i0, f)*f*i1]
// The resulting iter domain does exclusively consume the roots.
//
// Also:
// [i0, i1, i2, i3] merge(1)->merge(1)
// ->[i0, i1*i2*i3]
// both resulting iter domains do exclusively consume their roots
std::unordered_set<IterDomain*> exclusively_consumes_roots_;
// Broadcast domains that are concretized cannot be considered contiguously
// indexable.
// TODO: This constraint is more conservative than necessary as it's only if
// the domain is concretized within the local indexing, not in the entire
// fusion.
std::shared_ptr<const ConcretizedBroadcastDomains> concrete_info_;
};
// Based on provided divisible split set, goes through expressions and marks all
// IterDomains that are dependent on a non-divisible split.
class NonDivisibleSplitDependencies : public OptInDispatch {
public:
NonDivisibleSplitDependencies() = delete;
NonDivisibleSplitDependencies(
const std::vector<IterDomain*>& ids,
const std::vector<IterDomain*>& root_domain,
const std::unordered_set<Split*>& divisible_splits);
bool dependsOnNonDivisibleSplit(IterDomain* id) const {
return depends_on_non_divisible_split.find(id) !=
depends_on_non_divisible_split.end();
}
private:
std::unordered_set<IterDomain*> depends_on_non_divisible_split;
};
// A merge is contiguous if:
// Inputs of outer are to the left, in the root domain, of the inputs of inner.
// All inputs are contiguous in the root domain:
// - All marked as contiguous
// - Only gaps between inputs are broadcast or reduction dims
// There are no split transformations performed on outer or inner
// All transformations on outer or inner are contiguous merges
// If this criteria holds, then we can index the input root domains of this
// merge with the indexing provided to the output of the merge in the backward
// index pass
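// A rough worked example of the criteria above (illustration only): with a
// fully contiguous root domain [N, M] and out = merge(0) producing [N*M], an
// index i into the merged domain can be passed straight through:
//
//   //   i0 = i / M; i1 = i % M;     // recoverable root indices
//   //   offset = i0 * M + i1 == i;  // so linear indexing with i is valid
//
// If M had a stride != 1, offset would differ from i and the merge would not
// be a contig merge.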
class ContigIDs : public OptInDispatch {
public:
//! Check through the history of ids whose inputs map to root_domain with
//! contiguity root_contiguity. Return unordered_set of all merges that are
//! contiguous. Ignoring root order is primarily used for predicate generation.
//! In this case we can linearize indexing of any ID that only consists of
//! merge operations.
//!
//! Mapping information from CA Index concrete to reference domains
//! is used to find if merged output domains can be indexed. If there's
//! no mapping to a reference domain, there's no corresponding
//! index, so it isn't marked as a contig merge.
//!
//! p2c_id_map can be used when replayed producer domains are
//! analyzed, in which case producer-to-consumer maps should be
//! passed.
//!
//! If ignore_indexability and ignore_halo_constraint are true,
//! ignore the constraint on indexing and halo, respectively. It is
//! the caller that is responsible for its correctness.
//! Not really sure why but clang-tidy only complains about
//! std::unordered_map if passed as a const reference.
ContigIDs(
const std::vector<IterDomain*>& ids,
const std::vector<IterDomain*>& root_domain,
const std::vector<bool>& root_contiguity,
const std::unordered_set<IterDomain*>& final_ids,
const std::unordered_map<IterDomain*, Val*>& index_map,
const std::unordered_set<Split*>& divisible_splits,
std::unordered_map<IterDomain*, IterDomain*> p2c_id_map = {},
bool ignore_indexability = false,
bool ignore_consistent_ordering = false);
//! \param ids IterDomains on the leaves of the domain we're looking for
//! contiguous indexing into.
//! \param root_domain the root domain of the domain we're looking for
//! contiguous indexing into.
//! \param root_contiguity the contiguity of the root_domain.
//! \param concrete_to_ref concrete ids of the exact map that the reference
//! index is using for indexing.
//! \param divisible_splits a set of all splits in the fusion that are
//! divisible.
//! \param ca_map compute at map of the fusion.
//! \param halo_info halo information of the fusion.
//! \param concrete_info concretized broadcast information of the fusion.
//! \param p2c_id_map map from producer to consumer ids used for indexing
//! producer tensors.
//! \param ignore_consistent_ordering true for actual indexing into tensors
//! but false for predicate analysis. Ordering of merges doesn't matter for
//! predicate generation as they don't map to a physical address.
//! \param ignore_indexability can only be true if providing a real
//! concrete_to_ref map, as what it checks is whether the index is actually
//! indexable based on the reference.
ContigIDs(
const std::vector<IterDomain*>& ids,
const std::vector<IterDomain*>& root_domain,
const std::vector<bool>& root_contiguity,
const std::unordered_set<IterDomain*>& final_ids,
const std::unordered_map<IterDomain*, Val*>& index_map,
const std::unordered_set<Split*>& divisible_splits,
std::shared_ptr<const ComputeAtMap> ca_map,
std::shared_ptr<const HaloInfo> halo_info,
std::shared_ptr<const ConcretizedBroadcastDomains> concrete_info,
std::unordered_map<IterDomain*, IterDomain*> p2c_id_map = {},
bool ignore_indexability = false,
bool ignore_consistent_ordering = false);
//! Return an empty ContigIDs with no contiguous ID
static ContigIDs getNonContigIDs();
const std::unordered_set<IterDomain*>& contigIDs() const {
return contig_ids_;
}
const std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>&
withinContigIDs() const {
return within_contig_ids_;
}
const std::unordered_map<IterDomain*, IterDomain*>& rootToIndexedID() const {
return root_to_indexed_id_;
}
VectorOfUniqueEntries<IterDomain*> indexedRootIDs(IterDomain* id) const {
auto root_ids_it = consistent_transform_info_->idToRootIds().find(id);
if (root_ids_it == consistent_transform_info_->idToRootIds().end()) {
return {};
}
return root_ids_it->second;
}
private:
using OptInDispatch::handle;
bool inRoot(const std::vector<IterDomain*>& ids) {
return std::all_of(ids.begin(), ids.end(), [this](IterDomain* id) {
return is_contig_root_.find(id) != is_contig_root_.end();
});
}
bool isContig(IterDomain* id) {
return contig_ids_.find(id) != contig_ids_.end();
}
// Split outputs are not contiguous, don't need to do anything.
void handle(Split*) override {}
void handle(Merge* merge) override;
// TODO:
// Currently not propagating any contiguity information
// as contiguity is generally not preserved after swizzles.
// But in follow ups we could gradually add back a few special
// cases, depending on specific swizzle type and axes.
void handle(Swizzle2D* swizzle) override {}
IterDomain* getCAIndexConcreteId(IterDomain* id) const;
//! True if an ID is indexable.
//! E.g., a merged domain with broadcast may not be indexable when
//! its corresponding reference tensor has non-broadcast domains.
bool isIndexable(IterDomain* id) const;
//! Return an ID mapped with id_map_ or itself
IterDomain* getMappedId(IterDomain* id) const;
private:
void build(const std::vector<IterDomain*>& ids);
//! Root domains to analyze contiguity
const std::vector<IterDomain*>& root_domain_;
//! Contiguity of root_domain_
const std::vector<bool>& root_contiguity_;
//! Domains where indexing/predicates cannot be done with their
//! consumers domains
const std::unordered_set<IterDomain*>& final_ids_;
//! Mapping of concrete domains to indices. Just used to check if
//! there's an index for an IterDomain.
const std::unordered_map<IterDomain*, Val*> index_map_;
// Divisible split information as we can still consider iter domains
// contiguous through divisible splits.
const std::unordered_set<Split*>& divisible_splits_;
std::shared_ptr<const ComputeAtMap> ca_map_;
std::shared_ptr<const HaloInfo> halo_info_;
std::shared_ptr<const ConcretizedBroadcastDomains> concrete_info_;
//! Producer-to-consumer index map in the case of analyzing replayed
//! producer tensors
const std::unordered_map<IterDomain*, IterDomain*> p2c_id_map_;
const bool ignore_indexability_ = false;
const bool ignore_consistent_ordering_ = false;
//! Mapping of root domain to bool indicating contiguity
std::unordered_map<IterDomain*, bool> is_contig_root_;
// Mark if ids are the result of contiguous merges
std::unordered_set<IterDomain*> contig_ids_;
// Given contiguous domain, return all iter domains within its history.
std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>
within_contig_ids_;
//! Mapping of root domain to the actual indexed domain, which can
//! be itself or a contig merged domain if found.
std::unordered_map<IterDomain*, IterDomain*> root_to_indexed_id_;
std::unique_ptr<const OrderedIdInformation> consistent_transform_info_;
NonDivisibleSplitDependencies non_divisible_id_info_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 12,057
| 37.647436
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/disjoint_set.h
|
#pragma once
#include <c10/util/Exception.h>
#include <algorithm>
#include <initializer_list>
#include <unordered_map>
#include <unordered_set>
#include <vector>
// For printing of the set when using a Statement as the type for the set
#include <ir_base_nodes.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace {
template <typename T>
std::string abstractToString(T* ptr) {
return ptr->toString();
}
template <typename T>
std::string abstractToString(T ref) {
return ref.toString();
}
} // namespace
// Vector-like class that prevents adding duplicate entries by also
// maintaining a set
template <typename T, typename Hash = std::hash<T>>
class VectorOfUniqueEntries {
public:
VectorOfUniqueEntries() = default;
VectorOfUniqueEntries(const std::initializer_list<T>& x)
: vector_(x), set_(x) {}
// Returns if a node was actually added
bool pushBack(T entry) {
if (set_.emplace(entry).second) {
vector_.push_back(entry);
return true;
}
return false;
}
// Returns if any node was added
bool pushBack(const VectorOfUniqueEntries<T, Hash>& other) {
bool any_added = false;
for (auto entry : other) {
any_added = any_added | pushBack(entry);
}
return any_added;
}
// Returns a const vector useful for iterating on
const std::vector<T>& vector() const {
return vector_;
}
// Returns first element in vector
T front() const {
return vector_.front();
}
// Returns last element in vector
T back() const {
return vector_.back();
}
// Removes and returns the last element in vector
T popBack() {
T v = vector_.back();
set_.erase(v);
vector_.pop_back();
return v;
}
// Returns if this container is empty
bool empty() const {
return vector_.empty();
}
// Returns the number of elements in this container
size_t size() const {
return vector_.size();
}
// Returns if entry is in this vector
bool has(T entry) const {
return set_.find(entry) != set_.end();
}
// Erase given entry from the containers if
// there is a match.
void erase(T entry) {
vector_.erase(
std::remove_if(
vector_.begin(),
vector_.end(),
[entry](T val) { return val == entry; }),
vector_.end());
set_.erase(entry);
}
// Insert elements at the end of the container.
template <typename InputIt>
void insert(InputIt begin, InputIt end) {
for (auto it = begin; it != end; it++) {
pushBack(*it);
}
}
// Returns iterator pointing to the beginning of vector container
auto begin() const {
return vector().begin();
}
// Returns iterator pointing to the end of vector container
auto end() const {
return vector().end();
}
// Returns iterator pointing to the beginning of vector container
auto begin() {
return vector().begin();
}
// Returns iterator pointing to the end of vector container
auto end() {
return vector().end();
}
std::string toString() {
std::stringstream ss;
ss << "{ ";
for (auto entry : vector()) {
ss << abstractToString(entry);
if (entry != vector().back()) {
ss << "; ";
}
}
ss << " }";
return ss.str();
}
private:
std::vector<T> vector_;
std::unordered_set<T, Hash> set_;
};
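// Minimal usage sketch (illustration only):
//
//   VectorOfUniqueEntries<int> v;
//   v.pushBack(1); // returns true: newly added
//   v.pushBack(1); // returns false: duplicate, container unchanged
//   v.pushBack(2);
//   // v.vector() == {1, 2}; v.has(1) == true; v.size() == 2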
//! Container class DisjointSet models equivalence relationships
//!
//! Each instance of this class keeps equivalence sets
//! DisjointSet::mapEntries(a,b) makes the full set of a and b equivalent
//! DisjointSet::*AreMapped(a,b) checks if a and b belong to the same disjoint
//! set
template <typename T, typename Hash = std::hash<T>>
class DisjointSets {
public:
DisjointSets() = default;
// Warning: returned values should never be modified. This accessor isn't
// strictly safe as VectorOfUniqueEntries is not returned as a const.
const std::
unordered_map<T, std::shared_ptr<VectorOfUniqueEntries<T, Hash>>, Hash>&
disjointSetMap() const {
return disjoint_set_maps_;
}
// Warning: returned values should never be modified. This accessor isn't
// strictly safe as VectorOfUniqueEntries is not returned as a const.
const std::vector<std::shared_ptr<VectorOfUniqueEntries<T, Hash>>>&
disjointSets() const {
return disjoint_sets_;
}
// Return the entire disjoint set of provided entry
const VectorOfUniqueEntries<T, Hash>& getDisjointSetOf(T entry) const {
auto set_it = disjoint_set_maps_.find(entry);
TORCH_INTERNAL_ASSERT(
set_it != disjoint_set_maps_.end(),
"Could not find entry for ",
entry->toString());
return *(set_it->second);
}
// Initializes a new set for provided entry
//
// TODO: Return iterator
void initializeSet(T entry) {
if (disjoint_set_maps_.find(entry) != disjoint_set_maps_.end()) {
return;
}
disjoint_sets_.push_back(
std::make_shared<VectorOfUniqueEntries<T, Hash>>());
disjoint_sets_.back()->pushBack(entry);
disjoint_set_maps_.emplace(std::make_pair(entry, disjoint_sets_.back()));
}
// Adds all of the disjoint set belonging to entry1 to the disjoint set
// belonging to entry0, maps all entries of disjoint set belonging to entry1
// to entry0, removes original disjoint set belonging to entry1.
void mapEntries(T entry0, T entry1) {
auto set_it_0 = disjoint_set_maps_.find(entry0);
auto set_it_1 = disjoint_set_maps_.find(entry1);
// Track if we need to reset iterators, optimize for case where both entries
// exist
bool invalid_iterators = false;
if (set_it_0 == disjoint_set_maps_.end()) {
initializeSet(entry0);
invalid_iterators = true;
}
if (set_it_1 == disjoint_set_maps_.end()) {
initializeSet(entry1);
invalid_iterators = true;
}
// TODO: We can avoid refinding one iterator if initialize set returns an
// iterator, though if we insert entry1 we'd have to refind entry0 as it
// could invalidate all iterators
if (invalid_iterators) {
set_it_0 = disjoint_set_maps_.find(entry0);
set_it_1 = disjoint_set_maps_.find(entry1);
}
auto set0_shared_ptr = set_it_0->second;
auto set1_shared_ptr = set_it_1->second;
// If the sets are already the same, do nothing
if (set0_shared_ptr == set1_shared_ptr) {
return;
}
// Place everything in set1 into set0 and remap all entries in set1 to set0
for (auto entry : set1_shared_ptr->vector()) {
set0_shared_ptr->pushBack(entry);
disjoint_set_maps_[entry] = set0_shared_ptr;
}
// set1 no longer needed as its entries are copied into set0
disjoint_sets_.erase(std::find(
disjoint_sets_.begin(), disjoint_sets_.end(), set1_shared_ptr));
}
// Will assert if provided entry0 is not in any disjoint set, otherwise
// returns if entry0 and entry1 are in the same disjoint set.
bool strictAreMapped(T entry0, T entry1) const {
auto entry_it = disjointSetMap().find(entry0);
TORCH_INTERNAL_ASSERT(
entry_it != disjointSetMap().end(),
"Strict mapping failed on element: ",
abstractToString(entry0),
" either an error occurred, or non strict mapping should have been used.");
return entry_it->second->has(entry1);
}
// If entry0 doesn't have a disjoint set returns false, otherwise returns if
// entry0 and entry1 are in the same disjoint set.
bool permissiveAreMapped(T entry0, T entry1) const {
auto entry_it = disjointSetMap().find(entry0);
if (entry_it == disjointSetMap().end()) {
return false;
}
return entry_it->second->has(entry1);
}
// Returns if a set exists with provided entry
bool mappingExists(T entry) const {
return disjoint_set_maps_.find(entry) != disjoint_set_maps_.end();
}
// Returns a deterministic list of all entries that have been added to any
// disjoint set.
//
// Warning: constructed on every call, consider caching result.
VectorOfUniqueEntries<T, Hash> getAllElements() const {
VectorOfUniqueEntries<T, Hash> all_elements;
for (auto set : disjoint_sets_) {
for (auto entry : set->vector()) {
all_elements.pushBack(entry);
}
}
return all_elements;
}
// Completely clears all disjoint sets
void clear() {
disjoint_set_maps_.clear();
disjoint_sets_.clear();
}
std::string toString() const {
std::stringstream ss;
ss << "disjoint sets{\n";
const std::string sep(" ");
for (auto s_ptr : disjoint_sets_) {
auto& set = *s_ptr;
ss << sep << "{\n";
for (auto entry : set.vector()) {
ss << sep << sep << abstractToString(entry) << "\n";
}
ss << sep << "}\n";
}
ss << "}";
return ss.str();
}
private:
// Disjoint sets
std::unordered_map<T, std::shared_ptr<VectorOfUniqueEntries<T, Hash>>, Hash>
disjoint_set_maps_;
// Keep a list of disjoint_sets that's deterministic to iterate over
std::vector<std::shared_ptr<VectorOfUniqueEntries<T, Hash>>> disjoint_sets_;
};
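// Minimal usage sketch (illustration only):
//
//   DisjointSets<int> ds;
//   ds.mapEntries(1, 2); // sets: {1, 2}
//   ds.mapEntries(3, 4); // sets: {1, 2} {3, 4}
//   ds.mapEntries(2, 3); // sets: {1, 2, 3, 4}
//   // ds.permissiveAreMapped(1, 4) == true
//   // ds.permissiveAreMapped(5, 1) == false (5 was never added)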
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 9,139
| 26.613293
| 83
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/dispatch.h
|
#pragma once
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
#include <utils.h>
#include <unordered_map>
// dispatch.h removes the need to add manual dispatch in every class that
// wants to define how to process a series of nodes. dispatch.h provides 4
// classes that can be inherited providing a means to override functions on a
// per-node basis. There are currently 4 provided dispatch mechanisms:
//
// OptOutDispatch:
//
// provides the functions:
// virtual void handle(ValType* irnode){}
//
// This provides a mechanism to override this handle for particular node
// types. For example if we only wanted to actually run a function on
// BinaryOps, we could inherit OptOutDispatch and simply override: void
// handle(BinaryOp*) { doSomething; } Then we could run through all our
// Statement* and call OptOutDispatch::handle(statement). When a BinaryOp is
// encountered our override function will be called. For every other node,
// nothing will be done.
//
// OptInDispatch:
//
// This class is similar to OptOutDispatch, however if we encounter a node
// that we haven't specified an override for in the derived class, an error
// will be thrown. This is useful if we create a class that is expected to
// handle any type of node it encounters.
//
// OptOutMutator:
//
// This class is similar to OptOutDispatch except the functions provided are of
// type: virtual Statement* mutate(Statement*). This is useful when we want
// to have an IR node result from our overloaded functions.
//
// OptInMutator:
//
// This class is similar to OptInDispatch except the functions provided are of
// type: virtual Statement* mutate(Statement*). This is useful when we want
// to have an IR node result from our overloaded functions.
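// A hedged sketch of the OptOutDispatch example described above (the classes
// are declared later in this header; the derived class and counter names are
// hypothetical):
//
//   class CountBinaryOps : public OptOutDispatch {
//    public:
//     using OptOutDispatch::handle;
//     void handle(BinaryOp*) override { ++count_; }
//     int count_ = 0;
//   };
//   // CountBinaryOps counter;
//   // for (Statement* stmt : stmts) counter.handle(stmt);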
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class IrContainer;
class Fusion;
// Hierarchical dispatch functions for handle
class Statement;
class Expr;
class Val;
// Vals
class IterDomain;
class TensorDomain;
class TensorView;
class Bool;
class Double;
class Int;
class ComplexDouble;
class NamedScalar;
// Exprs
class FullOp;
class ARangeOp;
class EyeOp;
class UnaryOp;
class BinaryOp;
class TernaryOp;
class RNGOp;
class ReductionOp;
class GroupedReductionOp;
class WelfordOp;
class GroupedWelfordOp;
class LoadStoreOp;
class MmaOp;
class BroadcastOp;
class TransposeOp;
class ExpandOp;
class ShiftOp;
class GatherOp;
class ViewAsScalar;
class ViewOp;
// Exprs
class Split;
class Merge;
class Swizzle2D;
namespace kir {
class Predicate;
class TensorIndex;
class IntPair;
class Allocate;
class BlockSync;
class GridSync;
class CpAsyncWait;
class CpAsyncCommit;
class ForLoop;
class IfThenElse;
class GridReduction;
class GroupedGridReduction;
class GridBroadcast;
class GridWelford;
class GroupedGridWelford;
class AllocateFusedReduction;
class InitMagicZero;
class UpdateMagicZero;
class Swizzle2DInt;
class PairSelect;
} // namespace kir
// By default, all IR nodes are handled in this dispatch, and will call an empty
// function on all nodes.
class TORCH_CUDA_CU_API OptOutConstDispatch : public PolymorphicBase {
protected:
virtual void unhandled(const Statement*) {}
public:
// Hierarchical dispatch functions for handle
virtual void handle(const Statement*);
virtual void handle(const Expr*);
virtual void handle(const Val*);
// Vals
virtual void handle(const IterDomain* stmt);
virtual void handle(const TensorDomain* stmt);
virtual void handle(const TensorView* stmt);
virtual void handle(const Bool* stmt);
virtual void handle(const Double* stmt);
virtual void handle(const Int* stmt);
virtual void handle(const ComplexDouble* stmt);
virtual void handle(const NamedScalar* stmt);
virtual void handle(const kir::Predicate*);
virtual void handle(const kir::TensorIndex*);
virtual void handle(const kir::IntPair*);
// Exprs
virtual void handle(const FullOp* stmt);
virtual void handle(const ARangeOp* stmt);
virtual void handle(const EyeOp* stmt);
virtual void handle(const UnaryOp* stmt);
virtual void handle(const BinaryOp* stmt);
virtual void handle(const TernaryOp* stmt);
virtual void handle(const RNGOp* stmt);
virtual void handle(const ReductionOp* stmt);
virtual void handle(const GroupedReductionOp* stmt);
virtual void handle(const WelfordOp* stmt);
virtual void handle(const GroupedWelfordOp* stmt);
virtual void handle(const LoadStoreOp* stmt);
virtual void handle(const MmaOp* stmt);
virtual void handle(const BroadcastOp* stmt);
virtual void handle(const Split* stmt);
virtual void handle(const Merge* stmt);
virtual void handle(const Swizzle2D* stmt);
virtual void handle(const TransposeOp* stmt);
virtual void handle(const ExpandOp* stmt);
virtual void handle(const ShiftOp* stmt);
virtual void handle(const GatherOp* stmt);
virtual void handle(const ViewAsScalar* stmt);
virtual void handle(const ViewOp* stmt);
virtual void handle(const kir::Allocate*);
virtual void handle(const kir::BlockSync*);
virtual void handle(const kir::GridSync*);
virtual void handle(const kir::CpAsyncWait*);
virtual void handle(const kir::CpAsyncCommit*);
virtual void handle(const kir::InitMagicZero*);
virtual void handle(const kir::UpdateMagicZero*);
virtual void handle(const kir::ForLoop*);
virtual void handle(const kir::IfThenElse*);
virtual void handle(const kir::GridReduction*);
virtual void handle(const kir::GroupedGridReduction*);
virtual void handle(const kir::GridBroadcast*);
virtual void handle(const kir::GridWelford*);
virtual void handle(const kir::GroupedGridWelford*);
virtual void handle(const kir::AllocateFusedReduction*);
virtual void handle(const kir::Swizzle2DInt*);
virtual void handle(const kir::PairSelect*);
};
class TORCH_CUDA_CU_API OptOutDispatch : public PolymorphicBase {
protected:
virtual void unhandled(Statement*);
public:
// Hierarchical dispatch functions for handle
virtual void handle(Statement*);
virtual void handle(Expr*);
virtual void handle(Val*);
// Vals
virtual void handle(Bool* stmt);
virtual void handle(Double* stmt);
virtual void handle(Int* stmt);
virtual void handle(ComplexDouble* stmt);
virtual void handle(NamedScalar* stmt);
virtual void handle(IterDomain* stmt);
virtual void handle(TensorDomain* stmt);
virtual void handle(TensorView* stmt);
virtual void handle(kir::Predicate*);
virtual void handle(kir::TensorIndex*);
virtual void handle(kir::IntPair*);
// Exprs
virtual void handle(FullOp* stmt);
virtual void handle(ARangeOp* stmt);
virtual void handle(EyeOp* stmt);
virtual void handle(UnaryOp* stmt);
virtual void handle(BinaryOp* stmt);
virtual void handle(TernaryOp* stmt);
virtual void handle(RNGOp* stmt);
virtual void handle(ReductionOp* stmt);
virtual void handle(GroupedReductionOp* stmt);
virtual void handle(WelfordOp* stmt);
virtual void handle(GroupedWelfordOp* stmt);
virtual void handle(LoadStoreOp* stmt);
virtual void handle(MmaOp* stmt);
virtual void handle(BroadcastOp* stmt);
virtual void handle(Split* stmt);
virtual void handle(Merge* stmt);
virtual void handle(Swizzle2D* stmt);
virtual void handle(TransposeOp* stmt);
virtual void handle(ExpandOp* stmt);
virtual void handle(ShiftOp* stmt);
virtual void handle(GatherOp* stmt);
virtual void handle(ViewAsScalar* stmt);
virtual void handle(ViewOp* stmt);
virtual void handle(kir::Allocate* stmt);
virtual void handle(kir::BlockSync* stmt);
virtual void handle(kir::GridSync* stmt);
virtual void handle(kir::CpAsyncWait* stmt);
virtual void handle(kir::CpAsyncCommit* stmt);
virtual void handle(kir::InitMagicZero* stmt);
virtual void handle(kir::UpdateMagicZero* stmt);
virtual void handle(kir::ForLoop* stmt);
virtual void handle(kir::IfThenElse* stmt);
virtual void handle(kir::GridReduction* stmt);
virtual void handle(kir::GroupedGridReduction* stmt);
virtual void handle(kir::GridBroadcast* stmt);
virtual void handle(kir::GridWelford* stmt);
virtual void handle(kir::GroupedGridWelford* stmt);
virtual void handle(kir::AllocateFusedReduction* stmt);
virtual void handle(kir::Swizzle2DInt* stmt);
virtual void handle(kir::PairSelect* stmt);
};
class TORCH_CUDA_CU_API OptInConstDispatch : public OptOutConstDispatch {
public:
using OptOutConstDispatch::handle;
protected:
virtual void unhandled(const Statement* stmt) final;
};
class TORCH_CUDA_CU_API OptInDispatch : public OptOutDispatch {
public:
using OptOutDispatch::handle;
protected:
virtual void unhandled(Statement* stmt) final;
};
// Class to perform mutations on Fusion IR. Exprs can simply be redefined, but
// when mutating values they have to be registered through registerMutation so
// that exprs can detect there's been a mutation and know to modify all
// instances of that Val. This means each Val should be mutated "consistently".
// Otherwise behavior may be difficult to understand as it depends on which
// order mutate is called in. This class expects the user to call mutate on
// the statements of interest in topological order, so inputs are mutated
// before the exprs that depend on them.
//
// Warning: TensorViews need to be treated carefully. As we don't generally
// register their mutation when their tensor domains only change. If a TV needs
// to be swapped out, it needs to be registered as a "proper" mutation like
// other vals, on top of TensorDomain being updated in the mutated TensorView.
//
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_CUDA_CU_API OptOutMutator : public PolymorphicBase {
public:
// Hierarchical dispatch functions for handle
virtual void mutate(Statement* s);
virtual void mutate(Expr* e);
virtual void mutate(Val* v);
void registerMutation(Val* val, Val* mutation);
Val* maybeMutated(Val* val) {
if (mutations.find(val) == mutations.end()) {
return val;
}
return mutations.at(val);
}
std::unordered_map<Val*, Val*> mutations;
//****Functions below defined in mutator.cpp*****
// Vals
virtual void mutate(Bool*);
virtual void mutate(Double*);
virtual void mutate(Int*);
virtual void mutate(ComplexDouble*);
virtual void mutate(NamedScalar*);
virtual void mutate(IterDomain*);
virtual void mutate(TensorDomain*);
virtual void mutate(TensorView*);
virtual void mutate(kir::Predicate*);
virtual void mutate(kir::TensorIndex*);
virtual void mutate(kir::IntPair*);
// Exprs
virtual void mutate(FullOp*);
virtual void mutate(ARangeOp*);
virtual void mutate(EyeOp*);
virtual void mutate(UnaryOp*);
virtual void mutate(BinaryOp*);
virtual void mutate(TernaryOp*);
virtual void mutate(RNGOp*);
virtual void mutate(ReductionOp*);
virtual void mutate(GroupedReductionOp*);
virtual void mutate(WelfordOp*);
virtual void mutate(GroupedWelfordOp*);
virtual void mutate(LoadStoreOp*);
virtual void mutate(MmaOp*);
virtual void mutate(BroadcastOp*);
virtual void mutate(Split*);
virtual void mutate(Merge*);
virtual void mutate(Swizzle2D*);
virtual void mutate(TransposeOp*);
virtual void mutate(ExpandOp*);
virtual void mutate(ShiftOp*);
virtual void mutate(GatherOp*);
virtual void mutate(ViewAsScalar*);
virtual void mutate(ViewOp*);
virtual void mutate(kir::Allocate*);
virtual void mutate(kir::BlockSync*);
virtual void mutate(kir::GridSync*);
virtual void mutate(kir::CpAsyncWait*);
virtual void mutate(kir::CpAsyncCommit*);
virtual void mutate(kir::InitMagicZero*);
virtual void mutate(kir::UpdateMagicZero*);
virtual void mutate(kir::ForLoop*);
virtual void mutate(kir::IfThenElse*);
virtual void mutate(kir::GridReduction*);
virtual void mutate(kir::GroupedGridReduction*);
virtual void mutate(kir::GridBroadcast*);
virtual void mutate(kir::GridWelford*);
virtual void mutate(kir::GroupedGridWelford*);
virtual void mutate(kir::AllocateFusedReduction*);
virtual void mutate(kir::Swizzle2DInt*);
virtual void mutate(kir::PairSelect*);
protected:
void removeExpr(IrContainer*, Expr*);
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 12,148
| 31.055409
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/dynamic_type.h
|
#pragma once
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
#include <c10/util/variant.h>
#include <cmath>
#include <iostream>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class TORCH_CUDA_CU_API IntOrDouble {
c10::variant<double, int64_t> value_;
public:
IntOrDouble(int64_t i) : value_(i) {}
IntOrDouble(double d) : value_(d) {}
IntOrDouble(int i) : value_((int64_t)i) {}
IntOrDouble(size_t i) : value_((int64_t)i) {}
IntOrDouble() : IntOrDouble(0) {}
// Avoid using copy constructor of c10::variant as it's
// deprecated.
IntOrDouble(const IntOrDouble& other) {
value_ = other.value_;
}
// Explicitly define copy assignment operator as its implicit definition is
// deprecated
IntOrDouble& operator=(const IntOrDouble& other) {
value_ = other.value_;
return *this;
}
bool is_int() const {
return c10::holds_alternative<int64_t>(value_);
}
template <typename T>
T as() const {
TORCH_CHECK(
c10::holds_alternative<T>(value_),
"The expected dtype and the actual dtype does not match in IntOrDouble");
return c10::get<T>(value_);
}
template <typename T>
T cast() const;
#define DEFINE_ARITHMETIC_OP(op) \
IntOrDouble operator op(const IntOrDouble& other) const { \
switch ((int)is_int() << 1 | (int)other.is_int()) { \
case 0b00: \
return IntOrDouble(as<double>() op other.as<double>()); \
case 0b01: \
return IntOrDouble(as<double>() op other.as<int64_t>()); \
case 0b10: \
return IntOrDouble(as<int64_t>() op other.as<double>()); \
case 0b11: \
return IntOrDouble(as<int64_t>() op other.as<int64_t>()); \
} \
TORCH_INTERNAL_ASSERT(false); \
} \
template <typename T> \
IntOrDouble operator op(T other) const { \
if (is_int()) { \
return IntOrDouble(as<int64_t>() op other); \
} \
return IntOrDouble(as<double>() op other); \
}
DEFINE_ARITHMETIC_OP(+)
DEFINE_ARITHMETIC_OP(-)
DEFINE_ARITHMETIC_OP(*)
DEFINE_ARITHMETIC_OP(/)
DEFINE_ARITHMETIC_OP(&&)
#undef DEFINE_ARITHMETIC_OP
#define DEFINE_ASSIGN_OP(assign, op) \
IntOrDouble& operator assign(const IntOrDouble& other) { \
switch ((int)is_int() << 1 | (int)other.is_int()) { \
case 0b00: \
return *this = IntOrDouble(as<double>() op other.as<double>()); \
case 0b01: \
return *this = IntOrDouble(as<double>() op other.as<int64_t>()); \
case 0b10: \
return *this = IntOrDouble(as<int64_t>() op other.as<double>()); \
case 0b11: \
return *this = IntOrDouble(as<int64_t>() op other.as<int64_t>()); \
} \
TORCH_INTERNAL_ASSERT(false); \
} \
template <typename T> \
IntOrDouble& operator assign(T other) { \
if (is_int()) { \
return *this = IntOrDouble(as<int64_t>() op other); \
} \
return *this = IntOrDouble(as<double>() op other); \
}
DEFINE_ASSIGN_OP(+=, +)
DEFINE_ASSIGN_OP(-=, -)
DEFINE_ASSIGN_OP(*=, *)
DEFINE_ASSIGN_OP(/=, /)
#undef DEFINE_ASSIGN_OP
IntOrDouble operator%(const IntOrDouble& other) const {
if (is_int() && other.is_int()) {
return IntOrDouble(as<int64_t>() % other.as<int64_t>());
}
TORCH_INTERNAL_ASSERT(false);
}
IntOrDouble operator%(int64_t other) const {
if (is_int()) {
return IntOrDouble(as<int64_t>() % other);
}
TORCH_INTERNAL_ASSERT(false);
}
IntOrDouble& operator%=(const IntOrDouble& other) {
if (is_int() && other.is_int()) {
return *this = IntOrDouble(as<int64_t>() % other.as<int64_t>());
}
TORCH_INTERNAL_ASSERT(false);
}
IntOrDouble& operator%=(int64_t other) {
if (is_int()) {
return *this = IntOrDouble(as<int64_t>() % other);
}
TORCH_INTERNAL_ASSERT(false);
}
#define DEFINE_COMPARE_OP(op) \
bool operator op(const IntOrDouble& other) const { \
switch ((int)is_int() << 1 | (int)other.is_int()) { \
case 0b00: \
return as<double>() op other.as<double>(); \
case 0b01: \
return as<double>() op other.as<int64_t>(); \
case 0b10: \
return as<int64_t>() op other.as<double>(); \
case 0b11: \
return as<int64_t>() op other.as<int64_t>(); \
} \
TORCH_INTERNAL_ASSERT(false); \
} \
bool operator op(double other) { \
if (is_int()) { \
return as<int64_t>() op other; \
} \
return as<double>() op other; \
} \
bool operator op(int64_t other) { \
if (is_int()) { \
return as<int64_t>() op other; \
} \
return as<double>() op other; \
} \
bool operator op(int other) { \
if (is_int()) { \
return as<int64_t>() op other; \
} \
return as<double>() op other; \
}
DEFINE_COMPARE_OP(>)
DEFINE_COMPARE_OP(>=)
DEFINE_COMPARE_OP(<)
DEFINE_COMPARE_OP(<=)
DEFINE_COMPARE_OP(==)
DEFINE_COMPARE_OP(!=)
#undef DEFINE_COMPARE_OP
IntOrDouble operator-() const {
if (is_int()) {
return IntOrDouble(-as<int64_t>());
}
return IntOrDouble(-as<double>());
}
explicit operator double() const;
explicit operator int64_t() const;
explicit operator size_t() const;
explicit operator int() const;
};
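// Minimal usage sketch (illustration only): arithmetic promotes to double
// whenever either operand holds a double:
//
//   IntOrDouble a((int64_t)3);
//   IntOrDouble b(2.0);
//   auto c = a / b;                        // holds double 1.5
//   auto d = a / IntOrDouble((int64_t)2);  // holds int64_t 1
//   // c.is_int() == false; d.as<int64_t>() == 1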
#define DEFINE_ARITHMETIC_OP(op) \
template <typename T> \
inline IntOrDouble operator op(T lhs, IntOrDouble rhs) { \
if (rhs.is_int()) { \
return IntOrDouble(lhs op rhs.as<int64_t>()); \
} \
return IntOrDouble(lhs op rhs.as<double>()); \
}
DEFINE_ARITHMETIC_OP(+)
DEFINE_ARITHMETIC_OP(-)
DEFINE_ARITHMETIC_OP(*)
DEFINE_ARITHMETIC_OP(/)
#undef DEFINE_ARITHMETIC_OP
template <>
inline double IntOrDouble::cast<double>() const {
if (is_int()) {
return (double)as<int64_t>();
}
return as<double>();
}
template <>
inline int64_t IntOrDouble::cast<int64_t>() const {
if (!is_int()) {
return (int64_t)as<double>();
}
return as<int64_t>();
}
inline IntOrDouble::operator double() const {
return as<double>();
}
inline IntOrDouble::operator int64_t() const {
return as<int64_t>();
}
inline IntOrDouble::operator size_t() const {
return as<int64_t>();
}
inline IntOrDouble::operator int() const {
return as<int64_t>();
}
#define DEFINE_EQ_OP(op) \
inline bool operator op(double lhs, const IntOrDouble& rhs) { \
if (rhs.is_int()) { \
return false; \
} \
return lhs op rhs.as<double>(); \
} \
\
inline bool operator op(int64_t lhs, const IntOrDouble& rhs) { \
if (rhs.is_int()) { \
return lhs op rhs.as<int64_t>(); \
} \
return false; \
} \
\
inline bool operator op(int lhs, const IntOrDouble& rhs) { \
return operator op((int64_t)lhs, rhs); \
}
DEFINE_EQ_OP(==)
DEFINE_EQ_OP(!=)
#undef DEFINE_EQ_OP
inline std::ostream& operator<<(std::ostream& os, const IntOrDouble& val) {
if (val.is_int()) {
return os << val.as<int64_t>();
}
return os << val.as<double>();
}
namespace IntOrDouble_functions {
inline IntOrDouble ceildiv(const IntOrDouble& a, const IntOrDouble& b) {
if (a.is_int() && b.is_int()) {
auto aa = a.as<int64_t>();
auto bb = b.as<int64_t>();
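// Bias by (bb - 1) for positive bb, or (bb + 1) for negative bb, so the
// truncating integer division below rounds up (toward +inf) either way.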
if (bb > 0) {
return (aa + bb - 1) / bb;
} else {
return (aa + bb + 1) / bb;
}
}
return std::ceil((a / b).as<double>());
}
inline IntOrDouble max(const IntOrDouble& a, const IntOrDouble& b) {
if (a.is_int() && b.is_int()) {
return std::max(a.as<int64_t>(), b.as<int64_t>());
}
return (a > b ? a : b).cast<double>();
}
inline IntOrDouble min(const IntOrDouble& a, const IntOrDouble& b) {
if (a.is_int() && b.is_int()) {
return std::min(a.as<int64_t>(), b.as<int64_t>());
}
return (a < b ? a : b).cast<double>();
}
inline IntOrDouble abs(const IntOrDouble& a) {
if (a.is_int()) {
return IntOrDouble(std::abs(a.as<int64_t>()));
} else {
return IntOrDouble(std::abs(a.as<double>()));
}
}
} // namespace IntOrDouble_functions
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 10,968
| 34.044728
| 81
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/evaluator_common.h
|
#pragma once
#include <dynamic_type.h>
#include <executor_kernel_arg.h>
#include <executor_launch_params.h>
#include <fusion.h>
#include <ir_all_nodes.h>
#include <lower2device.h>
#include <c10/core/DeviceType.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! This is the common space for expression evaluators in
//! fusion IR and kernel IR contexts. Much of the evaluator
//! optimization and runtime logic can share the same code
//! path, and it is collected here.
class ExpressionEvaluator;
namespace kir {
class ExpressionEvaluator;
} // namespace kir
//! IR Contexts to be passed to generic evaluator optimizations
//! and runtimes. Defines the essential interface for the
//! generic logic to get necessary type and function info
//! from the IR nodes. Generic optimizations will assume
//! the same list of static definitions are provided
//! in each of the contexts, just FusionIR and KernelIR
//! currently.
//! Context for using generic logic on FusionIR
class FusionIRContext {
public:
using TV_TYPE = TensorView;
using EVALUATOR_TYPE = ExpressionEvaluator;
static BinaryOpType getOpType(BinaryOp* bop) {
return bop->getBinaryOpType();
}
static UnaryOpType getOpType(UnaryOp* uop) {
return uop->getUnaryOpType();
}
};
//! Context for using generic logic on KernelIR
class KernelIRContext {
public:
using EVALUATOR_TYPE = kir::ExpressionEvaluator;
static BinaryOpType getOpType(BinaryOp* bop) {
return bop->getBinaryOpType();
}
static UnaryOpType getOpType(UnaryOp* uop) {
return uop->getUnaryOpType();
}
};
template <typename IRContext>
class PrecomputedValuesBase;
//! NaiveValueMachine:
//! This is an un-optimized runtime for evaluating a
//! set of values in one run. The runtime contains
//! a vector of instructions inferred from IR at compile-time
//! and it currently must be associated with an instance of
//! PrecomputedValuesBase that will provide the workspace
//! containing the concrete values.
template <typename IRContext>
class NaiveValueMachine {
//! The generic types of instructions supported for this
//! machine, currently only binary and unary.
enum class InstructionType { UNARY_OP, BINARY_OP };
public:
//! Constructor lowers all the expr IR nodes stored in precomputed_values
//! and stores them in the private state.
NaiveValueMachine(PrecomputedValuesBase<IRContext>& precomputed_values);
//! Runs all the instructions and write results to the associated
//! precomputed_values.
void run();
private:
//! Convert a unary IR expr to an instruction
void makeUnaryOp(UnaryOp* uop);
//! Convert a binary IR expr to an instruction
void makeBinaryOp(BinaryOp* bop);
//! Create an empty instruction with all default values
//! and place it at the end of the instruction buffer.
int makeInstructionEntry();
//! Run a single instruction at the given index of
//! the instruction buffer. Decodes and dispatches
//! to the corresponding instruction handle functions.
void runInstruction(int index);
//! Runs a unary operation at given index of instruction buffer
void runUnaryOp(int index);
//! Runs a binary operation at given index of instruction buffer
void runBinaryOp(int index);
private:
friend PrecomputedValuesBase<IRContext>;
//! Reference to the PrecomputedValues workspace associated with
//! this runtime. All the instructions will read and write the
//! values in this workspace.
PrecomputedValuesBase<IRContext>& precomputed_values_;
//! Instruction buffer. All states are in separate vectors and
//! the entry of each vector at the same index corresponds to
//! the same instruction.
//! Total number of instructions
int num_of_instructions_ = 0;
//! Machine instruction type for each instruction i.e.
//! unary or binary
std::vector<InstructionType> inst_type_;
//! Unary operator type if applicable, contains a default
//! value at each index corresponding to a binary op.
std::vector<UnaryOpType> uop_type_;
//! Data type for unary op of type UnaryOpType::Cast, contains a default
//! value at each index corresponding other ops.
std::vector<DataType> data_type_;
//! Binary operator type if applicable, contains a default
//! value at each index corresponding to a unary op.
std::vector<BinaryOpType> bop_type_;
//! Indexes of operands and destination of each instruction.
//! The indexes correspond to positions in the workspace
//! where concrete values are hosted.
//! Operand 0 of each instruction.
std::vector<int> src0_;
//! Operand 1 of each instruction, a default value at
//! each index corresponding to a unary op.
std::vector<int> src1_;
//! Destination of each instruction.
std::vector<int> dest_;
};
//! PrecomputedValuesBase:
//! A class to support optimized evaluation of values
//! at runtime.
//! At compile time, all necessary values are collected
//! from the given IR nodes, and a runtime and a workspace containing
//! the concrete values are created and pre-allocated.
//! At runtime the value machine is used to evaluate all the
//! values and store them in the workspace ahead of time.
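//! A rough lifecycle sketch for a concrete subclass (illustration only):
//!
//!   // 1. initializeValueList(evaluator, topologically_sorted_vals);
//!   // 2. bindValue(index, concrete_value);  // bind runtime inputs
//!   // 3. evaluate();                        // run the value machine
//!   // 4. getMaybeValueFor(some_val);        // query computed results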
template <typename IRContext>
class PrecomputedValuesBase {
using VALUE_MACHINE = NaiveValueMachine<IRContext>;
public:
explicit PrecomputedValuesBase() = default;
//! Returns if the workspace contains evaluated results.
bool ready() {
return has_valid_values_;
}
//! Runs the internal value machine that will compute
//! the values allocated in the workspace.
void evaluate();
//! Returns value for the given IR node if it's stored
//! in the workspace and has been evaluated.
c10::optional<IntOrDouble> getMaybeValueFor(const Val* val);
//! Debugging helper, prints all the currently known values
void print() const;
protected:
//! Initialize the workspace before first use.
//! Assume the given value list IR nodes have
//! been topologically sorted.
void initializeValueList(
typename IRContext::EVALUATOR_TYPE& evaluator,
const std::vector<Val*>& sorted_value_list);
//! Bind concrete value to the given index
//! if the index is valid.
void bindValue(int index, IntOrDouble value) {
if (index < 0 || is_constant_[index]) {
return;
}
defined_[index] = true;
values_[index] = value;
binding_log_.emplace_back(index, value);
}
//! Invalidate all computed values in the workspace.
void invalidate();
//! Interface for subclasses to access symbols_
void loadSymbols(std::vector<Val*> symbols) {
symbols_ = std::move(symbols);
}
//! Interface for subclasses to access symbols_
std::vector<Val*>& symbols() {
return symbols_;
}
//! Initialize the value runtime that will
//! infer instructions from the workspace.
void initializeIntegerMachine() {
value_machine_ = std::make_unique<VALUE_MACHINE>(*this);
}
bool hasValidValues() {
return has_valid_values_;
}
private:
//! Post evaluation check, throws if any computed value
//! is inconsistent with its bound value
void validate();
//! Returns true if workspace has a computed or constant
//! value for given index.
bool hasValue(int index) {
TORCH_INTERNAL_ASSERT(index > 0);
return defined_[index] || is_constant_[index];
}
private:
friend VALUE_MACHINE;
//! Marks if an evaluation has finished
bool has_valid_values_ = false;
//! The size of workspace
int num_of_values_ = -1;
//! Marks if a value has been bound or
//! computed at each index.
std::vector<bool> defined_;
//! Marks if a value is compile-time constant
//! at each index.
std::vector<bool> is_constant_;
//! Stores the concrete values at each index.
std::vector<IntOrDouble> values_;
//! Stores the IR nodes corresponding to each index.
std::vector<Val*> symbols_;
//! An internal log to keep track of all the bindings
//! used in each evaluation cycle. To be used for
//! consistency check.
std::vector<std::pair<int, IntOrDouble>> binding_log_;
//! Integer runtime for realizing the values computations.
std::unique_ptr<VALUE_MACHINE> value_machine_;
};
//! PrecomputedValues workspace in Fusion IR context,
//! defines the set of values to be collected in each
//! fusion graph and the input value binding given each
//! fusion runtime input.
class FusionPrecomputedValues : public PrecomputedValuesBase<FusionIRContext> {
using precomputedValuesBaseType = PrecomputedValuesBase<FusionIRContext>;
public:
FusionPrecomputedValues(Fusion* fusion);
//! Bind concrete values from fusion runtime inputs
void bindFusionInputs(const KernelArgumentHolder& args);
private:
void bindTensorMetaData(
TensorView* tv,
const TensorArgAbstract* tensor_arg_abstract);
private:
Fusion* fusion_ = nullptr;
};
//! PrecomputedValues workspace in Kernel IR context,
//! defines the set of values to be collected in each
//! kernel IR sequence and the input value binding given each
//! fusion runtime input and launch constraints.
class KernelPrecomputedValues : public PrecomputedValuesBase<KernelIRContext> {
using precomputedValuesBaseType = PrecomputedValuesBase<KernelIRContext>;
public:
using ParallelExtentMap =
std::unordered_map<ParallelType, std::vector<const Val*>, TypeHash>;
KernelPrecomputedValues(kir::Kernel* kernel);
//! Bind concrete values from fusion runtime inputs
void bindKernelInputs(kir::Kernel* kernel, const KernelArgumentHolder& args);
//! Bind concrete values from launch constraints
void bindParallelExtents(
const ParallelExtentMap& parallel_extents,
const LaunchParams& launch_constraint);
//! Bind the NamedScalars corresponding to the
//! concrete parallel dimension sizes after the
//! actual value has been resolved.
void bindConcreteParallelTypeValue(ParallelType pt, int64_t value);
private:
void bindTensorMetaData(
TensorView* tv,
const TensorArgAbstract* tensor_arg_abstract);
//! Iterate through all the named scalars corresponding
//! to thread sizes and pre-group them by their parallel
//! types.
void initializeNamedScalars();
private:
  //! Contains all the named scalars corresponding
  //! to the thread size of each parallel type.
std::unordered_map<ParallelType, std::unique_ptr<std::vector<int>>, TypeHash>
thread_dim_value_indices_;
};
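//! Usage sketch (illustrative only): resolving a concrete thread dimension
//! after launch parameter binding. `kernel` and `args` are assumed to come
//! from a lowered fusion and its runtime inputs:
//!
//!   KernelPrecomputedValues precomputed(kernel);
//!   precomputed.bindKernelInputs(kernel, args);
//!   precomputed.bindConcreteParallelTypeValue(ParallelType::TIDx, 256);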
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 10,558
| 29.694767
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/executor.h
|
#pragma once
#include <executor_launch_params.h>
#include <executor_utils.h>
#include <fusion.h>
#include <ir_all_nodes.h>
#include <ir_cloner.h>
#include <ir_printer.h>
#include <kernel_expr_evaluator.h>
#include <lower2device.h>
#include <utils.h>
#include <c10/core/DeviceType.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
TORCH_CUDA_CU_API bool shouldFillAllocationWithNan();
TORCH_CUDA_CU_API void setFillAllocationWithNan(bool value);
// TODO: Should this actually be in launch params?
struct TORCH_CUDA_CU_API CompileOptions {
c10::Device device = c10::Device(c10::DeviceType::CUDA, 0);
KernelIndexMode index_mode = KernelIndexMode::INT64;
};
class TORCH_CUDA_CU_API FusionExecutor : public NonCopyable {
public:
// Unsafe compilation that's useful for debugging kernels, iterating over
// slight modifications of a generated kernel
void debugCompileFusionFromStr(
Fusion* fusion,
const std::string& code,
const std::string& name,
int id,
CompileOptions options = CompileOptions());
  //! infers output sizes by returning a non-allocated KernelArgumentHolder.
//! this function is useful for async compilation for segmented fusion
KernelArgumentHolder inferOutputSizes(
const KernelArgumentHolder& args,
const LaunchParams& launch_constraints);
void compileFusion(
Fusion* fusion,
const KernelArgumentHolder& args,
const LaunchParams& launch_constraints = LaunchParams());
// TODO: merge it with the overload above.
//! This API is merely here so we don't have to go back and update all cpp
//! tests.
void compileFusion(
Fusion* fusion,
const at::ArrayRef<IValue>& inputs = {},
const LaunchParams& launch_constraints = LaunchParams()) {
KernelArgumentHolder args =
KernelArgumentHolder::createKernelArgumentHolder(inputs);
compileFusion(fusion, args, launch_constraints);
}
std::vector<at::Tensor> runFusion(
KernelArgumentHolder& args,
const LaunchParams& launch_constraints = LaunchParams(),
const std::vector<at::Tensor>& outputs = {});
std::vector<at::Tensor> runFusion(
const at::ArrayRef<IValue>& inputs,
const std::vector<at::Tensor>& outputs,
const LaunchParams& launch_constraints = LaunchParams(),
const c10::optional<size_t>& opt_code = c10::nullopt) {
KernelArgumentHolder args =
KernelArgumentHolder::createKernelArgumentHolder(inputs);
if (opt_code.has_value()) {
args.setCacheId(*opt_code);
}
return runFusion(args, launch_constraints, outputs);
}
std::vector<at::Tensor> runFusion(
const at::ArrayRef<IValue>& inputs,
const LaunchParams& launch_constraints = LaunchParams(),
const c10::optional<size_t>& opt_code = c10::nullopt) {
return runFusion(inputs, {}, launch_constraints, opt_code);
}
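  //! Usage sketch (illustrative only, mirroring the cpp test idiom). Here
  //! `fusion` is assumed to be a complete Fusion* and `t0`, `t1` CUDA
  //! tensors matching its registered inputs:
  //!
  //!   std::vector<c10::IValue> inputs = {t0, t1};
  //!   FusionExecutor fe;
  //!   fe.compileFusion(fusion, inputs);
  //!   auto outputs = fe.runFusion(inputs);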
// function to query whether a `FusionExecutor` has a compiled kernel to
// execute
bool compiled() const {
return fusion_id_ != -1 && lowered_;
};
void evictCache(size_t cache_id) {
executor_entry_lookup_.erase(cache_id);
}
// struct used to hold necessary information to launch compiled kernel on a
// given input set.
//
// TODO: strides would also be important when we handle permutations in
// codegen.
//
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct ExecutorEntry {
bool init = false;
LaunchParams launch_params;
std::vector<std::pair<int, int>> io_alias_indices;
std::vector<std::vector<int64_t>> output_sizes;
std::vector<std::vector<int64_t>> output_strides;
std::vector<at::ScalarType> output_types;
std::vector<std::vector<int64_t>> buffer_sizes;
std::vector<at::ScalarType> buffer_types;
std::vector<bool> buffer_zero_init;
uint64_t rand_offset;
};
using ExecutorCompileTimeInfoCache =
executor_utils::caching::ExecutorCompileTimeInfoCache;
kir::Kernel* kernel() const {
TORCH_INTERNAL_ASSERT(lowered_);
return lowered_->kernel();
}
//! Internal knob used for debugging/profiling only
void setExecuteKernelFlag(bool execute_kernel) {
execute_kernel_ = execute_kernel;
}
//! Internal knob used for debugging/profiling only
void setMeasureKernelTimeFlag(bool measure_kernel_time) {
measure_kernel_time_ = measure_kernel_time;
}
//! Returns the last kernel execution time, in milliseconds
//!
//! \note The kernel time is only tracked if enabled by calling
//! setMeasureKernelTimeFlag(true)
//!
float kernelTimeMs() const {
return measure_kernel_time_ ? kernel_time_ms_ : 0;
}
//! Returns the number of bytes processed last kernel execution
int64_t bytesProcessed() const {
return bytes_processed_;
}
//! Returns the launch parameters from the last kernel execution
LaunchParams lastLaunchParams() const {
return launch_params_;
}
//! Returns the string of the compiled kernel
std::string kernelString() const {
return kernel_code_;
}
//! Returns the latest compile log
std::string compilerLog() const {
return last_compiler_log_;
}
std::string kernelName() const {
std::stringstream ss;
ss << "kernel" << fusion_id_;
return ss.str();
}
//! Internal tests only. Compiles CUDA code with NVRTC directly from
//! string. This util provides a path to test runtime code, i.e. the resource
//! strings.
void compileRtc(
const std::string& code,
const std::string& name,
bool structured = false,
CompileOptions options = CompileOptions());
//! Internal tests only. Runs the compiled CUDA kernel from compileRtc.
void runRtc(
const LaunchParams& launch_params,
const std::vector<at::Tensor>& args);
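  //! Sketch of the internal-testing flow (the kernel name and launch
  //! dimensions below are illustrative only):
  //!
  //!   fe.compileRtc(code, "CudaCodeGen::kernel1", /*structured=*/true);
  //!   fe.runRtc(LaunchParams(1, 1, 1, 32, 1, 1), {t0});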
//! Internal knob used for debugging/profiling only
void disableLaunchParamCache() {
disable_parameter_cache_ = true;
}
private:
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct GlobalBuffers {
std::vector<at::Tensor> buffers;
std::vector<bool> zero_init;
at::Tensor profile_buffer;
};
static std::string kernelNamespace() {
return "CudaCodeGen";
}
// Add preamble and wrap in namespace
std::string getStructuredCode(const std::string& kernel);
LaunchParams computeLaunchParams(
const LaunchParams& launch_constraints,
kir::ExpressionEvaluator& expr_eval,
const int warp_size);
uint64_t computeSharedMemory(
kir::ExpressionEvaluator& expr_eval,
const std::vector<const kir::Allocate*>& buffers,
bool align_padding = false,
uint64_t total = 0);
  // return a pair of vectors of tensors, where tensors in the first vector are
  // not initialized, while the second vector contains zero-initialized tensors
GlobalBuffers allocGlobalVals(kir::ExpressionEvaluator& expr_eval);
  // alias_indices: indices of outputs that are aliases of inputs; we should
  // skip allocating real storage for those, but still reserve their spots to
  // preserve the indexing from output aliases to inputs
std::vector<at::Tensor> allocOutputs(
const KernelArgumentHolder& args,
kir::ExpressionEvaluator& expr_eval,
const std::unordered_set<int>& alias_indices = {});
void setUsedTVs();
const std::vector<TensorView*>& getUsedTVs() const {
return used_tvs_;
};
ExecutorCompileTimeInfoCache* compileTimeDataCache() {
return &compile_time_info_cache_;
}
  //! returns KernelArgumentHolder representing the output sizes from kernel
  //! execution. Note: 1. this API ignores aliased outputs and instead pushes
  //! scalar int 0 as a placeholder; 2. this API doesn't actually allocate
  //! outputs in memory, but rather is used just to infer output sizes.
KernelArgumentHolder evaluateOutputSizes(
const KernelArgumentHolder& args,
kir::ExpressionEvaluator& expr_eval,
const std::unordered_set<int>& alias_indices = {});
private:
CompileOptions options_;
//! Current configured total shared mem size from cudaDeviceProp
size_t configured_device_smem_ = std::numeric_limits<size_t>().max();
//! Available shared memory space for dynamic allocation for the current
//! compiled kernel at the current shared memory/L1 configuration
c10::optional<size_t> maybe_available_dynamic_smem_ = c10::nullopt;
//! Absolute limit of all available shared mem space from cudaDeviceProp
size_t device_smem_limit_ = std::numeric_limits<size_t>().max();
// Assuming sm70 or above:
// limit of statically allocated smem is 48 KB:
// See:
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-8-x
const uint64_t max_static_smem_ = 48 << 10;
int warp_size_ = 0;
executor_utils::NvrtcFunction compiled_kernel_;
// TensorViews actually used in the kernel.
std::vector<TensorView*> used_tvs_;
// Counter to be used for kernel name.
int fusion_id_ = -1;
static int fusion_id_counter_;
std::unique_ptr<GpuLower> lowered_;
// Copy of lowered_->kernel()
Fusion* fusion_ = nullptr;
  // Track the block size this kernel was compiled with. If the block size
  // increases, recompile to adjust the max register count.
int64_t block_size_high_water_mark = 1;
  // lookup table shortcut to retrieve recorded information so that kernels
  // can be launched without re-inferring parameters.
std::unordered_map<size_t, ExecutorEntry> executor_entry_lookup_;
  // Compile-time information caching. This is used for shape inference
  // support. The cache stores graph information that is available
  // without shape information so that each shape inference call will
  // not need to re-compute it.
ExecutorCompileTimeInfoCache compile_time_info_cache_;
// Cached expr eval
std::unique_ptr<KernelPrecomputedValues> evaluator_precomputed_values_ =
nullptr;
  // Profiling support: knob to control whether we actually execute the
  // kernel on the GPU or not
bool execute_kernel_ = true;
// Profiling support: knob to enable measuring kernel execution time
bool measure_kernel_time_ = false;
// Profiling support: the last kernel execution time, if measure_kernel_time_
// is true
float kernel_time_ms_ = 0;
  // Profiling support: bytes processed by the last kernel execution
int64_t bytes_processed_ = 0;
// Profiling support: the last launch param used
LaunchParams launch_params_;
  // Profiling support: disable caching of launch params and output allocation.
  // Output allocation is also disabled when output sizes depend on runtime
  // scalar inputs, such as in the case of tensor factories. See
  // https://github.com/csarofeen/pytorch/issues/2002
bool disable_parameter_cache_ = false;
// Profiling support: kept copy of the cuda kernel
std::string kernel_code_;
// Profiling support: nvrtc log for debugging
std::string last_compiler_log_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 11,047
| 32.377644
| 87
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/executor_kernel_arg.h
|
#pragma once
#include <ATen/core/ivalue.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <c10/util/Exception.h>
#include <type.h>
#include <torch/csrc/jit/ir/ir.h>
#include <array>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// This should match the tensor used in the code generation (almost exactly)
template <typename T, int N, typename nvfuser_index_t>
struct TensorArgCodegen {
T& operator[](nvfuser_index_t ind) {
return data[ind];
};
T* data;
std::array<nvfuser_index_t, N> size;
std::array<nvfuser_index_t, N> stride;
constexpr int nDims() const {
return N;
}
void setSize(int i, nvfuser_index_t s) {
size[i] = s;
}
void setStride(int i, nvfuser_index_t s) {
stride[i] = s;
}
nvfuser_index_t getSize(int i) const {
return size[i];
}
nvfuser_index_t getStride(int i) const {
return stride[i];
}
};
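// Sketch (illustrative only): what a generated kernel parameter looks like
// for a rank-2 float tensor with 64-bit indexing; the size/stride values
// below are made up, and strides are assumed to be in elements:
//
//   TensorArgCodegen<float, 2, int64_t> arg;
//   arg.setSize(0, 128);
//   arg.setStride(0, 64);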
// 0-Dim GPU based tensor
template <typename T, typename nvfuser_index_t>
struct TensorArgCodegen<T, 0, nvfuser_index_t> {
T& operator[](nvfuser_index_t ind) {
return data[ind];
};
T* data;
constexpr int nDims() const {
return 0;
}
void setSize(int, nvfuser_index_t) {
TORCH_INTERNAL_ASSERT(false, "Tried to set size of a 0-dim tensor");
}
void setStride(int, nvfuser_index_t) {
TORCH_INTERNAL_ASSERT(false, "Tried to set stride of a 0-dim tensor");
}
nvfuser_index_t getSize(int i) const {
TORCH_INTERNAL_ASSERT(false, "Tried to get size of a 0-dim tensor");
}
nvfuser_index_t getStride(int i) const {
TORCH_INTERNAL_ASSERT(false, "Tried to get stride of a 0-dim tensor");
}
};
// Specialization for 0-dim case that's easy to pass in a CPU based tensor
// without memcpy
template <typename T>
struct CpuScalarTensorCodegen {
T& operator[](int) {
return data;
};
T data;
};
// TODO: macro this and the printer below
enum class ArgType {
PhiloxCudaState,
Long,
Double,
ComplexDouble,
Bool,
Tensor,
CpuScalarTensor
};
inline std::string argTypeToString(ArgType type) {
std::string ret;
switch (type) {
case ArgType::PhiloxCudaState:
ret = "PhiloxCudaState";
break;
case ArgType::Long:
ret = "Long";
break;
case ArgType::Double:
ret = "Double";
break;
case ArgType::ComplexDouble:
ret = "ComplexDouble";
break;
case ArgType::Bool:
ret = "Bool";
break;
case ArgType::Tensor:
ret = "Tensor";
break;
case ArgType::CpuScalarTensor:
ret = "CpuScalarTensor";
break;
}
return ret;
}
struct ArgAbstract {
virtual ~ArgAbstract() = default;
virtual const void* arg() const = 0;
virtual void* arg() = 0;
virtual bool isType(ArgType type) const = 0;
virtual ArgType type() const = 0;
virtual std::unique_ptr<ArgAbstract> copy_unique_ptr() const = 0;
virtual void print() const {
printf("input type: %s\n", argTypeToString(type()).c_str());
};
};
#define DEF_HELPEE_FUNC(TARGET_TYPE, ARG_NAME) \
bool isType(ArgType type) const override { \
return ArgType::TARGET_TYPE == type; \
} \
ArgType type() const override { \
return ArgType::TARGET_TYPE; \
} \
const void* arg() const override { \
return &ARG_NAME; \
} \
void* arg() override { \
return &ARG_NAME; \
} \
std::unique_ptr<ArgAbstract> copy_unique_ptr() const override { \
return std::make_unique<TARGET_TYPE##Arg>(*this); \
}
#define DEF_PRINT_FUNC \
void print() const override { \
std::cout << val_ << std::endl; \
}
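// Sketch: how the two macros above assemble a scalar wrapper. This mirrors
// the LongArg definition below; `Float`/`FloatArg` are purely hypothetical
// and would additionally require an ArgType::Float enum entry:
//
//   struct FloatArg : public ArgAbstract {
//     float val_;
//     explicit FloatArg(float _val) : val_(_val) {}
//     DEF_HELPEE_FUNC(Float, val_)
//     DEF_PRINT_FUNC
//   };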
struct PhiloxCudaStateArg : public ArgAbstract {
at::PhiloxCudaState val_;
PhiloxCudaStateArg(at::PhiloxCudaState _val) : val_(_val){};
DEF_HELPEE_FUNC(PhiloxCudaState, val_)
};
struct LongArg : public ArgAbstract {
int64_t val_;
explicit LongArg(int64_t _val) : val_(_val) {}
DEF_HELPEE_FUNC(Long, val_)
DEF_PRINT_FUNC
};
struct DoubleArg : public ArgAbstract {
double val_;
explicit DoubleArg(double _val) : val_(_val) {}
DEF_HELPEE_FUNC(Double, val_)
DEF_PRINT_FUNC
};
struct ComplexDoubleArg : public ArgAbstract {
c10::complex<double> val_;
explicit ComplexDoubleArg(c10::complex<double> _val) : val_(_val) {}
DEF_HELPEE_FUNC(ComplexDouble, val_)
DEF_PRINT_FUNC
};
struct BoolArg : public ArgAbstract {
bool val_;
explicit BoolArg(bool _val) : val_(_val) {}
DEF_HELPEE_FUNC(Bool, val_)
DEF_PRINT_FUNC
};
struct TensorArgAbstract : ArgAbstract {
virtual void setSize(int i, int64_t size) = 0;
virtual void setStride(int i, int64_t stride) = 0;
virtual void setPointer(void* ptr) = 0;
virtual void setDataType(DataType data_type) = 0;
virtual void setTensor(at::Tensor tensor) = 0;
virtual int64_t getRank() const = 0;
virtual int64_t getSize(int i) const = 0;
virtual int64_t getStride(int i) const = 0;
virtual void* getPointer() const = 0;
virtual DataType getDataType() const = 0;
virtual int64_t numel() const = 0;
virtual at::Tensor getTensor() const = 0;
// TODO: clean it up and also print out dtype
void print() const override {
auto rank = getRank();
std::cout << "tensor dtype: " << getDataType() << " sizes: (";
for (auto i = 0; i < rank; i++) {
std::cout << getSize(i) << ", ";
}
std::cout << ") stride: (";
for (auto i = 0; i < rank; i++) {
std::cout << getStride(i) << ", ";
}
std::cout << ") pointer: " << getPointer() << std::endl;
}
};
template <typename TENSOR_TYPE, typename nvfuser_index_t>
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct TensorArg : public TensorArgAbstract {
TENSOR_TYPE instance_;
// TODO: this is ugly, we should be extracting data type from `instance_`
// instead
DataType data_type_ = DataType::Null;
at::Tensor tensor_;
void setSize(int i, int64_t size) override {
instance_.setSize(i, (nvfuser_index_t)size);
}
void setStride(int i, int64_t stride) override {
instance_.setStride(i, (nvfuser_index_t)stride);
}
void setPointer(void* ptr) override {
instance_.data = static_cast<decltype(TENSOR_TYPE::data)>(ptr);
}
void setDataType(DataType data_type) override {
data_type_ = data_type;
}
void setTensor(at::Tensor tensor) override {
tensor_ = tensor;
}
int64_t getSize(int i) const override {
return instance_.getSize(i);
}
int64_t getStride(int i) const override {
return instance_.getStride(i);
}
int64_t getRank() const override {
return instance_.nDims();
}
void* getPointer() const override {
return instance_.data;
}
DataType getDataType() const override {
return data_type_;
}
at::Tensor getTensor() const override {
return tensor_;
}
int64_t numel() const override {
int64_t ret = 1;
for (auto i : c10::irange(instance_.nDims())) {
ret *= instance_.getSize(i);
}
return ret;
}
DEF_HELPEE_FUNC(Tensor, instance_)
};
template <typename CPU_TENSOR_TYPE>
struct CpuScalarTensorArg : public ArgAbstract {
CPU_TENSOR_TYPE instance_;
CpuScalarTensorArg() = delete;
explicit CpuScalarTensorArg(decltype(CPU_TENSOR_TYPE::data) _data) {
instance_.data = _data;
}
DEF_HELPEE_FUNC(CpuScalarTensor, instance_)
};
// TODO: This class needs some further clean up and refactor
//! KernelArgumentHolder copies meta information from kernel inputs, including
//! tensor sizes/shapes/dtype/memory_ptr and copies scalar inputs. It is used
//! for both compilation as well as kernel execution. The important thing is to
//! strip ownership of tensor from KernelArgumentHolder, so that during async
//! compilation, we are not unnecessarily holding memory that is not needed.
class TORCH_CUDA_CU_API KernelArgumentHolder {
public:
  //! create KernelArgumentHolder from c10 inputs. Note that we are not taking
  //! ownership of the memory from the original inputs, but just recording
  //! their meta data for kernel execution/compilation.
static KernelArgumentHolder createKernelArgumentHolder(
const c10::ArrayRef<c10::IValue>& inputs);
KernelIndexMode getIndexMode() const {
return index_mode_;
}
explicit KernelArgumentHolder(KernelIndexMode index_mode)
: index_mode_(index_mode) {}
KernelArgumentHolder(const KernelArgumentHolder& self)
: device_index_(self.getDeviceIndex()),
cache_id_(self.getCacheId()),
index_mode_(self.getIndexMode()) {
for (const auto& arg : self.arguments_) {
push(arg.get());
}
}
KernelArgumentHolder& operator=(const KernelArgumentHolder& self) {
device_index_ = self.getDeviceIndex();
index_mode_ = self.getIndexMode();
for (const auto& arg : self.arguments_) {
push(arg.get());
}
return *this;
}
// Push a tensor to the arguments
void push(const at::Tensor& tensor);
// Push a scalar or integer to the arguments
void push(const IValue& val);
void push(const at::PhiloxCudaState& val);
// Create buffer, flatten arguments into it, align by 8 Bytes, return pointers
// in the buffer
void** getBuffer();
void push(const c10::ArrayRef<c10::IValue>& args);
void push(const std::vector<at::Tensor>& tensors);
void push(const ArgAbstract* arg);
void swap(int i, const ArgAbstract* arg);
// push int64
void push(int64_t val);
const ArgAbstract* back() const {
return arguments_.back().get();
}
void appendPhiloxRNGSeed(uint64_t rand_offset);
const ArgAbstract* operator[](int ind) const {
return arguments_.at(ind).get();
};
size_t size() const {
return arguments_.size();
}
bool empty() const {
return arguments_.empty();
}
void setDeviceIndex(int index) {
device_index_ = index;
}
int getDeviceIndex() const {
return device_index_;
}
void setCacheId(size_t id) {
cache_id_ = id;
}
c10::optional<size_t> getCacheId() const {
return cache_id_;
}
void print() const {
for (const auto& arg : arguments_) {
arg->print();
}
}
private:
std::vector<std::unique_ptr<ArgAbstract>> arguments_;
std::vector<void*> void_ptrs_;
bool changed_ = true;
int device_index_ = 0;
c10::optional<size_t> cache_id_ = c10::nullopt;
KernelIndexMode index_mode_ = KernelIndexMode::INT64;
};
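//! Usage sketch (illustrative only): building a holder from ATen inputs and
//! tagging it for cache lookup. `aten_inputs` is an assumed
//! at::ArrayRef<c10::IValue> and `unique_id` an assumed cache key:
//!
//!   auto args = KernelArgumentHolder::createKernelArgumentHolder(aten_inputs);
//!   args.setDeviceIndex(0);
//!   args.setCacheId(unique_id);
//!   args.push(int64_t(1)); // extra scalar argument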
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 10,829
| 26.211055
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/executor_launch_params.h
|
#pragma once
#include <type.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class TORCH_CUDA_CU_API LaunchParams {
public:
static constexpr int64_t UNINITIALIZED_VAL = -1;
LaunchParams(
int64_t gdimx = UNINITIALIZED_VAL,
int64_t gdimy = UNINITIALIZED_VAL,
int64_t gdimz = UNINITIALIZED_VAL,
int64_t bdimx = UNINITIALIZED_VAL,
int64_t bdimy = UNINITIALIZED_VAL,
int64_t bdimz = UNINITIALIZED_VAL)
: gdimx_(gdimx),
gdimy_(gdimy),
gdimz_(gdimz),
bdimx_(bdimx),
bdimy_(bdimy),
bdimz_(bdimz) {
assertValid();
}
void assertValid();
void setSmem(int64_t smem) {
smem_ = smem;
}
int64_t smem() const {
return smem_;
}
int64_t nBlocks() const {
return std::abs(gdimx_ * gdimy_ * gdimz_);
}
int64_t nThreads() const {
return std::abs(bdimx_ * bdimy_ * bdimz_);
}
int64_t bdimx() const {
return static_cast<int64_t>(bdimx_ == UNINITIALIZED_VAL ? 1 : bdimx_);
}
int64_t gdimx() const {
return static_cast<int64_t>(gdimx_ == UNINITIALIZED_VAL ? 1 : gdimx_);
}
int64_t bdimy() const {
return static_cast<int64_t>(bdimy_ == UNINITIALIZED_VAL ? 1 : bdimy_);
}
int64_t gdimy() const {
return static_cast<int64_t>(gdimy_ == UNINITIALIZED_VAL ? 1 : gdimy_);
}
int64_t bdimz() const {
return static_cast<int64_t>(bdimz_ == UNINITIALIZED_VAL ? 1 : bdimz_);
}
int64_t gdimz() const {
return static_cast<int64_t>(gdimz_ == UNINITIALIZED_VAL ? 1 : gdimz_);
}
void checkAndSet(
const int64_t incoming_val,
int64_t& class_val,
std::string val) {
TORCH_INTERNAL_ASSERT(
class_val == UNINITIALIZED_VAL || incoming_val == class_val,
"Tried to set ",
val,
" from ",
class_val,
" to ",
incoming_val,
", but it was already set and new value does not match.",
" Thread dims all have to be bound to the same value.");
TORCH_CHECK(
incoming_val > 0,
"Received a thread binding on ",
val,
" that is ",
incoming_val,
". Cannot create negative threads.");
if (class_val == UNINITIALIZED_VAL) {
class_val = incoming_val;
}
assertValid();
}
  // Binds the dim associated with p_type to val
void bind(int64_t val, ParallelType p_type);
// Adjusted value based on get functions above for each value
int64_t getDim(ParallelType p_type) const;
// Returns raw value which may be UNINITIALIZED_VAL
const int64_t& getRawVal(ParallelType p_type) const;
// Returns false if value associated with p_type == UNINITIALIZED_VAL
bool hasDim(ParallelType p_type) const;
bool operator==(const LaunchParams& other) const;
void print() const;
std::string toString() const;
private:
// Spell them out because I want signed ints to know if they were initialized
// or not.
// TODO: convert to c10::optional
int64_t gdimx_ = UNINITIALIZED_VAL;
int64_t gdimy_ = UNINITIALIZED_VAL;
int64_t gdimz_ = UNINITIALIZED_VAL;
int64_t bdimx_ = UNINITIALIZED_VAL;
int64_t bdimy_ = UNINITIALIZED_VAL;
int64_t bdimz_ = UNINITIALIZED_VAL;
int64_t smem_ = 0;
// TODO: Fill in output sizes
std::vector<std::vector<int64_t>> output_sizes;
};
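//! Usage sketch (illustrative only). Unbound dimensions hold the -1 sentinel,
//! so under std::abs each one contributes a factor of 1 to nThreads() and
//! nBlocks():
//!
//!   LaunchParams lp;
//!   lp.bind(128, ParallelType::TIDx);
//!   lp.bind(1024, ParallelType::BIDx);
//!   int64_t threads = lp.nThreads(); // 128; TIDy/TIDz are unbound
//!   int64_t blocks = lp.nBlocks();   // 1024; BIDy/BIDz are unbound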
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 3,378
| 23.664234
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/executor_utils.h
|
#pragma once
#include <ATen/core/ivalue.h>
#include <c10/core/DeviceType.h>
#include <c10/util/Exception.h>
#include <cuda.h>
#include <torch/csrc/jit/ir/ir.h>
#include <executor_kernel_arg.h>
#include <expr_evaluator.h>
#include <fusion.h>
#include <ir_all_nodes.h>
#include <kernel.h>
#include <kernel_expr_evaluator.h>
#include <lower2device.h>
#include <string>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace executor_utils {
// Include all the functions we might need in generated code
std::string kernelPreamble();
void validateKernelInputs(
Fusion* fusion,
const KernelArgumentHolder& args,
const c10::Device& device);
void validateKernelOutputs(
Fusion* fusion,
const std::vector<at::Tensor>& outputs,
const c10::Device& device);
//! Bind kernel input values to runtime values
kir::ExpressionEvaluator bindKernelInputs(
const KernelArgumentHolder& args,
kir::Kernel* kernel,
bool check_consistency = true);
//! Bind fusion input values to runtime values
TORCH_CUDA_CU_API ExpressionEvaluator
bindFusionInputs(const KernelArgumentHolder& args, Fusion* fusion);
struct NvrtcFunction {
CUmodule module = CUmodule();
CUfunction function = CUfunction();
};
// Returns executable function and the ptxas log from compilation
std::pair<NvrtcFunction, std::string> nvrtcCompile(
const std::string& code,
const std::string& func_name,
int id,
c10::optional<int> opt_block_size = c10::nullopt);
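// Sketch (illustrative only): compiling generated code into an executable
// kernel; the function name and id below are made-up values:
//
//   auto compiled = nvrtcCompile(kernelPreamble() + code, "kernel1", 1);
//   NvrtcFunction fn = compiled.first;        // CUmodule + CUfunction
//   std::string ptxas_log = compiled.second;  // compilation log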
namespace caching {
// TODO: Could consider putting some of
// the logic in the common space and re-use
//! List of all the possible entry types in
//! `FusionExecutor` compile-time data cache.
enum class CompileTimeEntryType {
PARALLEL_BINDING_ITERDOMAINS,
PARALLEL_ITER_EXTENT_MAP,
SIMPLIFIED_PARALLEL_ITER_EXTENT_MAP,
WARP_PADDED_PARALLEL_EXTENTS,
VECTORIZED_TENSOR_VALIDATION,
INPUT_ALIAS_INDICES,
OUTPUT_ALIAS_INDICES
};
//! Entry class definitions for each entry type:
//! each class defines the data type for each entry type
//! Compile-time info to be cached in each FusionExecutor:
//! ParallelBindingIterDomains:
//! Stores all the iterdomains that are parallelized
//! on the scheduled Fusion graph. They will be used
//! in launch param iteration and their extents may
//! come from launch constraints.
class ParallelBindingIterDomains {
public:
using DataType = std::vector<IterDomain*>;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::PARALLEL_BINDING_ITERDOMAINS;
};
//! Compile-time info to be cached in each FusionExecutor:
//! ParallelIterExtentMap
//! Stores the symbolic extents of all the parallelized
//! iterdomains corresponding to each used parallel type.
class ParallelIterExtentMap {
public:
using DataType =
std::unordered_map<ParallelType, std::vector<const Val*>, TypeHash>;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::PARALLEL_ITER_EXTENT_MAP;
};
//! Compile-time info to be cached in each FusionExecutor:
//! SimplifiedParallelIterExtentMap
//! This entry type is a simplified version of ParallelIterExtentMap.
//!
//! For launch parameter binding we only need the most concrete iterdomain
//! in each disjoint set stored in CaParallelMap. This entry stores the
//! remaining list of extents for binding after this simplification.
//!
//! We still need ParallelIterExtentMap since we want to bind the concrete
//! values to the extents of all parallelized iterdomains. We would be
//! able to save these bindings if the integer machine has a notion of
//! equality and could be configured compile time. But that'd be a longer
//! term target.
class SimplifiedParallelIterExtentMap {
public:
using DataType =
std::unordered_map<ParallelType, std::vector<const Val*>, TypeHash>;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::SIMPLIFIED_PARALLEL_ITER_EXTENT_MAP;
};
//! WarpPaddedExtentsInfo:
//! Auxiliary data type for entry class WarpPaddedParallelExtents
struct WarpPaddedExtentsInfo {
std::unordered_set<const Val*> warp_padded_extent_set;
std::unordered_map<const Val*, int64_t> warp_padded_constant;
};
//! Compile-time info to be cached in each FusionExecutor:
//! WarpPaddedParallelExtents
//! Stores the symbolic and constant extents of warp
//! padded parallel iterdomains.
class WarpPaddedParallelExtents {
public:
using DataType = WarpPaddedExtentsInfo;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::WARP_PADDED_PARALLEL_EXTENTS;
};
//! VectorizedTensorInfo:
//! Auxiliary data type for entry class VectorizedTensorValidation
struct VectorizedTensorInfo {
//! Aligned vectorized fusion inputs
std::vector<int> aligned_vectorized_inp_tensor_pos;
//! Aligned vectorized fusion outputs
std::vector<int> aligned_vectorized_out_tensor_pos;
//! Misaligned vectorized input tensors
std::unordered_set<TensorView*> global_inp_misaligned_tv;
//! Misaligned vectorized output tensors
std::unordered_set<TensorView*> global_out_misaligned_tv;
//! Positions of misaligned input tensors
std::vector<int> inp_misaligned_tensors_pos;
//! Positions of misaligned output tensors
std::vector<int> out_misaligned_tensors_pos;
};
//! Compile-time info to be cached in each FusionExecutor:
//! VectorizedTensorValidation
//! Stores position info and vector word sizes of
//! vectorized input/output tensors, to be used
//! in misaligned vectorization validation.
class VectorizedTensorValidation {
public:
using DataType = VectorizedTensorInfo;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::VECTORIZED_TENSOR_VALIDATION;
};
//! Compile-time info to be cached in each FusionExecutor:
//! InputAliasIndices
//! Stores position info of aliased input tensors
class InputAliasIndices {
public:
using DataType = std::vector<std::pair<int, int>>;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::INPUT_ALIAS_INDICES;
};
//! Compile-time info to be cached in each FusionExecutor:
//! OutputAliasIndices
//! Stores position info of aliased output tensors
class OutputAliasIndices {
public:
using DataType = std::unordered_set<int>;
static const CompileTimeEntryType EntryType =
CompileTimeEntryType::OUTPUT_ALIAS_INDICES;
};
//! Base abstract class for unified storage in `ExecutorCompileTimeInfoCache`,
//! each entry in `ExecutorCompileTimeInfoCache` will be a subclass.
class CompileTimeInfoBase : public PolymorphicBase {
public:
CompileTimeInfoBase(CompileTimeEntryType entry_type)
: entry_type_(entry_type) {}
CompileTimeEntryType type() {
return entry_type_;
}
private:
CompileTimeEntryType entry_type_;
};
// Note: Do NOT export this class. MSVC issue with exported class that contains
// std::vector<unique_ptr<xxx>>: https://godbolt.org/z/3E4e8T1P1
//! Compile-time information cache
class ExecutorCompileTimeInfoCache {
using Entry = CompileTimeInfoBase;
using EntryOwningPtr = std::unique_ptr<Entry>;
using EntryPtr = Entry*;
using EntryType = CompileTimeEntryType;
public:
void insert(EntryOwningPtr new_entry);
EntryPtr at(EntryType entry_type) {
return entry_type_map_.at(entry_type);
}
bool has(EntryType entry_type) {
return entry_type_map_.count(entry_type);
}
private:
std::vector<EntryOwningPtr> entries_;
std::unordered_map<EntryType, EntryPtr> entry_type_map_;
};
//! A utility class to facilitate accessing ExecutorCompileTimeInfoCache.
template <typename EntryClass>
class ExecutorCompileTimeEntry {
using EntryDataType = typename EntryClass::DataType;
using EntryDataTypeOwnPtr = std::unique_ptr<EntryDataType>;
using MakerFnType = std::function<EntryDataTypeOwnPtr()>;
public:
//! Creates a data entry with type defined in EntryClass,
//! eg. EntryClass = VectorizableInputsAndOutputs;
//!
//! @param data_cache, a pointer to an instantiated compile-time
//! info cache. The info data will be
//! 1. read from data cache if data cache has the corresponding entry.
//! 2. written into data cache if data cache doesn't have the entry.
//! 3. managed by owned_data_ if data cache is nullptr
//! @param fn:
  //!  The factory function that needs to return an owning pointer
//! i.e. std::unique_ptr<EntryClass::DataType>. It will only
//! be called either when data cache is missing an entry or when no data
//! cache is given.
ExecutorCompileTimeEntry(
ExecutorCompileTimeInfoCache* data_cache,
MakerFnType fn);
//! Unified interface to get actual data, either from cache
//! or from factory function.
EntryDataType& get() {
return *data_ptr_;
}
private:
  //! Internal data owning pointer that will manage the computed
  //! data when there is no data cache.
EntryDataTypeOwnPtr owned_data_ = nullptr;
//! Pointer to the valid data entry that could be accessed.
EntryDataType* data_ptr_ = nullptr;
};
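//! Usage sketch (illustrative only): materializing the input alias indices
//! entry, reading from the cache when present and computing otherwise;
//! `fusion` and `data_cache` are assumed available:
//!
//!   ExecutorCompileTimeEntry<InputAliasIndices> entry(
//!       data_cache, [&]() {
//!         return std::make_unique<InputAliasIndices::DataType>(
//!             fusion->getInputAliasIndices());
//!       });
//!   const auto& alias_indices = entry.get();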
} // namespace caching
//! Returns the vector of tensorviews that will be used to bind parallel
//! dimensions.
std::vector<IterDomain*> getParallelBindingsIterDomains(
GpuLower* lower,
const std::vector<TensorView*>& used_tvs);
using ParallelExtentMap =
std::unordered_map<ParallelType, std::vector<const Val*>, TypeHash>;
//! Returns the extents of all parallel binding iterdomains corresponding
//! to each parallel type.
std::unique_ptr<ParallelExtentMap> getParallelIterExtents(
std::vector<IterDomain*>& parallel_binding_ids);
//! Returns the simplified set of extents necessary for launch parameter
//! binding.
std::unique_ptr<ParallelExtentMap> getSimplifiedParallelIterExtents(
GpuLower* lower,
std::vector<IterDomain*>& parallel_binding_ids);
//! Returns the symbolic or constant extents of warp padded parallel
//! iterdomains in the given vector.
std::unique_ptr<caching::WarpPaddedExtentsInfo> getWarpPaddedExtentsInfo(
kir::Kernel* lower,
std::vector<IterDomain*>& parallel_binding_ids);
void validateVectorizedTensors(
kir::Kernel* kernel,
const KernelArgumentHolder& args,
const std::vector<at::Tensor>& outputs,
caching::ExecutorCompileTimeInfoCache* data_cache,
kir::ExpressionEvaluator& expr_eval);
} // namespace executor_utils
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 10,456
| 32.196825
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/expr_evaluator.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dynamic_type.h>
#include <ir_interface_nodes.h>
#include <iter_visitor.h>
#include <c10/util/Optional.h>
#include <string>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class FusionPrecomputedValues;
//! Calculate Fusion IR expressions
class TORCH_CUDA_CU_API ExpressionEvaluator : private OptOutDispatch {
public:
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
explicit ExpressionEvaluator(Fusion* fusion) : fusion_(fusion) {}
//! Returns the associated fusion object
Fusion* fusion() const {
return fusion_;
}
//! Bind a concrete value to an IR variable
void bind(Val* value, const IntOrDouble& concrete_value);
//! Bind a concrete value to a named scalar
void bind(const std::string& name, const IntOrDouble& concrete_value);
//! Try to evaluate a Fusion IR value
c10::optional<IntOrDouble> evaluate(Val* value);
//! Debugging helper, prints all the currently known values
void print() const;
void bindPrecomputedValues(FusionPrecomputedValues* precomputed_values) {
evaluator_precomputed_values_ = precomputed_values;
}
auto precomputedValues() {
return evaluator_precomputed_values_;
}
private:
c10::optional<IntOrDouble> getValue(Val* value);
void handle(UnaryOp*) final;
void handle(BinaryOp*) final;
// TODO: handle swizzle
private:
std::unordered_map<const Val*, IntOrDouble> known_values_;
std::unordered_map<std::string, IntOrDouble> known_named_scalars_;
Fusion* fusion_ = nullptr;
FusionPrecomputedValues* evaluator_precomputed_values_ = nullptr;
};
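//! Usage sketch (illustrative only): binding a concrete extent and querying
//! a derived value; `fusion`, `extent`, and `derived` are assumed IR values:
//!
//!   ExpressionEvaluator evaluator(fusion);
//!   evaluator.bind(extent, 128);
//!   auto result = evaluator.evaluate(derived);
//!   if (result.has_value()) {
//!     // *result holds the inferred IntOrDouble
//!   }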
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,739
| 24.217391
| 75
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/fusion.h
|
#pragma once
#include <ATen/core/ivalue.h>
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
#include <ir_base_nodes.h>
#include <ir_container.h>
#include <iter_visitor.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Usage: FusionGuard and Fusion are required user interfaces for any operation
//! underlying the code generator. In order to create values, expressions, and
//! generate code a Fusion instance must be active. It is the responsibility of
//! the user to create a Fusion instance and register it with the fusion guard.
//! The simplest example of this is:
//!
//! Fusion fusion;
//! FusionGuard fg(&fusion);
//!
//! Once a fusion is active all values and operations will be registered with
//! it.
//!
//! FusionGuard and Fusion are critical to the lifetime model of the IR system.
//! FusionGuard is a convenient way to set what base container instance holds
//! the defined IR. Statements that are defined are registered through the
//! FusionGuard with a particular Fusion. FusionGuard provides convenient
//! methods to access the active fusion so it doesn't need to be passed around
//! constantly. Any IR node derived classes from Statement must register with
//! Fusion to avoid memory leaks.
//!
//! Fusion is generally thought of as a translated fusion group from the JIT. It
//! is likely a single kernel, although, we don't have to stick to this in the
//! future and could in theory generate multiple kernels with an executor to run
//! them.
//!
//! Fusion also allows users to set input/output values that will allow us to
//! figure out how to hook up runtime data to and from the JIT as well as
//! provide us mechanisms for dependency analysis and DCE including safety
//! checks.
class Fusion;
class TensorView;
class WelfordResult;
class SegmentCandidateFinder;
class SegmentedFusion;
class KernelArgumentHolder;
//! Fusion Guard is our "context manager". It holds the active fusion and
//! allows it to be accessed anywhere through FusionGuard::getCurFusion()
class TORCH_CUDA_CU_API FusionGuard {
public:
Fusion* prev_fusion;
//! Set the active fusion so it can be manipulated.
explicit FusionGuard(Fusion* fusion);
~FusionGuard();
static Fusion* getCurFusion();
static void setCurFusion(Fusion* fusion);
};
//! Fusion is mutable but unique. Nodes cannot be copied in any way from one
//! Fusion to another. If anything like that is desired, it would require
//! duplicating all associated values and exprs. Fusion is considered to be SSA,
//! though this could also change in the future if there is a good reason to do
//! so.
//!
//! The Fusion owns the whole IR graph (Vals and Exprs)
//!
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_CUDA_CU_API Fusion : public IrContainer {
typedef std::unordered_map<int, std::vector<int64_t>> PermutationMap;
public:
Fusion() = default;
Fusion(const Fusion& other);
Fusion(Fusion&& other) noexcept;
Fusion& operator=(const Fusion& other);
Fusion& operator=(Fusion&& other) noexcept;
~Fusion();
friend void swap(Fusion& a, Fusion& b) noexcept;
void clear() noexcept;
//! Break dependency chains associated with Expr, remove references to expr
//! delete expr
void removeExpr(Expr* expr) override;
//! Completely remove val from the fusion, break all dependencies associated
//! with it
void removeVal(Val* val) override;
//! Register input as an input of the fusion
void addInput(Val* input);
//! Register output as an output of the fusion
void addOutput(Val* output);
//! Deregister input as an input of the fusion
void removeInput(Val* input);
//! Deregister output as an output of the fusion
void removeOutput(Val* output);
//! Replace output with another value
void replaceOutput(Val* output, Val* replacement);
//! Assert that all leaves found from outputs are registered as an input
void validateInputs();
//! Print this fusion to the console
void print();
//! Print Arith exprs
//! \param from_outputs_only Only print exprs reachable from outputs
void printMath(bool from_outputs_only = true);
//! Print transformations used in fusion (can be very verbose)
void printTransforms();
//! Lower the fusion and print a kernel
void printKernel(DataType index_type = DataType::Int);
//! Lower the fusion and evaluate bank conflict info
std::unordered_map<std::string, std::pair<int, int>> bankConflictInfo(
DataType index_type = DataType::Int);
  //! Return a list of topologically sorted expressions. This only includes
  //! exprs required to generate registered outputs.
std::vector<Expr*> exprs();
//! Return a vector of fusion inputs that feed this Val
std::vector<Val*> inputsOf(Val* val);
//! Return all Vals in math expressions that cannot be eliminated.
//!
//! It is generally equivalent to vals that are used to generate
//! outputs, however, when a multi-output expression exists, and only
//! some of the outputs are used, the remaining unused outputs are
//! also included as they must show up in the final code.
std::vector<Val*> usedMathVals();
//! Returns all vals that are produced by used math expressions and
//! also do not have further consumers.
//!
  //! In the case of an active multi-output expression, the returned vector
  //! will include the expression outputs that did not lead to a fusion
  //! output.
std::vector<Val*> terminatingMathVals();
//! Return all Exprs that use val
std::unordered_set<Expr*> unordered_uses(const Val* val) const;
//! Return the Expr that produces val
Expr* definition(const Val* val) const;
//! Indicate to kernel to set itself up to generate random numbers
bool isStochastic();
//! Run fusion segmentation algorithm to create a segmented fusion
std::unique_ptr<SegmentedFusion> segment(const KernelArgumentHolder& args);
const auto& inputs() const {
return inputs_;
}
std::vector<Val*> inputsAndCreated();
const auto& outputs() const {
return outputs_;
}
std::vector<Val*> getTerminatingOutputs() const;
// Aliasing output to input value, this is a WAR to allow inplace update on
// input tensor.
// Note: this is not always safe and should be used with extra caution.
// Currently the only place it's used is in the running stats update for batch
// normalization.
// TODO: alias should be made aware to segmentation, so we'll always include
// the input tensor to the section where output is produced.
void aliasOutputToInput(Val* output, Val* input);
Val* getOutputAlias(Val* output);
std::unordered_set<int> getOutputAliasIndices() const;
std::vector<std::pair<int, int>> getInputAliasIndices() const;
// mark input at index to be permuted by permutation
void setPermutationOnInput(int index, std::vector<int64_t> permutation) {
permuted_input_map_.insert({index, permutation});
}
// mark output at index to be restored by permutation
void setPermutationOnOutput(int index, std::vector<int64_t> permutation) {
permuted_output_map_.insert({index, permutation});
}
  // return a map of indices to permutation, which indicates all input tensors
  // that need to be permuted
const PermutationMap& getPermutationInputMap() const {
return permuted_input_map_;
}
  // return a map of indices to permutation, which indicates all output tensors
  // that need to be permuted
const PermutationMap& getPermutationOutputMap() const {
return permuted_output_map_;
}
bool isTVUseInfoValid() {
return all_tv_uses_valid_;
}
bool isUpdatingTVUseInfo() {
return is_during_update_uses_;
}
const auto& ioAlias() const {
return io_alias_;
}
protected:
friend SegmentCandidateFinder;
friend SegmentedFusion;
friend class TranslateApplicableWelford;
friend Val;
static IrCloner copy(const Fusion* from, Fusion* to);
//! Register the Val with this fusion
virtual void registerVal(Val* val) override;
//! Register expr with this fusion.
//! When we register an expression, we want to update the dependency tracking
  //! of Vals. If this container is not a Kernel, it will remove previous
  //! definitions of outputs and register this Expr as the definition. Otherwise
  //! it will update the definition if not previously set, but will not remove
  //! old definitions.
virtual void registerExpr(Expr* expr) override;
//! Clear Expr's from TV uses that are not required to produce outputs from
  //! inputs. The only other place this is used (other than Fusion) is in
//! Val::uses()
void resetTvUses();
private:
// Determine if the two values are compatible for aliasing
// Same DataType, ValType, and number of dimensions
bool isAliasCompatible(Val* left, Val* right);
private:
// Fusion inputs and outputs
std::vector<Val*> inputs_;
std::vector<Val*> outputs_;
// io alias pointing from output to input
std::unordered_map<Val*, Val*> io_alias_;
// See Note [ Permutation support in nvfuser ]
// map from indices of input tensor to permutation
PermutationMap permuted_input_map_;
// map from indices of output tensor to permutation
PermutationMap permuted_output_map_;
// Records if the current use data in the IR nodes are valid
// the states are either all valid or all invalid
bool all_tv_uses_valid_ = false;
bool is_during_update_uses_ = false;
};
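//! Usage sketch (illustrative only) of the input/output registration flow;
//! `tv0` and `tv1` are assumed to be TensorViews created via builder helpers
//! declared elsewhere in the codebase:
//!
//!   Fusion fusion;
//!   FusionGuard fg(&fusion);
//!   fusion.addInput(tv0);
//!   fusion.addOutput(tv1);
//!   fusion.validateInputs();
//!   fusion.printMath();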
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 9,564
| 32.096886
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/fusion_segmenter.h
|
#pragma once
#include <fusion.h>
#include <ir_base_nodes.h>
#include <kernel_cache.h>
#include <scheduler/all_schedulers.h>
#include <scheduler/registry.h>
#include <utils.h>
#include <deque>
#include <list>
#include <unordered_set>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class SegmentedGroup;
class SegmentCandidateFinder;
// A directed edge on the DAG:
// a wrapper for the values that form edges between segmented groups, which
// are made up of Exprs. Multiple edges can exist between segmented groups.
struct SegmentedEdge {
SegmentedEdge(SegmentedGroup* from, SegmentedGroup* to, Val* val)
: from(from), to(to), val(val) {}
SegmentedGroup* from;
SegmentedGroup* to;
Val* val;
void print() const;
};
std::ostream& operator<<(std::ostream& os, const SegmentedEdge* edge);
//! Groups together expressions which create a segmented group
//! Can be used to produce fusions
class TORCH_CUDA_CU_API SegmentedGroup {
public:
SegmentedGroup(SegmentedFusion* segmented_fusion)
: segmented_fusion_(segmented_fusion) {}
SegmentedGroup(Expr* expr, SegmentedFusion* segmented_fusion)
: segmented_fusion_(segmented_fusion) {
exprs_.push_back(expr);
}
  //! Checks if this group takes an input of the original fusion
bool isInputGroup() {
return !input_vals.empty();
};
  //! Checks if this group is used anywhere in the segmented fusion
bool isConnected() const {
return !producer_edges.empty() || !consumer_edges.empty() ||
!output_vals.empty();
}
  //! returns the id assigned by the segment pass
int groupId() const {
return group_id_;
}
//! Returns inputs that this group shares with the original fusion
const auto& inputs() const {
return input_vals;
}
//! Returns outputs that this group shares with the original fusion
const auto& outputs() const {
return output_vals;
}
//! Returns the schedule heuristic associated with this group
ScheduleHeuristic heuristic() const {
return heuristic_;
}
//! Returns the exprs that make up this group
const auto& exprs() const {
return exprs_;
}
//! Debug print function
void print() const;
//! Returns the segmented fusion that this group is in
SegmentedFusion* segmentedFusion() const {
return segmented_fusion_;
}
//! Utility to re-collect the operators included in this
//! segmented group after updating the group boundary.
void resetExprList();
//! Try to get a scheduler entry for this group with
//! the given runtime info.
//! Returns a new scheduler with the same heuristics
//! for this group if possible.
//! Note that the schedule params can be different.
//! Returns a nullopt if this group cannot be scheduled
//! with the same heuristics.
c10::optional<std::unique_ptr<SchedulerEntry>> getMaybeSchedulerEntry(
SchedulerRuntimeInfo& runtime_info);
public:
//! "Ancestor nodes", towards inputs of segmentedDAG
std::vector<SegmentedEdge*> producer_edges;
  //! "Descendant nodes", towards outputs of segmentedDAG
std::vector<SegmentedEdge*> consumer_edges;
//! Composite Fusion inputs in this group
std::vector<Val*> input_vals;
//! Composite Fusion outputs in this group
std::vector<Val*> output_vals;
private:
friend class SegmentCandidateFinder;
friend class SegmentedFusion;
friend class FusionKernelRuntime;
friend class TranslateApplicableWelford;
//! unique identifier of group in the segmented fusion
int group_id_ = -1;
//! The scheduler to use for compiling this group
ScheduleHeuristic heuristic_ = ScheduleHeuristic::None;
//! Exprs that make up the group
std::vector<Expr*> exprs_;
//! Maximum path distance from an input segmented group required for
//! Theorem 4.2
int level_ = -1;
//! traversal marker, has this node already been processed
bool visited_ = false;
//! Did we select another group to merge with
SegmentedGroup* merge_with_ = nullptr;
//! if we selected another group to merge, which edge is to be contracted
SegmentedEdge* merge_through_ = nullptr;
//! Has this node been merged?
bool merged_ = false;
private:
//! Utility to convert edge vector to value vector
std::vector<Val*> edgesToVals(const std::vector<SegmentedEdge*>& se_v);
  //! Reset method to call at the beginning of each
  //! merge node iteration
void clearTraversalInfo();
  //! To be called at the very end of fusion segmentation;
  //! no more segment merging should be done beyond this point
void finalize();
//! Return all segmented groups connected with *this
std::vector<SegmentedGroup*> getNeighbors();
//! Utility struct to represent a group connection
//! both the group to connect with and the edge
//! to connect through
struct NeighborGroup {
NeighborGroup(SegmentedGroup* g, SegmentedEdge* e) : group(g), edge(e) {}
SegmentedGroup* group;
SegmentedEdge* edge;
};
//! TODO: May want to sort this based on size of connections between this and
//! neighbors as well as if the connection is an output of the fusion (has to
//! be saved to gmem anyways)
std::vector<NeighborGroup> getNeighborGroups();
//! Look at all neighbors of this and return who this could merge with based
//! on level values of this, neighbors, and merged neighbors of neighbors
std::vector<NeighborGroup> getMergeCandidates();
//! Assign schedule heuristic to this group
void setHeuristic(ScheduleHeuristic sh) {
heuristic_ = sh;
}
//! Assign Id for this group
void setID(int id) {
TORCH_INTERNAL_ASSERT(group_id_ == -1);
group_id_ = id;
}
//! SegmentedFusion this group belongs to
SegmentedFusion* segmented_fusion_;
};
std::ostream& operator<<(std::ostream& os, const SegmentedGroup* group);
//! Auxiliary class for storing heuristics. The managed data is either
//! a single scheduler entry for complete fusion,
//! or a vector of schedulers, one for each segment, for segmented fusion.
class TORCH_CUDA_CU_API FusionHeuristics {
using SchedulerEntryOwningPtr = std::unique_ptr<SchedulerEntry>;
public:
//! Constructor for segmented fusion case. Created with empty list and
//! uses emplaceBack for inserting heuristics in order
explicit FusionHeuristics() = default;
//! Constructor for complete fusion case, generates the scheduler entry
//! for the fusion owning the given expression
explicit FusionHeuristics(
ScheduleHeuristic schedule_heuristic,
SchedulerRuntimeInfo& runtime_info,
HeuristicSummary* data_cache = nullptr) {
heuristics_.emplace_back(SchedulerEntry::makeEntry(
schedule_heuristic, runtime_info.fusion(), runtime_info, data_cache));
is_segmented_ = false;
}
FusionHeuristics(const FusionHeuristics&) = delete;
FusionHeuristics& operator=(const FusionHeuristics&) = delete;
//! Place a scheduler entry on the list. Applies to segmented fusion only.
void emplaceBack(SchedulerEntryOwningPtr&& pt) {
TORCH_INTERNAL_ASSERT(is_segmented_);
heuristics_.emplace_back(std::move(pt));
}
  //! Returns the list of schedulers for a segmented fusion.
const std::vector<SchedulerEntryOwningPtr>& heuristicsList() const {
return heuristics_;
}
//! Returns the single scheduler for a complete fusion.
SchedulerEntry* singleKernelHeuristics() {
TORCH_INTERNAL_ASSERT(!is_segmented_);
return heuristics_.begin()->get();
}
private:
std::vector<SchedulerEntryOwningPtr> heuristics_;
bool is_segmented_ = true;
};
//! Exported Interface for representing segmented fusion graph
//! this class owns the segmented groups
class TORCH_CUDA_CU_API SegmentedFusion {
public:
explicit SegmentedFusion(std::unique_ptr<Fusion> fusion);
//! Factory function for the un-segmented case, directly
//! constructs a "SegmentedFusion", with the given Fusion
//! as the only group.
static std::unique_ptr<SegmentedFusion> fromCompleteFusion(
std::unique_ptr<Fusion> fusion,
ScheduleHeuristic heuristic);
//! Is the fusion segmented?
bool isSegmented() const {
return !groups_.empty();
}
std::vector<SegmentedGroup*>& groups() {
return groups_;
}
std::vector<SegmentedEdge*>& edges() {
return edges_;
}
const std::vector<SegmentedGroup*>& cgroups() const {
return groups_;
}
const std::vector<SegmentedEdge*>& cedges() const {
return edges_;
}
//! Returns the original un-segmented fusion
Fusion* completeFusion() const {
return complete_fusion_.get();
}
const auto& inputs() const {
return complete_fusion_->inputs();
}
const auto& outputs() const {
return complete_fusion_->outputs();
}
Val* findAlias(Val* val) const {
auto alias_it = complete_fusion_->ioAlias().find(val);
if (alias_it != complete_fusion_->ioAlias().end()) {
return alias_it->second;
}
return nullptr;
}
//! Make a clone of the group and convert to fusion
std::unique_ptr<Fusion> makeFusion(SegmentedGroup* sg);
//! Make heuristics for all groups in this segmented fusion
std::unique_ptr<FusionHeuristics> makeInitialHeuristics(
const KernelArgumentHolder& inputs);
//! Inline Debug print for segmented fusion
std::string toString(int verbosity) const;
//! Debug drawing for graphviz
void draw();
//! Debug print for segmented fusions
void print() const;
//! API for adding groups
SegmentedGroup* newGroup();
//! API shortcut for adding a singleton group
SegmentedGroup* newGroup(Expr* expr);
//! API for adding edges
SegmentedEdge* newEdge(SegmentedGroup* from, SegmentedGroup* to, Val* val);
HeuristicSummary* getCachedHeuristicDataFor(SegmentedGroup* group);
private:
//! Unique name for segmented fusion
int segmented_fusion_name_;
//! States representing segmentation
std::vector<SegmentedEdge*> edges_;
std::vector<SegmentedGroup*> groups_;
//! Owning object to explicitly manage groups and edges
class Impl {
public:
explicit Impl(SegmentedFusion* sf) : owning_fusion_(sf) {}
SegmentedGroup* makeGroup();
SegmentedGroup* makeGroup(Expr*);
SegmentedEdge* makeEdge(SegmentedGroup* from, SegmentedGroup* to, Val* val);
void cleanUnused();
private:
using GroupPtr = std::unique_ptr<SegmentedGroup>;
using EdgePtr = std::unique_ptr<SegmentedEdge>;
std::vector<GroupPtr> groups_;
std::vector<EdgePtr> edges_;
SegmentedFusion* owning_fusion_;
};
Impl impl_;
//! A Copy of original full fusion
std::unique_ptr<Fusion> complete_fusion_;
//! A set of intermediate tensors that need to be cast to fp16
std::unordered_set<TensorView*> force_fp16_tv_set_;
DataType force_half_precision_type_;
//! Static traversal information to be used for fast heuristics lookup
std::unordered_map<SegmentedGroup*, std::unique_ptr<HeuristicSummary>>
heuristic_summary_cache_;
// TODO: this class needs cleanup
protected:
friend class SegmentCandidateFinder;
//! Make a heuristics entry for a group and parameters
std::unique_ptr<SchedulerEntry> makeInitialSchedulerEntry(
SegmentedGroup* sg,
SchedulerRuntimeInfo& runtime_info);
  //! Cleanup function to be called at the end of the fusion
  //! segmentation pass
void finalize();
  //! Collect all the intermediate tensors between segmented
  //! groups that will be cast to fp16
void annotateFP16IntermediateTensors();
//! Keep heuristic checking intermediate data
void setCachedHeuristicDataFor(
SegmentedGroup* group,
std::unique_ptr<HeuristicSummary> data);
//! Utility to give unique name for each segmented fusion
static size_t segmentedFusionName() {
static size_t counter = 0;
return counter++;
}
};
//! This is a base class for segmenter analyses. It
//! provides the minimal implementation in the header so that
//! a unique_ptr can use this base class;
//! actual implementations of analyses are in the .cpp files
//! TODO: In the next refactor PR, should put segment candidate
//! finder in .cpp file completely since API doesn't require these
//! details
class SegmenterAnalysis : public PolymorphicBase {};
class GroupDependencyAnalysis;
// Manual node merging passes
class CombineReductions;
//! Options to configure/debug candidate finder
struct TORCH_CUDA_CU_API SegmentCandidateFinderOptions {
bool run_translate_welford = true;
bool run_combine_reductions = true;
bool run_herrmann_merge = true;
bool run_final_merge = true;
};
//! SegmentCandidateFinder
//! Responsible for going through DAG and proposing things we could try to
//! fuse together, calls "canGenerateCode" on these proposed segments to see
//! if they are valid and we can generate code for them.
//! FusionSegment
//! A group of exprs that are segmented together
//! FusionSegmentConnections
//! Holds vals and what they connect. In other words it's a val that is an
//! output of a FusionSegment "from" and an input of FusionSegment "to".
//!     There's nothing preventing a val from being between segments twice.
//! TODO: make sure there's nothing wrong with segmentation on nodes that
//! have the same value input twice. i.e. (B = A*A)
//! Selecting segments to propose is based on Theorem 4.2 in the paper, which
//! makes sure that after segmentation the segmented graph is a DAG (assumes
//! the Fusion is already a DAG). The segmentation code relies on assumptions
//! of DAG-ness during segmentation, meaning proposed merging of groups must
//! maintain the DAG property of the graph.
//!
//! Julien Herrmann, Yusuf Özkaya, Bora Uçar, Kamer Kaya, Umit Catalyurek.
//! Multilevel Algorithms for Acyclic Partitioning of Directed Acyclic Graphs.
//! SIAM Journal on Scientific Computing, Society for Industrial and Applied
//! Mathematics, 2019, 41 (4), pp. A2117-A2145. doi:10.1137/18M1176865.
//! hal-02306566
class TORCH_CUDA_CU_API SegmentCandidateFinder {
public:
// Perform segmentation on a copy of the given fusion
static std::unique_ptr<SegmentedFusion> segment(
const Fusion* fusion,
const KernelArgumentHolder& inputs,
SegmentCandidateFinderOptions options = SegmentCandidateFinderOptions()) {
auto fusion_copy = std::make_unique<Fusion>(*fusion);
if (isDebugDumpEnabled(DebugDumpOption::FusionSegments)) {
std::cout << "Segment the fusion (Original Fusion Un-modified): "
<< std::endl;
fusion_copy->printMath();
}
SegmentCandidateFinder scf(std::move(fusion_copy), inputs, options);
return std::move(scf.segmented_fusion_);
}
// Perform segmentation on and take ownership of the given fusion
static std::unique_ptr<SegmentedFusion> segment(
std::unique_ptr<Fusion> fusion,
const KernelArgumentHolder& inputs,
SegmentCandidateFinderOptions options = SegmentCandidateFinderOptions()) {
SegmentCandidateFinder scf(std::move(fusion), inputs, options);
if (isDebugDumpEnabled(DebugDumpOption::FusionSegments)) {
std::cout << "Segment the fusion (Original Fusion Un-modified): "
<< std::endl;
scf.completeFusion()->printMath();
}
return std::move(scf.segmented_fusion_);
}
static bool TranslateWelfordInFusion(
Fusion* fusion,
const KernelArgumentHolder& runtime_inputs);
private:
// Perform segmentation on and take ownership of the given fusion
SegmentCandidateFinder(
std::unique_ptr<Fusion> fusion,
const KernelArgumentHolder& inputs,
SegmentCandidateFinderOptions options);
void resetTraversal();
void resetLevels();
SegmentedGroup* mergeNodes();
bool codeGenSupportedMerge(SegmentedGroup* group1, SegmentedGroup* group2);
void findSegments();
std::unordered_set<SegmentedEdge*> disconnectGroup(SegmentedGroup* group);
std::vector<SegmentedGroup*>& groups() {
TORCH_INTERNAL_ASSERT(
        segmented_fusion_ != nullptr, "Segment finder not owning any fusion");
return segmented_fusion_->groups();
}
std::vector<SegmentedEdge*>& edges() {
TORCH_INTERNAL_ASSERT(
        segmented_fusion_ != nullptr, "Segment finder not owning any fusion");
return segmented_fusion_->edges();
}
Fusion* completeFusion() {
TORCH_INTERNAL_ASSERT(
        segmented_fusion_ != nullptr, "Segment finder not owning any fusion");
return segmented_fusion_->completeFusion();
}
SchedulerRuntimeInfo& runtimeInfo() {
return runtime_info_;
}
ExpressionEvaluator& expressionEvaluator() {
return runtime_info_.expressionEvaluator();
}
//! Additional merging iteration, clean up the rest of
//! the merging opportunities
//! Herrmann et al. is a fast and safe algorithm for finding merge candidates
//! but can become too conservative in our use cases because we place
//! additional qualifiers on valid merges other than having to generate DAGs,
//! i.e. canSchedule. So we need a bruteforce final merging iteration as a
//! clean up pass. Cost isn't expected to be high since the graph at this
//! stage is already quite merged. Example cf. test_gpu.cpp:
//! FusionDAGMerging_CUDA
//!
//! This merging algorithm is based on Theorem 4.1 of Herrmann et al.,
//! to check if a producer-consumer pair can be merged into one group,
//! it's enough to check if any other consumer of the producer also
//! produces the consumer.
void finalMerge();
//! Duplicate and add all exprs producing the used
//! scalar values in group
void resolveScalarsInGroup(SegmentedGroup* group);
//! Duplicate and add all exprs from "inputs" in the group, to complete
//! inputs. These expressions are simply unary ops of inputs that we want to
//! recompute for each segment, instead of computing and producing a segmented
//! val. For example if we have:
//! tv1 = tv0 * 2;
//! tv3 = tv1 + tv2;
//! tv4 = tv1 + tv4
//! If we segmented on tv1, we would be producing an output for tv1 for 2
//! groups that have tv3 or tv4, instead we could easily recompute tv1 from
//! tv0.
void resolveInputsInGroup(SegmentedGroup* group);
//! Remove all scalar edges in group
//! (TODO: need structure better so we don't have to do this)
void removeScalarEdges();
//! Utility function to merge a vector of groups in one step,
//! need to check for DAG condition before using this method
SegmentedGroup* mergeAllGivenGroups(
const std::vector<SegmentedGroup*>& groups);
//! Utility to remove a group and corresponding edges
//! TODO: remove inline versions of this as much as possible
void eraseGroups(std::unordered_set<SegmentedGroup*>& groups_to_erase);
void finalize();
//! Return the resulting heuristic corresponding to the merged
//! group built by merging the two groups connected by edge
ScheduleHeuristic deriveHeuristic(SegmentedGroup* edge);
GroupDependencyAnalysis* getGroupDependency();
protected:
  //! These are the merge node heuristic passes; they should
  //! eventually have a dedicated interface
  //! instead of adding more and more friends
friend class CombineReductions;
//! options to configure and debug the segment process
SegmentCandidateFinderOptions options_;
std::deque<SegmentedGroup*> to_visit_;
std::vector<SegmentedGroup*> next_to_visit_;
std::unordered_set<SegmentedGroup*> clean_up_groups_;
std::unordered_set<SegmentedEdge*> clean_up_edges_;
std::vector<SegmentedGroup*> to_merge_;
std::unique_ptr<SegmentedFusion> segmented_fusion_;
std::unique_ptr<SegmenterAnalysis> group_dependency_;
SchedulerRuntimeInfo runtime_info_;
//! Note:
//! Segmenter should eventually rely only on runtime_info_ for
//! safe caching. runtime_inputs_ is only used in translateWelford
//! to initialize expression evaluators on copies of the original
//! fusion, which doesn't use any un-cached info and is safe.
//!
//! Directly using runtime_inputs_ in other cases is in general
//! risky.
//!
//! To get rid of runtime_inputs_ we need mechanisms
//! to copy expression evaluator values from fusion
//! to a copy, or even better to a copy of a
//! sub-graph of original fusion.
//! TODO:
//! implement the expression evaluator transfer and
//! remove runtime_inputs_ in a follow up.
const KernelArgumentHolder& runtime_inputs_;
};
// TODO: Make as member functions on classes instead of global scope
TORCH_CUDA_CU_API std::string toString(const SegmentedGroup* group);
TORCH_CUDA_CU_API std::string toString(const SegmentedEdge* edge);
TORCH_CUDA_CU_API std::string toString(const SegmentedFusion* segmented_fusion);
TORCH_CUDA_CU_API std::string toString(
const SegmentCandidateFinderOptions& segment_options);
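//! A minimal usage sketch (illustrative only; `fusion` and `args` are
//! hypothetical stand-ins for a populated Fusion and the runtime
//! KernelArgumentHolder):
//!
//!   SegmentCandidateFinderOptions options;
//!   options.run_final_merge = false; // e.g. skip the brute-force cleanup pass
//!   auto segmented_fusion =
//!       SegmentCandidateFinder::segment(fusion.get(), args, options);
//!   std::cout << toString(segmented_fusion.get());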
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 20,721
| 31.944356
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/grouped_reduction.h
|
#pragma once
#include <ir_all_nodes.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Horizontally fuse multiple reductions.
//!
//! Given a list of tensors produced by ReductionOp, create a new
//! GroupedReductionOp expression that takes the input tensors of the
//! original reductions and produces the given tensors, replacing
//! their defining expressions.
//!
//! GroupedReductionOp works just like ReductionOp with a potential
//! benefit of aggregating synchronizations across individual
//! reductions. See the reduction::gridReduce2 runtime function for a
//! two-input version of grid reduction.
//!
//! The grouped reductions must follow several constraints, which
//! include:
//! - There must not exist any data dependency between individual
//! reductions.
//! - All reduction output tensors must have the same number of
//! dimensions, the same transformations and the same axes to
//! reduce.
//!
//! Note that Welford is not allowed yet, though it should be
//! technically straightforward to support horizontal fusions of
//! welford ops. Unclear how common it would be in practice, though.
//!
//! \param reduction_outputs Tensors produced by ReductionOp
TORCH_CUDA_CU_API void groupReductions(
const std::vector<TensorView*>& reduction_outputs);
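//! A minimal sketch (illustrative only; tv0 and tv1 are hypothetical fusion
//! inputs with matching shapes, and `sum` is the usual arith reduction
//! helper):
//!
//!   auto tv2 = sum(tv0, {1}); // ReductionOp
//!   auto tv3 = sum(tv1, {1}); // independent ReductionOp, same reduced axis
//!   groupReductions({tv2, tv3}); // now defined by one GroupedReductionOp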
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,389
| 32.095238
| 69
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/index_compute.h
|
#pragma once
#include <iter_visitor.h>
#include <root_domain_map.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
/*
* Index compute takes in a list of indices typically generated from the
 * surrounding for loop nest. The number of indices is intended to match the
 * number of dimensions of the incoming TensorView, which may have fewer or
 * more dimensions than its root due to split/merge operations.
 * Split/merge operations are then replayed backwards to produce resulting
 * indices (based on input indices) that match the root dimensions.
*
* For example with GLOBAL tensor:
* TV[I, K]
* TV[Io, Ii{4}, K] = TV.split(I, factor=4)
* ALLOC: NONE
* INDEX: indexCompute {i, j, k} -> {i * 4 + j, k}
* FLATTENED_INDEX: {i * 4 + j, k} -> {(i * 4 + j) * K + k}
* PREDICATE: {i * 4 + j, k} -> i * 4 + j < I
*
*
* For example with SHARED tensor:
*
* global_TV[I, K]
* global_TV[Io, Ii{4}, K] = global_TV.split(I, factor=4)
* smem_TV.compute_at(global_TV, 1)
* global_TV.parallelize(1, threadIDx.x)
*
* ALLOC: alloc(smem_TV, 4 x K)
* INDEX: indexCompute(smem_TV, {threadIdx.x, k}) -> {threadIdx.x, k}
* FLATTENED_INDEX: {threadIdx.x * 4 + j, k} -> {(threadIdx.x * 4 + j) * K + k}
* PREDICATE: {threadIdx.x * 4 + j, k} -> threadIdx.x * 4 + j < I // Same as if
* global
*
*
* For example with LOCAL tensor:
* global_TV[I, K, L]
* global_TV[Io, Ii{4}, K, L] = global_TV.split(I, factor=4)
* reg_TV.compute_at(global_TV, 2)
* global_TV.parallelize(1, threadIDx.x)
* global_TV{i, j, k, l} -> { i * 4 + j, k, l }
* global_TV{ i * 4 + j, k, l } -> { (i * 4 + j) * K * L + k * L + l}
*
* ALLOC: alloc(reg_TV, K x L)
* INDEX: {k, l} -> {k, l}
* FLATTENED_INDEX: {k, l} -> {k * L + l}
* PREDICATE: i * 4 + j < I && k < K && l < L -> // Same as if global
*
* These indices can then be flattened later based on strides.
*/
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class ContigIDs;
class LoopIndexing;
struct IndexFromIdGraph;
class IndexCompute : public BackwardVisitor {
protected:
using BackwardVisitor::handle;
void handle(Split*) override;
void handle(Merge*) override;
void handle(Expr*) override;
void handle(Swizzle2D*) override;
// return extent_map_[id] if exists, else return id->extent()
Val* getExtent(IterDomain* id) const;
//! True if a domain is not used to index
bool isZero(IterDomain* id) const;
//! True if any dependent of a domain is not used to index
bool hasZeroMerged(IterDomain* id) const;
//! Returns the concrete ID from the compute at EXACT mode map if
//! concrete_id_pass == true, otherwise returns id passed in.
//! Helps unify the expr handling logic in reference domain and concrete id
//! based traversal.
IterDomain* maybeGetExactMapConcreteID(IterDomain* id);
//! (Concrete indexing pass only)
//! Collect permissive index binding from the given expression.
//! See also permissive_map_ and LoopIndexing::getBackwardOutOfLineExprList.
void collectIndexIntoPermissiveMap(const LoopIndexing& loop_indexing);
//! (Concrete indexing pass only)
//! Iterate through id_expr's input and pull index vals from permissive
//! map, when both of the following are true:
//! 1. the output id is missing in index_map_.
//! 2. the output id is found in permissive map.
void updateIndexMapFromPermissiveMap(const Expr* id_expr);
// Tensor domain we're mapping back to root
const TensorDomain* td_; // NOLINT
// Map we update as we propagate backward, containing all IDs in the
// propagation. Initial indices are mapped with this map at tv->domain()
// and are back propagated to tv->getRootDomain(). This index_map_ keeps the
// indices at intermediate IterDomain's in that back propagation.
std::unordered_map<IterDomain*, Val*> index_map_; // NOLINT
// Map from IterDomain to their broadcasted extent. If a TV has I0*I1 but its
// producer has B0*I1 this map will contain a mapping from the ID{B0*I1} to
// the extent I0*I1. Also contains updated extents if we merge in a 0 index.
// See zero_merged_in_.
std::unordered_map<IterDomain*, Val*> extent_map_; // NOLINT
// Keeps track of domains that do not contribute to indexing
std::unordered_set<IterDomain*> zero_domains_; // NOLINT
// This set keeps track of IterDomain's that have had a zero index merged into
// them. This happens if we do something like tv->axis(0)->split(4) then
// tv->computeAt(1, ...) if this tensor is in smem or lmem the backward
// indexing would be (0, i) then when we do the backward computation that zero
// and i would attempt to be merged together. We handle indices like these
// specially.
std::unordered_set<IterDomain*> zero_merged_in_;
// IDs that are a result of contiguous merges
std::unordered_set<IterDomain*> contig_ids_;
// Map from root to indexed domains
std::unordered_map<IterDomain*, IterDomain*> root_to_indexed_id_;
  // Indicates whether we should propagate an index down a particular
  // IterDomain path when there is a choice
std::unordered_set<IterDomain*> preferred_paths_;
// Map from IterDomains to halo-extended extents
std::unordered_map<IterDomain*, Val*> halo_extent_map_;
// Temporary flag which tells IndexCompute to use concrete id's from the exact
// map rather than the actual IDs used in the ID expressions.
bool concrete_id_pass_ = false;
// Mode of swizzle that are activated in this index compute
// instance. Will treat swizzles of different mode as no-op.
// Currently data mode swizzles are handled same as before in IndexSwizzle
// pass, while loop mode swizzles are handled early on in concrete indexing
// pass. See also [Note on swizzle mode]
SwizzleMode swizzle_mode_ = SwizzleMode::NoSwizzle;
// (Concrete id pass only)
// Contains the indexing math that could be resolved with only the
// iterdomains on the right of the consumer_tv's ca axis, i.e. the
  // ones corresponding to the loops that consumer_tv would not
// share with any of its consumers.
// These indexing vals should be kept separate from index_map_ and
// should only be used when the indexing traversal follows the
// order defined in LoopIndexingAnalysis::traverseFromDomainVals.
std::unordered_map<IterDomain*, Val*> permissive_index_map_;
public:
const std::unordered_map<IterDomain*, Val*>& indexMap() const {
return index_map_;
}
const std::unordered_map<IterDomain*, Val*>& extentMap() const {
return extent_map_;
}
const std::unordered_set<IterDomain*>& zeroDomains() const {
return zero_domains_;
}
const std::unordered_set<IterDomain*>& zeroMergedIn() const {
return zero_merged_in_;
}
const std::unordered_map<IterDomain*, IterDomain*>& rootToContigID() const {
return root_to_indexed_id_;
}
// Propagate back from _td using initial_index_map
IndexCompute(
const TensorDomain* _td,
std::unordered_map<IterDomain*, Val*> initial_index_map,
std::unordered_map<IterDomain*, Val*> _extent_map,
std::unordered_set<IterDomain*> zero_domains,
std::unordered_set<IterDomain*> _zero_merged_in,
std::unordered_set<IterDomain*> preferred_paths = {},
std::unordered_map<IterDomain*, Val*> halo_extent_map = {});
IndexCompute(
const TensorDomain* _td,
std::unordered_map<IterDomain*, Val*> initial_index_map,
std::unordered_map<IterDomain*, Val*> _extent_map,
std::unordered_set<IterDomain*> zero_domains,
std::unordered_set<IterDomain*> _zero_merged_in,
const ContigIDs& contig_finder,
std::unordered_set<IterDomain*> preferred_paths = {},
std::unordered_map<IterDomain*, Val*> halo_extent_map = {});
// Entry point used for using concrete id based traversal. This traversal is
// assumed to start at leaf IDs provided by initial_index_map.
IndexCompute(
std::unordered_map<IterDomain*, Val*> initial_index_map,
std::unordered_set<IterDomain*> zero_domains,
std::unordered_set<IterDomain*> preferred_paths,
std::unordered_map<IterDomain*, Val*> concrete_halo_extent_map);
// Updates index_map, extent_map, and zero_merged_in based on id_map and
// returns a new IndexCompute ready to be used.
IndexCompute updateIndexCompute(
const TensorDomain* new_td,
const std::unordered_map<IterDomain*, IterDomain*>& id_map,
const ContigIDs& contig_finder) const;
// Interface to run index traversal through loop indexing analysis result to
// be used with the entry point for concrete id based traversal.
void run(const LoopIndexing& loop_indexing);
virtual void run();
};
//! Apply swizzle and update root indices accordingly
class IndexSwizzle : public IndexCompute {
public:
IndexSwizzle(
const TensorView* tv,
std::unordered_map<IterDomain*, Val*> initial_index_map,
std::unordered_map<IterDomain*, Val*> extent_map,
std::unordered_set<IterDomain*> zero_domains,
std::unordered_set<IterDomain*> zero_merged_in);
IndexSwizzle(
const TensorView* tv,
const TensorDomain* domain,
std::unordered_map<IterDomain*, Val*> initial_index_map,
std::unordered_map<IterDomain*, Val*> extent_map,
std::unordered_set<IterDomain*> zero_domains,
std::unordered_set<IterDomain*> zero_merged_in);
void run() override;
protected:
using IndexCompute::handle;
void handle(Expr* e) override;
void handle(Swizzle2D* swizzle_2d) override;
private:
const TensorView* tv_ = nullptr;
SwizzleType swizzle_type_ = SwizzleType::NoSwizzle;
std::vector<IterDomain*> ids_to_swizzle_;
std::unordered_set<IterDomain*> swizzled_ids_;
};
//! Predicate information of a root or contiguous merged domain
class RootPredicateInfo {
friend class Index;
public:
const auto& startPredicate() const {
return start_predicate_;
}
auto& startPredicate() {
return start_predicate_;
}
const auto& startOffset() const {
return start_offset_;
}
const auto& stopPredicate() const {
return stop_predicate_;
}
const auto& stopOffset() const {
return stop_offset_;
}
const auto& rootIds() const {
return root_ids_;
}
//! Return a false RootPredicateInfo, i.e., both start and stop
//! predicates are false.
static RootPredicateInfo getFalseInfo();
private:
  // predicate for lower end
Bool* start_predicate_ = nullptr;
  // predicate for upper end
Bool* stop_predicate_ = nullptr;
// Offset of the start predicate
Val* start_offset_ = nullptr;
// Offset of the stop predicate
Val* stop_offset_ = nullptr;
// Track which roots have been handled by the generated predicates
std::unordered_set<IterDomain*> root_ids_;
};
// Simple interface for IndexCompute
// If getComputeAtAxis and more generally TensorView const model is fixed, we
// can make the below tensorviews const.
class Index {
private:
// Producer indexing if it's in shared or local memory
static std::vector<Val*> getNonGlobalProducerStridedIndices(
TensorView* producer,
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
// Consumer indexing if it's in shared or local memory
static std::vector<Val*> getNonGlobalConsumerStridedIndices(
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
// Producer if it's in global memory
static std::vector<Val*> getGlobalProducerStridedIndices(
TensorView* producer,
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
// Consumer indexing if it's in global memory
static std::vector<Val*> getGlobalConsumerStridedIndices(
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
// get the strides of a tensor used for the index lowering
static std::vector<Val*> getStrides(const TensorView* tv);
// get the root indices of a tensor used for the index lowering
static std::vector<Val*> getRootIndices(
const TensorView* tv,
const std::vector<kir::ForLoop*>& loops,
const IndexFromIdGraph& index_from_id_graph);
public:
// Indexing functions
// Consumer = Producer
// i.e. T0 = T1... -> T0 is the consumer, T1 is the producer
// Producer indexing dispatch
static kir::TensorIndex* getProducerIndex(
TensorView* producer,
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
// Consumer index dispatch
static kir::TensorIndex* getConsumerIndex(
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
//! Returns a vector of strided indices mapped onto the (rfactor)
//! root domain of a producer tensor. The size of the returned
//! vector is guaranteed to be equal to the number of axes of the
//! indexing root domain.
static std::vector<Val*> getProducerStridedIndices(
TensorView* producer,
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
//! Returns a vector of strided indices mapped onto the (rfactor)
//! root domain of a consumer tensor. The size of the returned
//! vector is guaranteed to be equal to the number of axes of the
//! indexing root domain.
static std::vector<Val*> getConsumerStridedIndices(
const TensorView* consumer,
const std::vector<kir::ForLoop*>& loops);
  //! Returns the logical index linearized from a multi-dimensional address
  //! into a linear memory address of a consumer tensor. The returned index is
  //! intended to
//! be used for the computation of some tensor factories, such as: arange and
//! rand (for Philox pseudo random sequences)
static std::vector<Val*> getLinearLogicalIndex(
TensorView* consumer_tv,
const std::vector<kir::ForLoop*>& loops);
//! Returns a vector of logical indices mapped onto the (rfactor)
//! root domain of a consumer tensor. The returned index is intended
//! to be used for the computation of some tensor factories, such as:
//! eye
static std::vector<Val*> getPerDimLogicalIndex(
TensorView* consumer_tv,
const std::vector<kir::ForLoop*>& loops);
  //! Take a consumer tensorview and loop nest and generate predicates
//! associated with the concrete roots of the loop nest. Returns a list of
//! predicates, and a list of concrete roots they're associated with. It
//! is assumed that no predicate is required if index[i] is an index
//! directly from a for loop. This will not catch all cases if we actually
//! have static size information for example:
//!
//! TV[I].split(4)
//! would produce the code:
//! for(i : I/4)
//! for(j : 4)
//! if( i * 4 + j < TV.size(0))
//! TV[i * 4 + j]...
//!
//! However if we had TV.size[0] = 16 at "compile time" then we wouldn't
//! need the predicate. This will be caught by canOmitPredicate in the
//! predicate lowering
//!
  //! unswitch_or_vec_loop is the for loop to start the unswitch-like
  //! predicate from. This is not a bool value because, if we have an unswitch
  //! loop with a vectorized loop inside, we only want to base the "unswitch"
  //! like predicate on the vectorized loop.
static std::vector<RootPredicateInfo> getReferenceRootPredicates(
TensorView* consumer_tv,
const std::vector<kir::ForLoop*>& loops,
kir::ForLoop* unswitch_or_vec_loop,
bool padding_predicate);
};
// Used for local and shared index mapping. Returns a map from loops
// to loop indices as well as a set of loops that do not contribute to
// indexing.
// TODO: could be cleaned up further.
std::pair<
std::unordered_map<kir::ForLoop*, Val*>,
std::unordered_set<kir::ForLoop*>>
indexMapFromTV(
const TensorView* tv,
const std::vector<kir::ForLoop*>& loops,
kir::ForLoop* alloc_loop,
bool as_consumer,
kir::ForLoop* double_buffer_loop = nullptr);
//! Set "pragma unroll" required for loops that indexing of Local
//! tensors depends on.
//!
//! \param tv Indexed tensor
//! \param alloc_loop Allocation loop of tv
//! \param loops The current loop structure
//! \param id_map Producer-to-consumer map in case of indexing as producer
void ensureStaticIndexing(
const TensorView* tv,
kir::ForLoop* alloc_loop,
const std::vector<kir::ForLoop*>& loops,
const std::unordered_map<IterDomain*, IterDomain*>& id_map = {});
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 16,388
| 35.582589
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/inlining.h
|
#pragma once
#include <ir_interface_nodes.h>
#include <maxinfo_propagator.h>
#include <transform_replay.h>
#include <memory>
#include <unordered_set>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class MaxPosCalculator {
  // Root domains in a producer that are unmappable to any of its consumers
std::unordered_set<IterDomain*> unmappable_dims_;
// User set IterDomains to not inline, used in schedulers to avoid inlining
// trivial reductions
std::unordered_set<IterDomain*> uninlinable_ids_;
// Iterate through all TVs and collect the dimensions of each TV that don't
// map to all its consumer TVs.
void buildUnmappableDims();
// Utility function to return if an id of tv is a valid iter domain to inline
// within. This is used in getMaxPos{PasC,CasP}. Different variations of the
// bool values are used if checking max position of PasC, CasP, or checking
// for a max "self" position.
bool isAllowedID(
IterDomain* id,
TensorView* tv,
bool best_effort,
bool allow_reduction,
bool allow_vectorize,
bool allow_unmappable) const;
public:
  // Returns the maximum position at which tv can be inlined.
size_t getMaxPosSelf(
TensorView* tv,
bool best_effort,
bool allow_reduction,
bool allow_vectorize,
bool allow_unmappable) const;
// Returns the maximum position producer can be inlined based on consumer
// given the set ComputeAtMode
size_t getMaxProducerPosFromConsumer(
TensorView* producer,
TensorView* consumer,
bool best_effort) const;
// Checks producers, consumers, and siblings to see what the maximum position
// in tv is that can be shared across both directions.
size_t getMaxPosAll(
TensorView* tv,
bool best_effort = false,
bool check_siblings = true);
MaxPosCalculator(const std::unordered_set<IterDomain*>& uninlinable_ids = {});
};
// Inline to the rightmost allowed position for all tensors in the current
// fusion.
TORCH_CUDA_CU_API void inlineMost(
const std::unordered_set<IterDomain*>& uninlinable_ids = {});
// Inline to the rightmost allowed position for the selected tensors in the
// current fusion.
TORCH_CUDA_CU_API void inlineMost(
const std::vector<TensorView*>& tvs,
const std::unordered_set<IterDomain*>& uninlinable_ids = {});
// Inline to the rightmost allowed position for the selected tensors in the
// current fusion.
TORCH_CUDA_CU_API void inlineMost(
const std::unordered_set<TensorView*>& tvs,
const std::unordered_set<IterDomain*>& uninlinable_ids = {});
// Inline to the position corresponding to the reference position in the
// reference tensor for all tensors in the current fusion.
TORCH_CUDA_CU_API void inlineAllAt(
TensorView* reference_tv,
int64_t reference_pos,
bool best_effort = false,
const std::unordered_set<IterDomain*>& uninlinable_ids = {});
// Inline to the position corresponding to the reference position in the
// reference tensor for selected tensors in the current fusion.
TORCH_CUDA_CU_API void inlineSelectedAt(
const std::unordered_set<TensorView*>& selected,
TensorView* reference_tv,
int64_t reference_pos,
bool best_effort = false,
const std::unordered_set<IterDomain*>& uninlinable_ids = {});
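// A minimal usage sketch (illustrative only; tv2 is a hypothetical reference
// tensor in the active fusion):
//
//   inlineMost(); // inline every tensor at its rightmost allowed position
//   // ...or inline everything at a chosen position of a reference tensor:
//   inlineAllAt(tv2, /*reference_pos=*/1, /*best_effort=*/true);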
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 3,386
| 32.534653
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/instrumentation.h
|
#pragma once
#include <utils.h>
#include <nvToolsExt.h>
// NOLINTNEXTLINE(modernize-deprecated-headers)
#include <stdio.h>
#include <chrono>
#include <cstdio>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace inst {
//! An optional record of selected timestamped operations, events and counters
//!
//! This class is not intended to be used directly. Instead, the operations
//! to be traced are marked (for example using the FUSER_PERF_SCOPE macro)
//!
//! In order to enable tracing, the `PYTORCH_NVFUSER_TRACE` environment
//! variable is set to point to a trace file (ex `test.trace`). The file name
//! may be a relative or an absolute path.
//!
//! The trace uses the Chrome Tracing (Catapult) format, which is a well
//! documented JSON based format supported by multiple tools:
//! https://chromium.googlesource.com/catapult/+/HEAD/tracing/README.md
//!
//! An easy way to view traces is to type `about://tracing` in Chrome or
//! Chromium.
//!
class TORCH_CUDA_CU_API Trace : public NonCopyable {
public:
using Clock = std::chrono::steady_clock;
public:
static Trace* instance() {
static Trace trace;
return &trace;
}
void beginEvent(const char* name) {
if (log_file_ != nullptr) {
logEvent('B', name);
}
if (record_nvtx_range_) {
nvtxRangePushA(name);
}
}
void endEvent(const char* name) {
if (record_nvtx_range_) {
nvtxRangePop();
}
if (log_file_ != nullptr) {
logEvent('E', name);
}
}
private:
Trace();
~Trace();
void logEvent(char ph, const char* name, char sep = ',');
private:
FILE* log_file_ = nullptr;
Clock::time_point start_timestamp_;
bool record_nvtx_range_ = true;
};
//! \internal Automatic scope for a perf marker
//! (normally used through the FUSER_PERF_SCOPE macro)
class TORCH_CUDA_CU_API TraceScope : public NonCopyable {
public:
explicit TraceScope(const char* event_name) : event_name_(event_name) {
Trace::instance()->beginEvent(event_name_);
}
~TraceScope() {
Trace::instance()->endEvent(event_name_);
}
private:
const char* event_name_ = nullptr;
};
#define FUSER_MACRO_CONCAT2(a, b) a##b
#define FUSER_MACRO_CONCAT(a, b) FUSER_MACRO_CONCAT2(a, b)
#define FUSER_ANONYMOUS(prefix) FUSER_MACRO_CONCAT(prefix, __COUNTER__)
//! Defines a scope we want to measure and record in a perf trace
//!
//! \param name The name of the scope, normally a simple string literal
//!
#define FUSER_PERF_SCOPE(name) \
torch::jit::fuser::cuda::inst::TraceScope FUSER_ANONYMOUS(_perf_scope_)(name)
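//! Example usage (illustrative only; with `PYTORCH_NVFUSER_TRACE=test.trace`
//! set, the scope below is recorded to the trace file and as an NVTX range):
//!
//!   void lowerFusion() {
//!     FUSER_PERF_SCOPE("lowerFusion"); // RAII: ends when the scope exits
//!     // ... work to be measured ...
//!   }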
} // namespace inst
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 2,676
| 24.254717
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_base_nodes.h
|
#pragma once
#include <c10/core/ScalarType.h>
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <type.h>
#include <utils.h>
#include <cstdint>
#include <iostream>
#include <limits>
#include <memory>
#include <stdexcept>
#include <unordered_map>
#include <vector>
// TODO: Add more types (int32, int64)
// TODO: sameAs should have better logic to check against any type and return
// gracefully
/*
* This file defines the base IR structure. Any IR node in this system will
* inherit from one of the following classes: Statement, Expr, Val,
 * IrInputOutput. IR is any information that the code generation stack may need
 * for analysis. By analysis we're referring to anything done in response to a
* user facing call of this stack. This could be careful tracking of user calls,
* and any transformation including optimizing transformations, user declared
* transformations, and lowering the IR.
*/
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
using ValueId = int32_t;
using StmtNameType = unsigned int;
constexpr StmtNameType kInvalidStmName =
std::numeric_limits<unsigned int>::max();
class Fusion;
class FusionGuard;
class Expr;
class Val;
class UnaryOp;
class BinaryOp;
class RNGOp;
class IterDomain;
class IrCloner;
class IrContainer;
class IrBuilderPasskey;
class IrContainerPasskey;
namespace kir {
class Kernel;
class Predicate;
} // namespace kir
// Passkey for container to register names with statements
class ExprPasskey {
friend class Expr;
private:
explicit ExprPasskey() {}
};
TORCH_CUDA_CU_API void swap(Fusion& a, Fusion& b) noexcept;
//! Statement is the highest level node representation. Everything that is
//! considered "IR" will be derived from this class at some point. Both Values
//! and Expr's are a Statement. If there will ever be any more fundamental
//! types, they will also derive from Statement.
//!
//! We use Statements to pass around nodes of unknown compile type. Therefore it
//! is also important for the design to have a dispatch system for a Statement.
//! Basically being able to succinctly traverse down the inheritance stack of
//! a Statement at runtime. This is currently implemented in dispatch.h
class TORCH_CUDA_CU_API Statement : public NonCopyable, public PolymorphicBase {
friend void swap(Fusion&, Fusion&) noexcept;
friend void swap(IrContainer& a, IrContainer& b) noexcept;
public:
Statement() = delete;
// Cloning constructor
Statement(const Statement* src, IrCloner* ir_cloner);
// Dispatch functions, definitions in dispatch.cpp
template <typename T>
static void dispatch(T handler, Statement*);
template <typename T>
static void constDispatch(T handler, const Statement* const);
template <typename T>
static void mutatorDispatch(T mutator, Statement*);
// Accessor functions to types. Vals always have a DataType, Exprs never do
virtual c10::optional<ValType> getValType() const {
return c10::nullopt;
}
virtual c10::optional<DataType> getDataType() const {
return c10::nullopt;
}
virtual c10::optional<ExprType> getExprType() const {
return c10::nullopt;
}
// Short cut to figure out if it is a value/expression
bool isVal() const {
return getValType() != c10::nullopt;
}
bool isExpr() const {
return getExprType() != c10::nullopt;
}
// Make sure this is a Val and return it as a Val*
Val* asVal();
// Make sure this is an Expr and return it as an Expr*
Expr* asExpr();
// Return the fusion this statement belongs to
Fusion* fusion() const;
// Return the kernel this statement belongs to
kir::Kernel* kernel() const;
// Return the container this statement belongs to
IrContainer* container() const {
return ir_container_;
}
// Return the int that represents its name
StmtNameType name() const {
return name_;
}
  // Set the statement's name. Typically the container will set the name;
  // however, if we're dealing with cloning, IrBuilder will set the name. This
  // maybe should come from IrCloner, but I didn't want to add another
// passkey.
void setName(IrContainerPasskey, StmtNameType name);
void setName(IrBuilderPasskey, StmtNameType name);
virtual bool sameType(const Statement* const other) {
if (isVal() && other->isVal())
return getValType().value() == other->getValType().value();
if (isExpr() && other->isExpr())
return getExprType().value() == other->getExprType().value();
return false;
}
// Return if this statement is the same as another statement
// TODO: should this run through dispatch on this and other?
virtual bool sameAs(const Statement* other) const {
return this == other;
}
std::string toString() const;
std::string toInlineString() const;
protected:
Statement(IrBuilderPasskey);
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
StmtNameType name_ = kInvalidStmName;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
IrContainer* ir_container_ = nullptr;
};
//! A Val represents a "value." These are objects, like tensors, scalars, and
//! memory locations, that are inputs and outputs of computations (represented
//! by Exprs, below)
//!
//! Vals are constant and unique and should always be passed
//! around as a pointer. Val can generally be thought of as representing any
//! type of data. Some examples: a constant size like a convolution filter
//! width, a runtime constant like batch normalization's momentum, a "symbolic"
//! tensor like one passed down from the JIT, or a memory buffer used in device
//! code
//!
//! Adding a Val:
//! Right now adding a Val is quite involved. Val's can be defined in ir.h or in
//! their own header file. The following is what is currently needed to add a
//! new Val:
//!
//! 1) Definition inheriting from Val
//! - Members must be private or protected
//! - Accessor functions for members
//! - Must call Val constructor, Val constructor registers with fusion
//! - Implementation of bool sameAs(...)
//! - Must implement a "cloning" constructor, ex.
//! Int::Int(const Int* src, IrCloner* ir_cloner)
//! 2) dispatch.h/.cpp must be updated to include dispatch of the new Val
//! 3) Default mutator function should be added to mutator.cpp
//! 4a) Printing functions should be added to ir_iostream.h/.cpp
//! 4b) Graphviz generation must be added to ir_graphviz.h/.cpp
//! 5) An enum value must be added to ValType in type.h
//! 6) A string entry must be added in val_type_string_map
//!
class TORCH_CUDA_CU_API Val : public Statement {
public:
explicit Val(
IrBuilderPasskey,
ValType _vtype,
DataType _dtype = DataType::Null);
Val(const Val* src, IrCloner* ir_cloner);
// Dispatch functions, definitions in dispatch.cpp
template <typename T>
static void dispatch(T handler, Val*);
template <typename T>
static void constDispatch(T handler, const Val* const);
template <typename T>
static void mutatorDispatch(T mutator, Val*);
c10::optional<ValType> getValType() const override {
return vtype_;
}
ValType vtype() const {
return vtype_;
}
DataType dtype() const {
return dtype_;
}
// Throws if no DataType is found. Vals must have a DataType
c10::optional<DataType> getDataType() const override;
bool isScalar() const {
return vtype_ == ValType::Scalar || vtype_ == ValType::NamedScalar;
}
// Returns if all dependencies are constant scalars
bool isConstScalar() const;
// Returns if all dependencies are constant integers
bool isConstInt() const;
bool isAnInt() const {
return isScalar() && dtype_ == DataType::Int;
}
bool isADouble() const {
return isScalar() && dtype_ == DataType::Double;
}
// If this Val is an integer with a direct constant value associated with it,
// will return the value of that constant integer. If this integer has
// defining expressions it will return a c10::nullopt. Those values should be
  // inferred using evaluateInt.
c10::optional<int64_t> getInt() const;
// If this Val is a double with a direct constant value associated with it,
// will return the value of that constant double. If this double has
// defining expressions it will return a c10::nullopt. Those values should be
  // inferred using evaluateDouble.
c10::optional<double> getDouble() const;
// If this Val is a constant integer, and its history is comprised only of
// constant values, will return the value of that constant integer. Cannot
// make constant as expression evaluator takes non-constant Vals.
int64_t evaluateInt();
// If this Val is a constant double, and its history is comprised only of
// constant values, will return the value of that constant double. Cannot
// make constant as expression evaluator takes non-constant Vals.
double evaluateDouble();
// Returns if no dependencies and is a constant scalar.
virtual bool isConst() const {
return false;
}
bool isZeroInt() const;
bool isOneInt() const;
// Returns the Expr that this value is an output of, returns nullptr if none
// was found
Expr* definition() const {
if (is_fusion_input_) {
return nullptr;
}
return definition_;
}
// Determine if value definition matches given expression type
bool isDefinitionType(ExprType expression_type) const;
const std::vector<Expr*>& uses() const;
bool isFusionInput() const {
return is_fusion_input_;
}
bool isFusionOutput() const {
return is_fusion_output_;
}
//! Returns true when other is a producer of this
bool isProducerOf(const Val* other) const;
//! Returns true when other is a consumer of this
bool isConsumerOf(const Val* other) const;
bool sameType(const Statement* other) override {
return Statement::sameType(other) &&
getDataType() == other->as<Val>()->getDataType();
}
// TODO: Make this more sophisticated. A value being the same as another value
  // should be evaluated based on the DAG that created it, and that DAG's leaf
// nodes
bool sameAs(const Statement* other) const override {
return this == other;
}
void setEvaluatorIndex(int to) {
TORCH_INTERNAL_ASSERT(evaluator_index_ == -1);
evaluator_index_ = to;
}
int evaluatorIndex() const {
return evaluator_index_;
}
// Following is managed by Fusion (or kirIrBuilder) and can change.
// TODO: Protect with a passkey.
void setDefinition(Expr* expr) {
definition_ = expr;
}
void resolveIndexDtype();
protected:
friend Fusion;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
const ValType vtype_;
// TODO: Add fusion passkey for this
void setIsFusionInput(bool is_fusion_input) {
is_fusion_input_ = is_fusion_input;
}
// TODO: Add fusion passkey for this
void setIsFusionOutput(bool is_fusion_output) {
is_fusion_output_ = is_fusion_output;
}
// TODO: Add fusion or container passkey for this
void setUses(const std::vector<Expr*>& uses) {
uses_ = uses;
}
private:
// There's only one instance where dtype can change, and that's through
// resolving the index data type from nvfuser to either Int or Int32 for
// welford operations.
DataType dtype_;
// Following is managed by Fusion and can change.
bool is_fusion_input_ = false;
bool is_fusion_output_ = false;
Expr* definition_ = nullptr;
std::vector<Expr*> uses_;
// Expr evaluator idx;
int evaluator_index_ = -1;
};
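//! A minimal sketch of the constant accessors (illustrative only; assumes an
//! active Fusion/FusionGuard, and uses the arith helper `mul`):
//!
//!   Val* two = IrBuilder::create<Int>(2);
//!   Val* four = mul(two, two);  // `four` now has a defining expression
//!   two->getInt();              // optional holding 2 (direct constant)
//!   four->getInt();             // c10::nullopt, since four is defined
//!   four->evaluateInt();        // 4, folded from its constant-only history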
//! An Expr represents a "computation." These are functions that take inputs
//! and produce outputs, inputs and outputs all being Vals. There are
//! specializations of BinaryOp which takes 2 inputs and produces 1 output, and
//! UnaryOp which takes 1 input and produces 1 output. Exprs are unique and
//! immutable. Conceptually, Exprs could always be manipulated using unique
//! pointers, and we could add this later. However, for now Exprs can be
//! replaced in a fusion, but they cannot be modified in place.
//!
//! The IR is static single assignment (SSA). Values can only be defined as an
//! output of an Expr once. If they are re-defined the original definition is
//! deleted from the program, as opposed to an ordered redefinition of the
//! value in the program.
//!
//! Note: Registering an Expr with a Fusion is actually 2 parts, one part is
//! done in the Expr constructor, so that should be called on anything that
//! inherits Expr. The issue with having registration in Expr's constructor is
//! that the constructor of an Expr will set outputs and inputs. This
//! information is important for registration with Fuser, so it can track the
//! dependency chain.
//!
//! Adding an Expr:
//! Right now adding an Expr is quite involved. Expr's can be defined in ir.h
//! or in their own header file. The following is what is currently needed for
//! Expr definitions:
//!
//! 1) Definition inheriting from Expr.
//! - Members must be private or protected
//! - Accessor functions for members
//! - Constructors need to register with the Fusion after inputs/outputs
//! are defined
//! - Implementation of bool sameAs(...)
//! 2) dispatch.h/.cpp must be updated to include dispatch of the new Val
//! 3) Default mutator function should be added to mutator.h/.cpp
//! 4) Printing functions should be added to ir_iostream.h/.cpp
//! 5) Lower case convenience functions should be added to arith.h/.cpp (If
//! user facing)
//! 6) An enum value must be added to ExprType in type.h
//! 7) A string entry must be added in expr_type_string_map
//! 8) Entry added to ir_graphviz .cpp/.h
//!
class TORCH_CUDA_CU_API Expr : public Statement {
public:
explicit Expr(IrBuilderPasskey, ExprType type);
Expr(const Expr* src, IrCloner* ir_cloner);
  // Creates a new instance of the expression with all its fields copied.
  // Note that unlike IrCloner, this function only does a shallow copy
virtual Expr* shallowCopy() const = 0;
c10::optional<ExprType> getExprType() const override {
return etype_;
}
ExprType etype() const {
return etype_;
}
bool sameAs(const Statement* other) const override;
// Input/output accessors
const auto& inputs() const {
return inputs_;
}
const auto& outputs() const {
return outputs_;
}
auto input(size_t index) const {
return inputs_[index];
}
auto output(size_t index) const {
return outputs_[index];
}
// Dispatch functions, definitions in dispatch.cpp
template <typename T>
static void dispatch(T handler, Expr*);
template <typename T>
static void constDispatch(T handler, const Expr* const);
template <typename T>
static void mutatorDispatch(T mutator, Expr*);
// TODO: Protect based on being in kernel container
kir::Predicate* predicate() const;
  // Creates a shallow copy of the expression with the given predicate attached.
// TODO: Protect based on being in kernel container
Expr* withPredicate(kir::Predicate* predicate);
// TODO: Protect based on being in kernel container
kir::Predicate* writePredicate() const;
  // Creates a shallow copy of the expression with the given write-predicate
// attached.
// TODO: Protect based on being in kernel container
Expr* withWritePredicate(kir::Predicate* write_predicate);
protected:
// TODO: Protect based on being in kernel container
void setPredicate(kir::Predicate* predicate);
// TODO: Protect based on being in kernel container
void setWritePredicate(kir::Predicate* write_predicate);
void copyPredicatesFrom(const Expr* expr);
// TODO: Add Fusion passkey
void addInput(Val* input) {
TORCH_INTERNAL_ASSERT(input != nullptr);
inputs_.push_back(input);
}
// TODO: Add Fusion passkey
void addOutput(Val* output) {
TORCH_INTERNAL_ASSERT(output != nullptr);
outputs_.push_back(output);
}
ExprPasskey exprPasskey() {
return ExprPasskey();
}
private:
ExprType etype_ = ExprType::Invalid;
std::vector<Val*> inputs_;
std::vector<Val*> outputs_;
kir::Predicate* predicate_ = nullptr;
// Only used for reduction-related expressions
kir::Predicate* write_predicate_ = nullptr;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 16,319
| 30.085714
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_builder.h
|
#pragma once
#include <fusion.h>
#include <ir_all_nodes.h>
#include <ir_container.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace kir {
class Kernel;
}
class IrCloner;
// Passkey for builder to register properties with statements, and to call
// functions in IrContainer
class TORCH_CUDA_CU_API IrBuilderPasskey {
friend class IrBuilder;
public:
// TODO: Collapse ir_container and Kernel once Kernel inherits from
// IrContainer
IrContainer* const ir_container_ = nullptr;
private:
explicit IrBuilderPasskey(IrContainer* ir_container);
};
//! IR builder interface
class TORCH_CUDA_CU_API IrBuilder {
public:
//! Allocate a new IR node, forwarding the arguments to the appropriate
//! constructor and registering with the container
template <class T, class... Args>
static T* create(Args&&... args) {
auto container = FusionGuard::getCurFusion();
// return create<T>(container, std::forward<Args>(args)...);
TORCH_INTERNAL_ASSERT(
container != nullptr, "Need an active container to build IR.");
T* node = new T(IrBuilderPasskey(container), std::forward<Args>(args)...);
container->registerStmt(IrBuilderPasskey(container), node);
return node;
}
//! Allocate a new IR node, forwarding the arguments to the appropriate
//! constructor and registering with the container
template <class T, class... Args>
static T* create(IrContainer* container, Args&&... args) {
TORCH_INTERNAL_ASSERT(
container != nullptr, "Need an active container to build IR.");
T* node = new T(IrBuilderPasskey(container), std::forward<Args>(args)...);
container->registerStmt(IrBuilderPasskey(container), node);
return node;
}
//! Clone an IR node, forwarding the arguments to the IrCloner constructor.
//! Register clones with IrCloner's target container.
template <class T>
static T* clone(const T* src, IrCloner* ir_cloner);
// Unary operations
static Val* negExpr(Val* val);
static Val* notExpr(Val* val);
static Val* setExpr(Val* val);
static Val* setExprNamedScalar(const std::string& name, Val* val);
static Val* addressExprNamedScalar(const std::string& name, Val* val);
// Binary operations
static Val* andExpr(Val* lhs, Val* rhs);
static Val* eqExpr(Val* lhs, Val* rhs);
static Val* gtExpr(Val* lhs, Val* rhs);
static Val* ltExpr(Val* lhs, Val* rhs);
static Val* leExpr(Val* lhs, Val* rhs);
static Val* geExpr(Val* lhs, Val* rhs);
static Val* addExpr(Val* lhs, Val* rhs);
static Val* subExpr(Val* lhs, Val* rhs);
static Val* mulExpr(Val* lhs, Val* rhs);
static Val* divExpr(Val* lhs, Val* rhs);
static Val* ceilDivExpr(Val* lhs, Val* rhs);
static Val* modExpr(Val* lhs, Val* rhs);
static Val* maxExpr(Val* lhs, Val* rhs);
static Val* minExpr(Val* lhs, Val* rhs);
// Ternary operations
static Val* whereExpr(Val* pred, Val* lhs, Val* rhs);
// Swizzle operations
static Val* swizzle2DIntExpr(
Val* x,
Val* y,
Val* extent_x,
Val* extent_y,
Swizzle2DType swizzle_type);
static Val* pairSelectExpr(Val* in, kir::PairSelect::Selection sel);
private:
static Val* newResult(DataType dtype);
static Val* newArithmeticExpr(BinaryOpType op_type, Val* lhs, Val* rhs);
static Val* newLogicExpr(BinaryOpType op_type, Val* lhs, Val* rhs);
};
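//! A minimal creation sketch (illustrative only; assumes an active
//! FusionGuard so that FusionGuard::getCurFusion() returns a container):
//!
//!   Val* a = IrBuilder::create<Int>(3); // registered with the container
//!   Val* b = IrBuilder::create<Int>(4);
//!   Val* c = IrBuilder::addExpr(a, b); // result of a new BinaryOp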
//! A wrapper builder with static expression simplification
//!
//! Example:
//! - addExpr(new Int(1), new Int(2)) -> Int(3)
//! - addExpr(new Int(0), new NamedScalar("foo")) -> NamedScalar("foo")
//!
//! Designed to be used to simplify predicate and index expressions in
//! generated code. Also, the shift validation may fail without
//! this simplification.
class TORCH_CUDA_CU_API SimplifyingIrBuilder : public IrBuilder {
public:
static Val* negExpr(Val* val);
static Val* notExpr(Val* val);
static Val* addExpr(Int* lhs, Int::ScalarType rhs);
static Val* addExpr(Val* lhs, Int::ScalarType rhs);
static Val* addExpr(Int* lhs, Int* rhs);
static Val* addExpr(Val* lhs, Val* rhs);
static Val* subExpr(Val* lhs, Val* rhs);
static Val* mulExpr(Int* lhs, Int::ScalarType rhs);
static Val* mulExpr(Val* lhs, Int::ScalarType rhs);
static Val* mulExpr(Int* lhs, Int* rhs);
static Val* mulExpr(Val* lhs, Val* rhs);
static Val* andExpr(Val* lhs, Val* rhs);
static Val* maxExpr(Val* lhs, Val* rhs);
static Val* minExpr(Val* lhs, Val* rhs);
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 4,495
| 30.886525
| 78
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_cloner.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <ir_builder.h>
#include <unordered_map>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class IrContainer;
//! Clones nodes from an existing Fusion
//!
//! \warning IrCloner machinery is a specialized helper for implementing
//! Fusion copy operations and the limited scope of RecomputeTv below.
//! It is not intended for any other uses.
//!
class TORCH_CUDA_CU_API IrCloner : private OptInConstDispatch {
friend class Statement;
friend class IrBuilder;
public:
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
explicit IrCloner(IrContainer* container);
Statement* clone(const Statement* statement);
template <class T>
T* clone(const T* node) {
return node ? clone(node->template as<Statement>())->template as<T>()
: nullptr;
}
template <class T>
std::vector<T*> clone(const std::vector<T*>& container) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<T*> copy;
copy.reserve(container.size());
for (auto p : container) {
copy.push_back(clone(p));
}
return copy;
}
IrContainer* container() const {
return ir_container_;
}
protected:
void registerClone(const Statement* src, Statement* clone);
void handle(const Statement*) override;
void handle(const Val*) override;
void handle(const Expr*) override;
void handle(const TensorDomain*) override;
void handle(const TensorView*) override;
void handle(const IterDomain*) override;
void handle(const Bool*) override;
void handle(const Double*) override;
void handle(const Int*) override;
void handle(const ComplexDouble*) override;
void handle(const NamedScalar*) override;
void handle(const FullOp*) override;
void handle(const ARangeOp*) override;
void handle(const EyeOp*) override;
void handle(const UnaryOp*) override;
void handle(const BinaryOp*) override;
void handle(const TernaryOp*) override;
void handle(const RNGOp*) override;
void handle(const BroadcastOp*) override;
void handle(const ReductionOp*) override;
void handle(const GroupedReductionOp*) override;
void handle(const WelfordOp*) override;
void handle(const LoadStoreOp*) override;
void handle(const MmaOp*) override;
void handle(const TransposeOp*) override;
void handle(const ExpandOp*) override;
void handle(const ShiftOp*) override;
void handle(const GatherOp*) override;
void handle(const ViewAsScalar*) override;
void handle(const ViewOp*) override;
void handle(const Split*) override;
void handle(const Merge*) override;
void handle(const Swizzle2D*) override;
protected:
// We keep track of the original -> clone map so we don't
// duplicate clones of the same object if referenced multiple times
std::unordered_map<const Statement*, Statement*> clones_map_;
private:
// The destination Fusion container
IrContainer* ir_container_ = nullptr;
// The dispatch interface doesn't allow returning values from
  // individual `handle()` methods, so they store the
  // result here
Statement* clone_ = nullptr;
// Builder to make all the new nodes
IrBuilder builder_;
};
// Replicates all expressions used to generate the provided TensorView. Does not
// replicate inputs. Does not replicate scalar values. In other words the value
// provided will be recomputed from the inputs of the fusion.
class RecomputeTv : private IrCloner {
public:
// Replicates expressions and values in provided expressions.
static TensorView* recompute(TensorView* tv);
private:
RecomputeTv(Fusion* fusion, std::vector<Expr*> exprs);
void handle(const TensorDomain*) final;
Fusion* fusion_;
};
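// A minimal sketch (illustrative only; `tv` is a hypothetical TensorView in
// the active fusion):
//
//   TensorView* fresh = RecomputeTv::recompute(tv);
//   // `fresh` is defined by duplicated expressions, recomputed from the
//   // fusion inputs rather than sharing tv's definition.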
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 3,832
| 27.819549
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_container.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_base_nodes.h>
#include <utils.h>
#include <deque>
#include <unordered_map>
#include <unordered_set>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class IrBuilderPasskey;
class ExprPasskey;
class OptOutMutator;
class Int;
class Bool;
class NamedScalar;
// Passkey for container to register names with statements
class IrContainerPasskey {
friend class IrContainer;
private:
explicit IrContainerPasskey() {}
};
class TORCH_CUDA_CU_API IrContainer : public PolymorphicBase {
public:
IrContainer();
IrContainer(const IrContainer& other);
IrContainer(IrContainer&& other) noexcept;
IrContainer& operator=(const IrContainer& other);
IrContainer& operator=(IrContainer&& other) noexcept;
virtual ~IrContainer();
bool inContainer(const Statement* stmt) const;
void assertInContainer(const Statement* stmt, const std::string& msg) const {
TORCH_CHECK(
inContainer(stmt), msg, " it was not found in the active container.");
}
//! Return in insertion order
const std::deque<Val*> deterministic_vals() const noexcept {
std::deque<Val*> vals_deque;
std::transform(
vals_up_.begin(),
vals_up_.end(),
std::back_inserter(vals_deque),
[](const std::unique_ptr<Val>& val_up) { return val_up.get(); });
return vals_deque;
}
//! Register the Statement with this container
virtual void registerStmt(IrBuilderPasskey, Statement* stmt);
//! Register the Val with this container
virtual void registerVal(IrBuilderPasskey, Val* val);
//! Register expr with this container.
virtual void registerExpr(IrBuilderPasskey, Expr* expr);
//! Allow expr's to register themselves with a container, this is only used
//! for broadcastOp so it can register itself in its constructor so root maps
//! can be built.
virtual void registerExpr(ExprPasskey, Expr* expr);
//! Return the set of Exprs registered with this fusion. Warning: This will
//! return exprs outside inputs/outputs, so can be unsafe for use with
//! segmented fusions.
const std::unordered_set<Expr*>& unordered_exprs() const noexcept {
return exprs_;
}
//! Return the set of Vals registered with this fusion
const std::unordered_set<Val*>& vals() const noexcept {
return vals_;
}
// Shortcuts for frequently used vals
Int* zeroVal();
Int* oneVal();
Bool* falseVal();
Bool* trueVal();
NamedScalar* magicZeroVal();
protected:
static IrCloner copy(const IrContainer* from, IrContainer* to);
friend void swap(IrContainer& a, IrContainer& b) noexcept;
// Let mutator remove Exprs.
friend OptOutMutator;
virtual void removeExpr(Expr* expr);
//! Completely remove val from the fusion, break all dependencies associated
//! with it
virtual void removeVal(Val* val);
//! Register the Val with this container
virtual void registerVal(Val* val);
//! Register expr with this container.
virtual void registerExpr(Expr* expr);
StmtNameType getValName(ValType vtype) {
if (val_type_name_map_.find(vtype) == val_type_name_map_.end()) {
val_type_name_map_[vtype] = 0;
}
return val_type_name_map_[vtype]++;
}
StmtNameType getExprName() {
return expr_name_counter_++;
}
void clear() noexcept;
// Deque of unique pointer is the memory owning data structure
std::deque<std::unique_ptr<Val>> vals_up_;
// A convenient set to return when we just need an unordered set to do
// something like check if a Val is in this container
std::unordered_set<Val*> vals_;
// A deque of unique pointers is the memory-owning data structure
std::deque<std::unique_ptr<Expr>> exprs_up_;
// A convenient set to return when we just need an unordered set to do
// something like check if an Expr is in this container
std::unordered_set<Expr*> exprs_;
// Used to implement a generic "inContainer" that can be passed an invalid
// pointer. Specifically a pointer to a Statement owned by another container
// that has been freed. We can't check normally with the unordered_sets we
// already have because it would require a const_cast from a constant
// expr/val, or a dynamic cast from a Statement.
std::unordered_set<void*> raw_ptrs_;
// Values names counters
std::unordered_map<ValType, StmtNameType, TypeHash> val_type_name_map_;
// Expression names counter
StmtNameType expr_name_counter_ = 0;
// Manually store some persistent, frequently used nodes. It's very
// challenging to do this any way but manually, as detecting when a container
// may or may not have one of these vals is tricky. Specifically, if the
// container doesn't own it, it's hard to tell from the outside whether the
// node may have been removed and then re-registered. It can also be tricky
// to know when we're using a different container: as FusionCopy_test
// demonstrates, deleting and then creating containers can result in the same
// pointer for the container.
std::unique_ptr<Bool> true_val_;
std::unique_ptr<Bool> false_val_;
std::unique_ptr<Int> one_val_;
std::unique_ptr<Int> zero_val_;
std::unique_ptr<NamedScalar> magic_zero_val_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 5,280
| 29.177143
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_graphviz.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Generates a DOT (https://www.graphviz.org) graph
// representation of a fuser IR
//
// Usage:
// 1) Add calls to IrGraphGenerator::print(), for example:
// `IrGraphGenerator::print(&fusion, "ir.dot")`
//
// 2) Call IrGraphGenerator::print() from a debugger. Using gdb for example:
// `call IrGraphGenerator::print(&fusion, "ir.dot",
// IrGraphGenerator::DetailLevel::Explicit)`
//
// Notes:
// - When called from the debugger, the detail_level must be
// explicitly passed in (most debuggers don't support default arguments)
//
// - The output dot file path can't include shell-specific notation,
// for example you can't use "~/temp/ir.dot" ("/home/user/temp/ir.dot"
// must be used instead)
//
class TORCH_CUDA_CU_API IrGraphGenerator : private OptInConstDispatch {
public:
enum class DetailLevel {
ComputeOnly, // Only dataflow (compute) nodes
Basic, // Compute + schedule, with minimal details (default)
Explicit, // Additional details (ex. symbolic names for scalar constants)
Verbose, // Includes all values and dead definitions
};
using ExprColorMap = std::unordered_map<const Expr*, size_t>;
public:
static void print(
const Fusion* fusion,
const char* filename,
DetailLevel detail_level = DetailLevel::Basic,
ExprColorMap* expr_color_map = nullptr);
static std::string toGraphviz(
const Fusion* fusion,
DetailLevel detail_level,
ExprColorMap* expr_color_map = nullptr);
private:
IrGraphGenerator(
const Fusion* fusion,
DetailLevel detail_level,
ExprColorMap* expr_color_map = nullptr);
~IrGraphGenerator() override = default;
std::string generate();
void generateComputeGraph();
void generateScheduleGraph();
void handle(const Statement*) override;
void handle(const Val*) override;
void handle(const Expr*) override;
void handle(const TensorDomain*) override;
void handle(const TensorView*) override;
void handle(const IterDomain*) override;
void handle(const Bool*) override;
void handle(const Double*) override;
void handle(const Int*) override;
void handle(const ComplexDouble*) override;
void handle(const NamedScalar*) override;
void handle(const FullOp*) override;
void handle(const ARangeOp*) override;
void handle(const EyeOp*) override;
void handle(const UnaryOp*) override;
void handle(const BinaryOp*) override;
void handle(const TernaryOp*) override;
void handle(const RNGOp*) override;
void handle(const BroadcastOp*) override;
void handle(const ReductionOp*) override;
void handle(const Split*) override;
void handle(const Merge*) override;
// lookup the graph id, creating one if not found
std::string getid(const Statement* stm);
bool visited(const Statement* s) const {
return visited_.find(s) != visited_.end();
}
void addArc(
const Statement* src,
const Statement* dst,
const std::string& style = "");
void printExpr(const Expr* expr, const std::string& label);
void printValue(const Val* val, const std::string& label);
private:
const DetailLevel detail_level_;
const Fusion* const fusion_;
std::stringstream graph_def_;
std::unordered_map<const Statement*, std::string> id_map_;
std::unordered_set<const Statement*> visited_;
std::unordered_set<const Val*> inputs_;
std::unordered_set<const Val*> outputs_;
std::vector<const TensorView*> tensor_views_;
std::vector<std::string> arcs_;
int next_id_ = 1;
ExprColorMap* expr_color_map_ = nullptr;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 3,844
| 28.351145
| 77
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_interface_nodes.h
|
#pragma once
#include <c10/macros/Export.h>
#include <fusion.h>
#include <ir_base_nodes.h>
#include <ir_internal_nodes.h>
#include <mma_type.h>
#include <torch/csrc/jit/ir/ir.h>
//! Nodes in here are intended to be "user facing", users in this sense being
//! those that want to be able to generate CUDA code.
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class WelfordResult;
class ViewTransform;
class IrCloner;
class IrBuilderPasskey;
//! A Bool value
//!
//! This value can be a symbolic value (defined after the kernel
//! is compiled) or a constant value (inlined into the kernel definition).
//!
class TORCH_CUDA_CU_API Bool : public Val {
public:
Bool(IrBuilderPasskey passkey);
explicit Bool(IrBuilderPasskey passkey, bool value);
explicit Bool(IrBuilderPasskey passkey, c10::optional<bool> value);
Bool(const Bool* src, IrCloner* ir_cloner);
bool isSymbolic() const {
return !(maybe_value_.has_value());
}
bool isConst() const final {
return maybe_value_.has_value();
}
c10::optional<bool> value() const {
return maybe_value_;
}
bool sameAs(const Statement* other) const override;
private:
const c10::optional<bool> maybe_value_;
};
//! A Float64 value. This value can be a symbolic value (defined after the
//! kernel is compiled) or a constant value (inlined into the kernel
//! definition).
class TORCH_CUDA_CU_API Double : public Val {
public:
using ScalarType = double;
Double(IrBuilderPasskey passkey);
explicit Double(IrBuilderPasskey passkey, ScalarType value);
explicit Double(IrBuilderPasskey passkey, c10::optional<ScalarType> value);
Double(const Double* src, IrCloner* ir_cloner);
bool isSymbolic() const {
return !(maybe_value_.has_value());
}
bool isConst() const final {
return maybe_value_.has_value();
}
c10::optional<ScalarType> value() const {
return maybe_value_;
}
bool sameAs(const Statement* other) const override;
private:
const c10::optional<ScalarType> maybe_value_;
};
//! An Int64 value. If used for indexing it's set as size_t. Otherwise it's an
//! inlined literal in the kernel.
class TORCH_CUDA_CU_API Int : public Val {
public:
using ScalarType = int64_t;
Int(IrBuilderPasskey passkey);
explicit Int(IrBuilderPasskey passkey, ScalarType value);
explicit Int(IrBuilderPasskey passkey, c10::optional<ScalarType> value);
Int(const Int* src, IrCloner* ir_cloner);
bool isSymbolic() const {
return !(maybe_value_.has_value());
}
bool isConst() const final {
return maybe_value_.has_value();
}
c10::optional<ScalarType> value() const {
return maybe_value_;
}
bool sameAs(const Statement* other) const override;
private:
const c10::optional<ScalarType> maybe_value_;
};
//! A c10::complex<double> value. This value can be a symbolic value (defined
//! after the kernel is compiled) or a constant value (inlined into the kernel
//! definition).
class TORCH_CUDA_CU_API ComplexDouble : public Val {
public:
using ScalarType = c10::complex<double>;
ComplexDouble(IrBuilderPasskey passkey);
explicit ComplexDouble(IrBuilderPasskey passkey, ScalarType value);
explicit ComplexDouble(
IrBuilderPasskey passkey,
c10::optional<ScalarType> value);
ComplexDouble(const ComplexDouble* src, IrCloner* ir_cloner);
bool isSymbolic() const {
return !(maybe_value_.has_value());
}
bool isConst() const final {
return maybe_value_.has_value();
}
c10::optional<ScalarType> value() const {
return maybe_value_;
}
bool sameAs(const Statement* other) const override;
private:
const c10::optional<ScalarType> maybe_value_;
};
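//! A minimal sketch of querying the scalar wrappers above (a sketch, not from
//! the original source; `val` is a hypothetical Int*):
//!   if (val->isConst()) {
//!     int64_t v = val->value().value(); // constant, inlined into the kernel
//!   } else {
//!     // symbolic: the value is bound when the kernel runs
//!   }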
//! Mode during propagation of computeAt: Standard will throw an error if the
//! computeAt position provided can't be satisfied; BestEffort will lower the
//! computeAt position as needed during traversal; MostInlined will increase
//! the computeAt position to the maximum possible through traversal.
enum class ComputeAtMode { Standard, BestEffort, MostInlined };
class TransformPropagator;
struct MostInlinedTransformPropagator;
class TransformIter;
class TransformReplay;
class OptOutMutator;
class TensorDomain;
class MaxPosCalculator;
namespace ir_utils {
class TVDomainGuard;
}
//! TensorView is our primitive Tensor Type used in code generation. It can be
//! thought of as representing physical memory, however, its dimensionality is
//! modified as split/merge/computeAt functions are called. The history of
//! these transformations is kept and used for generating actual code
//! referencing physical memory. Generally when users are thinking of code
//! generation in reference to a Tensor, this is the class they should be
//! interacting with.
//!
//! The reason we need both TensorView and TensorDomain is that we need to have
//! a record of both what is being computed and how it is being computed. For
//! example we may have the operation:
//!
//! TV3[I, J, K] = TV2[I, J, K] + TV1[I, J, K]
//!
//! The mathematical operations here are on the tensor views TV1, TV2, and
//! TV3. This operation is a pointwise operation. To compute this pointwise
//! operation we iterate over the 3D TensorDomain [I, J, K], where K is the
//! fastest changing dimension.
//!
//! \todo Need to work on the const model for TensorView, making all functions
//! that should be const, const. Gave this a try but expanded really quickly.
//! getComputeAtAxis not being const because it can return a TV that some expect
//! to be non-const is the biggest headache.
//!
class TORCH_CUDA_CU_API TensorView : public Val {
public:
TensorView(
IrBuilderPasskey passkey,
TensorDomain* domain,
DataType dtype,
MemoryType mtype = MemoryType::Local);
explicit TensorView(
IrBuilderPasskey passkey,
const std::shared_ptr<c10::TensorType>& tensor_type);
explicit TensorView(
IrBuilderPasskey passkey,
const std::shared_ptr<Value>& jit_value);
TensorView(const TensorView* src, IrCloner* ir_cloner);
TensorDomain* domain() const {
return domain_;
}
//! This is for a TensorView with an rFactor domain that is an input to a
//! fusion segment. We convert the rfactor domain into a new root domain.
//! Any dynamic-sized rfactor iterDomains are given a new symbolic extent.
//! Concrete integer extents are kept. Output TensorViews of any subsequent
//! expressions that use this TensorView are also updated.
void convertRfactorToRootDomain();
void setContiguity(const std::vector<bool>& contig) {
domain()->setContiguity(contig);
}
void setContiguity(bool contig) {
setContiguity(std::vector<bool>(domain()->contiguity().size(), contig));
}
bool hasReduction() const;
bool hasBlockReduction() const;
bool hasGridReduction() const;
bool hasBroadcast() const;
bool hasRFactor() const;
//! This is the previous hasReduction logic,
//! kept here exclusively for the lower loop pass; it will be
//! deprecated when the Fusion IR pass can convert
//! trivial reductions
bool hasAnyReduction() const;
//! Returns true if this tensor is zero dimensional,
//! i.e. a wrapped scalar or an empty placeholder.
bool isZeroDim() const {
return nDims() == 0;
}
//! Returns true if this tensor does not contain
//! any value.
bool isEmptyTensor() const;
c10::optional<unsigned int> getReductionAxis() const;
const std::vector<IterDomain*>& getRootDomain() const;
const std::vector<IterDomain*>& getRFactorDomain() const;
// If rfactor domain exists in domain() return it, otherwise return root
// domain.
const std::vector<IterDomain*>& getMaybeRFactorDomain() const;
IterDomain* axis(int pos) const;
// Does it share outer axes with other tensors?
bool hasComputeAt() const {
return compute_at_pos_ > 0;
}
bool hasMaxProducerPosition() const {
return max_producer_pos_ > 0;
}
size_t nDims() const;
// sets cpu_scalar_ value, which is special handling for CPU based zero-dim
// tensors (i.e. CPU Tensors that only have one value). This is only used on
// an input value, otherwise ignored. This is important as special handling
// because these "scalars" should be type promoted as a tensor, but we want to
// avoid explicit copying of the data, so we want to pass the data value as a
// standard kernel argument value.
void setCpuScalar(bool is_cpu_scalar);
// returns cpu_scalar_ value, which is special handling for CPU based zero-dim
// tensors (i.e. CPU Tensors that only have one value). This is only used on
// an input value, otherwise ignored. This is important as special handling
// because these "scalars" should be type promoted as a tensor, but we want to
// avoid explicit copying of the data, so we want to pass the data value as a
// standard kernel argument value.
bool isCpuScalar() const {
return cpu_scalar_;
}
// Returns the position that this tensor is produced at relative to its axes.
unsigned int getComputeAtPosition() const {
return compute_at_pos_;
}
// Returns the maximum position at which producers are being computed relative
// to this tensor. This position dictates the clear expectations of producers.
unsigned int getMaxProducerPosition() const {
return max_producer_pos_;
}
//! This is used when we disconnect a tensorview from a reduction
//! operation and connect it to a non-reduction operator. We need
//! to remove the reduction ids on the tv in this case.
//! Currently only used in translate welford, and this function may
//! be refactored or extended if any more use cases appear.
void clearReductionIterDomains();
//! Compute this TensorView relative to a consumer position, -1 will
//! compute tensors inline with each other, 0 doesn't share
//! any loop nests between the tensors. It's an error when the given
//! position is not legally viable. Alternatively, when the mode
//! parameter is ComputeAtMode::BestEffort, the position is lowered
//! one by one until a valid position is found. When
//! ComputeAtMode::MostInlined is given, the position parameter is
//! ignored, and the deepest possible position is searched.
TensorView* computeAt(
TensorView* consumer,
int position,
ComputeAtMode mode = ComputeAtMode::Standard);
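//! A minimal usage sketch (a sketch, not from the original source; tv0 and
//! tv1 are hypothetical TensorViews, with tv0 a producer of tv1):
//!   tv0->computeAt(tv1, -1); // inline tv0 into tv1 as deeply as possible
//!   tv0->computeAt(tv1, 0);  // share no loop nests with tv1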
//! Compute this tensor to consumer, at local position, -1 will compute
//! tensors inline with each other, 0 doesn't share any loop nests between the
//! tensors. The mode parameter can be used in the same manner as computeAt.
TensorView* computeWith(
TensorView* consumer,
int position,
ComputeAtMode mode = ComputeAtMode::Standard);
// Split "axis" into 2 axes
//! inner_split dictates if the factor section of the split should be inside
//! the remainder or outside.
//! e.g. split(0, 4, inner_split = true) will result in:
//! tv[id{extent}] -> tv[id{ceilDiv(extent, factor)}, id{factor}]
//! e.g. split(0, 4, inner_split = false) will result in:
//! tv[id{extent}] -> tv[id{factor}, id{ceilDiv(extent, factor)}]
//!
//! When trim_out_of_bounds is true, only the inner domain defined by the
//! start and stop positions is split.
TensorView* split(
int axis,
unsigned int factor,
bool inner_split = true,
bool trim_out_of_bounds = false);
// Split "axis" into 2 axes where the inner axes is size of "factor"
// and outer axis is size axis.size() / factor. Factor can be a symbolic
// value instead of constant. This requires setting the symbolic value as an
// input, or using a parallel dim from NamedScalar::getParallelDim
TensorView* split(
int axis,
Val* factor,
bool inner_split = true,
bool trim_out_of_bounds = false);
// Merge axis_o and axis_i into 1 IterDomain
TensorView* merge(int axis_o, int axis_i);
// Merge axis and axis+1 into 1 IterDomain
TensorView* merge(int axis) {
return merge(axis, axis + 1);
}
// Reorder axes according to old2new[old_pos] = new_pos
TensorView* reorder(const std::unordered_map<int, int>& old2new);
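// A minimal reorder sketch (a sketch, not from the original source; tv is a
// hypothetical 3-D TensorView):
//   tv->reorder({{0, 2}, {2, 0}}); // swap the first and last axes
// Axes missing from the map are assumed not to be affected (see
// normalizeOld2New in ir_utils.h).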
//! Swizzle indices to improve memory access efficiency.
//!
//! Swizzle::Transpose is a pattern commonly used to avoid bank
//! conflicts in shared memory. It takes two axes and shifts the
//! second axis by the first axis as ((axis1 + axis2) % extent). The
//! memory type must be Shared.
//!
//! \input type Swizzle pattern such as transpose.
//! \input axes Axes to swizzle
TensorView* swizzle(SwizzleType type, const std::vector<int>& axes);
//! Swizzle the rectangular tile defined by the iterdomains corresponding
//! to the 2 given indices.
TensorView* swizzle(
Swizzle2DType swizzle_type,
int x,
int y,
SwizzleMode swizzle_mode = SwizzleMode::Data);
// WARNING: rFactor does not return this TensorView, it returns a new
// TensorView consumed by this!
//
// Take reduction axes out of this domain, and create a new
// domain. The new domain will be used to create this domain.
//
// For example:
// TV1[I0, R1, R2, I3] = TV0[I0, I1, I2, I3]
//
// After:
// TV1->rfactor({1}), TV1 is transformed to -> TV1[I0, R2, I3]
//
// The TensorView returned is: TV2[I0, R1, I2, I3]
//
// The reduction will now be set as:
// TV2[I0, R1, I2, I3] = TV0[I0, I1, I2, I3]
// TV1[I0, R2, I3] = TV2[I0, R1, I2, I3]
//
TensorView* rFactor(const std::vector<int>& axes);
//! Multi-output version of rFactor, semantically similar to
//! the reduction version except that the rfactor is done
//! for all outputs in a consistent way
std::vector<TensorView*> rFactor(
const std::vector<int>& axes,
const std::vector<TensorView*>& tvs);
//! Create a TensorView before the original tensor. A common use case is to
//! write results into shared memory or registers before moving to global
//! memory. Analogous to TVM Cache_Write
//!
//! @param cache_op: memory operator to use for the inserted op between
//! the data tensor and the cache tensor
TensorView* cacheBefore(
c10::optional<LoadStoreOpType> cache_op = c10::nullopt);
//! Create a TensorView after the original tensor. A common use case is to
//! read tensor into shared memory or registers. Analogous to TVM Cache_Read
//!
//! @param cache_op: memory operator to use for the inserted op between
//! the data tensor and the cache tensor
TensorView* cacheAfter(
c10::optional<LoadStoreOpType> cache_op = c10::nullopt);
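// A minimal caching sketch (a sketch, not from the original source; `out` is
// a hypothetical fusion output TensorView):
//   TensorView* staged = out->cacheBefore(); // results staged before `out`
//   staged->setMemoryType(MemoryType::Shared); // stage through shared memory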
// For a fusion output with other uses, we want to avoid writing to global
// memory and then reading the output again. We write to global memory
// separately after an operation. We replace this fusion output with the
// direct write TensorView.
TensorView* cacheFork();
MemoryType getMemoryType() const {
return memory_type_;
}
void setMemoryType(MemoryType mt);
SwizzleType swizzleType() const {
return swizzle_type_;
}
const std::vector<IterDomain*>& axesToSwizzle() const {
return axes_to_swizzle_;
}
// Apply double buffering transformation
void doubleBuffer();
// Apply circular buffering transformation
void circularBuffer(unsigned int number_of_stage);
// Returns true if this tensor is double buffered.
bool isDoubleBuffered() const {
return is_double_buffered_;
}
// Returns true if this tensor is circular buffered.
bool isCircularBuffered() const {
return is_circular_buffered_;
}
// Returns the depth of circular buffering if applicable.
unsigned int circularBufferDepth() const {
TORCH_INTERNAL_ASSERT(
is_circular_buffered_, toString(), "not circular buffered");
return circular_buffer_stage_;
}
//! Transforms the innermost iterdomains according to the given mma swizzle,
//! this should be used on the tvs that are either inputs/outputs of an
//! MmaOp, or any tv's that are involved in prolog/epilog fusions and need to
//! have a matching thread swizzle with the mma operand/result.
//! More detail on usage see [WarpMmaSwizzler] in scheduler/mma_utils.h .
void applyMmaSwizzle(MmaOptions options);
//! Returns if this tensor view has swizzle operator on its tensor domain.
//! This is the temporary flag for indicating that the new swizzle
//! implementation is used and will be removed in follow ups.
bool hasSwizzleOp() const {
return has_swizzle_op_;
}
friend TORCH_CUDA_CU_API TransformPropagator;
friend TORCH_CUDA_CU_API MostInlinedTransformPropagator;
friend TORCH_CUDA_CU_API TransformReplay;
friend TORCH_CUDA_CU_API OptOutMutator;
friend class InlineBatchingGuard;
friend class ir_utils::TVDomainGuard;
// Inline the computation of this tensor into its consumer at the given
// position. If this tensor is already inlined in a higher position, then this
// call is a no-op. If the rightmost dimensions before `pos` are
// broadcasts, then this will not inline into those broadcasts. If
// best_effort, then this will inline into the highest allowed position that is <=
// `pos`.
void inlineAt(
int64_t pos,
bool best_effort = false,
MaxPosCalculator* calc = nullptr);
// Update the max producer position of the current tensor. This is required
// when we modify producer-consumer relationship of a scheduled tensor, for
// example, grouping multiple reductions.
void updateMaxProducerPosition();
protected:
void setDomain(TensorDomain* td) {
domain_ = td;
}
private:
int normalizeAxisPos(int pos) const {
if (pos < 0) {
pos += nDims();
}
return pos;
}
//! A helper function to maintain the consistency of schedules of
//! multiple outputs when doing rfactor on multi-output reduction ops.
TensorView* multiOutputRfactorHelper(
TensorView* tv,
const std::vector<int>& axes);
private:
TensorDomain* domain_ = nullptr;
unsigned int compute_at_pos_ = 0;
unsigned int max_producer_pos_ = 0;
MemoryType memory_type_ = MemoryType::Local;
SwizzleType swizzle_type_ = SwizzleType::NoSwizzle;
std::vector<IterDomain*> axes_to_swizzle_;
bool is_double_buffered_ = false;
//! Indicates if the tensor is circular buffered.
bool is_circular_buffered_ = false;
//! Indicates the circular buffering stage depth if applicable.
unsigned int circular_buffer_stage_ = 0;
// special handling for CPU based zero-dim tensors (i.e. CPU Tensors that only
// have one value). This is only used on an input value, otherwise ignored.
// This is important as special handling because these "scalars" should be
// type promoted as a tensor, but we want to avoid explicit copying of the
// data, so we want to pass the data value as a standard kernel argument
// value.
bool cpu_scalar_ = false;
//! Indicates if this tensor view has swizzle operator on its tensor domain.
//! This is the temporary flag for indicating that the new swizzle
//! implementation is used and will be removed in follow ups.
bool has_swizzle_op_ = false;
};
//! A simple TensorView builder
//!
//! Example usage:
//!
//! auto tv = TensorViewBuilder()
//! .ndims(ndims)
//! .dtype(dtype)
//! .contiguity(contiguity)
//! .build();
//!
class TORCH_CUDA_CU_API TensorViewBuilder {
public:
//! Set the number of dimensions of the tensor (default 0, meaning scalar)
TensorViewBuilder& ndims(size_t ndims);
//! Set the data type of the tensor (default DataType::Float)
TensorViewBuilder& dtype(DataType dtype);
//! Set the contiguity information (default non-contiguous)
TensorViewBuilder& contiguity(std::vector<bool> contiguity);
//! Set the shape (default 0 dimensional, i.e. scalar)
TensorViewBuilder& shape(std::vector<Val*> shape);
TensorViewBuilder& shape(const std::vector<int64_t>& shape);
//! Creates a new TensorView with the specified options
TensorView* build() const;
private:
size_t ndims_ = 0;
DataType dtype_ = DataType::Float;
std::vector<bool> contiguity_;
std::vector<Val*> shape_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 20,123
| 32.484193
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_iostream.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <c10/util/irange.h>
#include <iostream>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class Fusion;
namespace kir {
class Kernel;
class Scope;
} // namespace kir
//! Define pretty printing functions for IR nodes
//!
//! This class is intended for debug printing, so it attempts
//! to handle invalid states as well.
//!
class TORCH_CUDA_CU_API IrPrinter : public OptInConstDispatch {
static constexpr char const* kTab = " ";
public:
explicit IrPrinter(std::ostream& os) : os_(os) {}
// Indent the generated code
std::ostream& indent() {
for (const auto i : c10::irange(indent_size_)) {
(void)i; // Suppress unused variable warning
os_ << " ";
}
return os_;
}
void resetIndent() {
indent_size_ = 0;
}
bool printInline() const {
return print_inline_;
}
using OptInConstDispatch::handle;
virtual void handle(Fusion* f);
// handle calls some non const fusion ops,
// even though fusion should remain unchanged.
// Need to look into this.
virtual void handle(const Fusion* f) {
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
handle(const_cast<Fusion*>(f));
}
virtual void handle(Fusion& f) {
handle(&f);
}
virtual void handle(const kir::Kernel* kernel);
virtual void handle(kir::Kernel& kernel);
void handleScope(const kir::Scope& scope);
void handle(const Statement* s) final;
void handle(const Val* v) final;
void handle(const Expr* e) final;
void handle(const IterDomain*) final;
void handle(const TensorDomain*) final;
void handle(const TensorView*) final;
void handle(const Bool*) final;
void handle(const Double*) final;
void handle(const Int*) final;
void handle(const ComplexDouble*) final;
void handle(const NamedScalar*) final;
void handle(const FullOp*) final;
void handle(const ARangeOp*) final;
void handle(const EyeOp*) final;
void handle(const UnaryOp*) final;
void handle(const BinaryOp*) final;
void handle(const TernaryOp*) final;
void handle(const RNGOp*) final;
void handle(const ReductionOp*) final;
void handle(const GroupedReductionOp*) final;
void handle(const WelfordOp*) final;
void handle(const GroupedWelfordOp*) final;
void handle(const LoadStoreOp*) final;
void handle(const MmaOp*) final;
void handle(const BroadcastOp*) final;
void handle(const TransposeOp*) final;
void handle(const ExpandOp*) final;
void handle(const ShiftOp*) final;
void handle(const GatherOp*) final;
void handle(const ViewAsScalar*) final;
void handle(const ViewOp*) final;
void handle(const kir::Predicate*) final;
void handle(const kir::TensorIndex*) final;
void handle(const kir::IntPair*) final;
void handle(const kir::GridBroadcast*) final;
void handle(const kir::GridReduction*) final;
void handle(const kir::GroupedGridReduction*) final;
void handle(const kir::GridWelford*) final;
void handle(const kir::GroupedGridWelford*) final;
void handle(const kir::ForLoop*) final;
void handle(const kir::IfThenElse*) final;
void handle(const kir::Allocate*) final;
void handle(const kir::BlockSync*) final;
void handle(const kir::GridSync*) final;
void handle(const kir::CpAsyncWait*) final;
void handle(const kir::CpAsyncCommit*) final;
void handle(const kir::InitMagicZero*) final;
void handle(const kir::UpdateMagicZero*) final;
void handle(const kir::AllocateFusedReduction*) final;
void handle(const kir::Swizzle2DInt*) final;
void handle(const kir::PairSelect*) final;
// IR math printer overrides these to prevent them from printing, keep
// override
void handle(const Split*) override;
void handle(const Merge*) override;
void handle(const Swizzle2D*) override;
void print_inline(const Statement* stmt) {
bool prev = print_inline_;
print_inline_ = true;
handle(stmt);
print_inline_ = prev;
}
protected:
std::ostream& os() {
return os_;
}
private:
std::ostream& os_;
bool print_inline_ = false;
int indent_size_ = 0;
};
TORCH_CUDA_CU_API std::ostream& operator<<(
std::ostream& os,
const Statement* stmt);
TORCH_CUDA_CU_API std::ostream& operator<<(std::ostream& os, Fusion* f);
TORCH_CUDA_CU_API std::ostream& operator<<(std::ostream& os, Fusion& f);
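// A minimal printing sketch (a sketch, not from the original source; `fusion`
// is a hypothetical, fully defined Fusion):
//   std::cout << fusion; // or: IrPrinter(std::cout).handle(&fusion);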
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 4,421
| 26.128834
| 72
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_printer.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_iostream.h>
#include <iter_visitor.h>
#include <iostream>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Prints computation Fusion IR nodes
//!
//! IrMathPrinter and IrTransformPrinter allow the splitting up of fusion print
//! functions. IrMathPrinter as its name implies focuses solely on what tensor
//! computations are taking place. Resulting TensorView math will reflect the
//! series of split/merge/computeAts that have taken place, however these
//! nodes will not be displayed in what is printed. IrTransformPrinter does not
//! print any mathematical functions and only lists the series of
//! split/merge calls that were made. Both of these printing methods are
//! quite verbose on purpose so as to accurately show what is represented in
//! the IR of a fusion.
//
//! \sa IrTransformPrinter
//!
class TORCH_CUDA_CU_API IrMathPrinter : public IrPrinter {
public:
IrMathPrinter(std::ostream& os) : IrPrinter(os) {}
void handle(const Split* const) override {}
void handle(const Merge* const) override {}
void handle(const Swizzle2D* const) override {}
void handle(Fusion* f) override {
IrPrinter::handle(f);
}
};
//! Prints transformation (schedule) Fusion IR nodes
//!
//! \sa IrMathPrinter
//!
class TORCH_CUDA_CU_API IrTransformPrinter : public IrPrinter {
public:
IrTransformPrinter(std::ostream& os) : IrPrinter(os) {}
void handle(Fusion* f) override;
private:
void printTransforms(TensorView* tv);
};
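// A minimal usage sketch (a sketch, not from the original source; `fusion` is
// a hypothetical Fusion):
//   IrMathPrinter(std::cout).handle(&fusion);      // tensor math only
//   IrTransformPrinter(std::cout).handle(&fusion); // split/merge history only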
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,615
| 25.933333
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/ir_utils.h
|
#pragma once
#include <ir_all_nodes.h>
#include <type.h>
#include <iterator>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace ir_utils {
// Replace values in fusion using ValReplacementMutator
void replaceValue(
Fusion*,
const std::unordered_map<Val*, Val*>& replacement_map);
template <typename FilterType, typename Iterator>
class FilterIterator {
public:
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = FilterType*;
using pointer = value_type*;
using reference = value_type&;
FilterIterator(Iterator begin, Iterator end) : current_(begin), end_(end) {
advance();
}
FilterType* operator*() const {
return (*current_)->template as<FilterType>();
}
FilterType* operator->() const {
  // Note: `**this` invokes operator*, which performs the downcast; returning
  // `*this` directly would not convert to FilterType*.
  return **this;
}
FilterIterator& operator++() {
++current_;
advance();
return *this;
}
FilterIterator operator++(int) {
const auto before_increment = *this;
++current_;
advance();
return before_increment;
}
bool operator==(const FilterIterator& other) const {
TORCH_INTERNAL_ASSERT(
end_ == other.end_,
"Comparing two FilteredViews that originate from different containers");
return current_ == other.current_;
}
bool operator!=(const FilterIterator& other) const {
return !(*this == other);
}
private:
void advance() {
current_ = std::find_if(current_, end_, [](const auto& val) {
return dynamic_cast<const FilterType*>(val) != nullptr;
});
}
private:
Iterator current_;
Iterator end_;
};
// An iterable view to a given container of Val pointers. Only returns
// Vals of a given Val type.
// NOTE: Add a non-const iterator if needed.
template <typename FilterType, typename InputIt>
class FilteredView {
public:
using value_type = FilterType*;
using const_iterator = FilterIterator<FilterType, InputIt>;
FilteredView(InputIt first, InputIt last) : input_it_(first), last_(last) {}
const_iterator cbegin() const {
return const_iterator(input_it_, last_);
}
const_iterator begin() const {
return cbegin();
}
const_iterator cend() const {
return const_iterator(last_, last_);
}
const_iterator end() const {
return cend();
}
bool empty() const {
return begin() == end();
}
std::vector<value_type> vector() const {
return std::vector<value_type>(begin(), end());
}
private:
const InputIt input_it_;
const InputIt last_;
};
template <typename FilterType, typename InputIt>
auto filterByType(InputIt first, InputIt last) {
return FilteredView<FilterType, InputIt>(first, last);
}
template <typename FilterType, typename ContainerType>
auto filterByType(const ContainerType&& inputs) = delete;
template <typename FilterType, typename ContainerType>
auto filterByType(const ContainerType& inputs) {
return filterByType<FilterType>(inputs.cbegin(), inputs.cend());
}
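// A minimal usage sketch (a sketch, not from the original source; `fusion` is
// a hypothetical Fusion*, whose vals() comes from IrContainer):
//   for (TensorView* tv : ir_utils::filterByType<TensorView>(fusion->vals())) {
//     // only Vals that are actually TensorViews are visited
//   }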
//! Returns a list of new-to-old mappings.
//!
//! This function canonicalizes the dimensions and validates that multiple old
//! dimensions are not mapped to the same new dimension.
std::vector<int64_t> normalizeNew2Old(
const std::vector<int64_t>& new2old_in,
size_t ndims);
//! Returns a list of new-to-old mappings.
//!
//! The input map does not need to be complete. Missing axes are
//! assumed not to be affected.
//!
//! This is used to preprocess broadcast and transpose arguments.
//!
//! Example: (N := ndims)
//! {{0, 1}} -> [1, 0, ...., N-1]
//! Transposes the first two axes with no other change.
//!
//! {{0, -1}} -> [N-1, ...., 0]
//! Swaps the first and last axes.
std::vector<int> normalizeOld2New(
const std::unordered_map<int, int>& old2new_in,
size_t ndims);
// Replace all uses of reference with substitute in expr. Return the Expr.
// Warning: Invalidates provided Expr.
// Warning: Removes connection of reference through provided Expr.
// Warning: Creates new Expr connecting substitute.
// Reference is found through direct pointer comparison.
Expr* replaceValInExpr(Expr* expr, Val* reference, Val* substitute);
//! Replace Vals in an index Val as specified by replacement_map while
//! cloning the given index Val. The index val is assumed to represent
//! a tensor index consisting of Ints and arithmetic expressions.
//!
//! This is similar to replaceValInExpr but is different as Vals are
//! cloned such that no other exprs using the same leaf Vals are
//! modified. TODO: Consider cleaning up the multiple replacement
//! routines.
Val* replaceValInIndexVal(
Val* index,
const std::unordered_map<Val*, Val*>& replacement_map);
// Makes rfactor generic with reduction ops and Welford
TORCH_CUDA_CU_API TensorView* rfactorHelper(
TensorView* red_tv,
const std::vector<int>& axes);
// Return immediate producers of val, this function can be used on any Val and
// will return producers through Exprs.
//
// Warning: returned val's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses val->definition() or val->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<Val*> producerValsOf(Val* val);
// Return immediate consumers of val, this function can be used on any Val and
// will return consumers through Exprs.
//
// Warning: returned val's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses val->definition() or val->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<Val*> consumerValsOf(Val* val);
// Return immediate siblings of val, this function can be used on any Val and
// will return siblings through Exprs.
//
// Warning: returned val's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses val->definition() or val->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<Val*> siblingValsOf(Val* val);
// Return immediate producers of vals, this function can be used on any vals and
// will return producers through Exprs.
//
// Warning: returned val's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses val->definition() or val->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<Val*> producerValsOf(
const std::vector<Val*>& vals);
// Return immediate consumers of vals, this function can be used on any vals and
// will return consumers through Exprs.
//
// Warning: returned val's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses val->definition() or val->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<Val*> consumerValsOf(
const std::vector<Val*>& vals);
// Return immediate producers of tv, this function will return all immediate
// producers of tv through Exprs.
//
// Warning: returned tv's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses tv->definition() or tv->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<TensorView*> producerTvsOf(TensorView* tv);
// Return immediate consumers of tv, this function will return all immediate
// consumers of tv through Exprs.
//
// Warning: returned tv's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses tv->definition() or tv->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<TensorView*> consumerTvsOf(TensorView* tv);
// Return immediate siblings of tv, this function will return all immediate
// siblings of tv through Exprs.
//
// Warning: returned tv's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses tv->definition() or tv->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<TensorView*> siblingTvsOf(TensorView* tv);
// Return immediate producers of tvs, this function will return all immediate
// producers of tvs through Exprs.
//
// Warning: returned tv's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses tv->definition() or tv->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<TensorView*> producerTvsOf(
const std::vector<TensorView*>& tvs);
// Return immediate consumers of tvs, this function will return all immediate
// consumers of tvs through Exprs.
//
// Warning: returned tv's are not guaranteed to be between fusion inputs and
// outputs. This function simply uses tv->definition() or tv->uses() which is
// limited to not go through fusion inputs/outputs, but if on a path that isn't
// strictly between fusion inputs/outputs, it could effectively return dead
// code.
TORCH_CUDA_CU_API std::vector<TensorView*> consumerTvsOf(
const std::vector<TensorView*>& tvs);
// Returns producers of tv that are inputs of fusion
TORCH_CUDA_CU_API std::vector<TensorView*> inputTvsOf(TensorView* tv);
// Returns consumers of tv that are outputs of fusion
TORCH_CUDA_CU_API std::vector<TensorView*> outputTvsOf(TensorView* tv);
// Returns producers of tvs that are inputs of fusion
TORCH_CUDA_CU_API std::vector<TensorView*> inputTvsOf(
std::vector<TensorView*> tvs);
// Returns consumers of tvs that are outputs of fusion
TORCH_CUDA_CU_API std::vector<TensorView*> outputTvsOf(
std::vector<TensorView*> tvs);
// returns all tensor views in fusion that are used between outputs and inputs.
TORCH_CUDA_CU_API std::vector<TensorView*> allTvs(Fusion* fusion);
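// A minimal usage sketch (a sketch, not from the original source; `fusion` is
// a hypothetical, fully defined Fusion*):
//   for (TensorView* tv : ir_utils::allTvs(fusion)) {
//     if (tv->hasReduction()) { /* e.g. collect reduction tvs */ }
//   }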
// returns all tensor views in fusion that are used between outputs and inputs
// except the specified set.
TORCH_CUDA_CU_API std::vector<TensorView*> allTvsExcept(
Fusion* fusion,
const std::unordered_set<TensorView*>& except);
TORCH_CUDA_CU_API std::vector<Expr*> getReductionOps(
Fusion* fusion,
bool ignore_trivial = true);
// Returns the initialization value of tv or nullptr if not initialized.
TORCH_CUDA_CU_API Val* getReductionInitValOf(TensorView* tv);
// Returns if Expr is a reduction op
TORCH_CUDA_CU_API bool isReductionOp(const Expr*);
// Returns if Expr is a reduction op with TensorView or TensorIndex
TORCH_CUDA_CU_API bool isReductionTvOp(const Expr*);
// Returns all non-trivial view operations. We shouldn't have trivial view
// operations but this function is to simply make sure if we ever do we don't
// pull them in.
TORCH_CUDA_CU_API std::vector<ViewOp*> getViewOps(Fusion*);
template <typename T>
std::string toString(const T& nodes) {
std::stringstream ss;
for (const Statement* stmt : nodes) {
if (ss.tellp() != 0) {
ss << ", ";
}
ss << stmt->toString();
}
return ss.str();
}
} // namespace ir_utils
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 11,988
| 34.055556
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/iter_visitor.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <type.h>
#include <deque>
#include <unordered_set>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class Fusion;
class Statement;
class Expr;
class Val;
/*
* IterVisitor starts from leaf nodes, fusion outputs, or the provided values.
* It walks the DAG backwards from the starting nodes, to roots. Each node in
* the DAG will be called with handle(Statement*) in topological order, from
* inputs of the fusion to outputs of the fusion.
*
* TODO: We may want a BFS version of this code to extract ILP, not implemented
* yet.
*
* TODO: We may want to have ordering of outputs to inputs. I'm not sure why we
* would want this, but seems like it would be a reasonable request.
*/
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_CUDA_CU_API IterVisitor : public OptOutDispatch {
public:
~IterVisitor() override = default;
IterVisitor() = default;
IterVisitor(const IterVisitor& other) = default;
IterVisitor& operator=(const IterVisitor& other) = default;
IterVisitor(IterVisitor&& other) = default;
IterVisitor& operator=(IterVisitor&& other) = default;
protected:
// Functions return nodes in reverse order to be added to the to_visit queue
// These functions will start at outputs and propagate up through the DAG
// to inputs based on depth first traversal. Next could be called on a node
// multiple times.
virtual std::vector<Statement*> next(Statement* stmt);
virtual std::vector<Statement*> next(Val* v);
virtual std::vector<Statement*> next(Expr* expr);
// This handle function is called on every Statement* in topological order,
// starting from outputs to inputs.
void handle(Statement* s) override;
// This handle function is called on every Expr* in topological order,
// starting from outputs to inputs.
void handle(Expr* e) override;
// This handle function is called on every Val* in topological order,
// starting from outputs to inputs.
void handle(Val* v) override;
// The entire stack during traversal. stmt_stack.back().back() is the node
// that is being called in handle(). stmt_stack.back() contains siblings (not
// guaranteed to be all siblings throughout traversal). stmt_stack.front()
// contains the outputs we started with (not guaranteed to be all outputs
// throughout traversal).
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::vector<std::vector<Statement*>> stmt_stack;
void traverseHelper(Fusion* fusion, bool traverse_all_paths = false);
public:
//! Traverses nodes in Fusion from inputs in topological order to "to". i.e.
//! from inputs towards outputs.
//! \param traverseAllPaths = false only call handle on each Statement* once
//! traverseAllPaths = true traverses all paths between expressions/values.
//! Calls handle on a Statement* for every path from inputs to "to".
//! \param traverseIntoMembers = When hitting nodes like TensorView,
//! TensorDomain, or IterDomain where there are members of the nodes that are
//! Val's a value of "true" will also traverse into those member Val's, a
//! value of "false" will not traverse into the members.
void traverseTo(
Fusion* fusion,
const std::vector<Val*>& to,
bool traverse_all_paths = false,
bool traverse_into_members = false);
//! Traverses nodes in Fusion from inputs in topological order to "to". i.e.
//! from inputs towards outputs.
//! \param traverseAllPaths = false only call handle on each Statement* once
//! traverseAllPaths = true traverses all paths between expressions/values.
//! Calls handle on a Statement* for every path from inputs to "to".
//! \param traverseIntoMembers = When hitting nodes like TensorView,
//! TensorDomain, or IterDomain where there are members of the nodes that are
//! Val's a value of "true" will also traverse into those member Val's, a
//! value of "false" will not traverse into the members.
//! \param from: Specified values to start traversing. If a "from" Val is not
//! on a path from inputs to the "to" node, it will not be visited. If there's
//! a path from inputs to "to" that doesn't go through "from", that input and
//! the path from it will also be traversed.
void traverseBetween(
Fusion* fusion,
const std::unordered_set<Val*>& from,
const std::vector<Val*>& to,
bool traverse_all_paths = false,
bool traverse_into_members = false);
// Iterates from terminating outputs registered with the fusion. Terminating
// means value is not used to generate any other value used in producing
// registered outputs.
void traverse(Fusion* fusion);
// Same as traverse but it traverses every edge, meaning it will traverse
// values more than once.
void traverseAllPaths(Fusion* fusion);
//! Get inputs to vals. Possible input vals can be optionally
//! given. If not, vals with no producers are returned.
//
// TODO: This doesn't seem to fit with IterVisitor. Should probably be moved
// out of the class.
static std::vector<Val*> getInputsTo(
const std::vector<Val*>& vals,
const std::vector<Val*>& inputs = {});
};
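// A minimal sketch of a custom traversal (a sketch, not from the original
// source; it assumes OptOutDispatch exposes a handle(TensorView*) overload in
// dispatch.h, the pattern other passes in this codebase rely on):
//   class TvCounter : public IterVisitor {
//    public:
//     using IterVisitor::handle;
//     size_t count = 0;
//     void handle(TensorView*) override { ++count; }
//   };
//   TvCounter counter;
//   counter.traverse(fusion); // each Statement handled once, inputs first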
/*
* Backward visitor: like IterVisitor, but calls handle in reverse order, from
* outputs to inputs. It would be really nice to unify this with IterVisitor,
* however, the challenge there is that we specify traversal from outputs
* towards inputs because it implicitly provides DCE. However, if users are not
* careful, they could miss necessary outputs to do a backward traversal.
*
* BackwardVisitor checks that all outputs of an Expr are visited before
* visiting the Expr. If we don't provide nodes to start from on all backward
* paths of those outputs, we will never visit the Expr.
*
* The first step of BackwardVisitor is to make sure we've specified enough
* outputs to guarantee that we will traverse all outputs of all exprs during
* the backward traversal. There are cases where we don't require visiting all
* outputs of some exprs, an example being the `N` output of welford ops.
* `must_cover_all_expr_outputs` is added to disable the check, and in
* this case the visitor pass needs to be aware that:
* 1. Exprs with any output that has a use chain that ends with a final
* consumer in the `from` list `will be` visited.
* 2. Vals that don't have a use chain that ends with a final
* consumer in the `from` list `will not be` visited, even though their
* definition expr might be visited. An example: if the `N` output
* of a welford op is unused, but other outputs are, the welford op
* will be visited but the `N` output will not.
*
*/
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_CUDA_CU_API BackwardVisitor : public OptOutDispatch {
protected:
// NOLINTNEXTLINE(modernize-use-override)
virtual ~BackwardVisitor() = default;
BackwardVisitor(bool must_cover_all_expr_outputs = true)
: must_cover_all_expr_outputs_(must_cover_all_expr_outputs) {}
BackwardVisitor(const BackwardVisitor& other) = default;
BackwardVisitor& operator=(const BackwardVisitor& other) = default;
BackwardVisitor(BackwardVisitor&& other) = default;
BackwardVisitor& operator=(BackwardVisitor&& other) = default;
// Functions return nodes in reverse order to be added to the to_visit queue
// These functions will start at outputs and propagate up through the DAG
// to inputs based on depth first traversal. Next could be called on a node
// multiple times.
virtual std::vector<Statement*> next(Statement* stmt);
virtual std::vector<Statement*> next(Expr* expr);
virtual std::vector<Statement*> next(Val* val);
// This handle function is called on every Statement* in topological order,
// starting from outputs to inputs.
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
virtual void handle(Statement* stmt) override;
// This handle function is called on every Expr* in topological order,
// starting from outputs to inputs.
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
virtual void handle(Expr* expr) override;
// This handle function is called on every Val* in topological order,
// starting from outputs to inputs.
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
virtual void handle(Val* val) override;
// All exprs that need to be visited in this traversal. Labeled in topological
// order (size_t).
std::unordered_map<Expr*, size_t> traversal_exprs_;
// The entire stack during traversal. stmt_stack.back().back() is the node
// that is being called in handle(). stmt_stack.back() contains siblings (not
// guaranteed to be all siblings throughout traversal). stmt_stack.front()
// contains the nodes we started with (not guaranteed to be all of them
// throughout traversal).
std::deque<std::deque<Statement*>> stmt_stack_;
// Starts at nodes provided in from, traverses from these nodes to inputs.
// Calls handle on all Statement*s in topological sorted order.
// traverseAllPaths = false only call handle on each Statement* once
// traverseAllPaths = true traverses all paths from nodes in from to inputs.
// Calls handle on a Statement* for every path from "from" nodes to inputs.
void traverseTo(
Fusion* fusion,
const std::vector<Val*>& from,
bool traverseAllPaths = false);
bool must_cover_all_expr_outputs_ = true;
};
class TORCH_CUDA_CU_API DependencyCheck {
public:
// Returns if "dependency" is a dependency of "of".
static bool isDependencyOf(Val* dependency, Val* of);
// Finds a Val* path from "of" to "dependency". Returns that path.
// deque.back() is "of", deque[0] is dependency if a chain exists.
static std::deque<Val*> getSingleDependencyChain(Val* dependency, Val* of);
// Finds all Val* paths from "of" to "dependency". Returns those paths.
// deque[i].back() is "of", and deque[i][0] is "dependency". Returns an
// empty deque if no dependency found.
static std::deque<std::deque<Val*>> getAllDependencyChains(
Val* dependency,
Val* of);
// Finds all Val* paths from all leaf nodes to "dependency". Returns those
// paths. deque[i].back() are leaf nodes, and deque[i][0] is "dependency".
// Returns an empty deque if there are no uses of dependency found.
static std::deque<std::deque<Val*>> getAllUseChains(Val* dependency);
// Grab all values that exist between and including provided
// vals. Returned values are topologically ordered, and unique.
static std::vector<Val*> getAllValsBetween(
const std::unordered_set<Val*>& dependencies,
const std::vector<Val*>& of);
// Returns all dependent exprs that exist between
// the provided vals
static std::vector<Expr*> getAllExprsBetween(
const std::unordered_set<Val*>& dependencies,
const std::vector<Val*>& of);
// Return registered outputs of the fusion that are a dependency of any val in `of`
static std::unordered_set<Val*> getAllOutputsOf(
const std::unordered_set<Val*>& of);
// Return all Vals that depend on the given Vals
static std::unordered_set<Val*> getAllDependentVals(
const std::unordered_set<Val*>& of);
};
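// A minimal usage sketch (a sketch, not from the original source; `a` and `b`
// are hypothetical Vals in the same fusion):
//   if (DependencyCheck::isDependencyOf(a, b)) {
//     auto chain = DependencyCheck::getSingleDependencyChain(a, b);
//     // chain[0] == a (the dependency), chain.back() == b
//   }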
// Expr sort will take a fusion and return a topologically sorted list of
// expressions.
class StmtSort : public IterVisitor {
protected:
StmtSort() = default;
std::vector<Statement*> stmts;
void handle(Statement* stmt) override;
public:
// If traverse_members it will also extract all member nodes in the sorted
// statement list in the fusion. i.e. all IterDomains, extents, and associated
// expressions of them
static std::vector<Statement*> getStmts(
Fusion* fusion,
bool traverse_members = false);
// Returns ordered Statements required to produce `to`, including `to`.
static std::vector<Statement*> getStmts(
Fusion* fusion,
const std::vector<Val*>& to,
bool traverse_members = false);
// Returns ordered Statements required to produce from, including from.
// Stops traversal once hitting any Statements in to. Includes Statements in
// to.
//
// Warning: this doesn't necessarily prevent statements before `to` from being
// returned. e.g.
// i1 = i0
// i2 = i1
// i3 = i2
// i4 = i3 + i1
// getExprs(fusion, {i4}, {i3})
// will return the definition and values {i0, i1, i4}
// i3 is dependent on i1, but since i4 also is then the traversal will go down
// the i4->i1->i0 path, even though the i4->i3-//>i2->i1 path is blocked.
//
// If traverse_members it will also extract all member nodes in the sorted
// expr list in the fusion. i.e. all expressions on IterDomains, extents, etc
static std::vector<Statement*> getStmtsBetween(
Fusion* fusion,
const std::vector<Val*>& from,
const std::vector<Val*>& to,
bool traverse_members = false);
// Same as getStmts version but filters to only return the Expr*s
static std::vector<Expr*> getExprs(
Fusion* fusion,
bool traverse_members = false);
// Same as getStmts version but filters to only return the Expr*s
static std::vector<Expr*> getExprs(
Fusion* fusion,
const std::vector<Val*>& to,
bool traverse_members = false);
// Same as getStmts version but filters to only return the Expr*s
static std::vector<Expr*> getExprsBetween(
Fusion* fusion,
const std::vector<Val*>& from,
const std::vector<Val*>& to,
bool traverse_members = false);
};
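// A minimal usage sketch (a sketch, not from the original source; `fusion` is
// a hypothetical Fusion*):
//   for (Expr* e : StmtSort::getExprs(fusion)) {
//     // exprs visited in topologically sorted order
//   }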
class InputsOf : public IterVisitor {
private:
std::unordered_set<Val*> grabbed_inputs;
std::vector<Val*> ordered_inputs;
void handle(Val* v) final;
public:
static std::vector<Val*> output(Fusion* fusion, Val* output_);
static std::vector<Val*> outputs(
Fusion* fusion,
const std::vector<Val*>& outputs_);
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 14,030
| 38.974359
| 88
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/kernel.h
|
#pragma once
#include <c10/macros/Export.h>
#include <fusion.h>
#include <ir_base_nodes.h>
#include <ir_builder.h>
#include <lower_sync_information.h>
#include <lower_warp_reduce.h>
#include <parallel_dimension_map.h>
#include <utils.h>
#include <vectorization_info.h>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace kir {
//! Summary of interesting facts about the kernel
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct KernelSummary {
//! Count of WAR (write-after-read) hazard barriers
int war_hazard_syncs_count = 0;
//! List of global buffers
std::vector<const kir::Allocate*> global_allocations;
//! List of dynamic shared memory buffers
std::vector<const kir::Allocate*> dynamic_smem_allocations;
//! List of static shared memory buffers
std::vector<const kir::Allocate*> static_smem_allocations;
//! Indicate the need to generate random numbers
int max_rng_offsets = -1;
//! Do we have any block reductions?
bool has_block_reductions = false;
//! Do we have any static grid reductions?
bool has_grid_reductions = false;
//! Do we have any grid reduction in a loop, or grid reductions dependent on
//! grid reductions
bool has_cooperative_grid_reduction = false;
//! Do we have any block broadcasts?
bool has_block_broadcasts = false;
//! Do we have any grid broadcasts?
bool has_grid_broadcasts = false;
//! Do we have any welford op?
bool has_welford = false;
//! Do we have any block-level welford op?
bool has_block_welford = false;
//! Do we have any grid-level welford op?
bool has_grid_welford = false;
//! Largest shared memory buffer base type
DataType largest_smem_data_type = DataType::Null;
//! Do we have allocations of dynamic local memory?
bool has_dynamic_local_memory_allocations = false;
//! List of dynamic local memory buffers.
//! Only used for debugging.
std::vector<const kir::Allocate*> dynamic_lmem_allocations;
//! ceilDiv extents that must be divisible
std::vector<std::pair<const Val*, const Val*>> splits_to_validate;
//! Effective ParallelTypes of broadcast ops
std::unordered_map<const BroadcastOp*, ParallelTypeBitmap>
broadcast_parallel_types;
//! Track which tensor views are inputs or outputs of a vectorized operation
//! and their maximum vectorized access size
std::unordered_map<TensorView*, int> vectorized_accesses;
// Sync map is needed to figure out if global memory buffers need to be marked
// as volatile because they're used for communication.
SyncMap sync_map;
// Parallel dimension map needed to set the correct properties of grid buffers
// (is a dim inactive)
ParallelDimensionMap parallel_dimension_map_;
//! Track information on vectorized set operations for runtime validation
std::vector<VectorizedSetInfo> vectorized_set_info;
};
class TORCH_CUDA_CU_API KernelPerformanceProfile {
public:
//! Register an expression to profile
void registerExpr(const Expr* expr);
//! Query if an expression is profiled
bool isProfiled(const Expr* expr) const;
//! Get the number of profiled expressions
int getNumberOfProfileEntries() const {
return num_profile_entries_;
}
//! Set the backing buffer of profile.
void setBuffer(TensorView* buffer) {
buffer_ = buffer;
}
//! Get the backing buffer
TensorView* getBuffer() const {
return buffer_;
}
//! Get the indices of the profile of an expression in the backing buffer
std::array<int, 2> getIndicesInProfileBuffer(const Expr* expr) const;
std::string toString(const at::Tensor& buffer) const;
private:
//! Get the new profile index
int getNewIndex();
//! Get the profile index
c10::optional<int> getIndex(const Expr* expr) const;
private:
int num_profile_entries_ = 0;
//! Backing buffer of Nx2 integer tensor, where N is the number of profiled
//! regions. Each region has two integer values, one representing
//! the cycles spent, and another the count.
TensorView* buffer_ = nullptr;
//! Map profiled expressions to profile entry offsets
std::unordered_map<const Expr*, int> expr_entry_map_;
// TODO: Allow profiling of ForLoops
//! Map profiled ForLoop to profile entry offsets
// std::unordered_map<const kir::ForLoop*, int> loop_entry_map_;
};
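// Example: a minimal sketch of wiring up the profile. `expr` (an Expr*
// chosen during lowering), `buffer_tv` (the backing Nx2 TensorView*), and
// `result` (the profile tensor read back after the kernel ran) are all
// assumptions:
//
// ```cpp
// KernelPerformanceProfile profile;
// profile.registerExpr(expr);   // mark expr for profiling
// profile.setBuffer(buffer_tv); // attach the Nx2 backing buffer
// // ... generate, launch, and copy the profile buffer back ...
// std::cout << profile.toString(result); // cycles and counts per entry
// ```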
class KernelInternalProxy;
//! Container for a lowered Kernel IR
//!
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_CUDA_CU_API Kernel final : public Fusion {
friend KernelInternalProxy;
public:
// Kernel starts by grabbing all the nodes from the provided fusion.
// Kernel is not SSA: if a definition is not set, we should update it, but
// we should not remove a previous definition if it is set. This is primarily
// because when we do something like generate an initialization statement for
// a reduction TV, we may want to continue to do fusion-like analysis on the
// original expression.
// TODO: Assert index type is int or int32
Kernel(Fusion* fusion, DataType index_type = DataType::Int)
: Fusion(*fusion), index_type_(index_type) {}
Kernel() = delete;
// No move or copy semantics
Kernel(const Kernel&) = delete;
Kernel& operator=(const Kernel&) = delete;
//! Finalize a kernel definition
//!
//! At this point we have a complete kernel definition and we can
//! run analysis passes to build a KernelSummary.
void finalize(std::vector<Expr*> top_level_exprs);
const std::vector<Expr*>& topLevelExprs() const {
return top_level_exprs_;
}
const KernelSummary& summary() const {
return summary_;
}
DataType indexType() const {
return index_type_;
}
//! Checks if parallel type is padded
bool isParallelTypePadded(ParallelType ptype) const {
return ptype == ParallelType::TIDx &&
warp_padded_parallel_info_.is_tidx_padded;
}
const WarpPaddedParallelInfo& getWarpPaddedParallelInfo() const {
return warp_padded_parallel_info_;
}
const KernelPerformanceProfile& profile() const {
return profile_;
}
//! Debug dump of the Kernel IR
void print() const;
protected:
//! Register the Val with this fusion
void registerVal(Val* val) override;
//! Register expr with this fusion.
//! When we register an expression, we want to update the dependency tracking
//! of Vals. We add expr to our general expr_set_,
void registerExpr(Expr* expr) override;
private:
// Analyzes the kernel IR and caches a summary of interesting data
void analyze();
// Top level statements
std::vector<Expr*> top_level_exprs_;
// Summary of interesting kernel data
KernelSummary summary_;
// Is this kernel being compiled with int32 or int64 indexing? This
// information is required to resolve DataType::Index.
DataType index_type_ = DataType::Int;
WarpPaddedParallelInfo warp_padded_parallel_info_;
KernelPerformanceProfile profile_;
};
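// Example: a minimal sketch of finalizing a Kernel. `fusion` and the lowered
// `top_level_exprs` are assumed to come out of lowering:
//
// ```cpp
// Kernel kernel(fusion, DataType::Int);
// kernel.finalize(top_level_exprs); // runs analysis and fills the summary
// const KernelSummary& summary = kernel.summary();
// if (summary.has_cooperative_grid_reduction) {
//   // a cooperative launch will be required
// }
// ```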
//! A special debugging proxy for Kernel.
//!
//! Should not be used for anything other than testing and debugging.
class TORCH_CUDA_CU_API KernelInternalProxy {
public:
KernelInternalProxy(Kernel* kernel) : kernel_(kernel) {}
std::vector<Expr*>& topLevelExprs();
private:
Kernel* kernel_ = nullptr;
};
} // namespace kir
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 7,358
| 27.523256
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/kernel_cache.h
|
#pragma once
#include <evaluator_common.h>
#include <executor.h>
#include <fusion.h>
#include <fusion_segmenter.h>
#include <scheduler/all_schedulers.h>
#include <scheduler/registry.h>
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <mutex>
#include <type_traits>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class SegmentedGroup;
class FusionHeuristics;
class SchedulerRuntimeInfo;
// Utilities for benchmarking and profiling
struct ExecutorLog {
std::shared_ptr<HeuristicParams> params = nullptr;
FusionExecutor* fusion_executor = nullptr;
};
//! FusionKernelRuntime is the unified interface from fusion graphs into
//! caching, compilation into kernels, and kernel launches.
//!
//! Each instance is also a cache entry tracked by FusionKernelRuntimeCache.
//!
//! Two types of instances can be created: one for a complete/single-kernel
//! fusion and one for a segmented/multi-kernel fusion.
//! Conceptually this is a generalization of FusionExecutor that supports both
//! single-kernel and multi-kernel caching/compiling/launching
class TORCH_CUDA_CU_API FusionKernelRuntime {
public:
explicit FusionKernelRuntime(
Fusion* fusion,
const KernelArgumentHolder& inputs);
//! Type notations within FusionKernelRuntime Context
using HashType = size_t;
using SchedulerEntryPtr = std::unique_ptr<SchedulerEntry>;
//! Evicts internally cached parameters based on input sizes.
//! An interface used by runtime caches.
void evictCache(size_t input_id) {
for (auto& fe : executors_) {
fe.evictCache(input_id);
}
}
//! query if we already have a compiled kernel for execution
bool isCompiled() {
std::unique_lock<std::mutex> lock0(mutex_, std::try_to_lock);
std::unique_lock<std::mutex> lock1(compiling_, std::try_to_lock);
if (!lock0.owns_lock() || !lock1.owns_lock()) {
// compilation in progress
return false;
}
return std::all_of(
executors_.begin(), executors_.end(), [](const auto& executor) {
return executor.compiled();
});
}
//! starts compilation async
void startAsyncCompile(KernelArgumentHolder& inputs);
//! maps entries in `args` to fusion inputs.
//! Note that this function also pushes extra bits like dimension extent into
//! `args` for expression evaluator binding. So consider your `args` polluted
//! after this function and use it with caution.
void mapFusionInputsToArgs(
std::unordered_map<Val*, const ArgAbstract*>& tensor_map,
KernelArgumentHolder& args);
//! Unified interface to run the managed kernels with given input
std::vector<at::Tensor> runWithInput(KernelArgumentHolder& args);
//! Turn On/Off profiling
void profile(bool to_profile = true) {
profiling_ = to_profile;
}
//! Internal knob for profiling shape inference
void disableLaunchParamCache() {
for (auto& executor : executors_) {
executor.disableLaunchParamCache();
}
}
//! Internal knob for profiling shape inference
void disableKernelLaunch() {
for (auto& executor : executors_) {
executor.setExecuteKernelFlag(false);
}
}
//! Returns if this runtime is segmented
bool isSegmented() {
return is_segmented_;
}
//! Returns the fusion segments if applicable
SegmentedFusion* fusionSegments() {
return segmented_fusion_.get();
}
//! Returns the list of heuristics in this runtime
FusionHeuristics* schedulerHeuristics() {
return heuristics_.get();
}
//! Return the most recently used executor, corresponding to the
//! most recent kernel launch.
//! TODO: have an interface for grabbing all recent logs. Need to put a
//! buffer space for recent logs.
ExecutorLog getMostRecentExecutorLog() {
TORCH_INTERNAL_ASSERT(
profiling_, "Executor log is only produced in profiling mode");
return most_recent_executor_log_;
}
// Try to compute heuristics based on the SegmentedFusion managed
// in this kernel runtime; returns nullopt if any segment cannot be
// scheduled or the parameters don't match
using HeuristicsPtr = std::unique_ptr<FusionHeuristics>;
c10::optional<HeuristicsPtr> getMaybeHeuristicsFor(
const KernelArgumentHolder& args);
//! Copy the launch params given in the parameter heuristics to prepare
//! for kernel launch for a new input dimension but same heuristics
void updateHeuristicsLaunchParams(FusionHeuristics* update_heuristics);
private:
//! Interface to run a single kernel, either one kernel for single-kernel
//! fusions, or one kernel for a SegmentedGroup in a segmented fusion.
//! Returns the kernel outputs.
std::vector<at::Tensor> runKernelWithInput(
KernelArgumentHolder& args,
SegmentedGroup* sg);
//! Interface to compile a single kernel, either one kernel for single-kernel
//! fusions, or one kernel for a SegmentedGroup in a segmented fusion.
//! Returns the kernel outputs as tensors that don't own memory.
KernelArgumentHolder compileKernel(
const KernelArgumentHolder& args,
SegmentedGroup* sg);
//! Interface to run the whole graph of a segmented fusion and return the
//! complete fusion outputs.
std::vector<at::Tensor> runMultiKernelWithInput(
const at::ArrayRef<IValue>& inputs,
size_t input_id);
//! Access the list of schedulers maintained in this runtime instance
const std::vector<SchedulerEntryPtr>& schedulers();
void prepareRuntimeOrder();
private:
//! Entries indexed by groupID:
//! Executors holding compiled kernels
std::vector<FusionExecutor> executors_;
//! Heuristics object holding scheduler entries for all segments
std::unique_ptr<FusionHeuristics> heuristics_;
// Checks if this runtime instance is for a single-kernel fusion (false) or a
// segmented fusion (true).
bool is_segmented_ = true;
//! Multi-Kernel fusion segment when applies
std::unique_ptr<SegmentedFusion> segmented_fusion_ = nullptr;
//! Pre-allocated runtime workspace to speed up kernel launch preparation.
struct RuntimeWorkSpace {
//! Pre-determined order to run the segmented groups
std::vector<SegmentedGroup*> group_run_order;
//! Pre-determined order to bind tensor input meta data
std::vector<Val*> group_extent_binding_order;
} runtime_workspace_;
//! Utility to speed up value evaluation at runtime
std::unique_ptr<FusionPrecomputedValues> precomputed_values_;
// States for profiling support
bool profiling_ = false;
std::mutex mutex_;
// TODO: remove `compiling_` mutex and rely on `mutex_` only.
// we don't need the second mutex, if only I could figure out how to pass
// unique_lock into lambda
std::mutex compiling_;
// The heuristics and executor for most recent kernel launch
ExecutorLog most_recent_executor_log_;
};
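// Example: a minimal sketch of the async compile flow. `fusion` and the
// KernelArgumentHolder `args` are assumptions:
//
// ```cpp
// FusionKernelRuntime runtime(fusion, args);
// runtime.startAsyncCompile(args);
// while (!runtime.isCompiled()) {
//   std::this_thread::yield(); // compilation still holds the locks
// }
// auto outputs = runtime.runWithInput(args);
// ```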
//! Encoding an input set to unique id, which is used to short-cut cache entry
//! selection in our nested cache implementation to cut off overhead.
//!
//! We have implemented a naive LRU cache eviction policy here, since each
//! entry in `InputsIdLookup` is attached to a static input shape/stride, and
//! the cache could grow gigantic when input shapes do not stabilize to a
//! finite set.
//!
//! \note the uniqueness of the id generated for a given input set is only
//! local to the instance of `InputsIdLookup`.
//!
class TORCH_CUDA_CU_API InputsIdLookup : public NonCopyable {
public:
//! constructor where maximum cache size is fixed during init
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-avoid-magic-numbers)
explicit InputsIdLookup(size_t max_cache_size = 100)
: max_cache_size_(max_cache_size){};
//! struct to hold return value for lookupId.
struct IdLookupReturn {
size_t id = 0;
size_t evict_id = 0;
bool eviction = false;
};
//! encode each input set with a unique id;
//! The returned data structure also indicates whether eviction has happened
//! within the lookup cache. This is needed because the lookup shortcut is
//! also cached in the nested `GraphCache`, `FusionExecutorCache` and
//! `FusionExecutor`. See [ Note -- 2 level cache implementation ].
IdLookupReturn lookupId(const at::ArrayRef<IValue>& inputs);
//! debugging API that returns the size of lookup table
size_t size() const {
return encoding_lookup_.size();
}
private:
// String to store encoded input meta information. Reusing the buffer instead
// of a stringstream gives us a small perf gain.
std::string encoding_; // Note: shared state, guarded by mutex_
// mutex_ used to guard reused encoding_
std::mutex mutex_;
//! entry stored in `encoding_lookup_` to implement LRU
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct EncodingEntry {
size_t id = 0;
std::list<std::string>::iterator lru_iter;
};
//! maximum cache size for LRU
const size_t max_cache_size_;
//! next available unique id; we monotonically increase `current_id_` to
//! avoid conflicts
size_t current_id_ = 1;
//! entries in the cache. This list is used to implement the LRU cache, where
//! entries are ordered by their recent usage (a freshly used entry is placed
//! at the beginning)
std::list<std::string> used_entry_;
//! map from `std::string` to a unique id `size_t` (packaged in
//! `EncodingEntry`). We store an iterator to `used_entry_` to implement
//! LRU
std::unordered_map<std::string, EncodingEntry> encoding_lookup_;
};
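// Example: a minimal sketch of the lookup/eviction contract. The
// `flushDownstreamCaches` and `useEntryFor` helpers are hypothetical:
//
// ```cpp
// InputsIdLookup lookup(/*max_cache_size=*/100);
// auto ret = lookup.lookupId(inputs); // inputs: at::ArrayRef<IValue>
// if (ret.eviction) {
//   // the evicted id must also be dropped from the nested caches
//   flushDownstreamCaches(ret.evict_id);
// }
// useEntryFor(ret.id);
// ```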
//! [ Note -- 2 level cache implementation ]
//!
//! We have 2 level cache for a separation in function to keep them simpler.
//!
//! 2 level hierarchically nested cache is to handle the code generation and
//! execution of a given PyTorch IR graph that is unique in its computational
//! graph (see note on unique computational graph down).
//!
//! The nested cache structures are:
//! a. GraphCache
//!     - GraphCache translates PyTorch IR into Fusion IR and passes it to a
//!       `FusionExecutorCache`;
//!     - GraphCache assumes all inputs comply with profiling information,
//!       mostly tensor size & contiguity (see note on unique computational
//!       graph). The assumption is assured at runtime by
//!       `prim::CudaFusionGuard`;
//! b. FusionExecutorCache
//! - has a single `Fusion`, FusionExecutorCache handles kernel schedule
//! and passed scheduled tensor to `FusionExecutor` to generate code;
//! - create `FusionExecutor` instances to handle heuristics from dynamic
//! shape (varying tensor sizes);
//! - create `FusionExecutor` instances to handle different devices;
//! - holds input cache `InputsIdLookup`, which allow cache on heuristics
//! and launch parameters to reduce latency.
//!
//! * note on unique computational graph
//! In theory, a computational graph should refer only to the computational
//! nodes in a subgraph and should remain agnostic to input meta info, like
//! shape, strides, type, etc. However, the contract right here is fuzzy.
//! Different executors apply their own protocols of what constitutes a
//! unique computational graph. e.g. the Legacy Executor embeds tensor type &
//! dimensionality in the graph, while the Profiling Executor also keeps
//! symbolic shape and stride order in the graph.
//!
//! Our definition of a "unique" computational graph is aligned with `Fusion`
//! IR, hence the requirement extends to meta information on input tensors.
//! This means that, for each input tensor, the following properties are fixed:
//! a) stride order;
//! b) contiguity information;
//! c) broadcasting semantics (size-1 or not);
//! d) rank;
//! e) scalar type;
//!
//!
//! [ Note -- Segmented Fusion Tentative Design ]
//! Segmentation adds an extra dimension in caching. The initial
//! implementation assumes the graph partition strategy is independent of the
//! input pattern, which we can revisit once we have more advanced graph
//! segmentation logic. Each FusionExecutorCache corresponds to one graph and
//! one graph segmentation.
//!
//!
class TORCH_CUDA_CU_API FusionExecutorCache {
public:
//! create a new fusion executor cache at a given device to handle kernel
//! generation for dynamic sizes;
//! the fusion executor cache takes ownership of `fusion`
explicit FusionExecutorCache(std::unique_ptr<Fusion> fusion);
//! Execute fusion graph with given inputs, create `FusionExecutor` as needed
//! Note this function also handles permutation & input update outside of
//! codegen.
std::vector<at::Tensor> runFusionWithInputs(
const at::ArrayRef<IValue>& inputs);
Fusion* fusion() {
return fusion_.get();
}
void printFusion() {
fusion_->printMath();
}
FusionKernelRuntime* getMostRecentKernelRuntime() {
return most_recent_runtime_;
}
// TODO: in a follow up we need a global logging structure
// to capture runtime profiling info. We also need to define
// a suitable profiling window / buffer size.
ExecutorLog getMostRecentExecutorInfo() {
TORCH_INTERNAL_ASSERT(most_recent_runtime_ != nullptr);
return most_recent_runtime_->getMostRecentExecutorLog();
}
void profile(bool to_profile) {
profiling_ = to_profile;
for (auto& it : kernel_runtimes_) {
for (auto& kernel_runtime : it.second) {
kernel_runtime->profile(to_profile);
}
}
}
//! Internal knob for profiling shape inference
void disableLaunchParamCache() {
for (auto& it : kernel_runtimes_) {
for (auto& kernel_runtime : it.second) {
kernel_runtime->disableLaunchParamCache();
}
}
}
//! Internal knob for profiling shape inference
void disableKernelLaunch() {
for (auto& it : kernel_runtimes_) {
for (auto& kernel_runtime : it.second) {
kernel_runtime->disableKernelLaunch();
}
}
}
//! converts inputs from IValue to KernelArgumentHolder, also handles cache
//! lookup
KernelArgumentHolder prepareInputs(const at::ArrayRef<IValue>& inputs);
//! query if there's a kernel ready to go for given inputs
bool isCompiled(const at::ArrayRef<IValue>& inputs);
//! compile a kernel executor for given inputs. Note: the compilation is
//! async, so there are some restrictions on the user side, e.g. don't
//! overlap compilation and execution for the same FusionExecutor entry. This
//! is experimental at this moment, please use with extra caution.
void compileFusionAsync(const at::ArrayRef<IValue>& inputs);
private:
//! evict the cached shortcut entry in `code_to_fe_lookup_` as well as the
//! cached entry in `FusionExecutor`
void evictCache(size_t cache_id);
FusionKernelRuntime* getKernelRuntimeFor(const KernelArgumentHolder& inputs);
private:
//! original un-scheduled `Fusion`;
std::unique_ptr<Fusion> fusion_;
//! inputs to unique_id lookup table;
InputsIdLookup inputs_id_lookup_;
//! Graphs after input-dependent transforms
std::unordered_map<size_t, std::vector<std::unique_ptr<FusionKernelRuntime>>>
kernel_runtimes_;
//! Logging state for most recent compilation
bool profiling_ = false;
//! Logging state for most recent compilation
ExecutorLog most_recent_executor_log_;
//! short-cut for cache hit
std::unordered_map<size_t, FusionKernelRuntime*> id_to_kernel_runtime_;
//! Profiling info:
//! TODO: this can be largely expanded to look at complete
//! caching profiles. Currently it just makes it easier to test
FusionKernelRuntime* most_recent_runtime_ = nullptr;
//! indices of fusion outputs that are aliased to inputs. These are used only
//! to support in-place updates and should have been dropped before pushing
//! outputs to the stack.
std::set<int> aliased_output_indices_;
};
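// Example: a minimal sketch of the level-(b) cache in use; building the
// fusion IR itself is elided:
//
// ```cpp
// auto fusion = std::make_unique<Fusion>();
// // ... define inputs, expressions, and outputs on `fusion` ...
// FusionExecutorCache executor_cache(std::move(fusion));
// // Compiles on the first call; later calls with compatible inputs hit
// // the nested caches.
// auto outputs = executor_cache.runFusionWithInputs(inputs);
// ```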
class GraphCache {
public:
//! TODO: we should probably change shared_ptr to unique_ptr, as we want to
//! claim the ownership of the computational graph.
//! create GraphCache on a given graph;
//! We extract global stride index order and translate PyTorch JIT IR to
//! Fusion IR.
explicit GraphCache(const std::shared_ptr<Graph>& graph);
//! execute graph with given inputs
std::vector<at::Tensor> runGraphWithInputs(
const at::ArrayRef<IValue>& inputs);
private:
//! construct FusionExecutorCache
void createFusion(const std::shared_ptr<Graph>& graph);
private:
//! FusionExecutorCache that performs schedule and kernel execution;
std::unique_ptr<FusionExecutorCache> fusion_executor_cache_;
//! num of outputs
size_t num_of_outputs_ = 0;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 16,677
| 34.409766
| 97
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/kernel_expr_evaluator.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <dynamic_type.h>
#include <evaluator_common.h>
#include <kernel_ir.h>
#include <c10/util/Optional.h>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class GpuLower;
namespace kir {
//! Calculate Kernel IR expressions
//!
//! How to evaluate Kernel IR expressions:
//!
//! ```cpp
//! kir::ExpressionEvaluator eval;
//! eval.bind(symbolic_value, concrete_value);
//! ... bind more values ...
//! const auto result = eval.evaluate(interesting_value);
//! if (result.has_value()) {
//! ... we have successfully calculated the result ...
//! } else {
//! ... expression can't be evaluated ...
//! }
//! ```
//!
class TORCH_CUDA_CU_API ExpressionEvaluator : private OptInConstDispatch {
public:
//! Set a concrete value for a symbolic value
void bind(const Val* value, IntOrDouble concrete_value);
//! Set a concrete value for a parallel dimension
void bind(ParallelType pt, Int::ScalarType concrete_value);
//! Try to evaluate a Kernel IR value
c10::optional<IntOrDouble> evaluate(const Val* value);
//! Returns true if `value` is known before binding kernel inputs
static bool isConst(const Val* value);
//! Debugging helper, prints all the currently known values
void print() const;
auto& precomputedValues() {
return precomputed_values_;
}
private:
void handle(const Int* value) final;
void handle(const Double* value) final;
void handle(const NamedScalar* named_scalar) final;
void handle(const UnaryOp* unary_op) final;
void handle(const BinaryOp* binary_op) final;
private:
std::unordered_map<const Val*, IntOrDouble> known_values_;
KernelPrecomputedValues* precomputed_values_ = nullptr;
std::unordered_map<ParallelType, Int::ScalarType, TypeHash>
known_parallel_dimensions_;
};
} // namespace kir
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,994
| 23.9375
| 74
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/kernel_ir.h
|
#pragma once
#include <ir_all_nodes.h>
#include <ir_base_nodes.h>
#include <parallel_type_bitmap.h>
#include <type.h>
#include <utils.h>
#include <c10/macros/Export.h>
#include <c10/util/Optional.h>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class IrBuilderPasskey;
// Abstract nodes
class Val;
class Expr;
// Values
class Bool;
class Double;
class Int;
class NamedScalar;
class IterDomain;
class TensorDomain;
class TensorView;
// Expressions
class UnaryOp;
class BinaryOp;
class TernaryOp;
class RNGOp;
class ReductionOp;
class WelfordOp;
class BroadcastOp;
namespace kir {
class Kernel;
// Values
class Predicate;
class TensorIndex;
// Expressions
class Allocate;
class BlockSync;
class GridSync;
class CpAsyncWait;
class CpAsyncCommit;
class InitMagicZero;
class UpdateMagicZero;
class ForLoop;
class IfThenElse;
class GridReduction;
class GroupedGridReduction;
class GridBroadcast;
class GridWelford;
class GroupedGridWelford;
class AllocateFusedReduction;
// Expr container
class Scope;
class TORCH_CUDA_CU_API Predicate final : public Val {
public:
explicit Predicate(
IrBuilderPasskey passkey,
PredicateType ptype,
const Expr* expr = nullptr,
Bool* thread_pred = nullptr);
explicit Predicate(IrBuilderPasskey passkey, ForLoop* unrolled_loop);
explicit Predicate(IrBuilderPasskey passkey, Bool* value);
PredicateType predicate_type() const {
return ptype_;
}
const Expr* expr() const {
TORCH_INTERNAL_ASSERT(
ptype_ != PredicateType::Unswitch &&
ptype_ != PredicateType::Vectorize && ptype_ != PredicateType::Manual);
return expr_;
}
Bool* thread_pred() const {
TORCH_INTERNAL_ASSERT(
ptype_ == PredicateType::Inline ||
ptype_ == PredicateType::Misaligned || ptype_ == PredicateType::Shift ||
ptype_ == PredicateType::Padding ||
ptype_ == PredicateType::ReductionWrite);
return thread_pred_;
}
ForLoop* unrolled_loop() const {
TORCH_INTERNAL_ASSERT(ptype_ == PredicateType::Unswitch);
return unrolled_loop_;
}
bool hasValue() const {
return value_ != nullptr;
}
Bool* value() const {
TORCH_INTERNAL_ASSERT(
value_ != nullptr,
"The conditional expression for this Predicate is invalid.");
return value_;
}
void setValue(Bool* value) {
TORCH_INTERNAL_ASSERT(value != nullptr, "The Bool expression is invalid.");
value_ = value;
}
bool isConst() const final {
return hasValue() && value_->isConst();
}
private:
PredicateType ptype_ = PredicateType::Manual;
// For PredicateCompute::getInlinePredicate,
// ShiftPredicateInserter::getShiftPredicate and getPaddingPredicate
const Expr* expr_ = nullptr;
// For PredicateCompute::getInlinePredicate
Bool* thread_pred_ = nullptr;
// For ParallelType::Unswitch - UnswitchPredicate::get
ForLoop* unrolled_loop_ = nullptr;
// The Bool conditional value
// The value is nullptr until lower_predicate pass
Bool* value_ = nullptr;
};
class TORCH_CUDA_CU_API TensorIndex final : public Val {
public:
TensorIndex(
IrBuilderPasskey,
const TensorView* view,
std::vector<Val*> indices);
std::vector<Val*>::size_type nDims() const {
return indices_.size();
}
Val* index(int i) const;
const std::vector<Val*>& indices() const {
return indices_;
}
TensorView* view() const {
TORCH_INTERNAL_ASSERT(view_ != nullptr);
return const_cast<TensorView*>(view_); // NOLINT
}
private:
const TensorView* view_ = nullptr;
std::vector<Val*> indices_;
};
//! Allocate is a lower level Node that describes a buffer of memory that
//! is required as an intermediate within a kernel. The extent is the expression
//! of the size of the buffer that is generated from the TensorView that
//! describes the output of an operation.
class TORCH_CUDA_CU_API Allocate final : public Expr {
public:
//! Allocation of a multi-dimensional buffer
//!
//! param shape Size of each dimension
explicit Allocate(
IrBuilderPasskey passkey,
Val* buffer,
MemoryType memory_type,
std::vector<Val*> shape = {},
bool zero_init = false);
//! Allocation of a non-dimensional buffer
//!
//! param size Size of allocation
explicit Allocate(
IrBuilderPasskey passkey,
Val* buffer,
MemoryType memory_type,
Val* size,
bool zero_init = false);
Expr* shallowCopy() const override;
Val* buffer() const {
return buffer_;
}
MemoryType memoryType() const {
return memory_type_;
}
Val* size() const {
return size_;
}
const std::vector<Val*>& shape() const {
return shape_;
}
bool zeroInit() const {
return zero_init_;
}
const Allocate* alias() const {
return alias_;
}
void setAlias(const Allocate* alias) {
TORCH_INTERNAL_ASSERT(alias != this);
TORCH_INTERNAL_ASSERT(alias->memoryType() == memory_type_);
alias_ = alias;
}
private:
Val* buffer_ = nullptr;
MemoryType memory_type_ = MemoryType::Local;
//! Size of each dimension
std::vector<Val*> shape_;
bool zero_init_ = false;
//! Total size
Val* size_ = nullptr;
// This alias tracks the next Allocate node in a linked chain of aliases
// If the alias is nullptr, then the Allocate node uses memory in the kernel
const Allocate* alias_ = nullptr;
};
// Sync represents __syncthreads barrier for block level coordination.
//
// TODO(kir): change name to SyncThreads as we could have other barriers.
//
class TORCH_CUDA_CU_API BlockSync final : public Expr {
public:
explicit BlockSync(IrBuilderPasskey passkey, bool war_sync = false);
Expr* shallowCopy() const override;
bool isWarHazardSync() const {
return war_sync_;
}
private:
// TODO: war_sync_ is only used for testing/validation purposes.
bool war_sync_ = false;
};
// CpAsyncWait represents wait intrinsics for cp.async
class TORCH_CUDA_CU_API CpAsyncWait final : public Expr {
public:
explicit CpAsyncWait(IrBuilderPasskey passkey, unsigned int keep_stages = 0);
Expr* shallowCopy() const override;
//! Returns the remaining number of stages that are not synchronized
//! after this op.
unsigned int keepStages() const {
return keep_stages_;
}
private:
//! Number of stages to leave un-sync'ed by this op.
unsigned int keep_stages_ = 0;
};
// CpAsyncCommit represents commit intrinsics for cp.async
// A commit intrinsic communicates the delimiter of transaction groups
// to the async load hardware. For example usage see [Circular buffer].
class TORCH_CUDA_CU_API CpAsyncCommit final : public Expr {
public:
explicit CpAsyncCommit(IrBuilderPasskey passkey);
Expr* shallowCopy() const override;
};
// Synchronize all blocks in device, implies cooperative group launch is
// required.
class TORCH_CUDA_CU_API GridSync final : public Expr {
public:
explicit GridSync(
IrBuilderPasskey passkey,
ParallelTypeBitmap sync_dims,
Val* sync_buffer);
Expr* shallowCopy() const override;
ParallelTypeBitmap syncDims() const {
return sync_dims_;
}
Val* syncBuffer() const {
return sync_buffer_;
}
private:
ParallelTypeBitmap sync_dims_;
Val* sync_buffer_ = nullptr;
};
// Simply prints "DEFINE_MAGIC_ZERO" in the code in accordance with magic_zero
// in helpers.cu
class TORCH_CUDA_CU_API InitMagicZero final : public Expr {
public:
explicit InitMagicZero(IrBuilderPasskey passkey);
Expr* shallowCopy() const override;
};
// Simply prints "UPDATE_MAGIC_ZERO" in the code in accordance with magic_zero
// in helpers.cu
class TORCH_CUDA_CU_API UpdateMagicZero final : public Expr {
public:
explicit UpdateMagicZero(IrBuilderPasskey passkey);
Expr* shallowCopy() const override;
};
// TODO(kir): promote to IR node
class TORCH_CUDA_CU_API Scope {
public:
explicit Scope(Expr* owner) : owner_(owner) {}
const std::vector<Expr*>& exprs() const {
return exprs_;
}
bool empty() const {
return exprs_.empty();
}
auto size() const {
return exprs_.size();
}
auto& operator[](size_t i) {
return exprs_[i];
}
auto& operator[](size_t i) const {
return exprs_[i];
}
// Insert expr before expression at pos
void insert(size_t pos, Expr* expr);
// Insert expr before ref
void insert_before(Expr* ref, Expr* expr);
// Insert expr after ref
void insert_after(Expr* ref, Expr* expr);
void push_back(Expr* e) {
exprs_.push_back(e);
}
// Erase expr at pos
void erase(size_t pos);
// Erase expr ref
void erase(Expr* ref);
bool contains(Expr* expr) const;
void clear();
Expr* owner() const {
return owner_;
}
private:
// Insert expr before pos
void insert(std::vector<Expr*>::const_iterator pos, Expr* expr);
// Erase expr at pos
void erase(std::vector<Expr*>::const_iterator pos);
private:
std::vector<Expr*> exprs_;
//! Owner expression of this scope, e.g., IfThenElse
Expr* owner_ = nullptr;
};
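// Example: a minimal sketch of editing a Scope. `loop` (a ForLoop*) and the
// `expr`/`sync` nodes are assumed to be built elsewhere:
//
// ```cpp
// Scope& body = loop->body();
// body.push_back(expr);
// body.insert_before(expr, sync); // sync now precedes expr
// TORCH_INTERNAL_ASSERT(body.contains(sync));
// body.erase(sync);               // remove it again
// ```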
//! ForLoop provides scoping around an int iterator from 0 to range. Exprs
//! placed in its body are considered inside the scope of the for loop. In
//! the future the implementation should look quite different so that we can
//! do proper dependency analysis like in Fusion.
//!
//! TODO(kir): this is not a real expression
//!
//! ForLoop may represent a part of an iteration domain represented
//! by iter_domain_. In that case, the loop extent field, extent_, may
//! be smaller than the extent of iter_domain_.
class TORCH_CUDA_CU_API ForLoop final : public Expr {
public:
//! By default, start and stop are the same as those of iter_domain.
//! Step is one by default.
//!
//! TODO: cleaner way to set options?
ForLoop(
IrBuilderPasskey passkey,
IterDomain* iter_domain,
Val* index,
Val* start,
Val* stop,
Val* step,
bool vectorize,
Val* vectorize_shift,
bool unroll_required,
DoubleBufferLoopStage double_buffer_loop_stage);
ForLoop(IrBuilderPasskey passkey, IterDomain* iter_domain);
ForLoop(IrBuilderPasskey passkey, const ForLoop* other);
Expr* shallowCopy() const override;
Val* index() const {
return index_;
}
Val* start() const;
Val* stop() const;
Val* step() const;
Val* vectorize_shift() const {
return vectorize_shift_;
}
IterDomain* iter_domain() const {
return iter_domain_;
}
// TODO: Return pointer instead of reference to be more consistent
Scope& body() {
return body_;
}
const Scope& body() const {
return body_;
}
bool vectorize() const {
return vectorize_;
}
//! True if unrolled (i.e., "#pragma unroll" is attached)
bool isUnrolled() const;
//! True if unrolling is required
bool isUnrollRequired() const {
return unroll_required_;
}
//! Set unrolling required
void requireUnroll() {
unroll_required_ = true;
}
//! True if no actual for-loop is materialized
bool isTrivial() const;
//! Returns the stage of a double buffered iterdomain
//! that this for loop materializes.
auto doubleBufferLoopStage() const {
return double_buffer_loop_stage_;
}
private:
//! Returns if a loop could be unrolled.
bool isUnrollable() const;
private:
IterDomain* const iter_domain_ = nullptr;
Val* index_ = nullptr;
Val* start_ = nullptr;
Val* stop_ = nullptr;
Val* step_ = nullptr;
// vectorize is true when the for-loop contains a vectorize set;
// the flag is used to omit the for-loop from the kernel
bool vectorize_ = false;
// [pre | vectorize | post] <= inner-most, merged root domain
// shift_ is applied to vectorize and post sections.
Val* vectorize_shift_ = nullptr;
//! True if unroll is required for avoiding stack allocation
bool unroll_required_ = false;
Scope body_;
//! Tracks if this for loop is implementing a stage of
//! a double buffered iterdomain.
DoubleBufferLoopStage double_buffer_loop_stage_ =
DoubleBufferLoopStage::NotApplicable;
};
//! IfThenElse provides scoping for a boolean operator. Exprs placed in its
//! body are considered inside the scope of the if statement. In the future
//! the implementation should look quite different so that we can do proper
//! dependency analysis like in Fusion.
//!
//! TODO(kir): this is not a real expression
//!
class TORCH_CUDA_CU_API IfThenElse final : public Expr {
public:
explicit IfThenElse(IrBuilderPasskey passkey, Predicate* cond);
Expr* shallowCopy() const override;
Scope& thenBody() {
return then_body_;
}
const Scope& thenBody() const {
return then_body_;
}
Scope& elseBody() {
return else_body_;
}
const Scope& elseBody() const {
return else_body_;
}
bool hasElse() const {
return !else_body_.empty();
}
private:
Scope then_body_;
Scope else_body_;
};
//! Grid reduction operation
//!
//! This node is used only after lowering a fusion to explicitly mark a grid
//! reduction and the buffer allocation needed to do it.
//!
//! This node provides FusionExecutor the information it needs to allocate the
//! reduction and sync buffers.
class TORCH_CUDA_CU_API GridReduction final : public ReductionOp {
public:
GridReduction(
IrBuilderPasskey passkey,
BinaryOpType reduction_op_type,
Val* init,
Val* out,
Val* in,
Allocate* reduction_buffer,
Allocate* sync_buffer,
Val* entrance_index,
Val* entrances,
bool is_allreduce = false);
Expr* shallowCopy() const override;
Allocate* reduction_buffer() const {
return reduction_buffer_;
}
Allocate* sync_buffer() const {
return sync_buffer_;
}
// Which instance of entering this grid reduction is this iteration?
Val* entrance_index() const {
return entrance_index_;
}
// How many times will this grid reduction be entered
Val* entrances() const {
return entrances_;
}
const ParallelTypeBitmap& threadPredicate() const {
return thread_predicate_;
}
GridReduction* withThreadPredicate(
const ParallelTypeBitmap& thread_predicate) {
auto result = shallowCopy()->as<GridReduction>();
result->thread_predicate_ = thread_predicate;
return result;
}
private:
Allocate* reduction_buffer_ = nullptr;
Allocate* sync_buffer_ = nullptr;
// gridReduce has template flags for thread predicates. In order to
// use them, the thread predicate is held here separately from
// Expr::predicate_.
ParallelTypeBitmap thread_predicate_;
Val* entrance_index_ = nullptr;
Val* entrances_ = nullptr;
};
class TORCH_CUDA_CU_API GroupedGridReduction final : public GroupedReductionOp {
public:
GroupedGridReduction(
IrBuilderPasskey passkey,
std::vector<BinaryOpType> reduction_op_type,
std::vector<Val*> init,
std::vector<Val*> out,
std::vector<Val*> in,
std::vector<Allocate*> reduction_buffers,
Allocate* sync_buffer,
Val* entrance_index,
Val* entrances,
Val* buffer_stride,
bool is_allreduce = false);
Expr* shallowCopy() const override;
const std::vector<Allocate*>& reduction_buffers() const {
return reduction_buffers_;
}
Allocate* reduction_buffer(size_t i) const {
return reduction_buffers_.at(i);
}
Allocate* sync_buffer() const {
return sync_buffer_;
}
// Which instance of entering this grid reduction is this iteration?
Val* entrance_index() const {
return entrance_index_;
}
// How many times will this grid reduction be entered
Val* entrances() const {
return entrances_;
}
Val* buffer_stride() const {
return buffer_stride_;
}
const ParallelTypeBitmap& threadPredicate() const {
return thread_predicate_;
}
GroupedGridReduction* withThreadPredicate(
const ParallelTypeBitmap& thread_predicate) {
auto result = shallowCopy()->as<GroupedGridReduction>();
result->thread_predicate_ = thread_predicate;
return result;
}
private:
std::vector<Allocate*> reduction_buffers_;
Allocate* sync_buffer_ = nullptr;
// gridReduce has template flags for thread predicates. In order to
// use them, the thread predicate is held here separately from
// Expr::predicate_.
ParallelTypeBitmap thread_predicate_;
Val* entrance_index_ = nullptr;
Val* entrances_ = nullptr;
// Stride of reduction buffers
Val* buffer_stride_ = nullptr;
};
//! Grid broadcast operation
//!
//! This node is used only after lowering a fusion to explicitly mark a grid
//! broadcast and the buffer allocation needed to do it.
//!
//! This node provides FusionExecutor the information it needs to allocate the
//! broadcast and sync buffers.
class TORCH_CUDA_CU_API GridBroadcast final : public Expr {
public:
GridBroadcast(
IrBuilderPasskey passkey,
BroadcastOp* broadcast_op,
Allocate* broadcast_buffer,
Allocate* sync_buffer);
Expr* shallowCopy() const override;
BroadcastOp* broadcast_op() const {
return broadcast_op_;
}
Allocate* broadcast_buffer() const {
return broadcast_buffer_;
}
Allocate* sync_buffer() const {
return sync_buffer_;
}
private:
BroadcastOp* broadcast_op_ = nullptr;
Allocate* broadcast_buffer_ = nullptr;
Allocate* sync_buffer_ = nullptr;
};
//! Grid welford operation
//!
//! This node is used only after lowering a fusion to explicitly mark a grid
//! reduction and the buffer allocation needed to do it.
//!
//! This node provides FusionExecutor the information it needs to allocate the
//! reduction and sync buffers.
//!
//! TODO: Make this a subclass of WelfordOp
class TORCH_CUDA_CU_API GridWelford final : public Expr {
public:
GridWelford(
IrBuilderPasskey passkey,
WelfordOp* welford_op,
Allocate* var_buffer,
Allocate* avg_buffer,
Allocate* n_buffer,
Allocate* sync_buffer,
Val* entrance_index,
Val* entrances);
Expr* shallowCopy() const override;
WelfordOp* welford_op() const {
return welford_op_;
}
Allocate* var_buffer() const {
return var_buffer_;
}
Allocate* avg_buffer() const {
return avg_buffer_;
}
Allocate* N_buffer() const {
return n_buffer_;
}
Allocate* sync_buffer() const {
return sync_buffer_;
}
// Which instance of entering this grid reduction is this iteration?
Val* entrance_index() const {
return entrance_index_;
}
// How many times will this grid reduction be entered
Val* entrances() const {
return entrances_;
}
const ParallelTypeBitmap& threadPredicate() const {
return thread_predicate_;
}
GridWelford* withThreadPredicate(const ParallelTypeBitmap& thread_predicate) {
auto result = shallowCopy()->as<GridWelford>();
result->thread_predicate_ = thread_predicate;
return result;
}
private:
WelfordOp* welford_op_ = nullptr;
Allocate* var_buffer_ = nullptr;
Allocate* avg_buffer_ = nullptr;
Allocate* n_buffer_ = nullptr;
Allocate* sync_buffer_ = nullptr;
Val* entrance_index_ = nullptr;
Val* entrances_ = nullptr;
// gridReduce has template flags for thread predicates. In order to
// use them, the thread predicate is held here separately from
// Expr::predicate_.
ParallelTypeBitmap thread_predicate_;
};
class TORCH_CUDA_CU_API GroupedGridWelford final : public GroupedWelfordOp {
public:
// input, output and init vals are vectors of triplets
GroupedGridWelford(
IrBuilderPasskey passkey,
std::vector<WelfordTriplet> output_vals,
std::vector<WelfordTriplet> input_vals,
std::vector<WelfordTriplet> init_vals,
std::array<std::vector<Allocate*>, 3> reduction_buffers,
Allocate* sync_buffer,
Val* entrance_index,
Val* entrances,
Val* buffer_stride,
bool is_allreduce = false);
Expr* shallowCopy() const override;
const std::array<std::vector<Allocate*>, 3>& reduction_buffers() const {
return reduction_buffers_;
}
Allocate* sync_buffer() const {
return sync_buffer_;
}
// Which instance of entering this grid reduction is this iteration?
Val* entrance_index() const {
return entrance_index_;
}
// How many times will this grid reduction be entered
Val* entrances() const {
return entrances_;
}
Val* buffer_stride() const {
return buffer_stride_;
}
const ParallelTypeBitmap& threadPredicate() const {
return thread_predicate_;
}
GroupedGridWelford* withThreadPredicate(
const ParallelTypeBitmap& thread_predicate) {
auto result = shallowCopy()->as<GroupedGridWelford>();
result->thread_predicate_ = thread_predicate;
return result;
}
private:
std::array<std::vector<Allocate*>, 3> reduction_buffers_;
Allocate* sync_buffer_ = nullptr;
// gridReduce has template flags for thread predicates. In order to
// use them, the thread predicate is held here separately from
// Expr::predicate_.
ParallelTypeBitmap thread_predicate_;
Val* entrance_index_ = nullptr;
Val* entrances_ = nullptr;
// Stride of reduction buffers
Val* buffer_stride_ = nullptr;
};
// Allocate an instance of the fused reduction class.
class TORCH_CUDA_CU_API AllocateFusedReduction final : public Expr {
public:
explicit AllocateFusedReduction(
IrBuilderPasskey passkey,
GridReduction* grid_reduction);
explicit AllocateFusedReduction(
IrBuilderPasskey passkey,
GridWelford* grid_welford);
explicit AllocateFusedReduction(
IrBuilderPasskey passkey,
GroupedGridReduction* grouped_grid_reduction);
explicit AllocateFusedReduction(
IrBuilderPasskey passkey,
GroupedGridWelford* grouped_grid_welford);
Expr* shallowCopy() const override;
Expr* gridExpr() const {
return grid_expr_;
}
TensorIndex* out() const;
const ParallelTypeBitmap& threadPredicate() const;
private:
//! GridReduction, GridWelford, GroupedGridReduction or GroupedGridWelford
Expr* grid_expr_ = nullptr;
};
//! An IR node consisting of a pair of integers
//! to facilitate definition of 2D swizzle operators.
//! All swizzle 2D ops take two inputs and output
//! an integer pair.
//! TODO:
//! currently this IR node is only allowed as input
//! to the new PairSelect node. In follow-ups we would
//! possibly build out support for out-of-line
//! definition of the pair alone.
class TORCH_CUDA_CU_API IntPair : public Val {
public:
IntPair(IrBuilderPasskey passkey);
};
//! An IR node marking selection of the first or second
//! value from a pair of integers, e.g.:
//! Pair(X,Y) -> X or Y.
//! This IR node is used to facilitate generation
//! of inline 2D swizzle math.
class TORCH_CUDA_CU_API PairSelect : public Expr {
public:
//! Indicates which value from the input
//! integer pair to output.
enum class Selection { X = 0, Y };
PairSelect(IrBuilderPasskey, Val* out, IntPair* in, Selection selection);
Expr* shallowCopy() const override;
Val* out() const {
return out_;
}
IntPair* in() const {
return in_;
}
auto selection() const {
return selection_;
}
private:
Val* const out_ = nullptr;
IntPair* const in_ = nullptr;
Selection selection_;
};
//! An integer IR node that will be generated
//! using custom integer swizzle functions
//! from the cuda runtime.
//! Most supported swizzle functions require
//! the sizes of each dimension to be defined, so
//! all operators take the extents as inputs.
class TORCH_CUDA_CU_API Swizzle2DInt : public Expr {
public:
Swizzle2DInt(
IrBuilderPasskey,
IntPair* out,
Val* in_x,
Val* in_y,
Val* extent_x,
Val* extent_y,
Swizzle2DType swizzle_type);
Expr* shallowCopy() const override;
IntPair* out() const {
return out_;
}
Val* inX() const {
return in_x_;
}
Val* inY() const {
return in_y_;
}
Val* extentX() const {
return extent_x_;
}
Val* extentY() const {
return extent_y_;
}
const auto& swizzleType() const {
return swizzle_type_;
}
private:
IntPair* const out_ = nullptr;
Val* const in_x_ = nullptr;
Val* const in_y_ = nullptr;
Val* const extent_x_ = nullptr;
Val* const extent_y_ = nullptr;
Swizzle2DType swizzle_type_;
};
} // namespace kir
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 24,348
| 23.324675
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/kernel_ir_dispatch.h
|
#pragma once
#include <dispatch.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class Expr;
namespace kir {
class Predicate;
class TensorIndex;
class ForLoop;
class IfThenElse;
class Scope;
// Base visitor class that visits all nodes in the provided vector<Expr*>.
//
// Includes visiting through scopes like IfThenElse and ForLoop, and tracks
// them in scopes_ and for_loops_.
//
// Makes a copy of exprs in exprs_, which can be modified and returned.
//
// When traversing through ITE/FLs it will use a copy
// of the provided expressions to make it safe to insert/delete nodes.
//
// Provides a simple base class to inherit from for typical lowering passes on
// an Expr list
class TORCH_CUDA_CU_API IrVisitor : public OptOutDispatch {
public:
std::vector<Expr*> handle(const std::vector<Expr*>& expr);
protected:
using OptOutDispatch::handle;
virtual void handle(ForLoop*) override;
virtual void handle(IfThenElse*) override;
protected:
std::vector<ForLoop*> for_loops_;
std::vector<Scope*> scope_;
std::vector<Expr*> scope_exprs_;
std::vector<Expr*> exprs_;
};
// Const version of IrVisitor
class TORCH_CUDA_CU_API ConstIrVisitor : public OptOutConstDispatch {
public:
std::vector<const Expr*> handle(const std::vector<const Expr*>& expr);
protected:
using OptOutConstDispatch::handle;
virtual void handle(const ForLoop*) override;
virtual void handle(const IfThenElse*) override;
protected:
std::vector<const ForLoop*> for_loops_;
std::vector<const Scope*> scope_;
std::vector<const Expr*> scope_exprs_;
std::vector<const Expr*> exprs_;
};
// Base Expr Mutator class that visits all nodes with IrVisitor, and then
// inserts new expressions, replaces expressions based on insertion/replace
// maps provided, or removes existing expressions. These mutation
// maps are expected to accumulate during an initial traversal; the
// insertions are then applied based on them after the overloaded traversal.
//
// Order of mutations may be important, mutations are ordered according to the
// following rules:
// Before/After insertions are ordered as registered when reverse_order ==
// false,
//
// Before/After insertions are in reverse order as registered when
// reverse_order == true,
//
// Before/After insertions are done before Expr replacements, so reference for
// insertions must be on pre-replaced Exprs
//
// Removal of expressions is done after replacements.
//
// To place an expression in a scope that is empty, simply provide a nullptr
// reference. Since insertions are done in order, it's possible to insert an
// expression in an empty scope, and then use that inserted expression as a
// reference for subsequent mutations.
class ExprMutator : public IrVisitor {
protected:
std::vector<Expr*> traverseAndInsert(
const std::vector<Expr*>& expr,
bool reverse_order = false);
std::vector<Expr*> mutate(bool reverse_order = false);
using IrVisitor::handle;
// Registration functions which *don't* need to be called "in place" during
// visiting.
void registerInsertBefore(Expr* reference, Expr* new_expr, Scope* scope);
void registerInsertAfter(Expr* reference, Expr* new_expr, Scope* scope);
void registerReplace(Expr* reference, Expr* new_expr, Scope* scope);
void registerRemove(Expr* expr_to_remove, Scope* scope);
// Registration functions which need to be called "in place" during visiting.
// I.e., if you want to insert before/after or replace an Expr, you must
// register it while in handle(Expr*) of that expr.
void registerInsertBefore(Expr* reference, Expr* new_expr);
void registerInsertAfter(Expr* reference, Expr* new_expr);
void registerReplace(Expr* reference, Expr* new_expr);
void registerRemove(Expr* expr_to_remove);
private:
enum class MutationMode { BEFORE, AFTER, REPLACE, REMOVE };
void registerMutation(
Expr* ref,
Expr* new_expr,
Scope* scope,
MutationMode mode);
struct MutationInformation {
Expr* reference = nullptr;
Expr* new_expr = nullptr;
Scope* scope = nullptr;
MutationMode mode = MutationMode::BEFORE;
};
// Track insertions as they're registered
std::vector<MutationInformation> insertions_;
// Track replacements as they're registered
std::vector<MutationInformation> replacements_;
// Track removal as they're registered
std::vector<MutationInformation> removal_;
};
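// Example: a minimal sketch of a pass built on ExprMutator. The pass name
// and the makeExpr() node factory are hypothetical; note the insertion is
// registered "in place" while visiting, per the rules above:
//
// ```cpp
// class InsertBeforeLoops : kir::ExprMutator {
//  public:
//   static std::vector<Expr*> run(const std::vector<Expr*>& exprs) {
//     InsertBeforeLoops pass;
//     return pass.traverseAndInsert(exprs);
//   }
//
//  private:
//   using kir::ExprMutator::handle;
//   void handle(ForLoop* fl) final {
//     kir::ExprMutator::handle(fl);         // keep the default traversal
//     registerInsertBefore(fl, makeExpr()); // applied after traversal
//   }
// };
// ```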
} // namespace kir
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 4,500
| 30.041379
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower2device.h
|
#pragma once
#include <c10/macros/Export.h>
#include <compute_at_map.h>
#include <ir_all_nodes.h>
#include <kernel.h>
#include <kernel_ir.h>
#include <lower_allocation.h>
#include <lower_double_buffer.h>
#include <lower_fused_reduction.h>
#include <lower_index_hoist.h>
#include <lower_predicate.h>
#include <lower_predicate_elimination.h>
#include <lower_shift.h>
#include <lower_sync_information.h>
#include <lower_thread_predicate.h>
#include <lower_trivial_broadcast.h>
#include <lower_trivial_reductions.h>
#include <lower_warp_reduce.h>
#include <non_divisible_split.h>
#include <parallel_dimension_map.h>
#include <partial_split_map.h>
#include <root_domain_map.h>
#include <vectorization_info.h>
#include <memory>
#include <ostream>
#include <unordered_map>
#include <unordered_set>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// TODO: we frequently use pairwise root mapping from consumers to producers.
// This information is implicitly in the computeAtMaps, but there's no isolated
// container for this information that we can reuse. Would be nice to generate
// such a structure and propagate it through lowering.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_CUDA_CU_API GpuLower : public NonCopyable {
class KernelIrMapper;
public:
GpuLower() = delete;
// GpuLower lowers the provided fusion into a kernel which can be translated
// into cuda code. index_type allows compiling the kernel with int32
// indexing instead of int64 for additional performance.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
explicit GpuLower(Fusion* fusion, DataType index_type = DataType::Int) {
lower(fusion, index_type);
}
kir::Kernel* kernel() const;
//! Returns the currently active lowering object.
//! It's an error if no lowering is in progress.
static GpuLower* current();
//! Query if lowering is in progress
static bool hasCurrent();
std::shared_ptr<const ConcretizedBroadcastDomains>
concretizedBroadcastDomains() {
return concretized_broadcast_domains_;
}
const ThreadPredicateMap& threadPredMap() const {
return thread_pred_map_;
}
// Returns non-const reference. Necessary to reset a predicate flag
// when a broadcast expression is fused into a reduction.
ThreadPredicateMap& threadPredMap() {
return thread_pred_map_;
}
std::shared_ptr<const ComputeAtMap> caMap() const {
return std::const_pointer_cast<const ComputeAtMap>(compute_at_map_);
}
const TrivialReductionInfo& trivialReductionInfo() const {
return trivial_reduction_info_;
}
std::shared_ptr<const HaloInfo> haloInfo() const {
return std::const_pointer_cast<const HaloInfo>(halo_info_);
}
const ParallelDimensionMap& parallelDimensionMap() const {
return parallel_dimension_map_;
}
ParallelDimensionMap& parallelDimensionMap() {
return parallel_dimension_map_;
}
PredicateElimination& predicateElimination() {
return pred_elimination_;
}
const PredicateElimination& predicateElimination() const {
return pred_elimination_;
}
LocalAllocationInfoMap& localAllocationInfoMap() {
return local_allocation_info_map_;
}
const WarpPaddedParallelInfo& getWarpPaddedParallelInfo() const {
return warp_pad_info_;
}
PartialSplitMap& partialSplitMap() {
return partial_split_map_;
}
const PartialSplitMap& partialSplitMap() const {
return partial_split_map_;
}
auto& nonDivisibleSplitInfo() {
return non_divisible_split_info_;
}
const auto& nonDivisibleSplitInfo() const {
return non_divisible_split_info_;
}
const auto& divisbleSplitSet() const {
return divisible_splits_;
}
DoubleBufferInfo& doubleBufferInfo() {
return double_buffer_info_;
}
CommonIndexMap& commonIndexMap() {
return common_index_map_;
}
const auto& vectorizedAccesses() const {
return vectorized_accesses_;
}
auto& vectorizedAccesses() {
return vectorized_accesses_;
}
const auto& vectorizedSetInfo() const {
return vectorized_set_info_;
}
auto& vectorizedSetInfo() {
return vectorized_set_info_;
}
FusedReductionInfo& fusedReductionInfo() {
return fused_reduction_info_;
}
const SyncMap& syncMap() const {
return sync_map_;
}
kir::KernelPerformanceProfile& profile() {
return profile_;
}
// This is an interface to propagate information after expression
// replacement on the kernel IR. E.g.:
// for ...
// c = a + b (expr 0)
// after any pass that does replacement:
// for ...
// c1 = a1 + b1 (expr1)
// The previous analysis that was performed on expr0 might still
// be valid on expr1 but that info would be lost after replacement.
// This function provides an interface to manually update the info
// in any pass that performs replacement.
void propagateExprInfo(const Expr* old_expr, const Expr* new_expr);
private:
void lower(Fusion* fusion, DataType index_type);
// Goes through the parallelized iterdomains of the used TVs and finds
// the parallel dimensions that need to be padded to a multiple of
// warp size.
void collectPaddedParallelDims();
private:
// Lowered Kernel IR
std::unique_ptr<kir::Kernel> kernel_;
// Some stateful information during lowering
// TODO: A lot of this information uses a "define class, then call build"
// pattern. It would be safer to wrap all of these in unique pointers and
// remove the build interface and default constructor. That way they couldn't
// be accessed without being initialized.
std::shared_ptr<const ConcretizedBroadcastDomains>
concretized_broadcast_domains_;
ThreadPredicateMap thread_pred_map_;
PredicateElimination pred_elimination_;
std::shared_ptr<ComputeAtMap> compute_at_map_;
TrivialReductionInfo trivial_reduction_info_;
std::shared_ptr<HaloInfo> halo_info_;
LocalAllocationInfoMap local_allocation_info_map_;
WarpPaddedParallelInfo warp_pad_info_;
ParallelDimensionMap parallel_dimension_map_;
PartialSplitMap partial_split_map_;
NonDivisibleSplitInfo non_divisible_split_info_;
DoubleBufferInfo double_buffer_info_;
CommonIndexMap common_index_map_;
FusedReductionInfo fused_reduction_info_;
SyncMap sync_map_;
kir::KernelPerformanceProfile profile_;
std::unordered_set<Split*> divisible_splits_;
// Track which tensor views are inputs or outputs of a vectorized operation
// and their maximum vectorized access size
// std::unordered_map<TensorView*, VectorizationInfo> vectorized_accesses_;
std::unordered_map<TensorView*, int> vectorized_accesses_;
// Info on each vectorized set op
std::vector<VectorizedSetInfo> vectorized_set_info_;
Fusion* fusion_ = nullptr;
};
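// Example: a minimal sketch of driving the lowering. `fusion` is assumed to
// be a complete, scheduled Fusion*:
//
// ```cpp
// GpuLower gpu_lower(fusion, DataType::Int);
// kir::Kernel* kernel = gpu_lower.kernel();
// const auto& summary = kernel->summary();
// // e.g. summary.global_allocations now lists the global buffers
// ```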
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 6,832
| 28.076596
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_alias_memory.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <ir_all_nodes.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Reuse Allocation nodes via pointer aliasing
//!
//! First pass finds candidate TensorViews
//! A candidate TensorView is anything in shared memory OR
//! in local memory with a static size larger than register_size_threshold
//!
//! Second pass finds appropriate input Allocate Node
//! among candidate TensorViews
//!
//! Alias Criteria:
//! If input is a candidate TensorView,
//! input allocation has the same size as output allocation,
//! thread bindings match,
//! is not used after this op:
//! then alias output Allocate to input Allocate.
//!
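//! Illustrative sketch of the effect (pseudo-code; tensor names are made
//! up and not part of this header):
//!
//!   alloc T1_s[128];      // shared memory candidate
//!   ... = ... T1_s ...;   // last use of T1_s
//!   alloc T2_s[128];      // same size, matching thread bindings
//!
//! Since T1_s is not used after T2_s is produced, T2_s's Allocate node
//! can be aliased to T1_s's, reusing the same underlying buffer.
//!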
std::vector<Expr*> reuseMemoryAllocations(const std::vector<Expr*>& exprs);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 925
| 24.027027
| 75
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_allocation.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Buffer allocation information to store in GPU lower to avoid
//! logic duplication
struct LocalAllocationInfo {
kir::Allocate* alloc_expr = nullptr;
std::vector<IterDomain*> alloc_domains;
bool has_halo = false;
};
using LocalAllocationInfoMap =
std::unordered_map<kir::Allocate*, std::unique_ptr<LocalAllocationInfo>>;
//! Insert buffer allocations
std::vector<Expr*> insertAllocations(const std::vector<Expr*>& exprs);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 705
| 20.393939
| 77
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_bank_conflict.h
|
#pragma once
#include <dynamic_type.h>
#include <executor_launch_params.h>
#include <ir_base_nodes.h>
#include <kernel.h>
#include <unordered_map>
#include <utility>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// for more info on shared memory access see pages 54-72 of:
// https://on-demand.gputechconf.com/gtc/2018/presentation/s81006-volta-architecture-and-performance-optimization.pdf
// Warning: The bank conflict checking utility here is not a replacement
// for Nsight Compute. This utility currently has the following assumptions
// and limitations:
//
// 1. This utility assumes that the data of the tensor is accessed by
// `T0[index]`, where `index` is the one stored in the `TensorIndex`
// object.
// 2. This utility only checks the first iteration. If we have something like
//    `T1_s[tidx, 5]`, then different iterations may have different
//    conflicts, which will not be evaluated for all of them.
// 3. This utility assumes that all tensors are independent, which means:
// 3.1 All shared memory tensors are allocated starting from a multiple of
// 4*32 bytes
// 3.2 The only source of bank conflicts is from within a tensor.
// There is no bank conflict between different tensors.
//
// Also note that this utility will not provide accurate estimation if the
// above assumptions are not satisfied.
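//
// Illustrative usage sketch (hypothetical driver code, not part of this
// header; `kernel` is assumed to be an already-lowered kir::Kernel*):
//
//   auto info = getBankConflictInfo(kernel);
//   for (const auto& kv : info) {
//     const Expr* expr = kv.first;  // the offending expression
//     // kv.second holds the pair of conflict ways reported for this
//     // expression's shared memory accesses.
//   }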
std::unordered_map<const Expr*, std::pair<int, int>> getBankConflictInfo(
kir::Kernel* kernel,
c10::optional<LaunchParams> launch_params = c10::nullopt,
const std::unordered_map<std::string, IntOrDouble>& known_values = {});
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,723
| 35.680851
| 117
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_divisible_split.h
|
#pragma once
#include <c10/macros/Export.h>
#include <compute_at_map.h>
#include <fusion.h>
#include <ir_all_nodes.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Looks through all transformations associated with view, or enforced divisible
// vectorization splits, and gathers all splits that provably don't have a
// remainder, so the extents of the associated IterDomains do not require
// ceilDiv expressions.
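//
// Illustrative example: a split of an IterDomain with compile-time extent
// 128 by a factor of 4 is provably divisible, so the outer extent can be
// computed as 128 / 4 = 32 instead of ceilDiv(128, 4).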
TORCH_CUDA_CU_API std::unordered_set<Split*> getAllDivisibleSplits(
Fusion* fusion);
// Same as above but will use provided ComputeAtMap instead of building its own.
TORCH_CUDA_CU_API std::unordered_set<Split*> getAllDivisibleSplits(
Fusion* fusion,
const ComputeAtMap* ca_map);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 827
| 26.6
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_double_buffer.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <kernel_ir_dispatch.h>
// Double buffering a tensor doubles its allocation size and uses two
// buffers to facilitate computation and memory access
// overlapping. The basic form of code looks as follows:
//
// Before:
// for i
// x[S]; // allocation
// for j:
// x[j] = y[i, j]
// for j:
// ... = x[j]
//
// After:
// X[S * 2]; // allocation
// for i in 0 to 1: // Prologue
// for j:
// x[j] = y[i, j]
//
// for i in 0 to N-1: // Main
// for j:
// x[j + (1 - i % 2) * S] = y[i + 1, j]
// for j:
// ... = x[j + (i % 2) * S]
//
// for i in N-1 to N: // Epilogue
// for j:
// ... = x[j + (i % 2) * S]
//
// Here, S is the original size of tensor x.
//
// The i loop is the double buffer loop of tensor x, where double
// buffering is applied to the tensor. The first step of lowering is
// to find the double buffering axis for each double buffered
// tensor. It must not be parallelized as it isn't possible to double
// buffer parallelized loops. Also, an unrolled axis expands the
// allocation and is intended to make the loop completely unrolled,
// which also conflicts with double buffering. So, basically, the double
// buffering axis is the inner-most axis within the axes left
// of the CA position. However, when it is parallelized or unrolled, a
// further left axis is picked.
//
// Once the double buffer axis is determined, the main task is to
// replicate the corresponding double buffer loop as illustrated
// above. The Prologue loop is to just fetch the first element to
// populate the buffer. The main loop is mostly the same as the
// original loop, except for the indexing change to switch the two
// buffers. When used as a consumer, an offset of (1 - i % 2) * S is
// added, whereas (i % 2) * S is added when used as a producer. Here,
// i is the index of the double buffer loop. The Epilogue loop is just
// for the last iteration of the loop. Since the main loop reads one
// element ahead of the producer of the double buffered tensor, it
// would require an additional guard to prevent buffer overruns with
// the producer if the main loop were also used for the last
// iteration. However, the value loaded by the invalid load would not
// be used, so instead of adding the additional predicate, the Epilogue
// loop is replicated from the original loop, except for the load
// expression since it's not used. Note that this overrun does not
// happen when the producer is on gmem, so in that case, this
// additional replication is not done.
//
// When creating those three types of loops, additional care must be
// taken when multiple tensors are double buffered. When multiple
// tensors use the same loop as their double buffer loop, one pass of
// replication takes care of them at once, meaning the same Prologue,
// Main, Epilogue loops are used for the multiple tensors.
//
// Other tasks to do for a double buffer tensor include:
// - Move allocation to outside of the double buffer loop
// - Double the allocation size
// - Omit the RAW sync in the Main and Epilogue loops
// [Circular buffer] A generalization of double buffering.
// On sm80+ hardware there is asynchronous copy infrastructure that
// motivates a circular buffering generalization of double buffering.
// Almost all analyses previously done for double buffering are exactly
// the same with circular buffering, except for the introduction of a
// new concept: `stage depth`.
//
// The `stage depth` is defined as the multiplier of extra buffering
// space used. In the case of double buffering, the stage depth would
// be 2.
//
// A circular buffered loop structure would look like follows, which
// exactly parallels the double buffered loop structure, since
// it is an exact generalization serving the same purpose.
//
// Here S is the original allocation size as above,
// D is the stage depth. With D=2, the below loop structure becomes
// exactly the same as the case in double buffering.
//
// allocate X[S*D] // allocation
// for i in 0..D-1: // prolog
// for j in ...
// if pred:
// x[i*S+j] = y[i, j];
//
// for i in 0..N: // main loop
// for j in ...
// if pred:
// x[((i+D-1)%D)*S+j] = y[i+D-1, j];
// for j in ...
// .. = x[(i%D)*S+j]
//
// (Epilog omitted since this only makes sense when using
// cp.async, where producer will be in global mem and consumer will
// be in shared mem).
//
// The profitability of this optimization comes from extra tolerance
// of global memory pipeline latency, as on the expression `.. = x[(i%D)*S+j]`
// we only need to make sure the data for the current iteration is
// completed while the remaining D-2 load iterations could still be in progress
// and overlap with the computes of the current loop.
//
// To express this pattern on sm80+ hardware we can group the loads
// in each iteration of the circular buffered loop as one "transaction",
// and specify how many transactions we want to ensure completion when
// we insert the async barriers.
//
// allocate X[S*D] // allocation
// for i in 0..D-1: // prolog
// for j in ...
// if pred:
// x[i*S+j] = y[i, j];
// cp.async.commit; // mark the transaction boundary
//
// # At this point we have D-1 transactions on the fly,
// and for the first iteration of the main loop we need
// one transaction completed, so we leave D-2 transactions
// on the fly, which would be the input to the barrier instruction.
//
// cp.async.wait D-2 // ensure all but the last D-2 transactions complete.
//
// for i in 0..N: // main loop
// # At this point we always have D-2 transactions on the fly,
// and one completed.
// for j in ...
// if pred:
// x[((i+D-1)%D)*S+j] = y[i+D-1, j];
// for j in ...
// .. = x[(i%D)*S+j]
// cp.async.commit; // mark the transaction boundary for the
// load issued in this iteration.
// # At this point we have D-1 transactions on the fly,
// and none completed.
// cp.async.wait D-2; // Ensure all but the last D-2 transactions complete.
// __syncthreads(); // Need to syncthreads because each thread will only
// ensure completion of its own async copies so
// would need to sync to this point to ensure
// completion of the whole tile.
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
unsigned int getDoubleBufferAxisPosition(const TensorView* tv);
IterDomain* getDoubleBufferAxis(const TensorView* tv);
void validateDoubleBufferedTensor(const TensorView* tv);
class TORCH_CUDA_CU_API DoubleBufferPass {
public:
//! Apply double buffering transformations
static std::vector<Expr*> run(const std::vector<Expr*>& exprs);
};
class TORCH_CUDA_CU_API DoubleBufferInfo {
// Lowering information of double buffered tensors.
struct TvInfo {
IterDomain* double_buffer_axis = nullptr;
Val* original_alloc_size = nullptr;
};
public:
void build(Fusion* fusion);
void setDoubleBufferAxis(const TensorView* tv, IterDomain* id);
IterDomain* getDoubleBufferAxis(const TensorView* tv);
//! Get a loop that matches with a given double-buffer axis. If
//! ignore_prologue is true, a matched loop is ignored if it's a
//! prologue loop.
static kir::ForLoop* getDoubleBufferLoop(
IterDomain* axis,
const std::vector<kir::ForLoop*>& loops,
bool ignore_prologue = false);
//! Get a loop that matches with the double-buffer axis of a given
//! double-buffered tensor. If ignore_prologue is true, a matched
//! loop is ignored if it's a prologue loop.
kir::ForLoop* getDoubleBufferLoop(
const TensorView* tv,
const std::vector<kir::ForLoop*>& loops,
bool ignore_prologue = false);
void setOriginalAllocSize(const TensorView* tv, Val* size);
Val* getOriginalAllocSize(const TensorView* tv);
//! Returns true if the iterdomain will be realized
//! as a double buffer loop.
bool isDoubleBufferedIterDomain(IterDomain* id);
//! Get the number of circular buffer stages for the given axis,
//! the number of stages will be 2 in the case of a double buffer loop.
unsigned int getStageDepthFor(IterDomain* circular_buffered_id);
private:
TvInfo& getTvInfo(const TensorView* tv);
//! Set the number of circular buffer stages for the given
//! circular_buffered_id.
//! Current code generation only supports one stage depth per loop
//! disjoint set, so this function will throw an error if trying to set
//! different stage numbers to iterdomains that are loop mapped.
void setStageDepth(
IterDomain* circular_buffered_id,
unsigned int stage_depth);
private:
//! Keeps track of information for lowering double buffered tensors
std::unordered_map<const TensorView*, TvInfo> map_;
//! Keeps track of which concrete loop map is realizing double buffer
//! iterdomains.
std::unordered_set<const IterDomain*> concrete_double_buffered_loop_id_;
//! Keeps track of double buffer loop stage depth.
//! Currently, for each disjoint set of loop mapped iterdomains,
//! only one stage depth is supported, so that the loops can indeed
//! be shared with the same prolog extent and main loop offset.
std::unordered_map<IterDomain*, unsigned int> stage_depth_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 9,456
| 36.828
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_fused_reduction.h
|
#pragma once
#include <ir_all_nodes.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Keep track of certain patterns of reductions.
//!
//! - Allreduce IterDomain: reduced and broadcast domain.
class FusedReductionInfo {
public:
void markAsAllreduce(IterDomain* id);
bool isAllreduce(IterDomain* id) const;
private:
// Reduction IterDomains that are also broadcast
std::unordered_set<IterDomain*> allreduce_ids_;
};
//! Detect reductions and broadcasts that are eligible for the fused
//! reduction kernel. When found, the predicate flag of the broadcast
//! is unset, which effectively makes the broadcast just a unary set
//! op.
//! TODO: Consider moving the warp-based fused reduction here.
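//!
//! Illustrative allreduce pattern (pseudo-code; names are made up):
//!
//!   T1 = sum(T0, {i});        // reduction over domain i
//!   T2 = broadcast(T1, {i});  // broadcast back along the same domain
//!
//! When the reduced and broadcast domains match (an allreduce
//! IterDomain), the pair is eligible for the fused reduction kernel and
//! the broadcast's predicate flag is unset as described above.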
void fuseReductionsAndBroadcasts(Fusion*);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 863
| 23.685714
| 70
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_fusion_simplifier.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <fusion.h>
#include <ir_all_nodes.h>
#include <lower_trivial_reductions.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Replaces trivial reductions with Unary Set Ops
void trivialReductionReplacement(Fusion*, const TrivialReductionInfo&);
// Replaces Transpose, Shift, Gather, and View ops with Unary Set Ops
std::vector<Expr*> unarySetOpInserter(const std::vector<Expr*>& exprs);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 580
| 20.518519
| 71
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_index.h
|
#pragma once
#include <c10/macros/Export.h>
#include <instrumentation.h>
#include <kernel_ir.h>
#include <kernel_ir_dispatch.h>
#include <root_domain_map.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// TODO: Replace with mutator as IndexLowering is replacing expr's with
// versions that are doing indexing
class TORCH_CUDA_CU_API IndexLowering : private OptOutConstDispatch {
public:
static std::vector<Expr*> getIndexedExprs(std::vector<Expr*> incoming_exprs) {
FUSER_PERF_SCOPE("GpuLower::Lower::IndexLowering::getIndexedExprs");
IndexLowering il;
il.generate(incoming_exprs);
return il.lowered_exprs_;
}
private:
IndexLowering() = default;
void pushBack(Expr*);
// Return the most recently inserted
// expression in the current active
// scope or global scope.
Expr* back() const;
// Insert an expression before the current top-level expression.
void insertAtTopLevel(Expr* expr);
void handle(const FullOp*) final;
void handle(const ARangeOp*) final;
void handle(const EyeOp*) final;
void handle(const ViewAsScalar*) final;
void handle(const UnaryOp*) final;
void handle(const BinaryOp*) final;
void handle(const TernaryOp*) final;
void handle(const RNGOp*) final;
void handle(const ReductionOp*) final;
void handle(const GroupedReductionOp*) final;
void handle(const WelfordOp*) final;
void handle(const GroupedWelfordOp*) final;
void handle(const LoadStoreOp*) final;
void handle(const MmaOp*) final;
void handle(const BroadcastOp*) final;
void handle(const kir::ForLoop*) final;
void handle(const kir::IfThenElse*) final;
void handle(const kir::Allocate*) final;
void handle(const kir::BlockSync*) final;
void handle(const kir::GridSync*) final;
void handle(const kir::CpAsyncWait*) final;
void handle(const kir::CpAsyncCommit*) final;
void generate(const std::vector<Expr*>& exprs);
Val* lowerSrcIndex(Val* val, Val* dst) const;
Val* lowerDstIndex(Val* dst) const;
void handleBlockReduction(const ReductionOp* rop, Val* out, Val* in);
void handleGridReduction(const ReductionOp* rop, Val* out, Val* in);
void handleBlockReduction(
const GroupedReductionOp* rop,
const std::vector<Val*>& outputs,
const std::vector<Val*>& inputs);
void handleGridReduction(
const GroupedReductionOp* rop,
const std::vector<Val*>& outputs,
const std::vector<Val*>& inputs);
void handleGridWelford(WelfordOp* new_wop);
void handleGroupedBlockWelford(
const GroupedWelfordOp* wop,
const std::vector<WelfordTriplet>& output_vals,
const std::vector<WelfordTriplet>& input_vals,
const std::vector<WelfordTriplet>& init_vals);
void handleGroupedGridWelford(
const GroupedWelfordOp* wop,
const std::vector<WelfordTriplet>& output_vals,
const std::vector<WelfordTriplet>& input_vals,
const std::vector<WelfordTriplet>& init_vals);
// Allocate a unique buffer for grid reductions and broadcast. A
// buffer is uniquely allocated for each output tensor of an
// expression.
kir::Allocate* allocateUniqueBuffer(
Val* buffer_size,
DataType dtype,
bool zero_init,
TensorView* out_tv,
std::unordered_map<TensorView*, kir::Allocate*>& alloc_map);
std::vector<kir::Allocate*> allocateWelfordWorkBuffer(
const std::vector<WelfordTriplet>& triplets,
WelfordTriplet::ValName name,
Val* buffer_size);
// Allocate a fused reduction object uniquely for a given
// TensorView. Parameter expr is the expression corresponding to the
// fused reduction.
void allocateUniqueFusedReduction(Expr* expr, TensorView* out_tv);
private:
std::vector<Expr*> lowered_exprs_;
// This is a slight workaround as scope has a couple of definitions: we have the
// Scope that's in ForLoop/IfThenElse which is really just a wrapper around
// std::vector<Expr*> and then we have the actual ForLoop/IfThenElse. We want
// to be able to carry both around because when we push back to a scope it
// could be either the body or else body of the IfThenElse. However, we want
// to understand the nesting of IfThenElse/ForLoop nodes.
kir::Scope* active_scope_ = nullptr;
// Track for loops to send to indexing. Similar to what's done in
// kir::IrVisitor
std::vector<kir::ForLoop*> for_loops_;
// Maps to keep track of allocated buffers and objects that must be
// allocated only once
std::unordered_map<TensorView*, kir::Allocate*> sync_buffer_map_;
std::unordered_map<TensorView*, kir::Allocate*> work_buffer_map_;
std::unordered_map<TensorView*, kir::AllocateFusedReduction*>
fused_reduction_map_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 4,787
| 32.25
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_index_compute.h
|
#pragma once
#include <fusion.h>
#include <index_compute.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Struct to hold useful information from an index pass on iterdomain graph.
// Used to return the IndexCompute structure back to the indexing calls in
// index_compute.cpp. Other structures are required to resolve the actual
// indexing math there.
struct IndexFromIdGraph {
IndexCompute index;
IndexCompute concrete_index;
std::unordered_map<IterDomain*, Val*> initial_concrete_index_map;
std::vector<IterDomain*> resolved_loop_domains;
explicit IndexFromIdGraph(
IndexCompute index,
IndexCompute concrete_index,
std::unordered_map<IterDomain*, Val*> initial_concrete_index_map,
std::vector<IterDomain*> loop_domains);
};
//! Indexing interface, returns IndexFromIdGraph which the IndexCompute object
//! can be queried from directly for the produced indexing. If producer_tv !=
//! nullptr producer will be indexed, if producer_tv == nullptr consumer will be
//! indexed. If is_global global indexing will be done, else shared memory or
//! local indexing will be performed.
IndexFromIdGraph getTensorIndexFromIdGraph(
const std::vector<kir::ForLoop*>& loops,
const TensorView* consumer_tv,
const TensorView* producer_tv = nullptr,
bool is_global = true,
std::unordered_map<IterDomain*, IterDomain*> c2p_map = {});
//! Indexing interface for calculating a predicate index. Returns an
//! IndexFromIdGraph from which the IndexCompute object can be queried
//! directly for the produced indexing. If is_start_predicate, will produce
//! indexing math for the start predicates.
IndexFromIdGraph getPredicateIndexingFromIdGraph(
const std::vector<kir::ForLoop*>& loops,
TensorView* consumer_tv,
kir::ForLoop* unswitch_or_vec_loop,
IterDomain* double_buffer_axis,
bool is_start_predicate);
//! getTensorIndexFromIdGraph is the function that index_compute will call very
//! straightforwardly. However, for implementing the new indexing logic that
//! starts to abstract some of the indexing away from index_compute we need to
//! move quite a bit of the intertwined indexing logic away from the
//! index_compute file and the index_reference_replay file. This is because we
//! want to separate out what has to be done on the fly, from what analysis we
//! can do early on with the iter domain graph and associated properties.
//!
//! getTensorIndexFromIdGraph places this analysis internally in
//! LoopIndexingAnalysis. LoopIndexingAnalysis though has to communicate to:
//! 1) index_compute.cpp::IndexCompute to tell IndexCompute which expressions
//! it needs to traverse to compute the indexing math.
//! 2) lower_shift.cpp::HaloInfo::buildConcreteHaloExtentMap to build the halo
//! extent map used in indexing.
//!
//! LoopIndexing is nothing but a mechanism for this communication.
//!
//! Holds information needed to produce indexing math. In the current version of
//! indexing pass, the iter domains combined with the loop nests are the source
//! of truth in terms of resolving the actual integer indexing math from the
//! sequence of iterdomain transforms.
//!
//! This information is critical in resolving indexing associated with complex
//! broadcast patterns. Check FusionComplexBCast* test cases as well as
//! FusionAdvancedIndexing* for examples where resolving indices from IterDomain
//! transformations can be challenging.
//!
//! The source of this challenge is inlining patterns where the IterDomains
//! responsible for control flow are not local to a particular TensorView.
//! Broadcast, operations like view/reshape, and gather/shift can make indexing
//! local buffers complex because of the complex effects inlining into other
//! TensorViews produce.
//!
//! TODO:
//! The first iteration tries to match the semantics of reference
//! replay without any new logic. A follow-up iteration will
//! need to revisit a few further pathological patterns.
//!
//! Note:
//! The current implementation of loop indexing pass works on
//! equivalence classes defined by the ComputeAt exact map. The
//! list of expressions stored in this class forms a "reference" graph of
//! iterdomain expressions when all of their inputs and outputs are replaced
//! with their exact concrete mapped id's.
//!
//! Here an invariant in a graph of iterdomain expressions is that
//! each iterdomain is produced exactly once and is either a leaf domain
//! or has been consumed exactly once by another expression. This makes sure
//! that a well defined indexing can be generated for each of the concrete ids
//! whenever we either forward or backward traverse the graph.
class LoopIndexing {
public:
//! Returns the original loop nest.
const auto& loops() const {
return loops_;
}
//! Returns the vector of Iterdomains
//! that match the original loop pattern.
const auto& loopDomains() const {
return loop_domains_;
}
//! Returns the consumer tv that the view info
//! was derived from.
auto consumerTv() const {
return consumer_tv_;
}
//! Returns the set of Iterdomain transforms that
//! define the correct indexing path, in forward
//! topological order.
std::vector<Expr*> getForwardExprList() const;
//! Returns the set of Iterdomain transforms that
//! define the correct indexing path, in backward
//! topological order.
std::vector<Expr*> getBackwardExprList() const;
//! Returns the set of out of line expressions in
//! reverse topological order.
const std::vector<Expr*>& getBackwardOutOfLineExprList() const {
return out_of_line_exprs_;
}
//! Returns all exact concrete id's that were produced
//! or consumed in the selected indexing expressions
std::unordered_set<IterDomain*> getAllExactConcreteIdSet() const;
private:
friend class LoopIndexingAnalysis;
//! The loop nest that this loop indexing is derived from.
std::vector<kir::ForLoop*> loops_;
//! Consumer tv, where the view related info was derived from.
const TensorView* consumer_tv_;
//! The source iterdomains that all the Iterdomain transforms
//! in this loop nest originated from.
std::vector<IterDomain*> loop_root_;
//! The leaf iterdomains that the original loop nests correspond
//! to. May be longer than loops_ with the dangling iterdomains
//! appended towards the end.
std::vector<IterDomain*> loop_domains_;
//! The selected sequence of expressions that should represent
//! the correct indexing math from the given loop nest.
std::vector<Expr*> index_exprs_;
//! The subset of sequence of expressions that can be resolved
//! with only the iterdomains on the right of consumer tv's ca
//! axis.
//! Expressions are ordered in reverse topological order.
std::vector<Expr*> out_of_line_exprs_;
};
// When indexing there is sometimes an option to propagate an index down
// multiple paths. This will return the IterDomains in the history of the
// reference domain and mark which paths should be taken (if there's a
// preference) to reach the roots provided in preferred_roots.
std::unordered_set<IterDomain*> buildLoopIndexingPreferredPath(
const TensorView* original_tv,
const LoopIndexing& loop_indexing,
bool use_replay_map = false,
std::unordered_map<IterDomain*, IterDomain*> p2c_map = {});
// Get an rfactor IterDomain that is mapped with an IterDomain. If
// multiple such IDs exist, select one whose input IDs are mapped with
// the consumer IDs. This is to ensure the path from the leaf
// IterDomains to the root matches with the consumer tensor.
IterDomain* getRfactorIDToTraverse(
IterDomain* id,
const std::vector<Val*>& consumer_all_ids);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 7,821
| 39.95288
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_index_hoist.h
|
#pragma once
#include <ir_all_nodes.h>
#include <functional>
#include <unordered_map>
#include <vector>
// Hoisting common index subexpressions
//
// Class CommonIndexMap is updated during the lowering as new indices
// are inserted. An index is uniquely identified with CommonIndexKey,
// which consists of the concrete ID of the indexed/predicated domain,
// the for-loops used in the index, and the index vals of the use
// for-loops.
//
// Once all indices are inserted to CommonIndexMap, allocations of
// the hoisted indices are inserted by allocateCommonIndices. Note
// that this assumes that the CUDA code generator does not inline a
// scalar Val with allocation (PR #1434).
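//
// Illustrative sketch of the effect (pseudo-code; variable names are made
// up):
//
//   Before hoisting:
//     for i:
//       T1[i * stride + offset] = ...; // index computed here
//       T2[i * stride + offset] = ...; // and recomputed here
//
//   After hoisting:
//     for i:
//       idx = i * stride + offset;     // common index computed once
//       T1[idx] = ...;
//       T2[idx] = ...;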
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Class to represent unique indexed domains for index
//! hoisting. Uniqueness is determined by the indexed domain
//! itself, the for-loops and their index values.
class TORCH_CUDA_CU_API CommonIndexKey {
friend struct CommonIndexKeyHash;
public:
//! \param consumer_indexed_id Indexed consumer domain
//! \param consumer_td TensorDomain of consumer_indexed_id
//! \param ref_td Reference domain at the time of indexing
//! \param ref_index_map Index map of the reference domain
//! \param loops Loop structure where this id is indexed
CommonIndexKey(
IterDomain* consumer_indexed_id,
TensorDomain* consumer_td,
TensorDomain* ref_td,
const std::unordered_map<IterDomain*, Val*>& ref_index_map,
const std::vector<kir::ForLoop*>& loops);
//! \param consumer_indexed_id Indexed consumer domain
//! \param consumer_td TensorDomain of consumer_indexed_id
//! \param loop_domains Resolved vector of iterdomains corresponding to loops
//! \param loop_index_map Index mapping generated from the loop nest.
//! \param loops Loop structure where this id is indexed
//! Duplicate of above, but without a reference domain. TODO: Remove other
//! implementation.
CommonIndexKey(
IterDomain* consumer_indexed_id,
TensorDomain* consumer_td,
const std::vector<IterDomain*>& loop_domains,
const std::unordered_map<IterDomain*, Val*>& loop_index_map,
const std::vector<kir::ForLoop*>& loops);
const IterDomain* concreteIndexedId() const {
return concrete_indexed_id_;
}
const std::vector<kir::ForLoop*>& usedLoops() const {
return used_loops_;
}
const std::vector<Val*>& loopIndexVals() const {
return loop_index_vals_;
}
bool operator==(const CommonIndexKey& other) const;
std::string toString() const;
private:
//! Concrete domain of indexed domain
IterDomain* concrete_indexed_id_ = nullptr;
//! Loops used for the index
std::vector<kir::ForLoop*> used_loops_;
//! Loop index vals for the used loops
std::vector<Val*> loop_index_vals_;
};
struct CommonIndexKeyHash {
std::size_t operator()(const CommonIndexKey& key) const {
auto h = std::hash<const IterDomain*>{}(key.concrete_indexed_id_);
// NOTE: do not use other fields as the pointers can be different
// even when two keys can share the same index
return h;
}
};
//! Map to hold hoisted common indices
class TORCH_CUDA_CU_API CommonIndexMap {
public:
//! Register an indexed consumer domain to hoist
//!
//! Returns a corresponding hoisted index and a flag indicating if a
//! new index is inserted.
//!
//! Consumer domains are used even for producer indexing since
//! producer domains in producer indexing are temporary replay
//! domains.
std::pair<Val*, bool> insert(
IterDomain* indexed_consumer_id,
TensorDomain* consumer_td,
TensorDomain* ref_td,
const std::unordered_map<IterDomain*, Val*>& ref_index_map,
const std::vector<kir::ForLoop*>& loops,
Val* index);
//! Duplicate of above, but without a reference domain. TODO: Remove other
//! implementation.
std::pair<Val*, bool> insert(
IterDomain* indexed_consumer_id,
TensorDomain* consumer_td,
const std::vector<IterDomain*>& loop_domains,
const std::unordered_map<IterDomain*, Val*>& loop_index_map,
const std::vector<kir::ForLoop*>& loops,
Val* index);
const auto& commonIndexMap() const {
return common_index_map_;
}
const auto& useCounts() const {
return use_counts_;
}
private:
//! Utility method to insert a key into common index
//! map. Returns a pair of an IR node and a boolean value.
//! The IR node will be the previously inserted index if
//! the key found a match, or will be the original index
//! if this is new key and the key will be stored.
//! The boolean value will be true if the key is stored,
//! i.e. first time it is inserted.
std::pair<Val*, bool> tryInsertNewIndex(CommonIndexKey key, Val* index);
private:
//! Map to hold hoisted common indices
std::unordered_map<CommonIndexKey, Val*, CommonIndexKeyHash>
common_index_map_;
std::unordered_map<CommonIndexKey, int, CommonIndexKeyHash> use_counts_;
};
//! Insert allocations of hoisted indices. Must be called after
//! collecting all common indices.
std::vector<Expr*> allocateCommonIndices(const std::vector<Expr*>& exprs);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 5,270
| 32.788462
| 78
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_insert_syncs.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Insert syncs at the end of for-loops to prevent write-after-read race conditions.
//!
//! WAR race condition occurs when the next iteration of the loop overwrites
//! shared memory value before a previous operation has finished reading it.
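//!
//! Illustrative WAR example (pseudo-code; names are made up):
//!
//!   for i:
//!     T1_s[threadIdx.x] = ...;      // write to shared memory
//!     ... = T1_s[threadIdx.x ^ 1];  // read a value written by another
//!                                   // thread
//!     __syncthreads();              // inserted at the end of the loop so
//!                                   // iteration i+1 cannot overwrite T1_s
//!                                   // before the read above completes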
std::vector<Expr*> insertWarThreadSynchronization(
const std::vector<Expr*>& exprs);
//! Insert syncs between writing to shared memory and then reading it.
//! RAW pass is run before indexing, unrolling (loop duplication), memory
//! aliasing, and index (grid/block bcast/reduction)
std::vector<Expr*> insertRawThreadSynchronization(
const std::vector<Expr*>& exprs);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 882
| 26.59375
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_instrument.h
|
#pragma once
#include <ir_all_nodes.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Set up KernelPerformanceProfile of GpuLower when enabled, which
//! keeps track of expressions to profile. A new TensorView is added
//! for storing profiling results. The expression list is prepended
//! with an kir::Allocate node to allocate the TensorView profile
//! buffer. Note that any expression added after this pass will not be
//! profiled, so this pass should be called after all expressions are
//! lowered. KernelPerformanceProfile is copied to Kernel after
//! lowering.
std::vector<Expr*> instrumentKernel(const std::vector<Expr*>& exprs);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 753
| 30.416667
| 70
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_loops.h
|
#pragma once
#include <c10/macros/Export.h>
#include <compute_at_map.h>
#include <instrumentation.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <lower_thread_predicate.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Loop nest generator pass will get IR that looks something like:
//! T0[I0o{ceil(I0/4)}, I1o{ceil(I1/128)}, I0iU{4}, I1i{128}] = ...
//! and will generate the loop nest structure for these exprs like:
//!
//! for( i : I0o{ceil(I0/4)} ) {
//! for( j : I1o{ceil(I1/128)} ) {
//! for( k : I0i{4} )
//! for( l : I1i{128} )
//! T0[I0o{ceil(I0/4)}, I1o{ceil(I1/128)}, I0iU{4}, I1i{128}] = ...
//!
//! It does not generate predicates, but it will generate allocations, and loop
//! nests to initialize reduction buffers.
class TORCH_CUDA_CU_API LoopNestGenerator {
public:
static std::vector<Expr*> loweredExprs(const std::vector<Expr*>& exprs);
private:
LoopNestGenerator(const std::vector<Expr*>& exprs);
// Open a new innermost for loop, track which TV it was constructed from
// according to the computeAt chain.
void openFor(IterDomain*);
// Close the innermost for loop
void closeFor();
// Appends an expression to the current scope
void pushFront(Expr* expr);
void handle(Expr* expr);
// Run the pass and accumulate output in lowered_exprs_
void generate(const std::vector<Expr*>& exprs);
private:
// Lowered exprs to return
std::vector<Expr*> lowered_exprs_;
// Keep all for loops conveniently to make unrolling easier, basically just a
// stack of the active for_loops
std::vector<kir::ForLoop*> for_loops_;
// Loop structure of each expression
std::unordered_map<TensorView*, std::vector<IterDomain*>> loop_structures_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,844
| 26.132353
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_magic_zero.h
|
#pragma once
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
struct IndexFromIdGraph;
//! Insert magic zero definition at the beginning of the kernel. Insert magic
//! zero update after every (outermost) loop nest with a compile-time extent.
//!
//! This will make sure nvrtc does not aggressively save predicates and indices.
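//!
//! Illustrative sketch of the resulting pattern (pseudo-code; the variable
//! name is illustrative, not the actual generated name):
//!
//!   int magic_zero = 0;                // defined once at kernel start
//!   for i in ...:                      // loop nest with compile-time extent
//!     ... = T0[(i + magic_zero) * s];  // index protected with magic zero
//!   magic_zero = ...;                  // updated after the loop nest so
//!                                      // nvrtc cannot fold the indices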
std::vector<Expr*> insertMagicZero(const std::vector<Expr*>& exprs);
//! Check if val is a reference to the magic zero variable
TORCH_CUDA_CU_API bool isMagicZero(const Val* val);
//! Check if val is protected with magic zero.
//!
//! Specifically, this returns true if val is defined as "x + magic_zero".
bool isProtectedWithMagicZero(const Val* val);
// Determine if we may run into over-reuse of predicates or registers in the
// compiler. If the loop can be unrolled and the index and domain are not
// "simple", we likely want the loop protected.
//
// Magic zero protection should only be done for global memory and predicates.
// We should avoid use on registers. Shared memory does not require it, but
// likely wouldn't hurt.
bool needsMagicZero(
kir::ForLoop* loop,
IterDomain* reference_domain = nullptr,
Val* ind = nullptr);
struct IndexMagicZeroInfo {
//! Index that may be updated with magic zero
Val* index = nullptr;
//! Loop index that is protected by magic zero. nullptr if no loop
//! is protected
Val* original_loop_index = nullptr;
//! Protected loop index. nullptr if no loop is protected
Val* protected_loop_index = nullptr;
//! Protected loop. nullptr if no loop is protected
IterDomain* loop_id = nullptr;
};
//! Protect an index val of an IterDomain with magic zero
//!
//! This should be only used for predicate indexing.
//!
//! No protection is done if none of the loops is determined to require
//! protection by needsMagicZero.
IndexMagicZeroInfo protectPredicateIndexWithMagicZero(
Val* index,
const IndexFromIdGraph& id_graph,
const std::vector<kir::ForLoop*>& loops);
//! Protect an index val of a tensor with magic zero
//!
//! This should be only used for non-predicate indexing.
//!
//! No protection is done if none of the loops is determined to require
//! protection by needsMagicZero.
void protectNonPredicateIndexWithMagicZero(
const std::vector<kir::ForLoop*>& loops,
const std::vector<IterDomain*>& loop_domains,
std::unordered_map<IterDomain*, Val*>& concrete_loop_idx_map);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 2,580
| 31.670886
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_misaligned_vectorization.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_all_nodes.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Transform for-loop structure to handle misaligned addresses
//!
//! Sections of misaligned addresses are handled sequentially
//! while aligned addresses use vectorized memory accesses.
//!
//! ---------------------------------------------------------------------------
//! Before Misaligned Vectorization:
//!
//! Inputs: T0
//! Outputs: T3
//!
//! for(...) {
//! T1[vector_size];
//! for( i : vector_size ) {
//! T1[i] = T0[...]
//! }
//!
//! T2[vector_size];
//! for( i : vector_size ) {
//! T2[i] = unaryOp(T1[i])
//! }
//!
//! for( i : vector_size ) {
//! T3[...] = T2[i]
//! }
//! }
//!
//! ---------------------------------------------------------------------------
//! After Misaligned Vectorization:
//!
//! Inputs: T0
//! Outputs: T3
//!
//! for(...) {
//! T1[vector_size];
//! T2[vector_size];
//!
//! if (inline_predicate_except_last_root_domain) {
//! index_except_last_root_domain = ...
//! address = (int64_t) &T1[index_except_last_root_domain]
//!
//! offset_size = (address % vector_size_bytes) / data_type_size_bytes
//! shift_init = vector_size - offset_size
//! shift = (shift_init == vector_size) ? 0 : shift_init
//!
//! // size of the last root domain
//! extent = ...
//! remainder = (extent - shift) % vector_size
//!
//! last_root_domain_index = ...
//!
//! // Vectorize Section
//! if ( (last_root_domain_index + shift) < (extent - remainder) ) {
//! T1[0] = vectorize_load( T0[index + shift] );
//!
//! for( i : vector_size ) {
//! T2[i] = unaryOp(T1[i])
//! }
//!
//! T3[index + shift] = vectorize_store( T2[0] );
//! }
//!
//! // Initial Section
//! if ( last_root_domain_index == 0 ) {
//! for( i : shift ) {
//! T1[i] = T0[...]
//! }
//!
//! for( i : shift ) {
//! T2[i] = unaryOp(T1[i])
//! }
//!
//! for( i : shift ) {
//! T3[...] = T2[i]
//! }
//! }
//!
//! // Remainder Section
//! if ( (last_root_domain_index + shift) >= (extent - remainder) &&
//! (last_root_domain_index + shift) < extent) {
//!
//! for( i : remainder ) {
//! T1[i] = T0[index + shift]
//! }
//!
//! for( i : remainder ) {
//! T2[i] = unaryOp(T1[i])
//! }
//!
//! for( i : remainder ) {
//! T3[index + shift] = T2[i]
//! }
//! }
//! }
//! }
//!
std::vector<Expr*> processMisalignedVectorization(
const std::vector<Expr*>& exprs);
bool containsAnyDirectChildMisalignedVectorize(const kir::ForLoop* fl);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 2,830
| 22.991525
| 79
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_predicate_elimination.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class TORCH_CUDA_CU_API PredicateElimination : public IterVisitor {
public:
void build(Fusion* fusion);
//! True if expr does not need a predicate
//!
//! \param expr Tensor expression
bool canOmitPredicate(const Expr* expr) const;
//! Value to initialize out-of-bound regions
Val* getInitValue(TensorView* tv) const;
//! Dump to string for debugging
std::string toString() const;
// A utility to set removal info of `to` the same as `from`.
// See issue #1641
// We build predicate info before lowering but more expressions
// are created during lowering that this class also needs to
// keep track of to make sure correct predicate removal is
// applied.
// This utility is a quick patch for the missing information
// since it might be better just to recompute predicate info
// if all expressions were mutated, but that'd take much more
// global info to reliably track.
void propagateRemovalInfo(const Expr* from, const Expr* to);
private:
using IterVisitor::handle;
void handle(Expr* expr) final;
//! Set a value to initialize out-of-bound regions
bool setDefaultInitValue(TensorView* tv);
//! Set a value to initialize out-of-bound regions of reduction tensors
bool setReductionInitValue(TensorView* tv, Val* reduction_init);
//! Check if expr needs to be predicated
bool needsPredicate(Expr* expr) const;
private:
//! Expressions that are found to be safe without predicates
std::unordered_set<const Expr*> non_predicated_exprs_;
//! Tensors and their initialization values
std::unordered_map<TensorView*, Val*> init_value_map_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,888
| 28.061538
| 73
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_replace_size.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <fusion.h>
#include <ir_all_nodes.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// TensorViews are all based on symbolic sizes. When we first initialize them
// we don't know if they're inputs or outputs which would mean that they have
// runtime shapes. Intermediate tensors (those not going to global memory) do
// not have this information. Since the kernel needs to fetch the correct
// shape information at runtime, we want to replace input and output
// tensor sizes with references to the runtime structure containing sizes.
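//
// Illustrative example (the descriptor syntax is illustrative): an input
// tensor's symbolic extent i0 would be replaced with a reference into the
// runtime tensor argument, e.g. T0.size[0], so the kernel reads actual
// sizes from its arguments instead of unresolved symbolic values.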
void replaceSymbolicSizes(Fusion*);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 756
| 28.115385
| 78
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_shift.h
|
#pragma once
#include <c10/macros/Export.h>
#include <dispatch.h>
#include <ir_all_nodes.h>
#include <kernel_ir.h>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class LoopIndexing;
//! Auxiliary class to represent information about halo of an axis
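//! Illustrative example (pseudo-code): for a 3-point stencil such as
//!   T1[i] = T0[i - 1] + T0[i] + T0[i + 1]
//! the corresponding producer axis of T0 has a halo of one element on
//! each side, i.e. widths() == {1, 1} and width() == 2.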
class AxisHaloInfo {
public:
//! Width of halo.
//!
//! pos is either 0 or 1. The width of halo at offset zero is set
//! when pos is 0.
int width(int pos) const;
//! Sum of the widths of both sides
int width() const;
const auto& widths() const {
return widths_;
}
//! Set the halo width of either side.
//! pos is either 0 or 1. The width of halo at offset zero is set
//! when pos is 0.
void setWidth(int pos, int width);
//! Extend the halo width to account for another axis.
void merge(int pos, int other);
//! Extend the halo width to account for another axis.
void merge(const AxisHaloInfo& other);
//! True when halo may be attached
bool hasHalo() const;
std::string toString() const;
private:
//! Sizes of the halo regions of two sides. Both values are zero for
//! axes with no halo. When an axis has halo at offset zero,
//! widths_[0] is non-zero and designates the size of the
//! halo. Similarly, non-zero widths_[1] means the axis has halo at
//! the other end of the axis.
std::array<int, 2> widths_ = {0, 0};
};
//! Helper class for lowering tensors with halo. Only valid at the
//! lowering time.
class TORCH_CUDA_CU_API HaloInfo {
public:
//! Scan a fusion and collect all information for lowering
HaloInfo(Fusion* fusion, std::shared_ptr<const ComputeAtMap> ca_map);
//! Almost exact duplicate of build(TensorDomain* td), except that
//! the traversal was done on loop indexing expressions.
std::unordered_map<IterDomain*, Val*> buildConcreteHaloExtentMap(
const LoopIndexing& loop_indexing) const;
//! Returns true if id has the root halo information set by
//! setRootAxisInfo.
bool hasRootAxisInfo(IterDomain* id) const;
//! Returns the registered AxisHaloInfo of a root axis.
//!
//! This is only for root axes. It is an error to query with
//! non-root axes.
const AxisHaloInfo& getRootAxisInfo(IterDomain* id) const;
//! Query if an axis has a halo width.
//!
//! See the comment at halo_width_map_.
bool hasHaloWidth(IterDomain* id) const;
//! Return the halo width of an axis.
//!
//! It's an error if queried for an axis with no halo width
//! information.
int getHaloWidth(IterDomain* id) const;
//! Returns an extent if id is extended for halo. Nullptr is
//! returned otherwise.
Val* getExtent(IterDomain* id) const;
//! Returns all child domains of a root domain that inherits the
//! halo of the root domain.
//!
//! If a root domain is split, only the inner domain inherits the
//! halo, so the inner domain is included but not the outer domain.
const std::unordered_set<IterDomain*>& getChildDomains(
IterDomain* root_id) const;
//! Returns all root domains from which the halo of a domain
//! originates.
std::unordered_set<IterDomain*> getRootDomains(IterDomain* id) const;
//! Returns true if a domain inherits halo associated with a root
//! domain.
bool isHaloInherited(IterDomain* root_id, IterDomain* id) const;
// True when the extent of id1 is guaranteed to be less than or
// equal to that of id2. False when it *may* not be.
bool extentLessEqual(IterDomain* id1, IterDomain* id2) const;
// True when the extent of id1 is guaranteed to be equal to
// that of id2. False when it *may* not be.
bool extentEqual(IterDomain* id1, IterDomain* id2) const;
//! Check if expr must be predicated based on boundary conditions
//! directly or indirectly induced by shift expressions.
//!
//! When yes, the expression needs two predications: one for
//! interior and another for padding. Predicate insertion is done in
//! the ShiftPredicateInserter class below.
bool needsShiftPredicate(Expr* expr) const;
std::string toString() const;
private:
//! Build mappings of extent information of a TensorDomain
void build(TensorDomain* td);
//! Propagate root axis information from outputs to inputs of an
//! expression
void propagateRootAxisInfo(Expr* expr);
//! Set initial AxisHaloInfo of a root axis
//!
//! The axis does not need to be a root domain in the case of
//! reference tensors. Reference tensors get halo information from
//! consumer root domains, which may correspond to rfactor domains
//! of tensors from which reference tensors are derived.
void setRootAxisInfo(IterDomain* id, const AxisHaloInfo& root_axis_info);
//! Adds a domain to the halo inheritance map.
//!
//! A domain, child, is added to the same set as domain parent. Both
//! domains must be part of TensorDomain td.
void insertToInheritanceMap(
TensorDomain* td,
IterDomain* parent,
IterDomain* child);
//! Propagate root axis information from consumer to producer
void propagateRootAxisInfo(
TensorView* producer,
TensorView* consumer,
Expr* expr);
//! Initialize mappings for a given root domain. The given domain
//! must be previously given to setRootAxisInfo.
void initializeFromRootAxisInfo(IterDomain* id);
//! Validate shift usage
void validate(TensorView* td, std::shared_ptr<const ComputeAtMap> ca_map)
const;
void setHaloWidth(IterDomain* id, int halo_width);
private:
// Copy the permissive map from the passed in compute at map
const DisjointSets<IterDomain*> permissive_map_;
//! Halo information of root axes
std::unordered_map<IterDomain*, AxisHaloInfo> root_axis_map_;
//! Halo-extended extents. No mapping for axes without halo extension
std::unordered_map<IterDomain*, Val*> extent_map_;
//! The halo width of an axis.
//!
//! The mapped value is a sum of two widths of both sizes of an
//! axis. For root axes, it is equivalent to AxisHaloInfo.widths_[0]
//! + AxisHaloInfo.widths_[1] (or AxisHaloInfo.width()). For
//! example, when a root axis is extended by 1 for both sides, it'd
//! be mapped to 2. For axes with no halo, they are mapped to zero.
//!
//! When an axis is split, its halo is only propagated to the inner
//! output axis, so the value of this map for the inner output is
//! the same as the input of split, while the outer output is mapped
//! to zero.
//!
//! When an axis is merged, no mapping is created for its
//! output at this point, primarily because it isn't clear what the
//! "halo width" for a merged axis should mean. Perhaps, for a merged
//! axis of extent (N+a)*(M+b), where N and M correspond to the original
//! extents of two axes, and a and b correspond to their halo widths,
//! it might make sense to set the halo width of this merged axis as
//! (N+a)*(M+b)-N*M. Currently, however, this isn't necessary, so no
//! particular mapping is created for merged axes.
//!
//! This is currently used only for conservatively comparing the
//! overall extents of axes. See HaloInfo::extentLessEqual and
//! HaloInfo::extentEqual.
//!
//! Example: Suppose a root axis has {0, 1} of
//! AxisHaloInfo.widths_. The root axis is mapped to 1. When it is
//! split, say, by 4, the output axes, [N / 4] and [4], where N is
//! the extent of the root axis, the outer axis is mapped to 0,
//! whereas the inner axis is mapped to 1. Further, suppose the
//! inner axis is merged with another axis of extent M, we know that
//! the extent of the resulting output axis is 5*M, but we don't
//! create its mapping.
std::unordered_map<IterDomain*, int> halo_width_map_;
//! Mappings from root domains to child domains that inherit halo
std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>
inheritance_map_;
};
class ShiftPredicateInserter {
public:
//! Works mostly the same way as
//! PredicateCompute::getInlinePredicate but does the insertion of
//! the generated predicate. The branch structure is different from
//! the usual predicated expression, so the insertion is also done
//! here.
static Expr* insert(
Expr* expr,
const std::vector<kir::ForLoop*>& loops,
Bool* thread_pred,
bool within_unswitch);
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 8,349
| 33.937238
| 75
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_sync_information.h
|
#pragma once
#include <ir_all_nodes.h>
#include <parallel_type_bitmap.h>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
class SyncMap {
public:
std::string toString() const;
//! Validates all tensors are consistently parallelized. Basically,
//! when a producer axis is threaded, either with threadIdx or
//! blockIdx, there must be a mapped consumer axis with the
//! same ParallelType with some exceptions.
//!
//! This function assumes Loop and Parallel ComputeAtMaps are already
//! built as they are used to validate consistency.
//!
//! Fills needs_raw_sync with output TVs if they need a RAW sync on smem or
//! gmem. The second entry in this map is the parallel dimensions being
//! communicated across.
void build(Fusion* fusion);
ParallelTypeBitmap needsRawSync(TensorView* tv) const {
auto it = needs_raw_sync_.find(tv);
if (it != needs_raw_sync_.end()) {
return it->second;
}
return ParallelTypeBitmap();
}
private:
std::unordered_map<TensorView*, ParallelTypeBitmap> needs_raw_sync_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 1,195
| 25
| 80
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_thread_predicate.h
|
#pragma once
#include <c10/macros/Export.h>
#include <ir_all_nodes.h>
#include <lower_utils.h>
#include <parallel_type_bitmap.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Maps TensorViews to a { ParallelTypeBitmap, SourceMap } pair
//!
//! Map from TensorView to a bit set representing <BIDx, BIDy, BIDz, TIDx, TIDy,
//! TIDz> If any dependency of TV had a parallelized reduction, we will track
//! it here. This will be used for predicate generation to prevent
//! parallelization on that axis. This is important if we have a reduction on
//! for example TIDx, as the reduced value is only valid on threadIdx.x == 0
//! therefore if we use that value later in the kernel we have that predicate.
//! If we follow a reduction parallelized on TIDx with a broadcast on TIDx we
//! no longer need the predicate and can reset the bit accordingly
//!
//! In addition, if a parallel thread type is not used, it is
//! redundant to use all threads/blocks. That isn't a problem
//! generally although it can be inefficient, but when an aliased smem
//! buffer is used as an output, redundant writes can be invalid (see issue
//! #1110). PredicateInfo::redundant_types tracks which parallel types
//! are redundant for each tensor and is used to let only one
//! thread/block of a redundant type execute the expression for a
//! tensor.
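//!
//! Illustrative example (pseudo-code; names are made up):
//!
//!   T1 = sum(T0, {TIDx});      // reduction parallelized on TIDx
//!   T2 = T1 + 1;               // T1 is only valid at threadIdx.x == 0,
//!                              // so this expression is predicated with
//!                              // if (threadIdx.x == 0)
//!   T3 = broadcast(T1, TIDx);  // after a TIDx broadcast the value is
//!                              // valid on all threads again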
class TORCH_CUDA_CU_API ThreadPredicateMap {
public:
using SourceMap = std::unordered_map<
ParallelType,
std::unordered_set<const TensorView*>,
TypeHash>;
//! Thread predicate information for each tensor
struct PredicateInfo {
// Parallel types where only one thread/block is valid.
ParallelTypeBitmap limited_types;
// Parallel types where only one thread/block is enough.
ParallelTypeBitmap redundant_types;
// Tracking use chain of redundant writes:
// [Redundant use chain]
// a parallel type is a `redundant_consumer_type` only
// if all of its propagation use chains terminate with
// a redundant write of this type.
// A propagation use chain is currently either a reg-to-reg
// chain for a shared mem tv, or a reg/smem-to-reg/smem chain
// for a global tv.
// This is complementary information to `redundant_types`.
// If a tensor view is redundantly written and not redundantly
// used by all consumers, see FusionRedundantPredSync3,
// a RAW sync will need to be inserted before reading
// this redundantly written tensor.
ParallelTypeBitmap redundant_use_types;
bool operator==(const PredicateInfo& other) const {
return limited_types == other.limited_types &&
redundant_types == other.redundant_types &&
redundant_use_types == other.redundant_use_types;
}
};
using MapType = std::unordered_map<const TensorView*, PredicateInfo>;
using const_iterator = MapType::const_iterator;
//! Build a map from each tensor to PredicateInfo.
void build(Fusion* fusion);
//! Get a PredicateInfo for a given tensor. If it's an output of
//! a parallel broadcast, unmask the limited_types_ bit of the
//! corresponding parallel type since it must join the broadcast
//! operation although the valid input is only available at one of
//! the threads/blocks.
PredicateInfo getPredicateInfo(const TensorView* tv) const;
//! Returns a flag set that indicates which parallel types should be
//! predicated.
ParallelTypeBitmap getPredicatedParallelTypes(const TensorView* tv) const;
//! Returns a Bool predicate for a given TensorView.
Bool* getPredicate(const TensorView* tv) const;
//! Returns a ParallelTypeBitmap representing which domain needs
//! blockBroadcast.
//!
//! Even when a domain is broadcast and parallelized, it does not need
//! blockBroadcast unless it is predicated by limited_types_
ParallelTypeBitmap getParallelBroadcastDomains(const TensorView* tv) const;
  //! Mark tv as updated so that rebuilding the map recomputes its
  //! predicates and those of its dependents.
void markAsUpdated(const TensorView* tv);
void print() const;
//! Generate a Bool value from PredicateInfo.
static Bool* getPredicateFromPredicateInfo(
const ThreadPredicateMap::PredicateInfo& pred_info);
//! Get the redundant use types of the given expr, see [Redundant use chain]
ParallelTypeBitmap getRedundantConsumerType(Expr* expr) const;
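  //! A minimal usage sketch (assuming a lowered Fusion* `fusion` and a
  //! TensorView* `tv`; variable names are illustrative):
  //!
  //!   ThreadPredicateMap pred_map;
  //!   pred_map.build(fusion);
  //!   Bool* pred = pred_map.getPredicate(tv);
  //!   ParallelTypeBitmap bcast = pred_map.getParallelBroadcastDomains(tv);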
private:
  // Update the thread_predicates_ bitset based on the provided Expr
void updateBitSet(const Expr*);
const_iterator find(const TensorView* tv) const;
const_iterator end() const;
const PredicateInfo& at(const TensorView* tv) const;
PredicateInfo& at(const TensorView* tv);
//! Update a mapping
bool update(
const TensorView* tv,
const ParallelTypeBitmap& limited_types,
const ParallelTypeBitmap& redundant_types);
//! Update a mapping
bool update(const TensorView* tv, const PredicateInfo& pred_and_src);
//! Backward populate redundant use chain info once the redundant
//! parallel writes have been identified.
void populateRedundantUseMap(Fusion* fusion);
private:
MapType thread_predicates_;
//! Keep track of updated tensors that need predicates to be computed
std::unordered_set<const TensorView*> updated_tvs_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 5,449
| 36.328767
| 78
|
h
|
null |
pytorch-main/third_party/nvfuser/csrc/lower_trivial_broadcast.h
|
#pragma once
#include <ir_all_nodes.h>
#include <iter_visitor.h>
#include <root_domain_map.h>

#include <c10/macros/Export.h>

#include <memory>
#include <unordered_map>
#include <unordered_set>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
//! Traverse and collect all concretized broadcast domains.
//!
//! The traversal first initializes the origin map with broadcast
//! domains in input tensors. Then, a new entry is added to the origin
//! map when a broadcast op is encountered during a forward traversal
//! of the given fusion. For non-broadcast ops, mappings are just
//! propagated forward using PairwiseRootDomainMap.
//!
//! When the mapped consumer domain is not broadcast, it means the
//! producer broadcast domain is concretized, and its origin broadcast
//! domains are marked as concretized.
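//!
//! Illustrative example (hypothetical tensors):
//!
//!   tv0: [I0]                 // fusion input
//!   tv1 = broadcast(tv0);     // [I0, B1]
//!   tv2 = add(tv1, tv3);      // tv3: [I0, I1]
//!
//! B1 of tv1 maps to the non-broadcast domain I1 of tv3, so B1 is
//! concretized and isConcretized() returns true for it.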
class TORCH_CUDA_CU_API ConcretizedBroadcastDomains : private IterVisitor {
public:
ConcretizedBroadcastDomains() = delete;
ConcretizedBroadcastDomains(Fusion* fusion);
//! Is a domain concretized?
bool isConcretized(IterDomain* id) const;
//! Is a domain concretized to a unique concrete domain?
bool isUniquelyConcretized(IterDomain* id) const;
//! Is a domain concretized to multiple concrete domains?
bool maybeNonUniquelyConcretized(IterDomain* id) const;
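  //! A minimal usage sketch (variable names are illustrative):
  //!
  //!   ConcretizedBroadcastDomains cbd(fusion);
  //!   if (cbd.isConcretized(broadcast_id)) {
  //!     // broadcast_id maps to at least one concrete domain
  //!   }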
private:
using IterVisitor::handle;
void handle(BroadcastOp* bop) final;
void handle(Expr* expr) final;
void markAsConcretized(
IterDomain* broadcast_root_domain,
IterDomain* concrete_root_domain);
bool insertRootDomainToConcreteDomainSet(
IterDomain* new_root_id,
std::unordered_set<IterDomain*>& id_set);
private:
  //! Maps each root broadcast domain to its original root broadcast
  //! domains. There can be multiple original domains due to, e.g.,
  //! binary ops with broadcast domains in both inputs.
std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>
broadcast_origin_map_;
//! Map all broadcast domains to concrete root domains
std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>
broadcast_to_concrete_map_;
std::unique_ptr<ExactRootDomainMap> exact_map_;
};
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
| 2,164
| 29.928571
| 75
|
h
|