text
stringlengths 1
22.8M
|
|---|
```xml
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/mps/OperationUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/constant_pad_nd_native.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_native.h>
#endif
namespace at::native {
namespace mps {
// Pad operations (1D/2D/3D forward and backward)
// Shared implementation behind every MPS pad operator (constant / reflection /
// replication; 1D, 2D and 3D; forward and backward).
//
// Parameters:
//   output          - destination tensor; resized in place. Holds the padded
//                     result in the forward pass, or the input gradient in the
//                     backward pass.
//   input_          - forward-pass input (also the shape reference in backward).
//   padding         - (left, right[, top, bottom[, front, back]]); size must be
//                     2, 4 or 6.
//   grad_output_opt - incoming gradient; when defined, runs the backward pass.
//   mode            - MPSGraph padding mode (constant / reflect / clamp-to-edge).
//   constantValue   - fill value; only meaningful with MPSGraphPaddingModeConstant.
//   op_name         - caller's name, used in error messages and the graph cache key.
//
// Returns: `output` (for chaining by the thin wrappers below).
static Tensor& pad_out_template(Tensor& output,
                                const Tensor& input_,
                                IntArrayRef padding,
                                const std::optional<Tensor>& grad_output_opt,
                                MPSGraphPaddingMode mode,
                                double constantValue,
                                const string& op_name) { // by const-ref: avoids a std::string copy per call
  using CachedGraph = MPSUnaryGradCachedGraph;
  const int padding_size = (int)padding.size();
  int padding_dim = padding_size / 2; // either 1D, 2D, or 3D
  TORCH_CHECK(
      padding_size == 2 || padding_size == 4 || padding_size == 6, "invalid padding argument of size ", padding_size);
  const Tensor& grad_output_ = *(at::borrow_from_optional_tensor(grad_output_opt));
  const bool is_backward_pass = grad_output_.defined();

  int64_t nbatch = 1;
  int64_t ndims = input_.ndimension();

  TORCH_CHECK(ndims >= (int64_t)padding_dim,
              "Length of pad should be no more than twice the number of "
              "dimensions of the input. Pad length is ",
              padding_size,
              // fixed message spacing: previously rendered as e.g. "4while the input has 3dimensions."
              " while the input has ",
              ndims,
              " dimensions.");

  // number of input dims with ConstantPad could be less than 2
  int dim_w = padding_dim;
  int dim_h = padding_dim - 1;
  int dim_d = padding_dim - 2;
  int dim_slices = 0;

  if (!is_backward_pass && mode != MPSGraphPaddingModeConstant && ndims > padding_dim) {
    bool valid_dims = input_.size(1) != 0 && input_.size(padding_dim) != 0;
    TORCH_CHECK((ndims == 1 + padding_dim && valid_dims) ||
                    (ndims == 2 + padding_dim && valid_dims && input_.size(1 + padding_dim) != 0),
                "3D or 4D (batch mode) tensor expected for input, but got: ",
                input_);
  }

  if (ndims == padding_dim) {
    // unbatched, no channel dim: shift all spatial-dim indices down by one
    dim_w--;
    dim_h--;
    dim_d--;
  } else if (ndims > padding_dim + 1) {
    const int dim_diff = (int)ndims - padding_dim - 1;
    // this virtually inflates the padding with zeros if ndims > padding_dim + 2
    padding_dim += dim_diff - 1;
    dim_w += dim_diff;
    dim_h += dim_diff;
    dim_d += dim_diff;
    dim_slices++;
    nbatch = input_.size(0);
  }

  // Unpack the per-side pad amounts; missing entries default to 0.
  int64_t pad_l = padding[0];
  int64_t pad_r = padding[1];
  int64_t pad_t = padding_size > 2 ? padding[2] : 0;
  int64_t pad_b = padding_size > 2 ? padding[3] : 0;
  int64_t pad_front = padding_size > 4 ? padding[4] : 0;
  int64_t pad_back = padding_size > 4 ? padding[5] : 0;

  int64_t nplane = input_.size(dim_slices);
  int64_t input_w = input_.size(dim_w);
  int64_t output_w = input_w + pad_l + pad_r;
  int64_t input_h = padding_dim > 1 ? input_.size(dim_h) : 0;
  int64_t output_h = padding_dim > 1 ? input_h + pad_t + pad_b : 0;
  int64_t input_d = padding_dim > 2 ? input_.size(dim_d) : 0;
  int64_t output_d = padding_dim > 2 ? input_d + pad_front + pad_back : 0;

  Tensor grad_output, input = input_;

  if (!is_backward_pass) {
    TORCH_CHECK(output_w >= 1 || output_h >= padding_dim - 1,
                "input (H: ",
                input_h,
                ", W: ",
                input_w,
                ") is too small. Calculated "
                "output H: ",
                output_h,
                " W: ",
                output_w);

    std::vector<int64_t> outputSizes;
    if (mode == MPSGraphPaddingModeConstant) {
      // support arbitrary input dimensions for constant pad.
      auto input_sizes = input_.sizes();
      auto ori_padding_dim = padding_size / 2;
      auto l_diff = ndims - ori_padding_dim;

      // leading (un-padded) dims are copied through unchanged
      for (size_t i = 0; i < (size_t)l_diff; i++) {
        outputSizes.emplace_back(input_sizes[i]);
      }
      // padding pairs are listed last-dim-first, hence the reverse index math
      for (const auto i : c10::irange((size_t)ori_padding_dim)) {
        auto pad_idx = padding.size() - ((i + 1) * 2);
        auto new_dim = input_sizes[l_diff + i] + padding[pad_idx] + padding[pad_idx + 1];
        outputSizes.emplace_back(new_dim);
      }
    } else {
      // these checks are only relevant for reflection padding (code taken from ReflectionPad.cpp)
      if (mode == MPSGraphPaddingModeReflect) {
        TORCH_CHECK(pad_l < input_w && pad_r < input_w,
                    "Argument #4: Padding size should be less than the corresponding "
                    "input dimension, but got: padding (",
                    pad_l,
                    ", ",
                    pad_r,
                    ") at dimension ",
                    dim_w,
                    " of input ",
                    input_.sizes());
        if (padding_dim > 1) {
          TORCH_CHECK(pad_t < input_h && pad_b < input_h,
                      "Argument #6: Padding size should be less than the corresponding "
                      "input dimension, but got: padding (",
                      pad_t,
                      ", ",
                      pad_b,
                      ") at dimension ",
                      dim_h,
                      " of input ",
                      input_.sizes());
        }
        if (padding_dim > 2) {
          TORCH_CHECK(pad_front < input_d && pad_back < input_d,
                      "Argument #8: Padding size should be less than the corresponding "
                      "input dimension, but got: padding (",
                      pad_front,
                      ", ",
                      pad_back,
                      ") at dimension ",
                      dim_d,
                      " of input ",
                      input_.sizes());
        }
      }
      // build the output shape back-to-front: W, then H, D, plane, batch as applicable
      outputSizes.insert(outputSizes.begin(), output_w);
      if (padding_dim >= 2)
        outputSizes.insert(outputSizes.begin(), output_h);
      if (padding_dim >= 3)
        outputSizes.insert(outputSizes.begin(), output_d);
      if (ndims >= 1 + padding_dim)
        outputSizes.insert(outputSizes.begin(), nplane);
      if (ndims >= 2 + padding_dim)
        outputSizes.insert(outputSizes.begin(), nbatch);
    }

    output.resize_(outputSizes);
    if (output.numel() == 0) {
      return output;
    }
    if (input_.numel() == 0) {
      // empty input: constant pad fills the whole output with the fill value
      output.fill_(constantValue);
      return output;
    }
    input = input_.contiguous();
  } else {
    TORCH_CHECK(output_w == grad_output_.size(dim_w),
                "gradOutput width unexpected. Expected: ",
                output_w,
                ", Got: ",
                grad_output_.size(dim_w));
    if (padding_dim > 1) {
      TORCH_CHECK(output_h == grad_output_.size(dim_h),
                  "gradOutput height unexpected. Expected: ",
                  output_h,
                  ", Got: ",
                  grad_output_.size(dim_h));
    }
    output.resize_as_(input);
    if (output.numel() == 0 || grad_output_.numel() == 0)
      return output;
    grad_output = grad_output_.contiguous();
  }

  // Per-dimension pad / slice descriptors for the MPSGraph ops below. The
  // starts/ends/strides and masks are only used when negative padding has to be
  // emulated with an extra slice (see the workaround comments below).
  const uint32_t dims_mask = (1U << ndims) - 1;
  uint32_t startMask = dims_mask, endMask = dims_mask;
  std::vector<NSNumber*> leftPadVec(ndims, @(0));
  std::vector<NSNumber*> rightPadVec(ndims, @(0));
  std::vector<NSNumber*> startsVec(ndims, @(0));
  std::vector<NSNumber*> endsVec(ndims, @(0));
  std::vector<NSNumber*> stridesVec(ndims, @(1));

  for (int64_t pdim = 0; pdim < padding_size / 2; pdim++) {
    const int64_t leftIdx = pdim * 2;
    const int64_t rightIdx = pdim * 2 + 1;
    const int64_t padIdx = ndims - pdim - 1;

    leftPadVec[padIdx] = @(padding[leftIdx]);
    rightPadVec[padIdx] = @(padding[rightIdx]);
    // workaround for negative padding issue in backward pass
    if (is_backward_pass) {
      if (padding[leftIdx] < 0) {
        leftPadVec[padIdx] = @(0);
        startsVec[padIdx] = @(-padding[leftIdx]);
        startMask &= ~(1U << padIdx);
      }
      if (padding[rightIdx] < 0) {
        rightPadVec[padIdx] = @(0);
        endsVec[padIdx] = @(input.size(padIdx) + padding[rightIdx]);
        endMask &= ~(1U << padIdx);
      }
    }
  }
  MPSShape* leftPadding = [NSArray arrayWithObjects:leftPadVec.data() count:ndims];
  MPSShape* rightPadding = [NSArray arrayWithObjects:rightPadVec.data() count:ndims];

  MPSDataType dataType = getMPSScalarType(input.scalar_type());
  // workaround for Bool type assert with Constant padding
  if (input.scalar_type() == kBool) {
    dataType = MPSDataTypeInt8;
  }
  @autoreleasepool {
    // Cache key encodes shapes/dtypes plus the padding amounts and fill value.
    string key = op_name + getTensorsStringKey({input, grad_output, output}) + ":[" + getArrayRefString(padding) +
        "]:" + std::to_string(constantValue);

    auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
      newCachedGraph->inputTensor_ = mpsGraphRankedPlaceHolder(mpsGraph, dataType, getMPSShape(input));
      const bool needsSlice = startMask != dims_mask || endMask != dims_mask;

      if (!is_backward_pass) {
        MPSGraphTensor* padTensor = [mpsGraph padTensor:newCachedGraph->inputTensor_
                                        withPaddingMode:mode
                                            leftPadding:leftPadding
                                           rightPadding:rightPadding
                                          constantValue:constantValue
                                                   name:nil];
        // workaround for the right padding bug in Monterey
        if (needsSlice) {
          newCachedGraph->gradInputTensor_ =
              [mpsGraph sliceTensor:padTensor
                             starts:[NSArray arrayWithObjects:startsVec.data() count:ndims]
                               ends:[NSArray arrayWithObjects:endsVec.data() count:ndims]
                            strides:[NSArray arrayWithObjects:stridesVec.data() count:ndims]
                          startMask:startMask
                            endMask:endMask
                        squeezeMask:0
                               name:nil];
        } else {
          newCachedGraph->gradInputTensor_ = padTensor;
        }
      } else {
        newCachedGraph->gradOutputTensor_ = mpsGraphRankedPlaceHolder(mpsGraph, dataType, getMPSShape(grad_output));
        MPSGraphTensor* padGradTensor =
            [mpsGraph padGradientWithIncomingGradientTensor:newCachedGraph->gradOutputTensor_
                                               sourceTensor:newCachedGraph->inputTensor_
                                                paddingMode:mode
                                                leftPadding:leftPadding
                                               rightPadding:rightPadding
                                                       name:nil];
        // workaround for negative padding issue with padGradientWithIncomingGradientTensor()
        if (needsSlice) {
          newCachedGraph->gradInputTensor_ =
              [mpsGraph sliceGradientTensor:padGradTensor
                           fwdInShapeTensor:[mpsGraph shapeOfTensor:newCachedGraph->inputTensor_ name:nil]
                                     starts:[NSArray arrayWithObjects:startsVec.data() count:ndims]
                                       ends:[NSArray arrayWithObjects:endsVec.data() count:ndims]
                                    strides:[NSArray arrayWithObjects:stridesVec.data() count:ndims]
                                  startMask:startMask
                                    endMask:endMask
                                squeezeMask:0
                                       name:nil];
        } else {
          newCachedGraph->gradInputTensor_ = padGradTensor;
        }
      }
    });

    Placeholder inputPlaceholder = Placeholder(cachedGraph->inputTensor_, input, nullptr, true, dataType);
    Placeholder outputPlaceholder = Placeholder(cachedGraph->gradInputTensor_, output, nullptr, true, dataType);
    Placeholder gradOutputPlaceholder = !is_backward_pass
        ? Placeholder()
        : Placeholder(cachedGraph->gradOutputTensor_, grad_output, nullptr, true, dataType);

    NSMutableDictionary* feeds = [[NSMutableDictionary new] autorelease];
    feeds[inputPlaceholder.getMPSGraphTensor()] = inputPlaceholder.getMPSGraphTensorData();
    if (is_backward_pass) {
      feeds[gradOutputPlaceholder.getMPSGraphTensor()] = gradOutputPlaceholder.getMPSGraphTensorData();
    }
    runMPSGraph(getCurrentMPSStream(), cachedGraph->graph(), feeds, outputPlaceholder);
  }
  return output;
}
} // namespace mps
// 1D Reflection and Replication Padding
// Forward 1D reflection pad (structured-kernel entry point).
TORCH_IMPL_FUNC(reflection_pad1d_out_mps)
(const Tensor& input, IntArrayRef padding, const Tensor& output) {
  auto& out = const_cast<Tensor&>(output);
  mps::pad_out_template(out, input, padding, std::nullopt, MPSGraphPaddingModeReflect, 0.0, "reflection_pad1d_out_mps");
}

// Backward 1D reflection pad: zero the gradient buffer, then accumulate via the template.
TORCH_IMPL_FUNC(reflection_pad1d_backward_out_mps)
(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, const Tensor& grad_input) {
  auto& grad_in = const_cast<Tensor&>(grad_input);
  grad_in.resize_as_(input).zero_();
  mps::pad_out_template(
      grad_in, input, padding, grad_output, MPSGraphPaddingModeReflect, 0.0, "reflection_pad1d_backward_out_mps");
}

// Forward 1D replication pad (clamp-to-edge padding mode).
TORCH_IMPL_FUNC(replication_pad1d_out_mps)
(const Tensor& input, IntArrayRef padding, const Tensor& output) {
  auto& out = const_cast<Tensor&>(output);
  mps::pad_out_template(
      out, input, padding, std::nullopt, MPSGraphPaddingModeClampToEdge, 0.0, "replication_pad1d_out_mps");
}

// Backward 1D replication pad.
TORCH_IMPL_FUNC(replication_pad1d_backward_out_mps)
(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, const Tensor& grad_input) {
  auto& grad_in = const_cast<Tensor&>(grad_input);
  grad_in.resize_as_(input).zero_();
  mps::pad_out_template(
      grad_in, input, padding, grad_output, MPSGraphPaddingModeClampToEdge, 0.0, "replication_pad1d_backward_out_mps");
}
// 2D Reflection and Replication Padding
// Forward 2D reflection pad into a caller-provided output tensor.
Tensor& reflection_pad2d_out_mps(const Tensor& input, IntArrayRef padding, Tensor& output) {
  return mps::pad_out_template(output, input, padding, std::nullopt, MPSGraphPaddingModeReflect, 0.0, __func__);
}

// Allocating variant: pad_out_template resizes the empty result tensor itself.
Tensor reflection_pad2d_mps(const Tensor& input, IntArrayRef padding) {
  auto result = at::empty({0}, input.options());
  return mps::pad_out_template(result, input, padding, std::nullopt, MPSGraphPaddingModeReflect, 0.0, __func__);
}

// Backward 2D reflection pad into a caller-provided gradient tensor.
Tensor& reflection_pad2d_backward_out_mps(const Tensor& grad_output,
                                          const Tensor& input,
                                          IntArrayRef padding,
                                          Tensor& grad_input) {
  grad_input.resize_as_(input).zero_();
  return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeReflect, 0.0, __func__);
}

// Allocating backward variant.
Tensor reflection_pad2d_backward_mps(const Tensor& grad_output, const Tensor& input, IntArrayRef padding) {
  auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeReflect, 0.0, __func__);
}

// Forward 2D replication pad (structured-kernel entry point).
TORCH_IMPL_FUNC(replication_pad2d_out_mps)
(const Tensor& input, IntArrayRef padding, const Tensor& output) {
  auto& out = const_cast<Tensor&>(output);
  mps::pad_out_template(
      out, input, padding, std::nullopt, MPSGraphPaddingModeClampToEdge, 0.0, "replication_pad2d_out_mps");
}

// Backward 2D replication pad into a caller-provided gradient tensor.
Tensor& replication_pad2d_backward_out_mps(const Tensor& grad_output,
                                           const Tensor& input,
                                           IntArrayRef padding,
                                           Tensor& grad_input) {
  grad_input.resize_as_(input).zero_();
  return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeClampToEdge, 0.0, __func__);
}

// Allocating backward variant.
Tensor replication_pad2d_backward_mps(const Tensor& grad_output, const Tensor& input, IntArrayRef padding) {
  auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeClampToEdge, 0.0, __func__);
}
// 3D Reflection and Replication Padding
// Forward 3D reflection pad (structured-kernel entry point).
TORCH_IMPL_FUNC(reflection_pad3d_out_mps)
(const Tensor& input, IntArrayRef padding, const Tensor& output) {
  auto& out = const_cast<Tensor&>(output);
  mps::pad_out_template(out, input, padding, std::nullopt, MPSGraphPaddingModeReflect, 0.0, "reflection_pad3d_out_mps");
}

// Backward 3D reflection pad: zero the gradient buffer, then accumulate via the template.
TORCH_IMPL_FUNC(reflection_pad3d_backward_out_mps)
(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, const Tensor& grad_input) {
  auto& grad_in = const_cast<Tensor&>(grad_input);
  grad_in.resize_as_(input).zero_();
  mps::pad_out_template(
      grad_in, input, padding, grad_output, MPSGraphPaddingModeReflect, 0.0, "reflection_pad3d_backward_out_mps");
}

// Forward 3D replication pad (clamp-to-edge padding mode).
TORCH_IMPL_FUNC(replication_pad3d_out_mps)
(const Tensor& input, IntArrayRef padding, const Tensor& output) {
  auto& out = const_cast<Tensor&>(output);
  mps::pad_out_template(
      out, input, padding, std::nullopt, MPSGraphPaddingModeClampToEdge, 0.0, "replication_pad3d_out_mps");
}

// Backward 3D replication pad into a caller-provided gradient tensor.
Tensor& replication_pad3d_backward_out_mps(const Tensor& grad_output,
                                           const Tensor& input,
                                           IntArrayRef padding,
                                           Tensor& grad_input) {
  grad_input.resize_as_(input).zero_();
  return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeClampToEdge, 0.0, __func__);
}

// Allocating backward variant.
Tensor replication_pad3d_backward_mps(const Tensor& grad_output, const Tensor& input, IntArrayRef padding) {
  auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeClampToEdge, 0.0, __func__);
}
// backward pass is explicitly handled in autograd by negating the "pad" argument
// N-dimensional constant pad. Only up to three padded dimensions are handled
// natively on MPS; anything larger falls back to the generic implementation.
Tensor constant_pad_nd_mps(const Tensor& self, IntArrayRef pad, const Scalar& value) {
  constexpr size_t kMaxNativePad = 6; // three dims x (before, after)
  if (pad.size() > kMaxNativePad) {
    TORCH_WARN_ONCE("MPS: The constant padding of more than 3 dimensions is not currently supported natively. ",
                    "It uses View Ops default implementation to run. This may have performance implications.");
    return at::native::constant_pad_nd(self, pad, value);
  }
  auto result = at::empty({0}, self.options());
  return mps::pad_out_template(result, self, pad, std::nullopt, MPSGraphPaddingModeConstant, value.toDouble(), __func__);
}
} // namespace at::native
```
|
```objective-c
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_FILE_ORG_APACHE_ARROW_FLATBUF_H_
#define FLATBUFFERS_GENERATED_FILE_ORG_APACHE_ARROW_FLATBUF_H_
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
FLATBUFFERS_VERSION_MINOR == 5 &&
FLATBUFFERS_VERSION_REVISION == 26,
"Non-compatible flatbuffers version included");
#include "Schema_generated.h"
namespace org {
namespace apache {
namespace arrow {
namespace flatbuf {
struct Footer;
struct FooterBuilder;
struct Block;
// NOTE(review): generated by flatc from the Arrow File schema — change the
// .fbs schema and regenerate rather than editing this struct by hand.
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) Block FLATBUFFERS_FINAL_CLASS {
 private:
  int64_t offset_;
  int32_t metaDataLength_;
  // explicit padding so bodyLength_ stays 8-byte aligned in the wire layout
  int32_t padding0__;
  int64_t bodyLength_;

 public:
  Block()
      : offset_(0),
        metaDataLength_(0),
        padding0__(0),
        bodyLength_(0) {
    (void)padding0__; // silence unused-member warnings
  }
  Block(int64_t _offset, int32_t _metaDataLength, int64_t _bodyLength)
      : offset_(::flatbuffers::EndianScalar(_offset)),
        metaDataLength_(::flatbuffers::EndianScalar(_metaDataLength)),
        padding0__(0),
        bodyLength_(::flatbuffers::EndianScalar(_bodyLength)) {
    (void)padding0__;
  }
  /// Index to the start of the RecordBlock (note this is past the Message header)
  int64_t offset() const {
    return ::flatbuffers::EndianScalar(offset_);
  }
  /// Length of the metadata
  int32_t metaDataLength() const {
    return ::flatbuffers::EndianScalar(metaDataLength_);
  }
  /// Length of the data (this is aligned so there can be a gap between this and
  /// the metadata).
  int64_t bodyLength() const {
    return ::flatbuffers::EndianScalar(bodyLength_);
  }
};
FLATBUFFERS_STRUCT_END(Block, 24);
///----------------------------------------------------------------------
/// Arrow File metadata
///
// NOTE(review): generated flatbuffers table accessor for the Arrow File
// footer — regenerate from the schema instead of editing by hand.
struct Footer FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef FooterBuilder Builder;
  // VTable field offsets assigned by the flatc compiler.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_VERSION = 4,
    VT_SCHEMA = 6,
    VT_DICTIONARIES = 8,
    VT_RECORDBATCHES = 10,
    VT_CUSTOM_METADATA = 12
  };
  org::apache::arrow::flatbuf::MetadataVersion version() const {
    return static_cast<org::apache::arrow::flatbuf::MetadataVersion>(GetField<int16_t>(VT_VERSION, 0));
  }
  const org::apache::arrow::flatbuf::Schema *schema() const {
    return GetPointer<const org::apache::arrow::flatbuf::Schema *>(VT_SCHEMA);
  }
  const ::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *> *dictionaries() const {
    return GetPointer<const ::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *> *>(VT_DICTIONARIES);
  }
  const ::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *> *recordBatches() const {
    return GetPointer<const ::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *> *>(VT_RECORDBATCHES);
  }
  /// User-defined metadata
  const ::flatbuffers::Vector<::flatbuffers::Offset<org::apache::arrow::flatbuf::KeyValue>> *custom_metadata() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<org::apache::arrow::flatbuf::KeyValue>> *>(VT_CUSTOM_METADATA);
  }
  // Structural verification of an untrusted buffer; checks every field offset
  // and recursively verifies the nested schema table and vectors.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int16_t>(verifier, VT_VERSION, 2) &&
           VerifyOffset(verifier, VT_SCHEMA) &&
           verifier.VerifyTable(schema()) &&
           VerifyOffset(verifier, VT_DICTIONARIES) &&
           verifier.VerifyVector(dictionaries()) &&
           VerifyOffset(verifier, VT_RECORDBATCHES) &&
           verifier.VerifyVector(recordBatches()) &&
           VerifyOffset(verifier, VT_CUSTOM_METADATA) &&
           verifier.VerifyVector(custom_metadata()) &&
           verifier.VerifyVectorOfTables(custom_metadata()) &&
           verifier.EndTable();
  }
};

// Generated builder: call the add_* setters, then Finish() to close the table.
struct FooterBuilder {
  typedef Footer Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_version(org::apache::arrow::flatbuf::MetadataVersion version) {
    fbb_.AddElement<int16_t>(Footer::VT_VERSION, static_cast<int16_t>(version), 0);
  }
  void add_schema(::flatbuffers::Offset<org::apache::arrow::flatbuf::Schema> schema) {
    fbb_.AddOffset(Footer::VT_SCHEMA, schema);
  }
  void add_dictionaries(::flatbuffers::Offset<::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *>> dictionaries) {
    fbb_.AddOffset(Footer::VT_DICTIONARIES, dictionaries);
  }
  void add_recordBatches(::flatbuffers::Offset<::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *>> recordBatches) {
    fbb_.AddOffset(Footer::VT_RECORDBATCHES, recordBatches);
  }
  void add_custom_metadata(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<org::apache::arrow::flatbuf::KeyValue>>> custom_metadata) {
    fbb_.AddOffset(Footer::VT_CUSTOM_METADATA, custom_metadata);
  }
  explicit FooterBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<Footer> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<Footer>(end);
    return o;
  }
};

// Convenience factory: fields are added in reverse declaration order, which is
// the layout the flatc-generated builders expect.
inline ::flatbuffers::Offset<Footer> CreateFooter(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    org::apache::arrow::flatbuf::MetadataVersion version = org::apache::arrow::flatbuf::MetadataVersion::V1,
    ::flatbuffers::Offset<org::apache::arrow::flatbuf::Schema> schema = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *>> dictionaries = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<const org::apache::arrow::flatbuf::Block *>> recordBatches = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<org::apache::arrow::flatbuf::KeyValue>>> custom_metadata = 0) {
  FooterBuilder builder_(_fbb);
  builder_.add_custom_metadata(custom_metadata);
  builder_.add_recordBatches(recordBatches);
  builder_.add_dictionaries(dictionaries);
  builder_.add_schema(schema);
  builder_.add_version(version);
  return builder_.Finish();
}

// "Direct" factory: serializes the std::vector inputs into the buffer first,
// then forwards to CreateFooter with the resulting offsets.
inline ::flatbuffers::Offset<Footer> CreateFooterDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    org::apache::arrow::flatbuf::MetadataVersion version = org::apache::arrow::flatbuf::MetadataVersion::V1,
    ::flatbuffers::Offset<org::apache::arrow::flatbuf::Schema> schema = 0,
    const std::vector<org::apache::arrow::flatbuf::Block> *dictionaries = nullptr,
    const std::vector<org::apache::arrow::flatbuf::Block> *recordBatches = nullptr,
    const std::vector<::flatbuffers::Offset<org::apache::arrow::flatbuf::KeyValue>> *custom_metadata = nullptr) {
  auto dictionaries__ = dictionaries ? _fbb.CreateVectorOfStructs<org::apache::arrow::flatbuf::Block>(*dictionaries) : 0;
  auto recordBatches__ = recordBatches ? _fbb.CreateVectorOfStructs<org::apache::arrow::flatbuf::Block>(*recordBatches) : 0;
  auto custom_metadata__ = custom_metadata ? _fbb.CreateVector<::flatbuffers::Offset<org::apache::arrow::flatbuf::KeyValue>>(*custom_metadata) : 0;
  return org::apache::arrow::flatbuf::CreateFooter(
      _fbb,
      version,
      schema,
      dictionaries__,
      recordBatches__,
      custom_metadata__);
}
// Generated root-access helpers. GetFooter does no validation — run
// VerifyFooterBuffer first when the buffer comes from an untrusted source.
inline const org::apache::arrow::flatbuf::Footer *GetFooter(const void *buf) {
  return ::flatbuffers::GetRoot<org::apache::arrow::flatbuf::Footer>(buf);
}

// Variant for buffers prefixed with their 32-bit size.
inline const org::apache::arrow::flatbuf::Footer *GetSizePrefixedFooter(const void *buf) {
  return ::flatbuffers::GetSizePrefixedRoot<org::apache::arrow::flatbuf::Footer>(buf);
}

inline bool VerifyFooterBuffer(
    ::flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<org::apache::arrow::flatbuf::Footer>(nullptr);
}

inline bool VerifySizePrefixedFooterBuffer(
    ::flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<org::apache::arrow::flatbuf::Footer>(nullptr);
}

inline void FinishFooterBuffer(
    ::flatbuffers::FlatBufferBuilder &fbb,
    ::flatbuffers::Offset<org::apache::arrow::flatbuf::Footer> root) {
  fbb.Finish(root);
}

inline void FinishSizePrefixedFooterBuffer(
    ::flatbuffers::FlatBufferBuilder &fbb,
    ::flatbuffers::Offset<org::apache::arrow::flatbuf::Footer> root) {
  fbb.FinishSizePrefixed(root);
}
} // namespace flatbuf
} // namespace arrow
} // namespace apache
} // namespace org
#endif // FLATBUFFERS_GENERATED_FILE_ORG_APACHE_ARROW_FLATBUF_H_
```
|
```xml
<?xml version="1.0" encoding="utf-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--TODO (1) Change the ConstraintLayout to a LinearLayout-->
<!--TODO (2) Make the orientation vertical-->
<!--TODO (3) Give left, right, and top padding of 16dp-->
<!--TODO (4) Remove the line that declares the id, we don't need it-->
<!--TODO (5) Remove the xmlns:app declaration, we don't need that anymore-->
<!-- Root layout: the xmlns URIs below were reduced to "path_to_url" placeholders,
     which makes every android:/app: attribute invalid; restored to the canonical
     Android namespace declarations. -->
<android.support.constraint.ConstraintLayout
    xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    android:id="@+id/activity_main"
    android:layout_width="match_parent"
    android:layout_height="match_parent">

    <!--TODO (6) Delete this TextView-->
    <TextView
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="Hello World!"
        app:layout_constraintBottom_toBottomOf="@+id/activity_main"
        app:layout_constraintLeft_toLeftOf="@+id/activity_main"
        app:layout_constraintRight_toRightOf="@+id/activity_main"
        app:layout_constraintTop_toTopOf="@+id/activity_main" />

    <!--TODO (7) Add an EditText-->
    <!--TODO (8) Give the EditText an id of @+id/et_search_box-->
    <!--TODO (9) Set the text size to 22sp-->
    <!--TODO (10) Set the width to match_parent and the height to wrap_content-->
    <!--TODO (11) Provide a hint telling the user to enter a query and then click search-->

    <!--TODO (12) Add a TextView-->
    <!--TODO (13) Give the TextView an id of @+id/tv_url_display-->
    <!--TODO (14) Set the text size to 22sp-->
    <!--TODO (15) Set the width to wrap_content and the height to wrap_content-->
    <!--TODO (16) Give the TextView a top margin of 8dp-->
    <!--TODO (17) Set the text to tell the user their search URL will show up here when they click search-->

    <!--TODO (18) Add a ScrollView-->
    <!--TODO (19) Set the width to match_parent and the height to wrap_content-->
    <!--TODO (20) Set the top margin to 16dp-->

    <!--TODO (21) Within the ScrollView, add a TextView-->
    <!--TODO (22) Give the TextView an id of @+id/tv_github_search_results_json-->
    <!--TODO (23) Set the text size to 18sp-->
    <!--TODO (24) Set the height and width to wrap_content-->
    <!--TODO (25) Set the text to something that tells the user to make a search-->

</android.support.constraint.ConstraintLayout>
```
|
Maurice Black may refer to:
Maurice Black (Australian politician) (1835–1899), (Maurice Hume Black) member of the Queensland Legislative Assembly
Maurice Black (Mississippi politician) (1915–2000), member of the Mississippi House of Representatives
Maurice Black (1891–1938), American movie actor
Maurice M. Black (1918–1996), American pathologist, expert on breast cancer
|
The Radio 4 Appeal is a British radio programme on BBC Radio 4. Each week a single speaker, usually a celebrity, appeals for support for a different charity (for example Paul Heiney appealed on behalf of Send a Cow in 2008, while Ross Noble appealed on behalf of Riders for Health in 2010). Listeners are invited to respond by sending cheques using a Freepost address, or can make payments online or by telephone. Listeners can also set up a standing order payment to support all 52 charities each year.
The programme is transmitted in a three minute block at 07:55 and 21:26 on Sunday, and at 15:27 on the following Thursday. It is governed by the BBC's Charity Appeal Policy.
Each year since 1927, the BBC has broadcast a special Christmas Appeal in association with St Martin-in-the-Fields church in Trafalgar Square, London. This raises funds which are divided equally between The Connection at St Martin's, which supports vulnerable and homeless people in central London, and the Vicar's Relief Fund, which makes grants, averaging £200, to people anywhere in the UK who are at risk of homelessness or with experience of being homeless. The 2012 Appeal raised a record £1.9 million.
In 2007–2008 the appeals raised £1,433,154.02 for 52 charities. Just over half of this, £725,000, was for the annual Christmas appeal, broadcast on 2 Dec 2007. Amounts raised for other charities in that year ranged from £1,366.00 (Jenni Murray appealing for BEAT (Eating Disorders Association) on 24 Feb 2008) to £53,988.00 (Krishnan Guru-Murthy appealing for Homeless International on 2 March 2008, one week later).
An earlier programme based on the same principle was The Week's Good Cause, which ran from 1926 (from 1939 in the BBC Home Service) until controller James Boyle's major reforms to Radio 4 in 1998.
References
External links
Radio 4 Appeal with link to audio of latest appeal and archive listing past appeals
Radio 4 Appeal page at the BBC Charity Appeal website
The St Martin-in-the-Fields BBC Radio 4 Christmas Appeal
The Connection at St Martin's
St Martin-in-the-Fields Vicar's Relief Fund
BBC Radio 4 programmes
|
```javascript
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --harmony-intl-segmenter
// Word-granularity segmentation smoke test: segments each sample text and
// checks the pieces re-join to the original string.
// NOTE(review): the non-Latin sample strings below appear to have been mangled
// by an encoding round-trip (they now contain only spaces/ASCII fragments) —
// restore them from the upstream test before relying on script coverage.
const seg = new Intl.Segmenter([], {granularity: "word"})
for (const text of [
"Hello world!", // English
" Hello world! ", // English with space before/after
" Hello world? Foo bar!", // English
"Jedovatou mambu objevila ena v zahrdksk kolonii.", // Czech
"Vit Nam: Nht th ha s khc Trung Quc?", // Vietnamese
" ", // Greek
" -400 ", // Russian
" ", // Hebrew,
" : .. ", // Arabic
" 400 , ", // Hindi
" ; !", // Tamil
"' '", // Telugu
"Ppupu", // Chinese
" ", // Thai
"( 20181006) - tenki.jp", // Japanese
" MB ", // Korean
]) {
const iter = seg.segment(text);
let prev = 0;
let segments = [];
// iter.following() advances to the next boundary; presumably it returns a
// truthy "done" flag at the end of the text (old Segmenter iterator API) —
// TODO confirm against the harmony-intl-segmenter proposal.
while (!iter.following()) {
assertTrue(["word", "none"].includes(iter.breakType), iter.breakType);
// boundary index must stay within the text and move strictly forward
assertTrue(iter.index >= 0);
assertTrue(iter.index <= text.length);
assertTrue(iter.index > prev);
segments.push(text.substring(prev, iter.index));
prev = iter.index;
}
// concatenating all segments must reproduce the input exactly
assertEquals(text, segments.join(""));
}
```
|
Pelican Lake First Nation ( cahcahkiw-sâkahikanihk, meaning: at the Pelican Lake) is a member of the Federation of Saskatchewan First Nations, the body that represents 74 First Nations in Saskatchewan.
Pelican Lake First Nation is located on the shore of Chitek Lake, approximately 170 miles northwest of Saskatoon, 115 miles northeast of North Battleford, and 120 miles west of Prince Albert.
Together with Witchekan Lake First Nation and Big River First Nation, Pelican Lake is affiliated with the Agency Chiefs Tribal Council.
Demographics
At the date of first survey there was a total of 112 band members. The population according to the 2006 census was 825. However, the registered population was 1,555 as of December 2013.
Government
Councilor Donny Rabbitskin
Councilor Willie Thomas
Councilor Tony Wolfe
Councilor Evan Chamakese
Councilor Greg S. Bill
Councilor Lee Bill
Chief Peter Bill (chief since March 2019)
History
At the date of first survey, the Pelican Lake First Nation received 8,630.4 acres of reserve land. Through the Treaty Land Entitlement (TLE) process, Pelican Lake First Nation was awarded an additional 36,714.66 acres in 1979. Further research, completed in 1992 by the Office of the Treaty Commissioner, determined that Pelican Lake First Nation was legally entitled to a minimum of 36,714 acres under Treaty No. Six. Pelican Lake First Nation voted in favor of ratifying the TLE Agreement in September 1995, which provided the Pelican Lake First Nation with a total of 30,753.06 additional equity acres to Reserve status. The TLE process will allow Pelican Lake First Nation to increase its total Reserve land to 45,345 acres.
Reserves
Pelican Lake First Nation has reserved for itself five reserves:
Chitek Lake 191
Pelican Lake 191A
Pelican Lake 191B
Pelican Lake 191C
Pelican Lake 191D
References
First Nations governments in Saskatchewan
|
```python
from pathlib import Path
from typing import List
import pytest
from raiden.storage.versions import filter_db_names, latest_db_file
def test_latest_db_file():
    """latest_db_file picks the highest version regardless of input order,
    returns None for an empty list, and asserts on malformed names."""
    cases = [
        (["v10_log.db", "v9_log.db"], "v10_log.db"),
        (["v9_log.db", "v10_log.db"], "v10_log.db"),
        (["v1_log.db", "v9_log.db"], "v9_log.db"),
        (["v9_log.db", "v1_log.db"], "v9_log.db"),
    ]
    for names, expected in cases:
        assert latest_db_file([Path(name) for name in names]) == Path(expected)

    assert latest_db_file([]) is None

    # Names that do not match the v<N>_log.db pattern must trip the assertion.
    for bad_name in ("a", ".db", "v9.db", "9_log.db", "va9_log.db", "v9a_log.db"):
        with pytest.raises(AssertionError):
            latest_db_file([Path(bad_name)])
def test_filter_db_names():
    """``filter_db_names`` keeps well-formed versioned names, preserving input order."""
    # Valid names pass through unchanged (as Path objects), in the given order.
    orderings = [
        ["v10_log.db", "v9_log.db"],
        ["v9_log.db", "v10_log.db"],
        ["v1_log.db", "v9_log.db"],
        ["v9_log.db", "v1_log.db"],
    ]
    for names in orderings:
        assert filter_db_names(names) == [Path(name) for name in names]
    # Malformed names are silently filtered out rather than raising.
    invalid_inputs: List[List[str]] = [
        [],
        ["a"],
        [".db"],
        ["v9.db"],
        ["9_log.db"],
        ["va9_log.db"],
        ["v9a_log.db"],
    ]
    for invalid in invalid_inputs:
        assert filter_db_names(invalid) == []
```
|
```xml
import fs from 'fs';
import { URL } from 'url';
import { LogLevel } from '@stryker-mutator/api/core';
import { factory, LoggingServer, testInjector } from '@stryker-mutator/test-helpers';
import { expect } from 'chai';
import { CheckResult, CheckStatus } from '@stryker-mutator/api/check';
import { CheckerFacade, createCheckerFactory } from '../../../src/checker/index.js';
import { coreTokens } from '../../../src/di/index.js';
import { LoggingClientContext } from '../../../src/logging/index.js';
import { IdGenerator } from '../../../src/child-proxy/id-generator.js';
import { TwoTimesTheCharm } from './additional-checkers.js';
// Integration suite for createCheckerFactory: boots a real logging server and real
// checker worker processes loaded from ./additional-checkers.js.
describe(`${createCheckerFactory.name} integration`, () => {
  let createSut: () => CheckerFacade;
  let loggingContext: LoggingClientContext;
  let sut: CheckerFacade;
  let loggingServer: LoggingServer;
  let pluginModulePaths: string[];

  // Best-effort delete; the counter file only exists after the two-times-the-charm test.
  function rmSync(fileName: string) {
    if (fs.existsSync(fileName)) {
      fs.unlinkSync(fileName);
    }
  }

  beforeEach(async () => {
    // Make sure there is a logging server listening
    pluginModulePaths = [new URL('./additional-checkers.js', import.meta.url).toString()];
    loggingServer = new LoggingServer();
    const port = await loggingServer.listen();
    loggingContext = { port, level: LogLevel.Trace };
    // Wire the DI container; the factory itself is created here, the checker worker
    // only spawns when createSut() + init() run inside each test (see arrangeSut).
    createSut = testInjector.injector
      .provideValue(coreTokens.loggingContext, loggingContext)
      .provideValue(coreTokens.pluginModulePaths, pluginModulePaths)
      .provideClass(coreTokens.workerIdGenerator, IdGenerator)
      .injectFunction(createCheckerFactory);
  });

  afterEach(async () => {
    // Dispose the worker before the logging server so the child can flush its logs.
    await sut.dispose?.();
    await loggingServer.dispose();
    rmSync(TwoTimesTheCharm.COUNTER_FILE);
  });

  // Selects the named checker plugin and initializes the checker worker process.
  async function arrangeSut(name: string): Promise<void> {
    testInjector.options.checkers = [name];
    sut = createSut();
    await sut.init?.();
  }

  it('should pass along the check result', async () => {
    const mutantRunPlan = factory.mutantRunPlan({ mutant: factory.mutant({ id: '1' }) });
    await arrangeSut('healthy');
    const expected: CheckResult = { status: CheckStatus.Passed };
    expect(await sut.check('healthy', [mutantRunPlan])).deep.eq([[mutantRunPlan, expected]]);
  });

  it('should reject when the checker behind rejects', async () => {
    await arrangeSut('crashing');
    await expect(sut.check('crashing', [factory.mutantRunPlan()])).rejectedWith('Always crashing');
  });

  it('should recover when the checker behind rejects', async () => {
    const mutantRunPlan = factory.mutantRunPlan();
    // The checker crashes until the counter file reaches its threshold; seed it at 0
    // so the worker crashes twice and then succeeds.
    await fs.promises.writeFile(TwoTimesTheCharm.COUNTER_FILE, '0', 'utf-8');
    await arrangeSut('two-times-the-charm');
    const actual = await sut.check('two-times-the-charm', [mutantRunPlan]);
    const expected: CheckResult = { status: CheckStatus.Passed };
    expect(actual).deep.eq([[mutantRunPlan, expected]]);
  });

  it('should provide the nodeArgs', async () => {
    // Arrange
    const passingMutantRunPlan = factory.mutantRunPlan({ mutant: factory.mutant({ fileName: 'shouldProvideNodeArgs' }) });
    const failingMutantRunPlan = factory.mutantRunPlan({ mutant: factory.mutant({ fileName: 'somethingElse' }) });
    // The verify-title checker passes only when the mutant's fileName matches the
    // process title set through checkerNodeArgs.
    testInjector.options.checkerNodeArgs = ['--title=shouldProvideNodeArgs'];
    // Act
    await arrangeSut('verify-title');
    const passed = await sut.check('verify-title', [passingMutantRunPlan]);
    const failed = await sut.check('verify-title', [failingMutantRunPlan]);
    // Assert
    expect(passed).deep.eq([[passingMutantRunPlan, factory.checkResult({ status: CheckStatus.Passed })]]);
    expect(failed).deep.eq([[failingMutantRunPlan, factory.checkResult({ status: CheckStatus.CompileError })]]);
  });
});
```
|
```java
/**
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
package com.facebook.keyframes;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.ColorFilter;
import android.graphics.LinearGradient;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.PixelFormat;
import android.graphics.Rect;
import android.graphics.Region;
import android.graphics.Shader;
import android.graphics.drawable.Drawable;
import android.util.SparseArray;
import com.facebook.keyframes.model.KFAnimationGroup;
import com.facebook.keyframes.model.KFFeature;
import com.facebook.keyframes.model.KFGradient;
import com.facebook.keyframes.model.KFImage;
import com.facebook.keyframes.model.keyframedmodels.KeyFramedFillColor;
import com.facebook.keyframes.model.keyframedmodels.KeyFramedGradient;
import com.facebook.keyframes.model.keyframedmodels.KeyFramedOpacity;
import com.facebook.keyframes.model.keyframedmodels.KeyFramedPath;
import com.facebook.keyframes.model.keyframedmodels.KeyFramedStrokeColor;
import com.facebook.keyframes.model.keyframedmodels.KeyFramedStrokeWidth;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* This drawable will render a KFImage model by painting paths to the supplied canvas in
* {@link #draw(Canvas)}. There are methods to begin and end animation playback here, which need to
* be managed carefully so as not to leave animation callbacks running indefinitely. At each
* animation callback, the next frame's matrices and paths are calculated and the drawable is then
* invalidated.
*/
public class KeyframesDrawable extends Drawable
    implements KeyframesDrawableAnimationCallback.FrameListener, KeyframesDirectionallyScalingDrawable {

  // Number of cached gradient shaders generated per second of animation.
  private static final float GRADIENT_PRECISION_PER_SECOND = 30;

  /**
   * The KFImage object to render.
   */
  private final KFImage mKFImage;
  /**
   * A recyclable {@link Paint} object used to draw all of the features.
   */
  private final Paint mDrawingPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
  /**
   * The list of all {@link FeatureState}s, containing all information needed to render a feature
   * for the current progress of animation.
   */
  private final List<FeatureState> mFeatureStateList;
  /**
   * The current state of animation layer matrices for this animation, keyed by animation group id.
   */
  private final SparseArray<Matrix> mAnimationGroupMatrices;
  /**
   * The animation callback object used to start and stop the animation.
   */
  private final KeyframesDrawableAnimationCallback mKeyframesDrawableAnimationCallback;
  /**
   * A recyclable matrix that can be reused.
   */
  private final Matrix mRecyclableTransformMatrix;
  /**
   * The scale matrix to be applied for the final size of this drawable.
   */
  private final Matrix mScaleMatrix;
  private final Matrix mInverseScaleMatrix;
  /**
   * The currently set width and height of this drawable.
   */
  private int mSetWidth;
  private int mSetHeight;
  /**
   * The X and Y scales to be used, calculated from the set dimensions compared with the exported
   * canvas size of the image.
   */
  private float mScale;
  private float mScaleFromCenter;
  private float mScaleFromEnd;

  // Optional experimental bitmaps backing image features, keyed by backed-image name;
  // null when the builder supplied none.
  private final Map<String, Bitmap> mBitmaps;
  private boolean mClipToAECanvas;
  // Guards the one-time initial setFrameProgress(0) in setBounds.
  private boolean mHasInitialized = false;

  /**
   * Create a new KeyframesDrawable with the supplied values from the builder.
   * @param builder
   */
  KeyframesDrawable(KeyframesDrawableBuilder builder) {
    mKFImage = builder.getImage();
    mBitmaps = builder.getExperimentalFeatures().getBitmaps() == null ?
        null :
        Collections.unmodifiableMap(builder.getExperimentalFeatures().getBitmaps());
    mRecyclableTransformMatrix = new Matrix();
    mScaleMatrix = new Matrix();
    mInverseScaleMatrix = new Matrix();
    mKeyframesDrawableAnimationCallback = KeyframesDrawableAnimationCallback.create(this, mKFImage);
    mDrawingPaint.setStrokeCap(Paint.Cap.ROUND);

    // Setup feature state list
    List<FeatureState> featureStateList = new ArrayList<>();
    for (int i = 0, len = mKFImage.getFeatures().size(); i < len; i++) {
      featureStateList.add(new FeatureState(mKFImage.getFeatures().get(i)));
    }
    mFeatureStateList = Collections.unmodifiableList(featureStateList);

    // Setup animation layers
    mAnimationGroupMatrices = new SparseArray<>();
    List<KFAnimationGroup> animationGroups = mKFImage.getAnimationGroups();
    for (int i = 0, len = animationGroups.size(); i < len; i++) {
      mAnimationGroupMatrices.put(animationGroups.get(i).getGroupId(), new Matrix());
    }

    setMaxFrameRate(builder.getMaxFrameRate());
    mClipToAECanvas = builder.getExperimentalFeatures().getClipToAECanvas();
  }

  /**
   * Sets the bounds of this drawable. Here, we calculate values needed to scale the image from the
   * size it was when exported to a size to be drawn on the Android canvas.
   */
  @Override
  public void setBounds(int left, int top, int right, int bottom) {
    super.setBounds(left, top, right, bottom);
    mSetWidth = right - left;
    mSetHeight = bottom - top;

    float idealXScale = (float) mSetWidth / mKFImage.getCanvasSize()[0];
    float idealYScale = (float) mSetHeight / mKFImage.getCanvasSize()[1];

    // Uniform scale: fit the exported canvas inside the bounds, preserving aspect ratio.
    mScale = Math.min(idealXScale, idealYScale);
    calculateScaleMatrix(1, 1, ScaleDirection.UP);

    if (!mHasInitialized) {
      // Call this at least once or else nothing will render. But if this is called this every time
      // setBounds is called then the animation will reset when resizing.
      setFrameProgress(0);
    }
  }

  /**
   * Applies an additional directional scale on top of the base scale computed in
   * {@link #setBounds}. See {@link #calculateScaleMatrix}.
   */
  @Override
  public void setDirectionalScale(
      float scaleFromCenter,
      float scaleFromEnd,
      ScaleDirection direction) {
    calculateScaleMatrix(scaleFromCenter, scaleFromEnd, direction);
  }

  /**
   * Iterates over the current state of mPathsForDrawing and draws each path, applying properties
   * of the feature to a recycled Paint object.
   */
  @Override
  public void draw(Canvas canvas) {
    Rect currBounds = getBounds();
    // Translation is undone manually at the end of this method instead of via save/restore.
    canvas.translate(currBounds.left, currBounds.top);
    if (mClipToAECanvas) {
      // NOTE(review): this clipRect has no matching canvas.save()/restore(), so the clip
      // persists beyond this method on the caller's canvas — confirm callers save state.
      canvas.clipRect(
          0,
          0,
          mKFImage.getCanvasSize()[0] * mScale * mScaleFromEnd * mScaleFromCenter,
          mKFImage.getCanvasSize()[1] * mScale * mScaleFromEnd * mScaleFromCenter);
    }
    KFPath pathToDraw;
    FeatureState featureState;
    for (int i = 0, len = mFeatureStateList.size(); i < len; i++) {
      featureState = mFeatureStateList.get(i);
      if (!featureState.isVisible()) {
        continue;
      }
      // We need paint for drawing (opacity animated) bitmaps and paths
      mDrawingPaint.setShader(null);
      mDrawingPaint.setAlpha(featureState.getAlpha());

      // Drawing bitmap
      final Bitmap backedImage = featureState.getBackedImageBitmap();
      final Matrix uniqueFeatureMatrix = featureState.getUniqueFeatureMatrix();
      if (backedImage != null && uniqueFeatureMatrix != null) {
        // This block is for the experimental bitmap supporting
        canvas.save();
        canvas.concat(mScaleMatrix);
        canvas.drawBitmap(backedImage, uniqueFeatureMatrix, mDrawingPaint);
        canvas.restore();
        continue;
      }

      // Drawing path
      pathToDraw = featureState.getCurrentPathForDrawing();
      if (pathToDraw == null || pathToDraw.isEmpty()) {
        continue;
      }
      if (featureState.getCurrentMaskPath() != null) {
        canvas.save();
        applyScaleAndClipCanvas(canvas, featureState.getCurrentMaskPath(), Region.Op.INTERSECT);
      }
      mDrawingPaint.setStrokeCap(featureState.getStrokeLineCap());
      if (featureState.getFillColor() != Color.TRANSPARENT) {
        mDrawingPaint.setStyle(Paint.Style.FILL);
        if (featureState.getCurrentShader() == null) {
          mDrawingPaint.setColor(featureState.getFillColor());
          // setColor resets alpha, so re-apply the feature's animated opacity.
          mDrawingPaint.setAlpha(featureState.getAlpha());
          applyScaleAndDrawPath(canvas, pathToDraw, mDrawingPaint);
        } else {
          mDrawingPaint.setShader(featureState.getCurrentShader());
          applyScaleToCanvasAndDrawPath(canvas, pathToDraw, mDrawingPaint);
        }
      }
      if (featureState.getStrokeColor() != Color.TRANSPARENT && featureState.getStrokeWidth() > 0) {
        mDrawingPaint.setColor(featureState.getStrokeColor());
        mDrawingPaint.setStyle(Paint.Style.STROKE);
        // Stroke width is scaled to match the drawable's overall scale factors.
        mDrawingPaint.setStrokeWidth(
            featureState.getStrokeWidth() * mScale * mScaleFromCenter * mScaleFromEnd);
        applyScaleAndDrawPath(canvas, pathToDraw, mDrawingPaint);
      }
      if (featureState.getCurrentMaskPath() != null) {
        canvas.restore();
      }
    }
    // Undo the bounds translation applied at the top of this method.
    canvas.translate(-currBounds.left, -currBounds.top);
  }

  // Temporarily scales the path into canvas space to clip, then transforms it back
  // so the cached path stays in unscaled coordinates.
  private void applyScaleAndClipCanvas(Canvas canvas, KFPath path, Region.Op op) {
    path.transform(mScaleMatrix);
    canvas.clipPath(path.getPath(), op);
    path.transform(mInverseScaleMatrix);
  }

  // Temporarily scales the path into canvas space to draw, then transforms it back.
  private void applyScaleAndDrawPath(Canvas canvas, KFPath path, Paint paint) {
    path.transform(mScaleMatrix);
    canvas.drawPath(path.getPath(), paint);
    path.transform(mInverseScaleMatrix);
  }

  /**
   * Note: This method is only necessary because of cached gradient shaders with a fixed size. We
   * need to scale the canvas in this case rather than scaling the path.
   */
  private void applyScaleToCanvasAndDrawPath(Canvas canvas, KFPath path, Paint paint) {
    canvas.concat(mScaleMatrix);
    canvas.drawPath(path.getPath(), paint);
    canvas.concat(mInverseScaleMatrix);
  }

  /**
   * Unsupported for now
   */
  @Override
  public void setAlpha(int alpha) {
  }

  /**
   * Unsupported for now
   */
  @Override
  public void setColorFilter(ColorFilter cf) {
  }

  /**
   * Unsupported for now
   */
  @Override
  public int getOpacity() {
    return PixelFormat.OPAQUE;
  }

  /**
   * Starts the animation callbacks for this drawable. A corresponding call to
   * {@link #stopAnimationAtLoopEnd()}, {@link #stopAnimation()} or {@link #pauseAnimation()}
   * needs to be called eventually, or the callback will continue to post callbacks
   * for this drawable indefinitely.
   */
  public void startAnimation() {
    mKeyframesDrawableAnimationCallback.start();
  }

  /**
   * Starts the animation and plays it once
   */
  public void playOnce() {
    mKeyframesDrawableAnimationCallback.playOnce();
  }

  /**
   * Stops the animation callbacks for this drawable immediately.
   */
  public void stopAnimation() {
    mKeyframesDrawableAnimationCallback.stop();
  }

  /**
   * Pauses the animation callbacks for this drawable immediately.
   */
  public void pauseAnimation() {
    mKeyframesDrawableAnimationCallback.pause();
  }

  /**
   * Resumes the animation callbacks for this drawable. A corresponding call to
   * {@link #stopAnimationAtLoopEnd()}, {@link #stopAnimation()} or {@link #pauseAnimation()}
   * needs to be called eventually, or the callback will continue to post callbacks
   * for this drawable indefinitely.
   */
  public void resumeAnimation() {
    mKeyframesDrawableAnimationCallback.resume();
  }

  /**
   * Finishes the current playthrough of the animation and stops animating this drawable afterwards.
   */
  public void stopAnimationAtLoopEnd() {
    mKeyframesDrawableAnimationCallback.stopAtLoopEnd();
  }

  /**
   * Given a progress in terms of frames, calculates each of the paths needed to be drawn in
   * {@link #draw(Canvas)}.
   */
  public void setFrameProgress(float frameProgress) {
    mHasInitialized = true;
    mKFImage.setAnimationMatrices(mAnimationGroupMatrices, frameProgress);
    for (int i = 0, len = mFeatureStateList.size(); i < len; i++) {
      mFeatureStateList.get(i).setupFeatureStateForProgress(frameProgress);
    }
  }

  /**
   * Stops any running animation and jumps to the given normalized progress (0..1 of the
   * image's frame count). Does not invalidate; see {@link #onProgressUpdate(float)}.
   */
  public void seekToProgress(float progress) {
    stopAnimation();
    onProgressUpdate(progress * mKFImage.getFrameCount());
  }

  /**
   * The callback used to update the frame progress of this drawable. This leads to a recalculation
   * of the paths that need to be drawn before the Drawable invalidates itself.
   */
  @Override
  public void onProgressUpdate(float frameProgress) {
    setFrameProgress(frameProgress);
    invalidateSelf();
  }

  /**
   * Notifies the registered {@link OnAnimationEnd} listener (if still alive) exactly once,
   * then clears the weak reference so the listener cannot fire again.
   */
  @Override
  public void onStop() {
    if (mOnAnimationEnd == null) {
      return;
    }
    final OnAnimationEnd onAnimationEnd = mOnAnimationEnd.get();
    if (onAnimationEnd == null) {
      return;
    }
    onAnimationEnd.onAnimationEnd();
    mOnAnimationEnd.clear();
  }

  // Held weakly so a listener (often an Activity/View) is not leaked by this drawable.
  private WeakReference<OnAnimationEnd> mOnAnimationEnd;

  /** Registers a one-shot listener invoked from {@link #onStop()}. */
  public void setAnimationListener(OnAnimationEnd listener) {
    mOnAnimationEnd = new WeakReference<>(listener);
  }

  /**
   * Rebuilds mScaleMatrix (and its inverse) from the base scale plus the directional
   * scale parameters. No-op when the directional scales are unchanged.
   */
  private void calculateScaleMatrix(
      float scaleFromCenter,
      float scaleFromEnd,
      ScaleDirection scaleDirection) {
    if (mScaleFromCenter == scaleFromCenter &&
        mScaleFromEnd == scaleFromEnd) {
      return;
    }

    mScaleMatrix.setScale(mScale, mScale);
    if (scaleFromCenter == 1 && scaleFromEnd == 1) {
      mScaleFromCenter = 1;
      mScaleFromEnd = 1;
      mScaleMatrix.invert(mInverseScaleMatrix);
      return;
    }

    // "From end" pivots at the bottom edge when scaling UP, the top edge otherwise.
    float scaleYPoint = scaleDirection == ScaleDirection.UP ? mSetHeight : 0;
    mScaleMatrix.postScale(scaleFromCenter, scaleFromCenter, mSetWidth / 2, mSetHeight / 2);
    mScaleMatrix.postScale(scaleFromEnd, scaleFromEnd, mSetWidth / 2, scaleYPoint);
    mScaleFromCenter = scaleFromCenter;
    mScaleFromEnd = scaleFromEnd;
    mScaleMatrix.invert(mInverseScaleMatrix);
  }

  /**
   * Cap the frame rate to a specific FPS. Consider using this for low end devices.
   * Calls {@link KeyframesDrawableAnimationCallback#setMaxFrameRate}
   * @param maxFrameRate
   */
  public void setMaxFrameRate(int maxFrameRate) {
    mKeyframesDrawableAnimationCallback.setMaxFrameRate(maxFrameRate);
  }

  /** Returns the backing image model this drawable renders. */
  public KFImage getKFImage() {
    return mKFImage;
  }

  /**
   * Per-feature mutable render state: the transformed path, mask, stroke/fill/opacity
   * values, and cached gradient shaders for the current animation progress.
   */
  private class FeatureState {
    private final KFFeature mFeature;

    // Reuseable modifiable objects for drawing
    private final KFPath mPath;
    private final KFPath mFeatureMaskPath;
    private final KeyFramedStrokeWidth.StrokeWidth mStrokeWidth;
    private final KeyFramedStrokeColor.StrokeColor mStrokeColor;
    private final KeyFramedFillColor.FillColor mFillColor;
    private final KeyFramedOpacity.Opacity mOpacity;
    private final Matrix mFeatureMatrix;
    private final float[] mMatrixValueRecyclableArray = new float[9];
    private final Matrix mFeatureMaskMatrix;
    private boolean mIsVisible;

    public Matrix getUniqueFeatureMatrix() {
      if (mFeatureMatrix == mRecyclableTransformMatrix) {
        // Don't return a matrix unless it's known to be unique for this feature
        return null;
      }
      return mFeatureMatrix;
    }

    // Cached shader vars
    private Shader[] mCachedShaders;
    private Shader mCurrentShader;

    public FeatureState(KFFeature feature) {
      mFeature = feature;
      if (hasCustomDrawable()) {
        mPath = null;
        mStrokeWidth = null;
        mStrokeColor = null;
        mFillColor = null;
        // Bitmap features use the matrix later in draw()
        // so there's no way to reuse a globally cached matrix
        mFeatureMatrix = new Matrix();
      } else {
        mPath = new KFPath();
        mStrokeWidth = new KeyFramedStrokeWidth.StrokeWidth();
        mStrokeColor = new KeyFramedStrokeColor.StrokeColor();
        mFillColor = new KeyFramedFillColor.FillColor();
        // Path features use the matrix immediately
        // so there's no need to waste memory with a unique copy
        mFeatureMatrix = mRecyclableTransformMatrix;
      }
      mOpacity = new KeyFramedOpacity.Opacity();
      if (mFeature.getFeatureMask() != null) {
        mFeatureMaskPath = new KFPath();
        mFeatureMaskMatrix = new Matrix();
      } else {
        mFeatureMaskPath = null;
        mFeatureMaskMatrix = null;
      }
      assert mFeatureMatrix != null;
    }

    // Recomputes all render state (visibility, matrices, path, colors, shader, mask)
    // for the given frame progress.
    public void setupFeatureStateForProgress(float frameProgress) {
      if (frameProgress < mFeature.getFromFrame() || frameProgress > mFeature.getToFrame()) {
        mIsVisible = false;
        return;
      }
      mIsVisible = true;
      mFeature.setAnimationMatrix(mFeatureMatrix, frameProgress);
      Matrix layerTransformMatrix = mAnimationGroupMatrices.get(mFeature.getAnimationGroup());

      if (layerTransformMatrix != null && !layerTransformMatrix.isIdentity()) {
        mFeatureMatrix.postConcat(layerTransformMatrix);
      }
      mFeature.setOpacity(mOpacity, frameProgress);

      KeyFramedPath path = mFeature.getPath();
      if (hasCustomDrawable() || path == null) {
        return; // skip all the path stuff
      }
      mPath.reset();
      path.apply(frameProgress, mPath);
      mPath.transform(mFeatureMatrix);

      mFeature.setStrokeWidth(mStrokeWidth, frameProgress);
      mFeature.setStrokeColor(mStrokeColor, frameProgress);
      mFeature.setFillColor(mFillColor, frameProgress);
      mStrokeWidth.adjustScale(extractScaleFromMatrix(mFeatureMatrix));
      if (mFeature.getEffect() != null) {
        prepareShadersForFeature(mFeature);
      }
      mCurrentShader = getNearestShaderForFeature(frameProgress);

      if (mFeature.getFeatureMask() != null) {
        mFeature.getFeatureMask().setAnimationMatrix(mFeatureMaskMatrix, frameProgress);
        mFeatureMaskPath.reset();
        mFeature.getFeatureMask().getPath().apply(frameProgress, mFeatureMaskPath);
        mFeatureMaskPath.transform(mFeatureMaskMatrix);
      }
    }

    public KFPath getCurrentPathForDrawing() {
      return mPath;
    }

    public KFPath getCurrentMaskPath() {
      return mFeatureMaskPath;
    }

    public float getStrokeWidth() {
      return mStrokeWidth != null ? mStrokeWidth.getStrokeWidth() : 0;
    }

    // Opacity is keyframed in percent (0..100); normalize to 0..1.
    public float getOpacity() {
      return mOpacity.getOpacity() / 100;
    }

    public int getAlpha() {
      return Math.round(0xFF * getOpacity());
    }

    public Shader getCurrentShader() {
      return mCurrentShader;
    }

    // Animated stroke color wins over the feature's static stroke color.
    public int getStrokeColor() {
      if (mStrokeColor != null && mStrokeColor.hasStrokeColor()) {
        return (int)mStrokeColor.getStrokeColor();
      }
      return mFeature.getStrokeColor();
    }

    // Animated fill color wins over the feature's static fill color.
    public int getFillColor() {
      if (mFillColor != null && mFillColor.hasFillColor()) {
        return (int)mFillColor.getFillColor();
      }
      return mFeature.getFillColor();
    }

    public Paint.Cap getStrokeLineCap() {
      return mFeature.getStrokeLineCap();
    }

    public boolean isVisible() {
      return mIsVisible;
    }

    // Lazily builds a lookup table of gradient shaders sampled across the animation,
    // so draw() never constructs a LinearGradient per frame.
    private void prepareShadersForFeature(KFFeature feature) {
      if (mCachedShaders != null) {
        return;
      }

      int frameRate = mKFImage.getFrameRate();
      int numFrames = mKFImage.getFrameCount();
      int precision = Math.round(GRADIENT_PRECISION_PER_SECOND * numFrames / frameRate);
      // NOTE(review): the array has precision + 1 slots but the loop below fills only
      // indices [0, precision); getNearestShaderForFeature can index the last slot at
      // frameProgress == frameCount and receive null — confirm this is intended.
      mCachedShaders = new LinearGradient[precision + 1];
      float progress;
      KeyFramedGradient.GradientColorPair colorPair = new KeyFramedGradient.GradientColorPair();
      KFGradient gradient = feature.getEffect().getGradient();
      for (int i = 0; i < precision; i++) {
        progress = i / (float) (precision) * numFrames;
        gradient.getStartGradient().apply(progress, colorPair);
        gradient.getEndGradient().apply(progress, colorPair);
        mCachedShaders[i] = new LinearGradient(
            0,
            0,
            0,
            mKFImage.getCanvasSize()[1],
            colorPair.getStartColor(),
            colorPair.getEndColor(),
            Shader.TileMode.CLAMP);
      }
    }

    // Maps frame progress onto the nearest precomputed shader in the cache.
    public Shader getNearestShaderForFeature(float frameProgress) {
      if (mCachedShaders == null) {
        return null;
      }
      int shaderIndex =
          (int) ((frameProgress / mKFImage.getFrameCount()) * (mCachedShaders.length - 1));
      return mCachedShaders[shaderIndex];
    }

    public final Bitmap getBackedImageBitmap() {
      if (mBitmaps == null) return null;
      return mBitmaps.get(mFeature.getBackedImageName());
    }

    // A feature is "custom drawable" when an experimental bitmap backs it.
    private boolean hasCustomDrawable() {
      return getBackedImageBitmap() != null;
    }

    // Approximates the matrix's uniform scale as the mean of |scaleX| and |scaleY|.
    private float extractScaleFromMatrix(Matrix matrix) {
      matrix.getValues(mMatrixValueRecyclableArray);
      return (Math.abs(mMatrixValueRecyclableArray[0]) +
          Math.abs(mMatrixValueRecyclableArray[4])) / 2f;
    }
  }

  /** One-shot callback fired when the animation stops; see {@link #setAnimationListener}. */
  public interface OnAnimationEnd {
    void onAnimationEnd();
  }
}
```
|
```c++
path_to_url
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "paddle/common/flags.h"
#include "test/cpp/inference/api/tester_helper.h"
namespace paddle_infer {
// Smoke test: build a GPU predictor, clone it, run one inference on a zero-filled
// 1x3x318x318 input, copy the output back, and release intermediate tensors.
TEST(Predictor, use_gpu) {
  LOG(INFO) << GetVersion();
  UpdateDllFlag("conv_workspace_size_limit", "4000");
  std::string model_dir = FLAGS_infer_model + "/model";
  Config config;
  config.SetModel(model_dir + "/model", model_dir + "/params");
  config.EnableUseGpu(100, 0);  // 100 MB initial GPU memory on device 0

  auto predictor = CreatePredictor(config);
  auto pred_clone = predictor->Clone();

  std::vector<int> in_shape = {1, 3, 318, 318};
  // Element count = product of dims. std::multiplies replaces the original lambda
  // that bound its operands by non-const lvalue reference (ill-formed for the
  // rvalue intermediates std::accumulate produces) and matches the idiom used in
  // the other tests in this file.
  int in_num = std::accumulate(
      in_shape.begin(), in_shape.end(), 1, std::multiplies<int>());

  std::vector<float> input(in_num, 0);
  auto input_names = predictor->GetInputNames();
  auto input_t = predictor->GetInputHandle(input_names[0]);
  input_t->Reshape(in_shape);
  input_t->CopyFromCpu(input.data());

  predictor->Run();

  auto output_names = predictor->GetOutputNames();
  auto output_t = predictor->GetOutputHandle(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(
      output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
  std::vector<float> out_data;
  out_data.resize(out_num);
  output_t->CopyToCpu(out_data.data());
  predictor->ClearIntermediateTensor();
}
// Smoke test for services::PredictorPool: retrieve one of four pooled predictors,
// run a zero-filled inference, and inspect the output tensor's type and raw data.
TEST(PredictorPool, basic) {
  LOG(INFO) << GetVersion();
  UpdateDllFlag("conv_workspace_size_limit", "4000");
  std::string model_dir = FLAGS_infer_model + "/model";
  Config config;
  config.SetModel(model_dir + "/model", model_dir + "/params");
  config.EnableUseGpu(100, 0);  // 100 MB initial GPU memory on device 0

  services::PredictorPool pred_pool(config, 4);
  auto pred = pred_pool.Retrieve(2);

  std::vector<int> in_shape = {1, 3, 318, 318};
  // Element count = product of dims. std::multiplies replaces the original lambda
  // that bound its operands by non-const lvalue reference (ill-formed for the
  // rvalue intermediates std::accumulate produces) and matches the idiom used in
  // the other tests in this file.
  int in_num = std::accumulate(
      in_shape.begin(), in_shape.end(), 1, std::multiplies<int>());

  std::vector<float> input(in_num, 0);
  auto in_names = pred->GetInputNames();
  auto input_t = pred->GetInputHandle(in_names[0]);
  input_t->name();
  input_t->Reshape(in_shape);
  input_t->CopyFromCpu(input.data());

  pred->Run();

  auto out_names = pred->GetOutputNames();
  auto output_t = pred->GetOutputHandle(out_names[0]);
  auto out_type = output_t->type();
  LOG(INFO) << GetNumBytesOfDataType(out_type);
  if (out_type == DataType::FLOAT32) {
    // Access the device buffer directly to exercise the data<T>() API.
    PlaceType place;
    int size;
    output_t->data<float>(&place, &size);
  }
}
} // namespace paddle_infer
```
|
```c#
@* Standalone report-management edit page: no shared layout, renders its own head/body. *@
@{
    Layout = null;
}
<!DOCTYPE html>
<!--[if IE 8]> <html lang="zh" class="ie8"> <![endif]-->
<!--[if IE 9]> <html lang="zh" class="ie9"> <![endif]-->
<!--[if !IE]><!-->
<html lang="zh">
<!--<![endif]-->
<!-- BEGIN HEAD -->
<head>
    <meta charset="utf-8" />
    <meta content="width=device-width, initial-scale=1.0" name="viewport" />
    @Html.Partial("_Title")
    @Html.Raw(ViewBag.CssFile)
</head>
<body class="fixed-top">
    @* Shared chrome: top bar and collapsible left menu (state comes from ViewBag.MenuStatus). *@
    @Html.Partial("_Top")
    <div id="container" class="row-fluid @ViewBag.MenuStatus">
        @Html.Partial("_LeftMenu")
        <div id="body">
            @* Placeholder modal opened by the widget's "config" tool link below. *@
            <div id="widget-config" class="modal hide">
                <div class="modal-header">
                    <button data-dismiss="modal" class="close" type="button"></button>
                    <h3>widget Settings</h3>
                </div>
                <div class="modal-body">
                    <p>Here will be a configuration form</p>
                </div>
            </div>
            <div class="container-fluid">
                <div class="row-fluid">
                    <div class="span12">
                        @Html.Raw(ViewBag.NavMenu)
                    </div>
                </div>
                <div class="row-fluid">
                    <div class="span12">
                        <div class="widget box light-grey">
                            <div class="widget-title">
                                <h4><i class="icon-globe"></i></h4>
                                <div class="tools">
                                    <a href="javascript:;" class="collapse"></a>
                                    <a href="#widget-config" data-toggle="modal" class="config"></a>
                                    <a href="javascript:;" class="reload"></a>
                                    <a href="javascript:;" class="remove"></a>
                                </div>
                            </div>
                            <div class="widget-body">
                                @* Report header form, pre-filled from ViewBag.Entity; ReportNum and
                                   FileName are read-only (disabled). *@
                                <div class="row-fluid" style="border: 1px solid #DDDDDD;">
                                    <div class="widget-body form">
                                        <form class="horizontal-form">
                                            <div class="row-fluid">
                                                <div class="span3">
                                                    <div class="control-group">
                                                        <label class="control-label" for="txtReportNum"></label>
                                                        <div class="controls">
                                                            <input type="text" id="txtReportNum" name="txtReportNum" class="span12" placeholder="," disabled="disabled" value="@ViewBag.Entity.ReportNum" />
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                                <div class="span3">
                                                    <div class="control-group">
                                                        <label class="control-label" for="txtReportName"></label>
                                                        <div class="controls">
                                                            <input type="text" id="txtReportName" name="txtReportName" class="span12" placeholder="" value="@ViewBag.Entity.ReportName" />
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                                <div class="span3">
                                                    <div class="control-group">
                                                        <label class="control-label" for="ddlReportType"></label>
                                                        <div class="controls">
                                                            <select id="ddlReportType" name="ddlReportType" class="span12">
                                                                @Html.Raw(ViewBag.ReportType)
                                                            </select>
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                                <div class="span3">
                                                    <div class="control-group">
                                                        <label class="control-label" for="txtRemark"></label>
                                                        <div class="controls">
                                                            <input type="text" id="txtRemark" name="txtRemark" class="span12" placeholder="" value="@ViewBag.Entity.Remark" />
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                            </div>
                                            <div class="row-fluid">
                                                <div class="span3">
                                                    <div class="control-group">
                                                        <label class="control-label" for="ddlDsType"></label>
                                                        <div class="controls">
                                                            <select id="ddlDsType" name="ddlDsType" class="span12">
                                                                @Html.Raw(ViewBag.DataSourceType)
                                                            </select>
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                                <div class="span9">
                                                    <div class="control-group">
                                                        <label class="control-label" for="txtDataSource"></label>
                                                        <div class="controls">
                                                            <input type="text" id="txtDataSource" name="txtDataSource" class="span12" placeholder="" value="@ViewBag.Entity.DataSource" />
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                            </div>
                                            <div class="row-fluid">
                                                <div class="span6">
                                                    <div class="control-group">
                                                        <label class="control-label" for="txtFileName"></label>
                                                        <div class="controls">
                                                            <input type="text" id="txtFileName" name="txtFileName" class="span12" placeholder="" disabled="disabled" value="@ViewBag.Entity.FileName" />
                                                            <span class="help-block"></span>
                                                        </div>
                                                    </div>
                                                </div>
                                            </div>
                                        </form>
                                    </div>
                                </div>
                                <br />
                                @* Parameter grid; tbody is populated client-side by ReportManage.LoadData(). *@
                                <table class="table table-striped table-bordered table-hover" id="tabInfo">
                                    <thead>
                                        <tr class="widget-title">
                                            <th class="hidden-480"></th>
                                            <th class="hidden-480"></th>
                                            <th class="hidden-480"></th>
                                            <th class="hidden-480"></th>
                                            <th class="hidden-480"></th>
                                            <th class="hidden-480"></th>
                                            <th class="hidden-480"></th>
                                        </tr>
                                    </thead>
                                    <tbody></tbody>
                                </table>
                                <div class="form-actions formBtn">
                                    <button id="btnUpload" type="button" class="btn btn-primary"><i class="icon-eye-open"></i></button>
                                    <button id="btnAddParam" type="button" class="btn btn-primary" onclick="ReportManage.ShowDialog()"><i class="icon-eye-open"></i></button>
                                    <button type="submit" class="btn btn-primary" onclick="ReportManage.Save()"><i class="icon-ok"></i></button>
                                    @* NOTE(review): every other handler here calls ReportManage.*; confirm
                                       orderProduct.Cancel() is intentional and not a copy-paste leftover. *@
                                    <button type="button" class="btn" onclick="orderProduct.Cancel()"></button>
                                </div>
                            </div>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>
    @Html.Partial("_Foot")
    <script type="text/javascript">
        @* Page bootstrap: wire the uploader, load the parameter grid, and fetch
           metadata when Enter is pressed in the data-source box. *@
        $(document).ready(function () {
            ReportManage.UploadCheck();
            ReportManage.LoadData();
            $('#txtDataSource').keydown(function (e) {
                if (e.keyCode == 13) {
                    ReportManage.GetMetadata();
                }
            });
        });
    </script>
</body>
</html>
```
|
```php
<?php
/*
*
* File ini bagian dari:
*
* OpenSID
*
* Sistem informasi desa sumber terbuka untuk memajukan desa
*
* Aplikasi dan source code ini dirilis berdasarkan lisensi GPL V3
*
* Hak Cipta 2009 - 2015 Combine Resource Institution (path_to_url
* Hak Cipta 2016 - 2024 Perkumpulan Desa Digital Terbuka (path_to_url
*
* Dengan ini diberikan izin, secara gratis, kepada siapa pun yang mendapatkan salinan
* dari perangkat lunak ini dan file dokumentasi terkait ("Aplikasi Ini"), untuk diperlakukan
* tanpa batasan, termasuk hak untuk menggunakan, menyalin, mengubah dan/atau mendistribusikan,
* asal tunduk pada syarat berikut:
*
* Pemberitahuan hak cipta di atas dan pemberitahuan izin ini harus disertakan dalam
* setiap salinan atau bagian penting Aplikasi Ini. Barang siapa yang menghapus atau menghilangkan
* pemberitahuan ini melanggar ketentuan lisensi Aplikasi Ini.
*
* PERANGKAT LUNAK INI DISEDIAKAN "SEBAGAIMANA ADANYA", TANPA JAMINAN APA PUN, BAIK TERSURAT MAUPUN
* TERSIRAT. PENULIS ATAU PEMEGANG HAK CIPTA SAMA SEKALI TIDAK BERTANGGUNG JAWAB ATAS KLAIM, KERUSAKAN ATAU
* KEWAJIBAN APAPUN ATAS PENGGUNAAN ATAU LAINNYA TERKAIT APLIKASI INI.
*
* @package OpenSID
* @author Tim Pengembang OpenDesa
* @copyright Hak Cipta 2009 - 2015 Combine Resource Institution (path_to_url
* @copyright Hak Cipta 2016 - 2024 Perkumpulan Desa Digital Terbuka (path_to_url
* @license path_to_url GPL V3
* @link path_to_url
*
*/
namespace App\Models;
use App\Scopes\RemoveRtfScope;
use App\Traits\Author;
use App\Traits\ConfigId;
use Illuminate\Database\Eloquent\Builder;
use Illuminate\Database\Eloquent\Relations\BelongsTo;
defined('BASEPATH') || exit('No direct script access allowed');
class FormatSurat extends BaseModel
{
    use Author;
    use ConfigId;

    // Flag values for "mandiri" (self-service letter request) availability.
    public const MANDIRI         = 1;
    public const MANDIRI_DISABLE = 0;

    // Flag values for locked ("kunci") letter templates.
    public const KUNCI         = 1;
    public const KUNCI_DISABLE = 0;

    // Flag values for favourite letters.
    public const FAVORIT         = 1;
    public const FAVORIT_DISABLE = 0;

    // Letter template engines: 1-2 = legacy RTF templates, 3 = TinyMCE
    // system-provided template, 4 = TinyMCE village-defined template.
    public const TINYMCE_SISTEM = 3;
    public const TINYMCE_DESA   = 4;
    public const RTF            = [1, 2];
    public const TINYMCE        = [3, 4];
    public const SISTEM         = [3];
    public const DESA           = [4];

    // NOTE(review): the misspellings 'ORIENTATAIONS', 'Potrait' and
    // 'Lanscape' (here and below) appear to be persisted in stored data and
    // referenced by callers — do not "fix" them without a data migration.
    public const DEFAULT_ORIENTATAIONS = 'Potrait';
    public const DEFAULT_SIZES         = 'F4';

    /**
     * Static data: validity-period units for a letter
     * (day / week / month / year).
     *
     * @var array
     */
    public const MASA_BERLAKU = [
        'd' => 'Hari',
        'w' => 'Minggu',
        'M' => 'Bulan',
        'y' => 'Tahun',
    ];

    /**
     * Static data: letter types shown to the user
     * (system letters vs village-defined letters).
     *
     * @var array
     */
    public const JENIS_SURAT = [
        self::TINYMCE_SISTEM => 'Surat Sistem',
        self::TINYMCE_DESA   => 'Surat [Desa]',
    ];

    /**
     * Static data: default page margins for a letter, in centimeters.
     *
     * @var array
     */
    public const MARGINS = [
        'kiri'  => 1.78,
        'atas'  => 0.63,
        'kanan' => 1.78,
        'bawah' => 1.37,
    ];

    /**
     * Static data: allowed page orientations.
     *
     * @var array
     */
    public const ORIENTATAIONS = [
        'Potrait',
        'Lanscape',
    ];

    /**
     * Static data: allowed page sizes.
     *
     * @var array
     */
    public const SIZES = [
        'A1',
        'A2',
        'A3',
        'A4',
        'A5',
        'A6',
        'F4',
    ];

    /**
     * Static data: supported form-field attribute types for letter inputs.
     *
     * @var array
     */
    public const ATTRIBUTES = [
        'text'            => 'Input Teks',
        'number'          => 'Input Angka',
        'email'           => 'Input Email',
        'url'             => 'Input Url',
        'date'            => 'Input Tanggal',
        'time'            => 'Input Jam',
        'textarea'        => 'Text Area',
        'select-manual'   => 'Pilihan (Kustom)',
        'select-otomatis' => 'Pilihan (Referensi)',
        'hari'            => 'Input Hari',
        'hari-tanggal'    => 'Input Hari dan Tanggal',
    ];

    /**
     * {@inheritDoc}
     */
    protected $table = 'tweb_surat_format';

    /**
     * The attributes that are mass assignable.
     *
     * @var array
     */
    protected $fillable = [
        'config_id',
        'nama',
        'url_surat',
        'kode_surat',
        'lampiran',
        'kunci',
        'favorit',
        'jenis',
        'mandiri',
        'masa_berlaku',
        'satuan_masa_berlaku',
        'qr_code',
        'logo_garuda',
        'kecamatan',
        'syarat_surat',
        'template',
        'template_desa',
        'form_isian',
        'kode_isian',
        'orientasi',
        'ukuran',
        'margin',
        'margin_global',
        'header',
        'footer',
        'format_nomor',
        'sumber_penduduk_berulang',
        'created_by',
        'updated_by',
    ];

    /**
     * The accessors to append to the model's array/JSON form.
     *
     * @var array
     */
    protected $appends = [
        'judul_surat',
        'margin_cm_to_mm',
    ];

    /**
     * The attributes that should be cast.
     *
     * @var array
     */
    protected $casts = [
        'masa_berlaku' => 'integer',
        'kunci'        => 'boolean',
        'favorit'      => 'boolean',
        'mandiri'      => 'boolean',
        'qr_code'      => 'boolean',
        'logo_garuda'  => 'boolean',
        'header'       => 'integer',
        'jenis'        => 'integer',
        // 'syarat_surat' => 'json',
        // 'kode_isian' => 'json',
        // 'margin' => 'json',
    ];

    /**
     * Requirements ("syarat") attached to this letter format via the
     * syarat_surat pivot table.
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsToMany
     */
    public function syaratSurat()
    {
        return $this->belongsToMany(SyaratSurat::class, 'syarat_surat', 'surat_format_id', 'ref_syarat_id');
    }

    /**
     * Scope query: only letters available for self-service ("mandiri").
     *
     * @param Builder $query
     *
     * @return Builder
     */
    public function scopeMandiri($query)
    {
        return $query->where('mandiri', static::MANDIRI);
    }

    /**
     * Scope query: only letters that are not locked.
     * (Superseded by the parameterized scopeKunci() below.)
     *
     * @param Builder $query
     *
     * @return Builder
     */
    // public function scopeKunci($query)
    // {
    //     return $query->where('kunci', static::KUNCI_DISABLE);
    // }

    /**
     * Accessor: list of requirement documents for this letter, shaped for
     * the front-end form builder (label/value plus a document select field).
     *
     * @return array
     */
    public function getListSyaratSuratAttribute()
    {
        return $this->syaratSurat->map(
            static fn ($syarat): array => [
                'label'      => $syarat->ref_syarat_nama,
                'value'      => $syarat->ref_syarat_id,
                'form_surat' => [
                    [
                        'type'     => 'select',
                        'required' => true,
                        'label'    => 'Dokumen Syarat',
                        'name'     => 'dokumen',
                        'multiple' => false,
                        'values'   => $syarat->dokumen->map(static fn ($dokumen): array => [
                            'label' => $dokumen->nama,
                            'value' => $dokumen->id,
                        ]),
                    ],
                ],
            ]
        );
    }

    /**
     * Accessor: letter form definition. Currently disabled — always a no-op
     * returning nothing (the previous driver-based implementation is kept
     * commented out for reference).
     */
    public function getFormSuratAttribute(): void
    {
        // try {
        //     return app('surat')->driver($this->url_surat)->form();
        // } catch (Exception $e) {
        //     Log::error($e);
        //     return null;
        // }
    }

    /**
     * Accessor: human-readable letter title ("Surat " + name).
     */
    public function getJudulSuratAttribute(): string
    {
        return 'Surat ' . $this->nama;
    }

    /**
     * Accessor: decode the raw kode_isian JSON column into objects.
     *
     * @return mixed decoded JSON (object/array) or null when invalid
     */
    public function getKodeIsianAttribute()
    {
        return json_decode($this->attributes['kode_isian'], null);
    }

    /**
     * Accessor: decode the raw form_isian JSON column into objects.
     *
     * @return mixed decoded JSON (object/array) or null when invalid
     */
    public function getFormIsianAttribute()
    {
        return json_decode($this->attributes['form_isian'], null);
    }

    /**
     * Accessor: page margins converted from centimeters to millimeters,
     * ordered [left, top, right, bottom].
     * NOTE(review): assumes the margin column always holds valid JSON with
     * kiri/atas/kanan/bawah keys — a null column would fatal here; confirm
     * against writers of this column.
     *
     * @return array
     */
    public function getMarginCmToMmAttribute(): array
    {
        $margin = json_decode($this->margin, null);

        return [
            $margin->kiri * 10,
            $margin->atas * 10,
            $margin->kanan * 10,
            $margin->bawah * 10,
        ];
    }

    /**
     * Accessor: URL for the system letter. Always null in this implementation.
     */
    public function getUrlSuratSistemAttribute(): ?string
    {
        return null;
    }

    /**
     * Scope query: whether a letter with the given url_surat exists.
     * NOTE(review): unlike a normal scope this executes immediately and
     * returns a bool, not a Builder.
     *
     * @param mixed $query
     * @param mixed $value
     *
     * @return bool
     */
    public function scopeIsExist($query, $value)
    {
        return $query->where('url_surat', $value)->exists();
    }

    /**
     * Scope query: filter by lock status (defaults to locked).
     *
     * @param mixed $query
     * @param mixed $value
     *
     * @return Builder
     */
    public function scopeKunci($query, $value = self::KUNCI)
    {
        return $query->where('kunci', $value);
    }

    /**
     * Scope query: filter by favourite status (defaults to favourite).
     *
     * @param mixed $query
     * @param mixed $value
     *
     * @return Builder
     */
    public function scopeFavorit($query, $value = self::FAVORIT)
    {
        return $query->where('favorit', $value);
    }

    /**
     * Scope query: filter by letter type. Accepts a single value, an array
     * of values, or empty (meaning "any non-null jenis").
     *
     * @param mixed $query
     * @param mixed $value
     *
     * @return Builder
     */
    public function scopeJenis($query, $value)
    {
        if (empty($value)) {
            return $query->whereNotNull('jenis');
        }

        if (is_array($value)) {
            return $query->whereIn('jenis', $value);
        }

        return $query->where('jenis', $value);
    }

    /**
     * Scope query: printable letters — unlocked and matching the given URL.
     *
     * @param Builder    $query
     * @param mixed|null $url
     *
     * @return Builder
     */
    public function scopeCetak($query, $url = null)
    {
        return $this->scopeKunci($query, self::KUNCI_DISABLE)->where('url_surat', $url);
    }

    /**
     * Boot the model: registers a global scope that hides legacy RTF
     * templates from all queries.
     */
    protected static function boot()
    {
        parent::boot();

        static::addGlobalScope(new RemoveRtfScope());
    }

    /**
     * Build the formatted letter number from the configured numbering
     * template, substituting [kode_surat], [tahun], [bulan_romawi] and
     * [kode_desa] placeholders (and [nomor_surat] via substitusi_nomor_surat).
     *
     * @param array $data letter data, expects keys surat.* and input.nomor
     *
     * @return string
     */
    public static function format_penomoran_surat(array $data)
    {
        // Fall back to the current year/month when not supplied.
        $thn     = $data['surat']['cek_thn'] ?? date('Y');
        $bln     = $data['surat']['cek_bln'] ?? date('m');
        $setting = ($data['surat']['format_nomor'] == '') ? setting('format_nomor_surat') : $data['surat']['format_nomor'];
        self::substitusi_nomor_surat($data['input']['nomor'], $setting);
        $array_replace = [
            '[kode_surat]'   => $data['surat']['kode_surat'],
            '[tahun]'        => $thn,
            '[bulan_romawi]' => bulan_romawi((int) $bln),
            '[kode_desa]'    => identitas()->kode_desa,
        ];

        return str_replace(array_keys($array_replace), array_values($array_replace), $setting);
    }

    /**
     * Substitute [nomor_surat] placeholders in $buffer (modified in place).
     * Supports the padded form [nomor_surat, N], which left-pads the number
     * with zeros to N digits.
     *
     * @param mixed  $nomor  the letter number to insert
     * @param string $buffer the numbering template, updated by reference
     */
    public static function substitusi_nomor_surat($nomor, &$buffer): void
    {
        $buffer = str_replace('[nomor_surat]', "{$nomor}", $buffer);

        if (preg_match_all('/\[nomor_surat,\s*\d+\]/', $buffer, $matches)) {
            foreach ($matches[0] as $match) {
                $parts         = explode(',', $match);
                // Length is the digits between the comma and the closing bracket.
                $panjang       = (int) trim(rtrim($parts[1], ']'));
                $nomor_panjang = str_pad("{$nomor}", $panjang, '0', STR_PAD_LEFT);
                $buffer        = str_replace($match, $nomor_panjang, $buffer);
            }
        }
    }

    /**
     * Related LogSurat record.
     * NOTE(review): the key order ('id' as foreign key on this model,
     * 'id_format_surat' as owner key) looks inverted for a belongsTo —
     * verify this relation against the log_surat schema.
     */
    public function logSurat(): BelongsTo
    {
        return $this->belongsTo(LogSurat::class, 'id', 'id_format_surat');
    }
}
```
|
```c++
//===-- clang-offload-bundler/ClangOffloadBundler.cpp ---------------------===//
//
// See path_to_url for license information.
//
//===your_sha256_hash------===//
///
/// \file
/// This file implements a stand-alone clang-offload-bundler tool using the
/// OffloadBundler API.
///
//===your_sha256_hash------===//
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetID.h"
#include "clang/Basic/Version.h"
#include "clang/Driver/OffloadBundler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ArchiveWriter.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <forward_list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <system_error>
#include <utility>
using namespace llvm;
using namespace llvm::object;
using namespace clang;
// Version callback installed via cl::SetVersionPrinter: writes the full
// clang tool version string for this tool, followed by a newline.
static void PrintVersion(raw_ostream &OS) {
  const std::string VersionString =
      clang::getClangToolFullVersion("clang-offload-bundler");
  OS << VersionString << '\n';
}
// Entry point: parses command-line options, validates the bundling or
// unbundling request, fills an OffloadBundlerConfig, and dispatches to the
// OffloadBundler API (BundleFiles / UnbundleFiles / UnbundleArchive /
// ListBundleIDsInFile). Exits with 1 via reportError on any usage error.
int main(int argc, const char **argv) {
  cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);

  // Mark all our options with this category, everything else (except for
  // -version and -help) will be hidden.
  cl::OptionCategory
      ClangOffloadBundlerCategory("clang-offload-bundler options");
  cl::list<std::string>
      InputFileNames("input",
                     cl::desc("Input file."
                              " Can be specified multiple times "
                              "for multiple input files."),
                     cl::cat(ClangOffloadBundlerCategory));
  cl::list<std::string>
      InputFileNamesDeprecatedOpt("inputs", cl::CommaSeparated,
                                  cl::desc("[<input file>,...] (deprecated)"),
                                  cl::cat(ClangOffloadBundlerCategory));
  cl::list<std::string>
      OutputFileNames("output",
                      cl::desc("Output file."
                               " Can be specified multiple times "
                               "for multiple output files."),
                      cl::cat(ClangOffloadBundlerCategory));
  cl::list<std::string>
      OutputFileNamesDeprecatedOpt("outputs", cl::CommaSeparated,
                                   cl::desc("[<output file>,...] (deprecated)"),
                                   cl::cat(ClangOffloadBundlerCategory));
  cl::list<std::string>
      TargetNames("targets", cl::CommaSeparated,
                  cl::desc("[<offload kind>-<target triple>,...]"),
                  cl::cat(ClangOffloadBundlerCategory));
  cl::opt<std::string> FilesType(
      "type", cl::Required,
      cl::desc("Type of the files to be bundled/unbundled.\n"
               "Current supported types are:\n"
               "  i   - cpp-output\n"
               "  ii  - c++-cpp-output\n"
               "  cui - cuda-cpp-output\n"
               "  hipi - hip-cpp-output\n"
               "  d   - dependency\n"
               "  ll  - llvm\n"
               "  bc  - llvm-bc\n"
               "  s   - assembler\n"
               "  o   - object\n"
               "  a   - archive of objects\n"
               "  gch - precompiled-header\n"
               "  ast - clang AST file"),
      cl::cat(ClangOffloadBundlerCategory));
  cl::opt<bool>
      Unbundle("unbundle",
               cl::desc("Unbundle bundled file into several output files.\n"),
               cl::init(false), cl::cat(ClangOffloadBundlerCategory));
  cl::opt<bool>
      ListBundleIDs("list", cl::desc("List bundle IDs in the bundled file.\n"),
                    cl::init(false), cl::cat(ClangOffloadBundlerCategory));
  cl::opt<bool> PrintExternalCommands(
      "###",
      cl::desc("Print any external commands that are to be executed "
               "instead of actually executing them - for testing purposes.\n"),
      cl::init(false), cl::cat(ClangOffloadBundlerCategory));
  cl::opt<bool>
      AllowMissingBundles("allow-missing-bundles",
                          cl::desc("Create empty files if bundles are missing "
                                   "when unbundling.\n"),
                          cl::init(false), cl::cat(ClangOffloadBundlerCategory));
  cl::opt<unsigned>
      BundleAlignment("bundle-align",
                      cl::desc("Alignment of bundle for binary files"),
                      cl::init(1), cl::cat(ClangOffloadBundlerCategory));
  cl::opt<bool> HipOpenmpCompatible(
      "hip-openmp-compatible",
      cl::desc("Treat hip and hipv4 offload kinds as "
               "compatible with openmp kind, and vice versa.\n"),
      cl::init(false), cl::cat(ClangOffloadBundlerCategory));

  // Process commandline options and report errors
  sys::PrintStackTraceOnErrorSignal(argv[0]);
  cl::HideUnrelatedOptions(ClangOffloadBundlerCategory);
  cl::SetVersionPrinter(PrintVersion);
  cl::ParseCommandLineOptions(
      argc, argv,
      "A tool to bundle several input files of the specified type <type> \n"
      "referring to the same source file but different targets into a single \n"
      "one. The resulting file can also be unbundled into different files by \n"
      "this tool if -unbundle is provided.\n");

  if (Help) {
    cl::PrintHelpMessage();
    return 0;
  }

  /// Class to store bundler options in standard (non-cl::opt) data structures
  // Avoid using cl::opt variables after these assignments when possible
  OffloadBundlerConfig BundlerConfig;
  BundlerConfig.AllowMissingBundles = AllowMissingBundles;
  BundlerConfig.PrintExternalCommands = PrintExternalCommands;
  BundlerConfig.HipOpenmpCompatible = HipOpenmpCompatible;
  BundlerConfig.BundleAlignment = BundleAlignment;
  BundlerConfig.FilesType = FilesType;
  BundlerConfig.ObjcopyPath = "";
  BundlerConfig.TargetNames = TargetNames;
  BundlerConfig.InputFileNames = InputFileNames;
  BundlerConfig.OutputFileNames = OutputFileNames;

  /// The index of the host input in the list of inputs.
  BundlerConfig.HostInputIndex = ~0u;

  /// Whether not having host target is allowed.
  BundlerConfig.AllowNoHost = false;

  // Print the error with the tool name prefix and exit the process.
  auto reportError = [argv](Error E) {
    logAllUnhandledErrors(std::move(E), WithColor::error(errs(), argv[0]));
    exit(1);
  };

  // Run a unit of work and report (fatally) any llvm::Error it produces.
  auto doWork = [&](std::function<llvm::Error()> Work) {
    if (llvm::Error Err = Work()) {
      reportError(std::move(Err));
    }
  };

  auto warningOS = [argv]() -> raw_ostream & {
    return WithColor::warning(errs(), StringRef(argv[0]));
  };

  /// Path to the current binary.
  std::string BundlerExecutable = argv[0];
  if (!llvm::sys::fs::exists(BundlerExecutable))
    BundlerExecutable =
        sys::fs::getMainExecutable(argv[0], &BundlerExecutable);

  // Find llvm-objcopy in order to create the bundle binary.
  // First look next to this executable, then fall back to PATH.
  ErrorOr<std::string> Objcopy = sys::findProgramByName(
      "llvm-objcopy",
      sys::path::parent_path(BundlerExecutable));
  if (!Objcopy)
    Objcopy = sys::findProgramByName("llvm-objcopy");
  if (!Objcopy)
    reportError(createStringError(Objcopy.getError(),
                                  "unable to find 'llvm-objcopy' in path"));
  else
    BundlerConfig.ObjcopyPath = *Objcopy;

  // Merge the deprecated -inputs list into -input; using both is an error.
  if (InputFileNames.getNumOccurrences() != 0 &&
      InputFileNamesDeprecatedOpt.getNumOccurrences() != 0) {
    reportError(createStringError(
        errc::invalid_argument,
        "-inputs and -input cannot be used together, use only -input instead"));
  }

  if (InputFileNamesDeprecatedOpt.size()) {
    warningOS() << "-inputs is deprecated, use -input instead\n";
    // temporary hack to support -inputs
    std::vector<std::string> &s = InputFileNames;
    s.insert(s.end(), InputFileNamesDeprecatedOpt.begin(),
             InputFileNamesDeprecatedOpt.end());
  }
  // Re-assign: picks up any inputs merged from the deprecated option above.
  BundlerConfig.InputFileNames = InputFileNames;

  // Same deprecation handling for -outputs vs -output.
  if (OutputFileNames.getNumOccurrences() != 0 &&
      OutputFileNamesDeprecatedOpt.getNumOccurrences() != 0) {
    reportError(createStringError(errc::invalid_argument,
                                  "-outputs and -output cannot be used "
                                  "together, use only -output instead"));
  }

  if (OutputFileNamesDeprecatedOpt.size()) {
    warningOS() << "-outputs is deprecated, use -output instead\n";
    // temporary hack to support -outputs
    std::vector<std::string> &s = OutputFileNames;
    s.insert(s.end(), OutputFileNamesDeprecatedOpt.begin(),
             OutputFileNamesDeprecatedOpt.end());
  }
  // Re-assign: picks up any outputs merged from the deprecated option above.
  BundlerConfig.OutputFileNames = OutputFileNames;

  // -list mode: exactly one input, no outputs, no targets; incompatible
  // with -unbundle.
  if (ListBundleIDs) {
    if (Unbundle) {
      reportError(
          createStringError(errc::invalid_argument,
                            "-unbundle and -list cannot be used together"));
    }
    if (InputFileNames.size() != 1) {
      reportError(createStringError(errc::invalid_argument,
                                    "only one input file supported for -list"));
    }
    if (OutputFileNames.size()) {
      reportError(createStringError(errc::invalid_argument,
                                    "-outputs option is invalid for -list"));
    }
    if (TargetNames.size()) {
      reportError(createStringError(errc::invalid_argument,
                                    "-targets option is invalid for -list"));
    }

    doWork([&]() { return OffloadBundler::ListBundleIDsInFile(
                       InputFileNames.front(),
                       BundlerConfig); });
    return 0;
  }

  if (OutputFileNames.size() == 0) {
    reportError(
        createStringError(errc::invalid_argument, "no output file specified!"));
  }

  if (TargetNames.getNumOccurrences() == 0) {
    reportError(createStringError(
        errc::invalid_argument,
        "for the --targets option: must be specified at least once!"));
  }

  // Cardinality checks: unbundling takes 1 input and one output per target;
  // bundling takes one input per target and 1 output. Archives ("a") are
  // only valid when unbundling.
  if (Unbundle) {
    if (InputFileNames.size() != 1) {
      reportError(createStringError(
          errc::invalid_argument,
          "only one input file supported in unbundling mode"));
    }
    if (OutputFileNames.size() != TargetNames.size()) {
      reportError(createStringError(errc::invalid_argument,
                                    "number of output files and targets should "
                                    "match in unbundling mode"));
    }
  } else {
    if (BundlerConfig.FilesType == "a") {
      reportError(createStringError(errc::invalid_argument,
                                    "Archive files are only supported "
                                    "for unbundling"));
    }
    if (OutputFileNames.size() != 1) {
      reportError(createStringError(
          errc::invalid_argument,
          "only one output file supported in bundling mode"));
    }
    if (InputFileNames.size() != TargetNames.size()) {
      reportError(createStringError(
          errc::invalid_argument,
          "number of input files and targets should match in bundling mode"));
    }
  }

  // Verify that the offload kinds and triples are known. We also check that we
  // have exactly one host target.
  unsigned Index = 0u;
  unsigned HostTargetNum = 0u;
  bool HIPOnly = true;
  llvm::DenseSet<StringRef> ParsedTargets;
  // Map {offload-kind}-{triple} to target IDs.
  std::map<std::string, std::set<StringRef>> TargetIDs;
  for (StringRef Target : TargetNames) {
    // Each target may appear at most once on the command line.
    if (ParsedTargets.contains(Target)) {
      reportError(createStringError(errc::invalid_argument,
                                    "Duplicate targets are not allowed"));
    }
    ParsedTargets.insert(Target);

    auto OffloadInfo = OffloadTargetInfo(Target, BundlerConfig);
    bool KindIsValid = OffloadInfo.isOffloadKindValid();
    bool TripleIsValid = OffloadInfo.isTripleValid();

    if (!KindIsValid || !TripleIsValid) {
      SmallVector<char, 128u> Buf;
      raw_svector_ostream Msg(Buf);
      Msg << "invalid target '" << Target << "'";
      if (!KindIsValid)
        Msg << ", unknown offloading kind '" << OffloadInfo.OffloadKind << "'";
      if (!TripleIsValid)
        Msg << ", unknown target triple '" << OffloadInfo.Triple.str() << "'";
      reportError(createStringError(errc::invalid_argument, Msg.str()));
    }

    // Collect target IDs per {kind}-{triple} so conflicting combinations
    // (e.g. incompatible GPU arch features) can be rejected below.
    TargetIDs[OffloadInfo.OffloadKind.str() + "-" + OffloadInfo.Triple.str()]
        .insert(OffloadInfo.TargetID);
    if (KindIsValid && OffloadInfo.hasHostKind()) {
      ++HostTargetNum;
      // Save the index of the input that refers to the host.
      BundlerConfig.HostInputIndex = Index;
    }

    if (OffloadInfo.OffloadKind != "hip" && OffloadInfo.OffloadKind != "hipv4")
      HIPOnly = false;

    ++Index;
  }
  for (const auto &TargetID : TargetIDs) {
    if (auto ConflictingTID =
            clang::getConflictTargetIDCombination(TargetID.second)) {
      SmallVector<char, 128u> Buf;
      raw_svector_ostream Msg(Buf);
      Msg << "Cannot bundle inputs with conflicting targets: '"
          << TargetID.first + "-" + ConflictingTID->first << "' and '"
          << TargetID.first + "-" + ConflictingTID->second << "'";
      reportError(createStringError(errc::invalid_argument, Msg.str()));
    }
  }

  // HIP uses clang-offload-bundler to bundle device-only compilation results
  // for multiple GPU archs, therefore allow no host target if all entries
  // are for HIP.
  BundlerConfig.AllowNoHost = HIPOnly;

  // Host triple is not really needed for unbundling operation, so do not
  // treat missing host triple as error if we do unbundling.
  if ((Unbundle && HostTargetNum > 1) ||
      (!Unbundle && HostTargetNum != 1 && !BundlerConfig.AllowNoHost)) {
    reportError(createStringError(errc::invalid_argument,
                                  "expecting exactly one host target but got " +
                                      Twine(HostTargetNum)));
  }

  // All options validated: perform the requested operation.
  OffloadBundler Bundler(BundlerConfig);

  doWork([&]() {
    if (Unbundle) {
      if (BundlerConfig.FilesType == "a")
        return Bundler.UnbundleArchive();
      else
        return Bundler.UnbundleFiles();
    } else
      return Bundler.BundleFiles();
  });
  return 0;
}
```
|
```javascript
/**
* @license Apache-2.0
*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
'use strict';

/**
* Create an iterator for generating pseudorandom numbers drawn from a discrete uniform distribution.
*
* @module @stdlib/random/iter/discrete-uniform
*
* @example
* var iterator = require( '@stdlib/random/iter/discrete-uniform' );
*
* var iter = iterator( -1, 2 );
*
* var r = iter.next().value;
* // returns <number>
*
* r = iter.next().value;
* // returns <number>
*
* // ...
*/

// MODULES //

// Re-export the iterator factory implemented in main.js.
var iterator = require( './main.js' );


// EXPORTS //

module.exports = iterator;
```
|
```c++
/*
*
* This software may be used and distributed according to the terms of the
*/
#include "eden/fs/service/EdenServiceHandler.h"
#include <sys/types.h>
#include <algorithm>
#include <optional>
#include <stdexcept>
#include <typeinfo>
#include <fb303/ServiceData.h>
#include <fmt/format.h>
#include <folly/Conv.h>
#include <folly/FileUtil.h>
#include <folly/Portability.h>
#include <folly/String.h>
#include <folly/chrono/Conv.h>
#include <folly/executors/SerialExecutor.h>
#include <folly/futures/Future.h>
#include <folly/logging/Logger.h>
#include <folly/logging/xlog.h>
#include <folly/stop_watch.h>
#include <re2/re2.h>
#include <thrift/lib/cpp/util/EnumUtils.h>
#include <thrift/lib/cpp2/server/ThriftServer.h>
#include "ThriftGetObjectImpl.h"
#include "eden/common/telemetry/SessionInfo.h"
#include "eden/common/telemetry/Tracing.h"
#include "eden/common/utils/Bug.h"
#include "eden/common/utils/FaultInjector.h"
#include "eden/common/utils/ProcessInfoCache.h"
#include "eden/common/utils/StatTimes.h"
#include "eden/common/utils/String.h"
#include "eden/fs/config/CheckoutConfig.h"
#include "eden/fs/config/ReloadableConfig.h"
#include "eden/fs/fuse/FuseChannel.h"
#include "eden/fs/inodes/EdenMount.h"
#include "eden/fs/inodes/FileInode.h"
#include "eden/fs/inodes/GlobNode.h"
#include "eden/fs/inodes/InodeError.h"
#include "eden/fs/inodes/InodeMap.h"
#include "eden/fs/inodes/InodeTable.h"
#include "eden/fs/inodes/Overlay.h"
#include "eden/fs/inodes/ServerState.h"
#include "eden/fs/inodes/Traverse.h"
#include "eden/fs/inodes/TreeInode.h"
#include "eden/fs/inodes/VirtualInodeLoader.h"
#include "eden/fs/journal/Journal.h"
#include "eden/fs/journal/JournalDelta.h"
#include "eden/fs/model/Blob.h"
#include "eden/fs/model/BlobMetadata.h"
#include "eden/fs/model/GlobEntry.h"
#include "eden/fs/model/Hash.h"
#include "eden/fs/model/Tree.h"
#include "eden/fs/model/TreeEntry.h"
#include "eden/fs/model/git/TopLevelIgnores.h"
#include "eden/fs/nfs/Nfsd3.h"
#include "eden/fs/privhelper/PrivHelper.h"
#include "eden/fs/prjfs/PrjfsChannel.h"
#include "eden/fs/service/EdenServer.h"
#include "eden/fs/service/ThriftGetObjectImpl.h"
#include "eden/fs/service/ThriftGlobImpl.h"
#include "eden/fs/service/ThriftPermissionChecker.h"
#include "eden/fs/service/ThriftUtil.h"
#include "eden/fs/service/UsageService.h"
#include "eden/fs/service/gen-cpp2/eden_constants.h"
#include "eden/fs/service/gen-cpp2/eden_types.h"
#include "eden/fs/service/gen-cpp2/streamingeden_constants.h"
#include "eden/fs/store/BackingStore.h"
#include "eden/fs/store/Diff.h"
#include "eden/fs/store/DiffContext.h"
#include "eden/fs/store/FilteredBackingStore.h"
#include "eden/fs/store/LocalStore.h"
#include "eden/fs/store/ObjectFetchContext.h"
#include "eden/fs/store/ObjectStore.h"
#include "eden/fs/store/PathLoader.h"
#include "eden/fs/store/ScmStatusDiffCallback.h"
#include "eden/fs/store/TreeCache.h"
#include "eden/fs/store/TreeLookupProcessor.h"
#include "eden/fs/store/filter/GlobFilter.h"
#include "eden/fs/store/hg/SaplingBackingStore.h"
#include "eden/fs/telemetry/LogEvent.h"
#include "eden/fs/telemetry/TaskTrace.h"
#include "eden/fs/utils/Clock.h"
#include "eden/fs/utils/EdenError.h"
#include "eden/fs/utils/GlobMatcher.h"
#include "eden/fs/utils/NotImplemented.h"
#include "eden/fs/utils/ProcUtil.h"
#include "eden/fs/utils/SourceLocation.h"
using folly::Future;
using folly::makeFuture;
using folly::StringPiece;
using folly::Try;
using folly::Unit;
using std::string;
using std::unique_ptr;
using std::vector;
using namespace std::literals::string_view_literals;
namespace {
using namespace facebook::eden;
// Render a hash argument received over Thrift as a printable string for
// logging. Accepts a 20-byte raw hash or a 40-character hex hash; any other
// length is hex-dumped as-is so the log line stays readable.
std::string logHash(StringPiece thriftArg) {
  const auto len = thriftArg.size();
  if (len == Hash20::RAW_SIZE) {
    // Raw binary hash bytes.
    return Hash20{folly::ByteRange{thriftArg}}.toString();
  }
  if (len == Hash20::RAW_SIZE * 2) {
    // Hex-encoded hash; normalize through Hash20.
    return Hash20{thriftArg}.toString();
  }
  // Unrecognized length: best-effort hex dump.
  return folly::hexlify(thriftArg);
}
/**
 * Convert a vector of strings from a thrift argument to a field
 * that we can log in an INSTRUMENT_THRIFT_CALL() log message.
 *
 * Very long lists are truncated: only the first few elements are printed,
 * followed by "and N more".
 */
std::string toLogArg(const std::vector<std::string>& args) {
  constexpr size_t limit = 5;
  const size_t shown = std::min(args.size(), limit);
  std::string out = "[";
  for (size_t i = 0; i < shown; ++i) {
    if (i != 0) {
      out += ", ";
    }
    out += args[i];
  }
  if (args.size() > limit) {
    out += ", and " + std::to_string(args.size() - limit) + " more";
  }
  out += "]";
  return out;
}
// Whether the mount's repo is backed by FilteredFS (the FILTEREDHG backing
// store type), per its checkout config.
bool mountIsUsingFilteredFS(const EdenMountHandle& mount) {
  const auto storeType =
      mount.getEdenMountPtr()->getCheckoutConfig()->getRepoBackingStoreType();
  return storeType == BackingStoreType::FILTEREDHG;
}
// Resolve a plain root id into the form the mount's backing store expects.
// On FilteredFS mounts the root id is wrapped into a FilteredRootId (using
// the supplied filter id, or a null filter when none was given); on all
// other mounts the root id is passed through unchanged.
std::string resolveRootId(
    std::string rootId,
    const RootIdOptions& rootIdOptions,
    const EdenMountHandle& mount) {
  if (!mountIsUsingFilteredFS(mount)) {
    // Non-FilteredFS: nothing to wrap.
    return rootId;
  }
  if (rootIdOptions.filterId()) {
    return FilteredBackingStore::createFilteredRootId(
        rootId, *rootIdOptions.filterId());
  }
  return FilteredBackingStore::createNullFilteredRootId(rootId);
}
// parseRootId() assumes that the provided hash will contain information
// about the active filter. Some legacy code paths do not respect
// filters (or accept Filters as arguments), so we need to construct a
// FilteredRootId using the last active filter. For non-FilteredFS repos, the
// last filterID will be std::nullopt.
std::string resolveRootIdWithLastFilter(
    std::string rootId,
    const EdenMountHandle& handle) {
  RootIdOptions options{};
  // Carry over whichever filter was most recently active (may be nullopt).
  options.filterId_ref().from_optional(
      handle.getEdenMount().getCheckoutConfig()->getLastActiveFilter());
  return resolveRootId(std::move(rootId), options, handle);
}
// Batch form of resolveRootIdWithLastFilter() for endpoints that accept many
// root ids. The input strings are consumed (moved from).
std::vector<std::string> resolveRootsWithLastFilter(
    std::vector<std::string>& originalRootIds,
    const EdenMountHandle& mountHandle) {
  std::vector<std::string> resolved;
  resolved.reserve(originalRootIds.size());
  for (auto& rootId : originalRootIds) {
    resolved.push_back(
        resolveRootIdWithLastFilter(std::move(rootId), mountHandle));
  }
  return resolved;
}
#define EDEN_MICRO reinterpret_cast<const char*>(u8"\u00B5s")
/**
 * ObjectFetchContext for object fetches triggered directly by a Thrift
 * endpoint. Records the calling process id, the endpoint name (exposed as
 * the cause detail), and an optional request-info map filled from the
 * client's ClientRequestInfo.
 */
class ThriftFetchContext : public ObjectFetchContext {
 public:
  explicit ThriftFetchContext(
      OptionalProcessId pid,
      folly::StringPiece endpoint)
      : pid_(pid), endpoint_(endpoint) {}

  OptionalProcessId getClientPid() const override {
    return pid_;
  }

  Cause getCause() const override {
    return ObjectFetchContext::Cause::Thrift;
  }

  // The endpoint (Thrift method) name that initiated the fetch.
  std::optional<std::string_view> getCauseDetail() const override {
    return endpoint_;
  }

  const std::unordered_map<std::string, std::string>* FOLLY_NULLABLE
  getRequestInfo() const override {
    return &requestInfo_;
  }

  /**
   * Update the request info map.
   *
   * This is not thread safe and the caller should make sure that this function
   * isn't called in an unsafe manner.
   */
  void updateRequestInfo(const std::map<std::string, std::string>& another) {
    requestInfo_.insert(another.begin(), another.end());
  }

  /**
   * Copy the client correlator and entry point out of an optional
   * ClientRequestInfo Thrift field into the request info map. Both fields
   * must be present and non-empty for anything to be recorded.
   */
  void fillClientRequestInfo(
      apache::thrift::optional_field_ref<ClientRequestInfo&>
          clientRequestInfo) {
    if (clientRequestInfo.has_value()) {
      auto correlator = clientRequestInfo->correlator_ref();
      auto entry_point = clientRequestInfo->entry_point_ref();
      if (!(correlator->empty() || entry_point->empty())) {
        updateRequestInfo(
            {{ObjectFetchContext::kClientCorrelator, *correlator},
             {ObjectFetchContext::kClientEntryPoint, *entry_point}});
      }
    }
  }

 private:
  OptionalProcessId pid_;
  // NOTE(review): endpoint_ is a view; it must outlive this context (in
  // practice it points at a SourceLocation function name, which is static).
  std::string_view endpoint_;
  std::unordered_map<std::string, std::string> requestInfo_;
};
/**
 * ObjectFetchContext for prefetch-style fetches issued on behalf of a Thrift
 * endpoint (e.g. glob prefetching). Reports Cause::Prefetch, the Thrift
 * prefetch import priority, and no request-info map.
 */
class PrefetchFetchContext : public ObjectFetchContext {
 public:
  explicit PrefetchFetchContext(
      OptionalProcessId pid,
      std::string_view endpoint)
      : pid_(pid), endpoint_(endpoint) {}

  OptionalProcessId getClientPid() const override {
    return pid_;
  }

  Cause getCause() const override {
    return ObjectFetchContext::Cause::Prefetch;
  }

  // The endpoint (Thrift method) name that initiated the prefetch.
  std::optional<std::string_view> getCauseDetail() const override {
    return endpoint_;
  }

  // Fix: drop the redundant `virtual` keyword — `override` already implies
  // it, and mixing the two is flagged by clang-tidy (modernize-use-override).
  ImportPriority getPriority() const override {
    return kThriftPrefetchPriority;
  }

  // Prefetches carry no per-request info map.
  const std::unordered_map<std::string, std::string>* FOLLY_NULLABLE
  getRequestInfo() const override {
    return nullptr;
  }

 private:
  OptionalProcessId pid_;
  // NOTE(review): endpoint_ is a view; it must outlive this context.
  std::string_view endpoint_;
};
/**
 * Lives as long as a Thrift request and primarily exists to record logging and
 * telemetry.
 *
 * On construction it logs the call (name + joined arguments), publishes a
 * "start" ThriftRequestTraceEvent, and creates the fetch contexts for the
 * request. On destruction it logs the elapsed time, records the duration
 * stat, and publishes the matching "finish" trace event.
 */
class ThriftRequestScope {
 public:
  ThriftRequestScope(ThriftRequestScope&&) = delete;
  ThriftRequestScope& operator=(ThriftRequestScope&&) = delete;

  /**
   * @param traceBus        bus on which start/finish events are published
   * @param logger          logger used for the per-request log lines
   * @param level           log level for the entry/exit messages
   * @param sourceLocation  location of the Thrift handler (name is reused as
   *                        the fetch-context endpoint name)
   * @param edenStats       stats container (may be null; checked in dtor)
   * @param statPtr         duration stat to record into on completion
   * @param pid             client process id, if known
   * @param join            callable producing the logged argument string;
   *                        invoked once, only for the entry log line
   */
  template <typename JoinFn>
  ThriftRequestScope(
      std::shared_ptr<TraceBus<ThriftRequestTraceEvent>> traceBus,
      const folly::Logger& logger,
      folly::LogLevel level,
      SourceLocation sourceLocation,
      EdenStatsPtr edenStats,
      ThriftStats::DurationPtr statPtr,
      OptionalProcessId pid,
      JoinFn&& join)
      : traceBus_{std::move(traceBus)},
        requestId_(generateUniqueID()),
        sourceLocation_{sourceLocation},
        edenStats_{std::move(edenStats)},
        statPtr_{std::move(statPtr)},
        level_(level),
        itcLogger_(logger),
        thriftFetchContext_{makeRefPtr<ThriftFetchContext>(
            pid,
            sourceLocation_.function_name())},
        prefetchFetchContext_{makeRefPtr<PrefetchFetchContext>(
            pid,
            sourceLocation_.function_name())} {
    // Entry log line: "[<id>] functionName(<joined args>)", attributed to
    // the handler's own file/line rather than this constructor.
    FB_LOG_RAW(
        itcLogger_,
        level,
        sourceLocation.file_name(),
        sourceLocation.line(),
        "")
        << "[" << requestId_ << "] " << sourceLocation.function_name() << "("
        << join() << ")";
    traceBus_->publish(ThriftRequestTraceEvent::start(
        requestId_, sourceLocation_.function_name(), pid));
  }

  ~ThriftRequestScope() {
    // Logging completion time for the request
    // The line number points to where the object was originally created
    auto elapsed = itcTimer_.elapsed();
    auto level = level_;
    if (elapsed > std::chrono::seconds(1)) {
      // When a request takes over a second, let's raise the loglevel to draw
      // attention to it
      level += 1;
    }
    FB_LOG_RAW(
        itcLogger_,
        level,
        sourceLocation_.file_name(),
        sourceLocation_.line(),
        "")
        << "[" << requestId_ << "] "
        << fmt::format(
               "{}() took {} {}",
               sourceLocation_.function_name(),
               elapsed.count(),
               EDEN_MICRO);
    if (edenStats_) {
      edenStats_->addDuration(statPtr_, elapsed);
    }
    // Publish the matching "finish" event for the "start" published in the
    // constructor, keyed by the same requestId_.
    traceBus_->publish(ThriftRequestTraceEvent::finish(
        requestId_,
        sourceLocation_.function_name(),
        thriftFetchContext_->getClientPid()));
  }

  // Fetch context for prefetch-style work done on behalf of this request.
  const ObjectFetchContextPtr& getPrefetchFetchContext() {
    return prefetchFetchContext_.as<ObjectFetchContext>();
  }

  ThriftFetchContext& getThriftFetchContext() {
    return *thriftFetchContext_;
  }

  // Fetch context for direct fetches done on behalf of this request.
  const ObjectFetchContextPtr& getFetchContext() {
    return thriftFetchContext_.as<ObjectFetchContext>();
  }

  folly::StringPiece getFunctionName() {
    return sourceLocation_.function_name();
  }

 private:
  std::shared_ptr<TraceBus<ThriftRequestTraceEvent>> traceBus_;
  uint64_t requestId_;
  SourceLocation sourceLocation_;
  EdenStatsPtr edenStats_;
  ThriftStats::DurationPtr statPtr_;
  folly::LogLevel level_;
  folly::Logger itcLogger_;
  // Started at construction; measures the full lifetime of the request.
  folly::stop_watch<std::chrono::microseconds> itcTimer_ = {};
  RefPtr<ThriftFetchContext> thriftFetchContext_;
  RefPtr<PrefetchFetchContext> prefetchFetchContext_;
};
/**
 * Keep a ThriftRequestScope alive until the given Future completes, so that
 * its destructor (which logs the request duration and publishes the finish
 * trace event) fires at the right time.
 */
template <typename ReturnType>
Future<ReturnType> wrapFuture(
    std::unique_ptr<ThriftRequestScope> logHelper,
    folly::Future<ReturnType>&& f) {
  auto scope = std::move(logHelper);
  // The no-op ensure() callback exists solely to own the scope object.
  return std::move(f).ensure([scope = std::move(scope)]() {});
}
/**
 * ImmediateFuture variant of wrapFuture(): extends the ThriftRequestScope's
 * lifetime until the ImmediateFuture completes.
 */
template <typename ReturnType>
ImmediateFuture<ReturnType> wrapImmediateFuture(
    std::unique_ptr<ThriftRequestScope> logHelper,
    ImmediateFuture<ReturnType>&& f) {
  auto scope = std::move(logHelper);
  // The no-op ensure() callback exists solely to own the scope object.
  return std::move(f).ensure([scope = std::move(scope)]() {});
}
/**
 * Lives as long as a suffix glob request and primarily exists to record logging
 * and telemetry.
 *
 * The destructor measures the elapsed time, best-effort resolves the calling
 * process name, logs the query, and emits a structured SuffixGlob event.
 */
class SuffixGlobRequestScope {
 public:
  SuffixGlobRequestScope(SuffixGlobRequestScope&&) = delete;
  SuffixGlobRequestScope& operator=(SuffixGlobRequestScope&&) = delete;

  /**
   * @param globberLogString human-readable description of the glob query
   * @param serverState      server state (process info cache + logger);
   *                         held by reference, must outlive this scope
   * @param isLocal          whether the glob was evaluated locally
   * @param context          fetch context, used to look up the client pid;
   *                         held by reference, must outlive this scope
   */
  SuffixGlobRequestScope(
      std::string globberLogString,
      const std::shared_ptr<ServerState>& serverState,
      bool isLocal,
      const ObjectFetchContextPtr& context)
      : globberLogString_{std::move(globberLogString)},
        serverState_{serverState},
        isLocal_{isLocal},
        context_{context} {}

  ~SuffixGlobRequestScope() {
    // Logging completion time for the request
    auto elapsed = itcTimer_.elapsed();
    auto duration = std::chrono::duration<double>{elapsed}.count();
    std::string client_cmdline = "<unknown>";
    if (auto clientPid = context_->getClientPid()) {
      // TODO: we should look up client scope here instead of command line
      // since it will give more context into the overarching process or
      // system producing the expensive query
      // To avoid waiting on retrieving the ProcessInfo, only get the
      // client_commandline if it's immediately available
      const ProcessInfo* processInfoPtr = serverState_->getProcessInfoCache()
                                              ->lookup(clientPid.value().get())
                                              .get_optional();
      if (processInfoPtr) {
        client_cmdline = processInfoPtr->name;
        // Command-line args are NUL-separated; make the string printable.
        std::replace(client_cmdline.begin(), client_cmdline.end(), '\0', ' ');
      }
    }
    XLOG(DBG4) << "EdenFS asked to evaluate suffix glob by caller '"
               << client_cmdline << "':" << globberLogString_
               << ": duration=" << duration << "s";
    serverState_->getStructuredLogger()->logEvent(SuffixGlob{
        duration, globberLogString_, std::move(client_cmdline), isLocal_});
  }

 private:
  std::string globberLogString_;
  // NOTE(review): reference members — both must outlive this scope object.
  const std::shared_ptr<ServerState>& serverState_;
  bool isLocal_;
  const ObjectFetchContextPtr& context_;
  // Started at construction; measures the full lifetime of the request.
  folly::stop_watch<std::chrono::microseconds> itcTimer_ = {};
};
/**
 * Lives as long as a glob files request and primarily exists to record logging
 * and telemetry.
 *
 * The destructor logs completion, reports an ExpensiveGlob structured-log
 * event for long-running requests, and bumps the local/remote/fallback
 * duration and counter stats. Call setLocal()/setFallback() while the request
 * runs to record which pathway actually served it.
 */
class GlobFilesRequestScope {
 public:
  GlobFilesRequestScope(GlobFilesRequestScope&&) = delete;
  GlobFilesRequestScope& operator=(GlobFilesRequestScope&&) = delete;

  explicit GlobFilesRequestScope(
      const std::shared_ptr<ServerState>& serverState,
      bool isOffloadable,
      std::string logString,
      const ObjectFetchContextPtr& context)
      : serverState_{serverState},
        isOffloadable_{isOffloadable},
        logString_{std::move(logString)}, // move: the by-value param is ours
        context_{context} {}

  ~GlobFilesRequestScope() {
    // Logging completion time for the request
    auto elapsed = itcTimer_.elapsed();
    auto duration = std::chrono::duration<double>{elapsed}.count();
    XLOG(DBG4) << "EdenFS completed globFiles request in " << duration << "s"
               << " using " << (local_ ? "Local" : "SaplingRemoteAPI")
               << (fallback_ ? " Fallback" : "");
    // Log if this request is an expensive request
    if (duration >= EXPENSIVE_GLOB_FILES_DURATION) {
      std::string client_cmdline = "<unknown>";
      if (auto clientPid = context_->getClientPid()) {
        // TODO: we should look up client scope here instead of command line
        // since it will give move context into the overarching process or
        // system producing the expensive query
        const ProcessInfo* processInfoPtr =
            serverState_->getProcessInfoCache()
                ->lookup(clientPid.value().get())
                .get_optional();
        if (processInfoPtr) {
          client_cmdline = processInfoPtr->name;
          // NUL separators between arguments -> spaces, for readability.
          std::replace(client_cmdline.begin(), client_cmdline.end(), '\0', ' ');
        }
      }
      serverState_->getStructuredLogger()->logEvent(ExpensiveGlob{
          duration, logString_, std::move(client_cmdline), local_});
    }
    if (local_) {
      if (isOffloadable_) {
        serverState_->getStats()->addDuration(
            &ThriftStats::globFilesLocalOffloadableDuration, elapsed);
      } else {
        serverState_->getStats()->addDuration(
            &ThriftStats::globFilesLocalDuration, elapsed);
      }
      serverState_->getStats()->increment(&ThriftStats::globFilesLocal);
    } else {
      if (fallback_) {
        serverState_->getStats()->addDuration(
            &ThriftStats::globFilesSaplingRemoteAPIFallbackDuration, elapsed);
        serverState_->getStats()->increment(
            &ThriftStats::globFilesSaplingRemoteAPIFallback);
      } else {
        serverState_->getStats()->addDuration(
            &ThriftStats::globFilesSaplingRemoteAPISuccessDuration, elapsed);
        serverState_->getStats()->increment(
            &ThriftStats::globFilesSaplingRemoteAPISuccess);
      }
    }
  }

  void setLocal(bool isLocal) {
    local_ = isLocal;
  }

  void setFallback(bool isFallback) {
    fallback_ = isFallback;
  }

 private:
  // Renamed with trailing underscore for consistency with the other members.
  bool local_ = true;
  bool fallback_ = false;
  // Hold a shared_ptr copy (not a reference to the caller's shared_ptr) so
  // the ServerState is guaranteed to outlive this scope object.
  std::shared_ptr<ServerState> serverState_;
  bool isOffloadable_;
  std::string logString_;
  // NOTE(review): stored as a reference; callers must guarantee the fetch
  // context outlives this scope.
  const ObjectFetchContextPtr& context_;
  folly::stop_watch<std::chrono::microseconds> itcTimer_ = {};
};
#undef EDEN_MICRO
// Convert a user-supplied path string into a RelativePath. An empty string
// and "." both denote the mount root.
RelativePath relpathFromUserPath(StringPiece userPath) {
  const bool isMountRoot = userPath.empty() || userPath == ".";
  if (isMountRoot) {
    return RelativePath{};
  }
  return RelativePath{userPath};
}
// Resolve a root-relative user path to its inode.
// NOTE: blocks the calling thread until the inode lookup completes.
facebook::eden::InodePtr inodeFromUserPath(
    facebook::eden::EdenMount& mount,
    StringPiece rootRelativePath,
    const ObjectFetchContextPtr& context) {
  const auto relPath = relpathFromUserPath(rootRelativePath);
  auto inodeFuture = mount.getInodeSlow(relPath, context);
  return std::move(inodeFuture).get();
}
// Decide whether a globFiles request may be offloaded to SaplingRemoteAPI.
// Returns false (local pathway) whenever an unsupported parameter is set,
// otherwise passes through the caller's `useSaplingRemoteAPISuffixes` choice.
bool shouldUseSaplingRemoteAPI(
    bool useSaplingRemoteAPISuffixes,
    const GlobParams& params) {
  // The following parameters will default to local lookup
  // Commands related to prefetching or the working copy
  // - prefetchFiles
  // - suppressFileList
  // - searchRoot - root is always the repository root
  // - predictiveGlob - This pathway only accepts suffixes
  // - listOnlyFiles - Only files will be returned
  // Ignore
  // - prefetchMetadata, it is explicitly called
  //   out as having no effect
  // - sync, not used globFiles. If sync behavior is desired
  //   use synchronizeWorkingCopy
  // Handle unsupported flags via guard clauses.
  if (*params.prefetchFiles() || *params.suppressFileList()) {
    XLOG(DBG3)
        << "globFiles request cannot be offloaded to SaplingRemoteAPI due to prefetching: prefetchFiles="
        << *params.prefetchFiles()
        << ", suppressFileList=" << *params.suppressFileList()
        << ". Falling back to local pathway";
    return false;
  }
  const auto& searchRoot = *params.searchRoot();
  if (!searchRoot.empty() && !(searchRoot == ".")) {
    // searchRoot is relative to root
    XLOG(DBG3)
        << "globFiles request cannot be offloaded to SaplingRemoteAPI due to searchRoot '"
        << searchRoot << "'" << " not being mount root '.'"
        << ", falling back to local pathway";
    return false;
  }
  if (params.predictiveGlob()) {
    XLOG(DBG3)
        << "globFiles request cannot be offloaded to SaplingRemoteAPI due to predictiveGlob, falling back to local pathway";
    return false;
  }
  if (!(*params.listOnlyFiles())) {
    XLOG(DBG3)
        << "globFiles request cannot be offloaded to SaplingRemoteAPI due to asking for files and directories, falling back to local pathway";
    return false;
  }
  return useSaplingRemoteAPISuffixes;
}
} // namespace
// INSTRUMENT_THRIFT_CALL returns a unique pointer to
// ThriftRequestScope object. The returned pointer can be used to call
// wrapFuture() to attach a log message on the completion of the Future. This
// must be called in a Thrift worker thread because the calling pid of
// getAndRegisterClientPid is stored in a thread local variable.
// When not attached to Future it will log the completion of the operation and
// time taken to complete it.
//
// Implementation notes:
// - Expands to an immediately-invoked lambda so each call site gets its own
//   `static folly::Logger`, named "eden.thrift.<enclosing function>".
// - The variadic arguments are wrapped in a lambda that joins them with ", ";
//   this callable is handed to ThriftRequestScope, presumably so formatting
//   can be deferred until actually needed — confirm in ThriftRequestScope.
// - The two nullptr arguments are the EdenStatsPtr and the ThriftStats
//   duration pointer; this variant records no per-method duration stat (see
//   INSTRUMENT_THRIFT_CALL_WITH_STAT).
#define INSTRUMENT_THRIFT_CALL(level, ...)                            \
  ([&](SourceLocation loc) {                                          \
    static folly::Logger logger(                                      \
        fmt::format("eden.thrift.{}", loc.function_name()));          \
    return std::make_unique<ThriftRequestScope>(                      \
        this->thriftRequestTraceBus_,                                 \
        logger,                                                       \
        folly::LogLevel::level,                                       \
        loc,                                                          \
        nullptr,                                                      \
        nullptr,                                                      \
        getAndRegisterClientPid(),                                    \
        [&] {                                                         \
          return fmt::to_string(                                      \
              fmt::join(std::make_tuple(__VA_ARGS__), ", "));         \
        });                                                           \
  }(EDEN_CURRENT_SOURCE_LOCATION))
// Same as INSTRUMENT_THRIFT_CALL, but additionally passes the server's
// EdenStats and the given ThriftStats duration member (`stat`) to the
// ThriftRequestScope — presumably so the scope records the request duration
// into that stat on completion; confirm in ThriftRequestScope's destructor.
#define INSTRUMENT_THRIFT_CALL_WITH_STAT(level, stat, ...)            \
  ([&](SourceLocation loc) {                                          \
    static folly::Logger logger(                                      \
        fmt::format("eden.thrift.{}", loc.function_name()));          \
    return std::make_unique<ThriftRequestScope>(                      \
        this->thriftRequestTraceBus_,                                 \
        logger,                                                       \
        folly::LogLevel::level,                                       \
        loc,                                                          \
        server_->getStats().copy(),                                   \
        stat,                                                         \
        getAndRegisterClientPid(),                                    \
        [&] {                                                         \
          return fmt::to_string(                                      \
              fmt::join(std::make_tuple(__VA_ARGS__), ", "));         \
        });                                                           \
  }(EDEN_CURRENT_SOURCE_LOCATION))
// Build a START marker for the thrift request trace bus.
ThriftRequestTraceEvent ThriftRequestTraceEvent::start(
    uint64_t requestId,
    folly::StringPiece method,
    OptionalProcessId clientPid) {
  ThriftRequestTraceEvent event{
      ThriftRequestTraceEvent::START, requestId, method, clientPid};
  return event;
}
// Build a FINISH marker for the thrift request trace bus.
ThriftRequestTraceEvent ThriftRequestTraceEvent::finish(
    uint64_t requestId,
    folly::StringPiece method,
    OptionalProcessId clientPid) {
  ThriftRequestTraceEvent event{
      ThriftRequestTraceEvent::FINISH, requestId, method, clientPid};
  return event;
}
// Teach {fmt} to format a MountId by printing its mount-point path string.
// Used by the INSTRUMENT_THRIFT_CALL macros when a MountId is logged.
template <>
struct fmt::formatter<facebook::eden::MountId> : public formatter<std::string> {
  template <typename Context>
  auto format(const facebook::eden::MountId& mountId, Context& ctx) const {
    return formatter<std::string>::format(*mountId.mountPoint(), ctx);
  }
};
namespace facebook::eden {
const char* const kServiceName = "EdenFS";
// Create the thrift-request activity buffer, or nullopt when disabled.
// Fetch the config handle once instead of resolving
// server_->getServerState()->getEdenConfig() twice.
std::optional<ActivityBuffer<ThriftRequestTraceEvent>>
EdenServiceHandler::initThriftRequestActivityBuffer() {
  auto edenConfig = server_->getServerState()->getEdenConfig();
  if (edenConfig->enableActivityBuffer.getValue()) {
    return std::make_optional<ActivityBuffer<ThriftRequestTraceEvent>>(
        edenConfig->activityBufferMaxEvents.getValue());
  }
  return std::nullopt;
}
// Constructs the thrift handler: sets up the request trace bus, exports a
// fb303 latency histogram per EdenService method, and subscribes to the
// trace bus to track outstanding requests (and feed the optional activity
// buffer).
EdenServiceHandler::EdenServiceHandler(
    std::vector<std::string> originalCommandLine,
    EdenServer* server,
    std::unique_ptr<UsageService> usageService)
    : BaseService{kServiceName},
      originalCommandLine_{std::move(originalCommandLine)},
      server_{server},
      usageService_{std::move(usageService)},
      thriftRequestActivityBuffer_(initThriftRequestActivityBuffer()),
      thriftRequestTraceBus_(TraceBus<ThriftRequestTraceEvent>::create(
          "ThriftRequestTrace",
          server_->getServerState()
              ->getEdenConfig()
              ->ThriftTraceBusCapacity.getValue())) {
  // Histogram shape (bucket size / min / max). Units are whatever
  // exportThriftFuncHist expects — TODO confirm (likely microseconds).
  struct HistConfig {
    int64_t bucketSize{250};
    int64_t min{0};
    int64_t max{25000};
  };
  // Cheap, frequently-called methods get finer-grained buckets than the
  // defaults above.
  static constexpr std::pair<StringPiece, HistConfig> customMethodConfigs[] = {
      {"listMounts", {20, 0, 1000}},
      {"resetParentCommits", {20, 0, 1000}},
      {"getCurrentJournalPosition", {20, 0, 1000}},
      {"flushStatsNow", {20, 0, 1000}},
      {"reloadConfig", {200, 0, 10000}},
  };
  // Enumerate the service's methods from its thrift metadata so every method
  // gets a histogram without maintaining a hand-written list.
  apache::thrift::metadata::ThriftServiceMetadataResponse metadataResponse;
  getProcessor()->getServiceMetadata(metadataResponse);
  auto& edenService =
      metadataResponse.metadata_ref()->services_ref()->at("eden.EdenService");
  for (auto& function : *edenService.functions_ref()) {
    HistConfig hc;
    for (auto& [name, customHistConfig] : customMethodConfigs) {
      if (*function.name_ref() == name) {
        hc = customHistConfig;
        break;
      }
    }
    // For now, only register EdenService methods, but we could traverse up
    // parent services too.
    static constexpr StringPiece prefix = "EdenService.";
    exportThriftFuncHist(
        folly::to<std::string>(prefix, *function.name_ref()),
        facebook::fb303::PROCESS,
        folly::small_vector<int>({50, 90, 99}), // percentiles to record
        hc.bucketSize,
        hc.min,
        hc.max);
  }
  // Mirror trace-bus events into the outstanding-request map (START inserts,
  // FINISH erases) and into the activity buffer when enabled.
  thriftRequestTraceHandle_ = thriftRequestTraceBus_->subscribeFunction(
      "Outstanding Thrift request tracing",
      [this](const ThriftRequestTraceEvent& event) {
        switch (event.type) {
          case ThriftRequestTraceEvent::START:
            outstandingThriftRequests_.wlock()->emplace(event.requestId, event);
            break;
          case ThriftRequestTraceEvent::FINISH:
            outstandingThriftRequests_.wlock()->erase(event.requestId);
            break;
        }
        if (thriftRequestActivityBuffer_.has_value()) {
          thriftRequestActivityBuffer_->addEvent(event);
        }
      });
}
// Defaulted out-of-line; member types need only be complete in this file.
EdenServiceHandler::~EdenServiceHandler() = default;
// lookupMount overloads: all funnel into the std::string version, which
// converts the thrift mount identifier into an absolute path and asks the
// server for the corresponding mount handle. Failure behavior is defined by
// EdenServer::getMount — presumably throws for unknown mounts; confirm there.
EdenMountHandle EdenServiceHandler::lookupMount(const MountId& mountId) {
  return lookupMount(mountId.mountPoint());
}

EdenMountHandle EdenServiceHandler::lookupMount(
    const std::unique_ptr<std::string>& mountId) {
  return lookupMount(*mountId);
}

EdenMountHandle EdenServiceHandler::lookupMount(
    apache::thrift::field_ref<std::string&> mountId) {
  return lookupMount(*mountId);
}

EdenMountHandle EdenServiceHandler::lookupMount(
    apache::thrift::field_ref<const std::string&> mountId) {
  return lookupMount(*mountId);
}

EdenMountHandle EdenServiceHandler::lookupMount(const std::string& mountId) {
  auto mountPath = absolutePathFromThrift(mountId);
  return server_->getMount(mountPath);
}
// Build the thrift processor, optionally attaching the custom permission
// checker when enabled in config.
std::unique_ptr<apache::thrift::AsyncProcessor>
EdenServiceHandler::getProcessor() {
  auto processor = StreamingEdenServiceSvIf::getProcessor();
  const bool useCustomChecks = server_->getServerState()
                                   ->getEdenConfig()
                                   ->thriftUseCustomPermissionChecking.getValue();
  if (useCustomChecks) {
    processor->addEventHandler(
        std::make_shared<ThriftPermissionChecker>(server_->getServerState()));
  }
  return processor;
}
// Mount a checkout: load its CheckoutConfig from the client directory and
// hand it to the server. Errors are logged and rethrown as EdenError.
folly::SemiFuture<folly::Unit> EdenServiceHandler::semifuture_mount(
    std::unique_ptr<MountArgument> argument) {
  auto helper = INSTRUMENT_THRIFT_CALL(INFO, (*argument->mountPoint()));
  // The [&] capture is safe: makeImmediateFutureWith runs the lambda
  // synchronously, before `argument` goes out of scope.
  return wrapImmediateFuture(
             std::move(helper),
             makeImmediateFutureWith([&] {
               auto mountPoint =
                   absolutePathFromThrift(*argument->mountPoint_ref());
               auto edenClientPath =
                   absolutePathFromThrift(*argument->edenClientPath_ref());
               auto initialConfig = CheckoutConfig::loadFromClientDirectory(
                   mountPoint, edenClientPath);
               return server_
                   ->mount(std::move(initialConfig), *argument->readOnly_ref())
                   .unit();
             }).thenError([](const folly::exception_wrapper& ex) {
               XLOG(ERR) << "Error: " << ex.what();
               throw newEdenError(ex);
             }))
      .semi();
}
// Unmount a checkout by path; failures are rethrown as EdenError.
folly::SemiFuture<folly::Unit> EdenServiceHandler::semifuture_unmount(
    std::unique_ptr<std::string> mountPoint) {
  auto scope = INSTRUMENT_THRIFT_CALL(INFO, *mountPoint);
  auto unmountFuture = makeImmediateFutureWith([&] {
                         auto mountPath = absolutePathFromThrift(*mountPoint);
                         return server_->unmount(mountPath);
                       }).thenError([](const folly::exception_wrapper& ex) {
    // Normalize any failure into an EdenError for thrift clients.
    throw newEdenError(ex);
  });
  return wrapImmediateFuture(std::move(scope), std::move(unmountFuture)).semi();
}
// Fill `results` with one MountInfo per currently-mounted checkout.
void EdenServiceHandler::listMounts(std::vector<MountInfo>& results) {
  auto scope = INSTRUMENT_THRIFT_CALL(DBG3);
  for (const auto& mount : server_->getAllMountPoints()) {
    const auto& checkoutConfig = mount->getCheckoutConfig();
    MountInfo info;
    info.mountPoint_ref() = absolutePathToThrift(mount->getPath());
    info.edenClientPath_ref() =
        absolutePathToThrift(checkoutConfig->getClientDirectory());
    info.state_ref() = mount->getState();
    info.backingRepoPath_ref() = checkoutConfig->getRepoSource();
    results.push_back(std::move(info));
  }
}
// Check out the given revision on a mount, returning the list of conflicts.
// Order matters below: the helper's fetch context must be populated before
// the checkout future captures a reference to it.
folly::SemiFuture<std::unique_ptr<std::vector<CheckoutConflict>>>
EdenServiceHandler::semifuture_checkOutRevision(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::string> hash,
    CheckoutMode checkoutMode,
    std::unique_ptr<CheckOutRevisionParams> params) {
  auto rootIdOptions = params->rootIdOptions().ensure();
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG1,
      *mountPoint,
      logHash(*hash),
      apache::thrift::util::enumName(checkoutMode, "(unknown)"),
      params->hgRootManifest_ref().has_value()
          ? logHash(*params->hgRootManifest_ref())
          : "(unspecified hg root manifest)",
      rootIdOptions.filterId_ref().has_value() ? *rootIdOptions.filterId_ref()
                                               : "no filter provided");
  helper->getThriftFetchContext().fillClientRequestInfo(params->cri_ref());
  auto& fetchContext = helper->getFetchContext();
  auto mountHandle = lookupMount(mountPoint);
  // If we were passed a FilterID, create a RootID that contains the
  // filter and a varint that indicates the length of the original hash.
  std::string parsedHash =
      resolveRootId(std::move(*hash), rootIdOptions, mountHandle);
  hash.reset();
  auto mountPath = absolutePathFromThrift(*mountPoint);
  auto checkoutFuture = server_->checkOutRevision(
      mountPath,
      parsedHash,
      params->hgRootManifest_ref().to_optional(),
      fetchContext,
      helper->getFunctionName(),
      checkoutMode);
  // `helper` stays alive (via wrapImmediateFuture) until checkout finishes,
  // keeping the fetch context referenced above valid.
  return wrapImmediateFuture(
             std::move(helper),
             std::move(checkoutFuture).thenValue([](CheckoutResult&& result) {
               return std::make_unique<std::vector<CheckoutConflict>>(
                   std::move(result.conflicts));
             }))
      .semi();
}
// Move the working directory's parent commit without changing file contents.
folly::SemiFuture<folly::Unit>
EdenServiceHandler::semifuture_resetParentCommits(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<WorkingDirectoryParents> parents,
    std::unique_ptr<ResetParentCommitsParams> params) {
  auto rootIdOptions = params->rootIdOptions_ref().ensure();
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG1,
      *mountPoint,
      logHash(*parents->parent1_ref()),
      params->hgRootManifest_ref().has_value()
          ? logHash(*params->hgRootManifest_ref())
          : "(unspecified hg root manifest)",
      rootIdOptions.filterId_ref().has_value() ? *rootIdOptions.filterId_ref()
                                               : "no filter provided");
  helper->getThriftFetchContext().fillClientRequestInfo(params->cri_ref());
  auto mountHandle = lookupMount(mountPoint);
  // If we were passed a FilterID, create a RootID that contains the filter and
  // a varint that indicates the length of the original hash.
  std::string parsedParent = resolveRootId(
      std::move(*parents->parent1_ref()), rootIdOptions, mountHandle);
  auto parent1 = mountHandle.getObjectStore().parseRootId(parsedParent);
  auto fut = ImmediateFuture<folly::Unit>{std::in_place};
  if (params->hgRootManifest_ref().has_value()) {
    auto& fetchContext = helper->getFetchContext();
    // The hg client has told us what the root manifest is.
    //
    // This is useful when a commit has just been created. We won't be able to
    // ask the import helper to map the commit to its root manifest because it
    // won't know about the new commit until it reopens the repo. Instead,
    // import the manifest for this commit directly.
    auto rootManifest = hash20FromThrift(*params->hgRootManifest_ref());
    fut = mountHandle.getObjectStore().getBackingStore()->importManifestForRoot(
        parent1, rootManifest, fetchContext);
  }
  // Only reset the parent after the (optional) manifest import succeeds.
  return wrapImmediateFuture(
             std::move(helper),
             std::move(fut).thenValue([parent1, mountHandle](folly::Unit) {
               mountHandle.getEdenMount().resetParent(parent1);
             }))
      .semi();
}
// Report the mount's last active filter id, when one is set.
void EdenServiceHandler::getCurrentSnapshotInfo(
    GetCurrentSnapshotInfoResponse& out,
    std::unique_ptr<GetCurrentSnapshotInfoRequest> params) {
  const auto& mountId = params->mountId();
  auto scope = INSTRUMENT_THRIFT_CALL(DBG3, *mountId);
  scope->getThriftFetchContext().fillClientRequestInfo(params->cri());
  auto mountHandle = lookupMount(*mountId);
  auto filterId =
      mountHandle.getEdenMount().getCheckoutConfig()->getLastActiveFilter();
  if (filterId.has_value()) {
    // Leave the field unset when there is no active filter.
    out.filterId_ref() = std::move(*filterId);
  }
}
namespace {
// Sync timeout in seconds; defaults to 60 when the client didn't specify one.
int64_t getSyncTimeout(const SyncBehavior& sync) {
  constexpr int64_t kDefaultSyncTimeoutSeconds = 60;
  return sync.syncTimeoutSeconds().value_or(kDefaultSyncTimeoutSeconds);
}
/**
 * Wait for all the pending notifications to be processed.
 *
 * When the SyncBehavior is unset, this defaults to a 60 second timeout. A
 * negative SyncBehavior means wait indefinitely, and zero means don't wait
 * at all.
 */
ImmediateFuture<folly::Unit> waitForPendingWrites(
    const EdenMount& mount,
    const SyncBehavior& sync) {
  const auto timeoutSeconds = getSyncTimeout(sync);
  if (timeoutSeconds == 0) {
    return folly::unit;
  }
  auto pending = mount.waitForPendingWrites().semi();
  if (timeoutSeconds > 0) {
    pending = std::move(pending).within(std::chrono::seconds{timeoutSeconds});
  }
  return std::move(pending);
}
} // namespace
// Wait until the mount's pending working-copy writes have been processed,
// honoring the request's SyncBehavior timeout.
folly::SemiFuture<folly::Unit>
EdenServiceHandler::semifuture_synchronizeWorkingCopy(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<SynchronizeWorkingCopyParams> params) {
  auto scope = INSTRUMENT_THRIFT_CALL(
      DBG3, *mountPoint, getSyncTimeout(*params->sync()));
  auto mountHandle = lookupMount(mountPoint);
  auto pendingWrites =
      waitForPendingWrites(mountHandle.getEdenMount(), *params->sync());
  // Keep the mount handle alive until the wait completes.
  return wrapImmediateFuture(std::move(scope), std::move(pendingWrites))
      .ensure([mountHandle] {})
      .semi();
}
// Compute a Blake3 hash for each requested path. Per-path failures are
// reported in-band (Blake3Result::error) rather than failing the request.
folly::SemiFuture<std::unique_ptr<std::vector<Blake3Result>>>
EdenServiceHandler::semifuture_getBlake3(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::vector<std::string>> paths,
    std::unique_ptr<SyncBehavior> sync) {
  TraceBlock block("getBlake3");
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3, *mountPoint, getSyncTimeout(*sync), toLogArg(*paths));
  auto& fetchContext = helper->getFetchContext();
  auto mountHandle = lookupMount(mountPoint);
  // Let pending working-copy writes settle first (per the sync behavior).
  auto notificationFuture =
      waitForPendingWrites(mountHandle.getEdenMount(), *sync);
  return wrapImmediateFuture(
             std::move(helper),
             std::move(notificationFuture)
                 .thenValue(
                     // Both lambdas copy the fetch context so it stays valid
                     // after `helper` is released by wrapImmediateFuture.
                     [mountHandle,
                      paths = std::move(paths),
                      fetchContext = fetchContext.copy()](auto&&) mutable {
                       return applyToVirtualInode(
                           mountHandle.getRootInode(),
                           *paths,
                           [mountHandle, fetchContext = fetchContext.copy()](
                               const VirtualInode& inode, RelativePath path) {
                             return inode
                                 .getBlake3(
                                     path,
                                     mountHandle.getObjectStorePtr(),
                                     fetchContext)
                                 .semi();
                           },
                           mountHandle.getObjectStorePtr(),
                           fetchContext);
                     })
                 .ensure([mountHandle] {})
                 .thenValue([](std::vector<folly::Try<Hash32>> results) {
                   // Convert each per-path Try into a thrift Blake3Result.
                   auto out = std::make_unique<std::vector<Blake3Result>>();
                   out->reserve(results.size());
                   for (auto& result : results) {
                     auto& blake3Result = out->emplace_back();
                     if (result.hasValue()) {
                       blake3Result.blake3_ref() = thriftHash32(result.value());
                     } else {
                       blake3Result.error_ref() =
                           newEdenError(result.exception());
                     }
                   }
                   return out;
                 }))
      .semi();
}
// Compute a SHA-1 hash for each requested path. Mirrors semifuture_getBlake3;
// per-path failures are reported in-band (SHA1Result::error).
folly::SemiFuture<std::unique_ptr<std::vector<SHA1Result>>>
EdenServiceHandler::semifuture_getSHA1(
    std::unique_ptr<string> mountPoint,
    std::unique_ptr<vector<string>> paths,
    std::unique_ptr<SyncBehavior> sync) {
  TraceBlock block("getSHA1");
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3, *mountPoint, getSyncTimeout(*sync), toLogArg(*paths));
  auto& fetchContext = helper->getFetchContext();
  auto mountHandle = lookupMount(mountPoint);
  // Let pending working-copy writes settle first (per the sync behavior).
  auto notificationFuture =
      waitForPendingWrites(mountHandle.getEdenMount(), *sync);
  return wrapImmediateFuture(
             std::move(helper),
             std::move(notificationFuture)
                 .thenValue(
                     // Both lambdas copy the fetch context so it stays valid
                     // after `helper` is released by wrapImmediateFuture.
                     [mountHandle,
                      paths = std::move(paths),
                      fetchContext = fetchContext.copy()](auto&&) mutable {
                       return applyToVirtualInode(
                           mountHandle.getRootInode(),
                           *paths,
                           [mountHandle, fetchContext = fetchContext.copy()](
                               const VirtualInode& inode, RelativePath path) {
                             return inode
                                 .getSHA1(
                                     path,
                                     mountHandle.getObjectStorePtr(),
                                     fetchContext)
                                 .semi();
                           },
                           mountHandle.getObjectStorePtr(),
                           fetchContext);
                     })
                 .ensure([mountHandle] {})
                 .thenValue([](std::vector<folly::Try<Hash20>> results) {
                   // Convert each per-path Try into a thrift SHA1Result.
                   auto out = std::make_unique<std::vector<SHA1Result>>();
                   out->reserve(results.size());
                   for (auto& result : results) {
                     auto& sha1Result = out->emplace_back();
                     if (result.hasValue()) {
                       sha1Result.sha1_ref() = thriftHash20(result.value());
                     } else {
                       sha1Result.error_ref() =
                           newEdenError(result.exception());
                     }
                   }
                   return out;
                 }))
      .semi();
}
// Bind-mount `targetPath` onto `repoPathStr` inside the checkout, creating
// the directory inside the mount first if needed.
folly::SemiFuture<folly::Unit> EdenServiceHandler::semifuture_addBindMount(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::string> repoPathStr,
    std::unique_ptr<std::string> targetPath) {
  auto scope = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  const auto repoPath = RelativePathPiece{*repoPathStr};
  const auto absRepoPath = mountHandle.getEdenMount().getPath() + repoPath;
  auto* privHelper = server_->getServerState()->getPrivHelper();
  // Make sure the directory exists inside the checkout before asking the
  // privileged helper to bind-mount on top of it.
  auto ensureDirFuture = mountHandle.getEdenMount().ensureDirectoryExists(
      repoPath, scope->getFetchContext());
  return std::move(ensureDirFuture)
      .thenValue([privHelper,
                  target = absolutePathFromThrift(*targetPath),
                  pathInMountDir = absRepoPath.copy()](TreeInodePtr) {
        return privHelper->bindMount(target.view(), pathInMountDir.view());
      })
      // Keep the mount handle and request scope alive until completion.
      .ensure([mountHandle, scope = std::move(scope)] {})
      .semi();
}
// Tear down a bind mount previously created by addBindMount.
folly::SemiFuture<folly::Unit> EdenServiceHandler::semifuture_removeBindMount(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::string> repoPathStr) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  auto repoPath = RelativePathPiece{*repoPathStr};
  auto absRepoPath = mountHandle.getEdenMount().getPath() + repoPath;
  // NOTE(review): unlike addBindMount, `helper` and `mountHandle` are not
  // attached to the returned future, so request logging completes (and the
  // mount handle is released) when this function returns rather than when
  // the unmount finishes. Also confirm bindUnMount copies the path view
  // before returning, since absRepoPath dies at end of scope.
  return server_->getServerState()->getPrivHelper()->bindUnMount(
      absRepoPath.view());
}
// Report the journal's latest sequence number and snapshot root id.
void EdenServiceHandler::getCurrentJournalPosition(
    JournalPosition& out,
    std::unique_ptr<std::string> mountPoint) {
  auto scope = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  auto& edenMount = mountHandle.getEdenMount();
  out.mountGeneration_ref() = edenMount.getMountGeneration();
  if (auto latest = edenMount.getJournal().getLatest()) {
    out.sequenceNumber_ref() = latest->sequenceID;
    out.snapshotHash_ref() =
        mountHandle.getObjectStore().renderRootId(latest->toHash);
  } else {
    // An empty journal reports sequence 0 and the null root id.
    out.sequenceNumber_ref() = 0;
    out.snapshotHash_ref() =
        mountHandle.getObjectStore().renderRootId(RootId{});
  }
}
// Stream a (content-free) JournalPosition to the client every time the
// journal changes; the client is expected to call getCurrentJournalPosition
// or getFilesChangedSince in response.
apache::thrift::ServerStream<JournalPosition>
EdenServiceHandler::subscribeStreamTemporary(
    std::unique_ptr<std::string> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  // We need a weak ref on the mount because the thrift stream plumbing
  // may outlive the mount point
  std::weak_ptr<EdenMount> weakMount(mountHandle.getEdenMountPtr());
  // We'll need to pass the subscriber id to both the disconnect
  // and change callbacks. We can't know the id until after we've
  // created them both, so we need to share an optional id between them.
  auto handle = std::make_shared<std::optional<Journal::SubscriberId>>();
  auto disconnected = std::make_shared<std::atomic<bool>>(false);
  // This is called when the subscription channel is torn down
  auto onDisconnect = [weakMount, handle, disconnected] {
    XLOG(INFO) << "streaming client disconnected";
    auto mount = weakMount.lock();
    if (mount) {
      disconnected->store(true);
      mount->getJournal().cancelSubscriber(handle->value());
    }
  };
  // Set up the actual publishing instance
  auto streamAndPublisher =
      apache::thrift::ServerStream<JournalPosition>::createPublisher(
          std::move(onDisconnect));
  // A little wrapper around the StreamPublisher.
  // This is needed because the destructor for StreamPublisherState
  // triggers a FATAL if the stream has not been completed.
  // We don't have an easy way to trigger this outside of just calling
  // it in a destructor, so that's what we do here.
  struct Publisher {
    apache::thrift::ServerStreamPublisher<JournalPosition> publisher;
    std::shared_ptr<std::atomic<bool>> disconnected;
    explicit Publisher(
        apache::thrift::ServerStreamPublisher<JournalPosition> publisher,
        std::shared_ptr<std::atomic<bool>> disconnected)
        : publisher(std::move(publisher)),
          disconnected(std::move(disconnected)) {}
    ~Publisher() {
      // We have to send an exception as part of the completion, otherwise
      // thrift doesn't seem to notify the peer of the shutdown
      if (!disconnected->load()) {
        std::move(publisher).complete(
            folly::make_exception_wrapper<std::runtime_error>(
                "subscriber terminated"));
      }
    }
  };
  auto stream = std::make_shared<Publisher>(
      std::move(streamAndPublisher.second), std::move(disconnected));
  // Register onJournalChange with the journal subsystem, and assign
  // the subscriber id into the handle so that the callbacks can consume it.
  handle->emplace(mountHandle.getEdenMount().getJournal().registerSubscriber(
      [stream = std::move(stream)]() mutable {
        JournalPosition pos;
        // The value is intentionally undefined and should not be used. Instead,
        // the subscriber should call getCurrentJournalPosition or
        // getFilesChangedSince.
        stream->publisher.next(pos);
      }));
  return std::move(streamAndPublisher.first);
}
namespace {
// Convert a trace event's system/monotonic clocks to thrift nanosecond fields.
TraceEventTimes thriftTraceEventTimes(const TraceEventBase& event) {
  using std::chrono::duration_cast;
  using std::chrono::nanoseconds;
  TraceEventTimes result;
  result.timestamp_ref() =
      duration_cast<nanoseconds>(event.systemTime.time_since_epoch()).count();
  result.monotonic_time_ns_ref() =
      duration_cast<nanoseconds>(event.monotonicTime.time_since_epoch())
          .count();
  return result;
}
// Build a RequestInfo for the given pid; processName is left unset when the
// cache has no name for it.
RequestInfo thriftRequestInfo(pid_t pid, ProcessInfoCache& processInfoCache) {
  RequestInfo result;
  result.pid_ref() = pid;
  result.processName_ref().from_optional(processInfoCache.getProcessName(pid));
  return result;
}
// RAII owner for a thrift ServerStreamPublisher. Exactly one instance in a
// move chain holds ownership (`owner`); only the owner may publish, and the
// owner guarantees complete() is eventually called.
template <typename T>
class ThriftStreamPublisherOwner {
 public:
  explicit ThriftStreamPublisherOwner(
      apache::thrift::ServerStreamPublisher<T> publisher)
      : owner(true), publisher{std::move(publisher)} {}

  // Move transfers ownership; the moved-from object becomes inert.
  ThriftStreamPublisherOwner(ThriftStreamPublisherOwner&& that) noexcept
      : owner{std::exchange(that.owner, false)},
        publisher{std::move(that.publisher)} {}

  ThriftStreamPublisherOwner& operator=(ThriftStreamPublisherOwner&&) = delete;

  // Publish one payload; a no-op on non-owning (moved-from) instances.
  void next(T payload) const {
    if (owner) {
      publisher.next(std::move(payload));
    }
  }

  // Complete the stream with an error; consumes ownership.
  void next(folly::exception_wrapper ew) && {
    if (owner) {
      owner = false;
      std::move(publisher).complete(std::move(ew));
    }
  }

  // Destroying a publisher without calling complete() aborts the process, so
  // ensure complete() is called when this object is dropped.
  ~ThriftStreamPublisherOwner() {
    if (owner) {
      std::move(publisher).complete();
    }
  }

 private:
  bool owner;
  apache::thrift::ServerStreamPublisher<T> publisher;
};
} // namespace
#ifndef _WIN32
namespace {
// Copy a FUSE request header into the thrift FuseCall struct.
FuseCall populateFuseCall(
    uint64_t unique,
    const FuseTraceEvent::RequestHeader& request,
    ProcessInfoCache& processInfoCache) {
  FuseCall call;
  call.unique_ref() = unique;
  call.opcode_ref() = request.opcode;
  call.opcodeName_ref() = fuseOpcodeName(request.opcode);
  call.nodeid_ref() = request.nodeid;
  call.uid_ref() = request.uid;
  call.gid_ref() = request.gid;
  call.pid_ref() = request.pid;
  // Best-effort process name; the field stays unset when unknown.
  call.processName_ref().from_optional(
      processInfoCache.getProcessName(request.pid));
  return call;
}
// Copy an NFS trace event's identifiers into the thrift NfsCall struct.
NfsCall populateNfsCall(const NfsTraceEvent& event) {
  const auto procNumber = event.getProcNumber();
  NfsCall call;
  call.xid_ref() = event.getXid();
  call.procNumber_ref() = procNumber;
  call.procName_ref() = nfsProcName(procNumber);
  return call;
}
/**
 * Returns true if event should not be traced, i.e. its access-type category
 * bit is absent from eventCategoryMask.
 */
bool isEventMasked(
    int64_t eventCategoryMask,
    ProcessAccessLog::AccessType accessType) {
  using AccessType = ProcessAccessLog::AccessType;
  int64_t categoryBit;
  switch (accessType) {
    case AccessType::FsChannelRead:
      categoryBit = streamingeden_constants::FS_EVENT_READ_;
      break;
    case AccessType::FsChannelWrite:
      categoryBit = streamingeden_constants::FS_EVENT_WRITE_;
      break;
    case AccessType::FsChannelOther:
    default:
      categoryBit = streamingeden_constants::FS_EVENT_OTHER_;
      break;
  }
  return (eventCategoryMask & categoryBit) == 0;
}
// FUSE overload: classify the opcode, then defer to the AccessType overload.
bool isEventMasked(int64_t eventCategoryMask, const FuseTraceEvent& event) {
  const auto accessType = fuseOpcodeAccessType(event.getRequest().opcode);
  return isEventMasked(eventCategoryMask, accessType);
}
// NFS overload: classify the proc number, then defer to the AccessType
// overload.
bool isEventMasked(int64_t eventCategoryMask, const NfsTraceEvent& event) {
  const auto accessType = nfsProcAccessType(event.getProcNumber());
  return isEventMasked(eventCategoryMask, accessType);
}
} // namespace
#endif //!_WIN32
#ifdef _WIN32
// Copy ProjectedFS operation data into the thrift PrjfsCall struct.
PrjfsCall populatePrjfsCall(
    const PrjfsTraceCallType callType,
    const PrjfsTraceEvent::PrjfsOperationData& data) {
  PrjfsCall call;
  call.callType_ref() = callType;
  call.commandId_ref() = data.commandId;
  call.pid_ref() = data.pid;
  return call;
}
// Convenience overload: extract call type and data from the trace event.
PrjfsCall populatePrjfsCall(const PrjfsTraceEvent& event) {
  const auto& data = event.getData();
  return populatePrjfsCall(event.getCallType(), data);
}
#endif
// Convert a trace-bus request event into thrift ThriftRequestMetadata.
ThriftRequestMetadata populateThriftRequestMetadata(
    const ThriftRequestTraceEvent& request) {
  ThriftRequestMetadata metadata;
  metadata.requestId() = request.requestId;
  metadata.method() = request.method;
  // clientPid is optional; only set it when the event recorded one.
  if (auto clientPid = request.clientPid) {
    metadata.clientPid() = clientPid.value().get();
  }
  return metadata;
}
/**
 * Helper function to convert a ThriftRequestTraceEvent to a ThriftRequestEvent
 * type. Used in EdenServiceHandler::traceThriftRequestEvents and
 * EdenServiceHandler::getRetroactiveThriftRequestEvents.
 */
void convertThriftRequestTraceEventToThriftRequestEvent(
    const ThriftRequestTraceEvent& event,
    ThriftRequestEvent& te) {
  te.times_ref() = thriftTraceEventTimes(event);
  te.requestMetadata_ref() = populateThriftRequestMetadata(event);
  // Map the trace-bus event type onto the thrift enum.
  switch (event.type) {
    case ThriftRequestTraceEvent::START:
      te.eventType() = ThriftRequestEventType::START;
      break;
    case ThriftRequestTraceEvent::FINISH:
      te.eventType() = ThriftRequestEventType::FINISH;
      break;
  }
}
// Stream live thrift request START/FINISH events to the client.
apache::thrift::ServerStream<ThriftRequestEvent>
EdenServiceHandler::traceThriftRequestEvents() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  // Owns the TraceBus subscription; releasing the shared_ptr (from the
  // stream's disconnect callback) tears the subscription down.
  struct SubscriptionHandleOwner {
    TraceBus<ThriftRequestTraceEvent>::SubscriptionHandle handle;
  };
  auto h = std::make_shared<SubscriptionHandleOwner>();
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<ThriftRequestEvent>::createPublisher([h] {
        // on disconnect, release subscription handle
      });
  h->handle = thriftRequestTraceBus_->subscribeFunction(
      "Live Thrift request tracing",
      // ThriftStreamPublisherOwner guarantees the publisher is completed
      // when the subscription is dropped.
      [publisher_2 = ThriftStreamPublisherOwner{std::move(publisher)}](
          const ThriftRequestTraceEvent& event) mutable {
        ThriftRequestEvent thriftEvent;
        convertThriftRequestTraceEventToThriftRequestEvent(event, thriftEvent);
        publisher_2.next(thriftEvent);
      });
  return std::move(serverStream);
}
// Stream live TaskTraceEvents to the client as thrift TaskEvents. The
// subscription is released when the client disconnects.
apache::thrift::ServerStream<TaskEvent> EdenServiceHandler::traceTaskEvents(
    std::unique_ptr<::facebook::eden::TraceTaskEventsRequest> /* request */) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  // Owns the TraceBus subscription; releasing the shared_ptr (from the
  // stream's disconnect callback) tears the subscription down.
  struct SubscriptionHandleOwner {
    TraceBus<TaskTraceEvent>::SubscriptionHandle handle;
  };
  auto h = std::make_shared<SubscriptionHandleOwner>();
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<TaskEvent>::createPublisher([h] {
        // on disconnect, release subscription handle
      });
  h->handle = TaskTraceEvent::getTraceBus()->subscribeFunction(
      // Fixed copy-pasted subscriber name ("Live Thrift request tracing"):
      // this subscription carries task trace events.
      "Live task tracing",
      [publisher_2 = ThriftStreamPublisherOwner{std::move(publisher)}](
          const TaskTraceEvent& event) mutable {
        TaskEvent taskEvent;
        taskEvent.times() = thriftTraceEventTimes(event);
        taskEvent.name() = event.name;
        taskEvent.threadName() = event.threadName;
        taskEvent.threadId() = event.threadId;
        taskEvent.duration() = event.duration.count();
        taskEvent.start() = event.start.count();
        publisher_2.next(taskEvent);
      });
  return std::move(serverStream);
}
/**
 * Streams live filesystem trace events for the given mount.
 *
 * @param mountPoint        path of the mount to trace
 * @param eventCategoryMask bitmask selecting which event categories to emit;
 *                          0 is treated as "all categories" (see below)
 *
 * Subscribes to the mount's channel TraceBus (PrjFS on Windows; FUSE or NFS
 * elsewhere) and converts each channel event into an FsEvent on the returned
 * stream. Also requests detailed argument strings from the channel for the
 * lifetime of the subscription (argHandle).
 */
apache::thrift::ServerStream<FsEvent> EdenServiceHandler::traceFsEvents(
    std::unique_ptr<std::string> mountPoint,
    int64_t eventCategoryMask) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  auto& edenMount = mountHandle.getEdenMount();
  // Treat an empty bitset as an unfiltered stream. This is for clients that
  // predate the addition of the mask and for clients that don't care.
  // 0 would be meaningless anyway: it would never return any events.
  if (0 == eventCategoryMask) {
    eventCategoryMask = ~0;
  }
  // Holds everything that must stay alive while the client is subscribed:
  // the detailed-arguments handle and the TraceBus subscription handle.
  struct Context {
    // While subscribed to FuseChannel's TraceBus, request detailed argument
    // strings.
    TraceDetailedArgumentsHandle argHandle;
#ifdef _WIN32
    TraceSubscriptionHandle<PrjfsTraceEvent> subHandle;
#else
    std::variant<
        TraceSubscriptionHandle<FuseTraceEvent>,
        TraceSubscriptionHandle<NfsTraceEvent>>
        subHandle;
#endif // _WIN32
  };
  auto context = std::make_shared<Context>();
  // Pick the channel backing this mount; tracing an unsupported filesystem
  // type is a bug.
#ifdef _WIN32
  auto prjfsChannel = edenMount.getPrjfsChannel()->getInner();
  if (prjfsChannel) {
    context->argHandle = prjfsChannel->traceDetailedArguments();
  } else {
    EDEN_BUG() << "tracing isn't supported yet for the "
               << fmt::underlying(
                      edenMount.getCheckoutConfig()->getMountProtocol())
               << " filesystem type";
  }
#else
  auto* fuseChannel = edenMount.getFuseChannel();
  auto* nfsdChannel = edenMount.getNfsdChannel();
  if (fuseChannel) {
    context->argHandle = fuseChannel->traceDetailedArguments();
  } else if (nfsdChannel) {
    context->argHandle = nfsdChannel->traceDetailedArguments();
  } else {
    EDEN_BUG() << "tracing isn't supported yet for the "
               << fmt::underlying(
                      edenMount.getCheckoutConfig()->getMountProtocol())
               << " filesystem type";
  }
#endif // _WIN32
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<FsEvent>::createPublisher([context] {
        // on disconnect, release context and the TraceSubscriptionHandle
      });
  // Subscribe to the channel's trace bus; each callback converts one channel
  // event into an FsEvent and publishes it.
#ifdef _WIN32
  if (prjfsChannel) {
    context->subHandle = prjfsChannel->getTraceBusPtr()->subscribeFunction(
        fmt::format("strace-{}", edenMount.getPath().basename()),
        [publisher = ThriftStreamPublisherOwner{std::move(publisher)}](
            const PrjfsTraceEvent& event) {
          FsEvent te;
          auto times = thriftTraceEventTimes(event);
          te.times_ref() = times;
          // Legacy timestamp fields.
          te.timestamp_ref() = *times.timestamp_ref();
          te.monotonic_time_ns_ref() = *times.monotonic_time_ns_ref();
          te.prjfsRequest_ref() = populatePrjfsCall(event);
          switch (event.getType()) {
            case PrjfsTraceEvent::START:
              te.type_ref() = FsEventType::START;
              // Detailed arguments are only attached to START events.
              if (auto& arguments = event.getArguments()) {
                te.arguments_ref() = *arguments;
              }
              break;
            case PrjfsTraceEvent::FINISH:
              te.type_ref() = FsEventType::FINISH;
              break;
          }
          te.requestInfo_ref() = RequestInfo{};
          publisher.next(te);
        });
  }
#else
  if (fuseChannel) {
    context->subHandle = fuseChannel->getTraceBus().subscribeFunction(
        fmt::format("strace-{}", edenMount.getPath().basename()),
        [publisher_2 = ThriftStreamPublisherOwner{std::move(publisher)},
         serverState = server_->getServerState(),
         eventCategoryMask](const FuseTraceEvent& event) {
          // Drop events the client's category mask excludes.
          if (isEventMasked(eventCategoryMask, event)) {
            return;
          }
          FsEvent te;
          auto times = thriftTraceEventTimes(event);
          te.times_ref() = times;
          // Legacy timestamp fields.
          te.timestamp_ref() = *times.timestamp_ref();
          te.monotonic_time_ns_ref() = *times.monotonic_time_ns_ref();
          te.fuseRequest_ref() = populateFuseCall(
              event.getUnique(),
              event.getRequest(),
              *serverState->getProcessInfoCache());
          switch (event.getType()) {
            case FuseTraceEvent::START:
              te.type_ref() = FsEventType::START;
              if (auto& arguments = event.getArguments()) {
                te.arguments_ref() = *arguments;
              }
              break;
            case FuseTraceEvent::FINISH:
              te.type_ref() = FsEventType::FINISH;
              // FUSE FINISH events carry the kernel response code, when set.
              te.result_ref().from_optional(event.getResponseCode());
              break;
          }
          te.requestInfo_ref() = thriftRequestInfo(
              event.getRequest().pid, *serverState->getProcessInfoCache());
          publisher_2.next(te);
        });
  } else if (nfsdChannel) {
    context->subHandle = nfsdChannel->getTraceBus().subscribeFunction(
        fmt::format("strace-{}", edenMount.getPath().basename()),
        [publisher_2 = ThriftStreamPublisherOwner{std::move(publisher)},
         eventCategoryMask](const NfsTraceEvent& event) {
          if (isEventMasked(eventCategoryMask, event)) {
            return;
          }
          FsEvent te;
          auto times = thriftTraceEventTimes(event);
          te.times_ref() = times;
          // Legacy timestamp fields.
          te.timestamp_ref() = *times.timestamp_ref();
          te.monotonic_time_ns_ref() = *times.monotonic_time_ns_ref();
          te.nfsRequest_ref() = populateNfsCall(event);
          switch (event.getType()) {
            case NfsTraceEvent::START:
              te.type_ref() = FsEventType::START;
              if (auto arguments = event.getArguments()) {
                te.arguments_ref() = arguments.value();
              }
              break;
            case NfsTraceEvent::FINISH:
              te.type_ref() = FsEventType::FINISH;
              break;
          }
          // NFS does not expose the requesting process; leave RequestInfo
          // empty.
          te.requestInfo_ref() = RequestInfo{};
          publisher_2.next(te);
        });
  }
#endif // _WIN32
  return std::move(serverStream);
}
/**
 * Helper function to cast a BackingStore shared_ptr to a
 * SaplingBackingStore shared_ptr. Throws an error if the backingStore
 * provided is not truly a SaplingBackingStore. Used in
 * EdenServiceHandler::traceHgEvents,
 * EdenServiceHandler::getRetroactiveHgEvents and
 * EdenServiceHandler::debugOutstandingHgEvents.
 */
std::shared_ptr<SaplingBackingStore> castToSaplingBackingStore(
    std::shared_ptr<BackingStore>& backingStore,
    AbsolutePathPiece mountPath) {
  // When FilteredFS is enabled the mount's top-level store is a
  // FilteredBackingStore wrapping the SaplingBackingStore; otherwise the
  // SaplingBackingStore is the top-level store. Try the wrapped form first.
  std::shared_ptr<SaplingBackingStore> saplingBackingStore;
  if (auto filteredBackingStore =
          std::dynamic_pointer_cast<FilteredBackingStore>(backingStore)) {
    // FilteredBackingStore -> SaplingBackingStore
    saplingBackingStore = std::dynamic_pointer_cast<SaplingBackingStore>(
        filteredBackingStore->getBackingStore());
  } else {
    // BackingStore -> SaplingBackingStore
    saplingBackingStore =
        std::dynamic_pointer_cast<SaplingBackingStore>(backingStore);
  }
  if (saplingBackingStore) {
    return saplingBackingStore;
  }
  // typeid() does not evaluate expressions
  auto& r = *backingStore.get();
  throw newEdenError(
      EdenErrorType::GENERIC_ERROR,
      fmt::format(
          "mount {} must use SaplingBackingStore, type is {}",
          mountPath,
          typeid(r).name()));
}
/**
* Helper function to convert an HgImportTraceEvent to a thrift HgEvent type.
* Used in EdenServiceHandler::traceHgEvents,
* EdenServiceHandler::getRetroactiveHgEvents and
* EdenServiceHandler::debugOutstandingHgEvents.
*/
void convertHgImportTraceEventToHgEvent(
    const HgImportTraceEvent& event,
    ProcessInfoCache& processInfoCache,
    HgEvent& te) {
  te.times_ref() = thriftTraceEventTimes(event);
  // The switches below are deliberately default-less so the compiler flags
  // them when a new enum value is added without a mapping here.
  switch (event.eventType) {
    case HgImportTraceEvent::QUEUE:
      te.eventType_ref() = HgEventType::QUEUE;
      break;
    case HgImportTraceEvent::START:
      te.eventType_ref() = HgEventType::START;
      break;
    case HgImportTraceEvent::FINISH:
      te.eventType_ref() = HgEventType::FINISH;
      break;
  }
  // Kind of object being imported (blob/tree and their metadata).
  switch (event.resourceType) {
    case HgImportTraceEvent::BLOB:
      te.resourceType_ref() = HgResourceType::BLOB;
      break;
    case HgImportTraceEvent::TREE:
      te.resourceType_ref() = HgResourceType::TREE;
      break;
    case HgImportTraceEvent::BLOBMETA:
      te.resourceType_ref() = HgResourceType::BLOBMETA;
      break;
    case HgImportTraceEvent::TREEMETA:
      te.resourceType_ref() = HgResourceType::TREEMETA;
      break;
  }
  switch (event.importPriority) {
    case ImportPriority::Class::Low:
      te.importPriority_ref() = HgImportPriority::LOW;
      break;
    case ImportPriority::Class::Normal:
      te.importPriority_ref() = HgImportPriority::NORMAL;
      break;
    case ImportPriority::Class::High:
      te.importPriority_ref() = HgImportPriority::HIGH;
      break;
  }
  // What triggered the import (filesystem access, Thrift call, prefetch...).
  switch (event.importCause) {
    case ObjectFetchContext::Cause::Unknown:
      te.importCause_ref() = HgImportCause::UNKNOWN;
      break;
    case ObjectFetchContext::Cause::Fs:
      te.importCause_ref() = HgImportCause::FS;
      break;
    case ObjectFetchContext::Cause::Thrift:
      te.importCause_ref() = HgImportCause::THRIFT;
      break;
    case ObjectFetchContext::Cause::Prefetch:
      te.importCause_ref() = HgImportCause::PREFETCH;
      break;
  }
  // fetchedSource is only known once the fetch completes; before that we
  // report NOT_AVAILABLE_YET rather than leaving the field unset.
  if (event.fetchedSource.has_value()) {
    switch (event.fetchedSource.value()) {
      case ObjectFetchContext::FetchedSource::Local:
        te.fetchedSource_ref() = FetchedSource::LOCAL;
        break;
      case ObjectFetchContext::FetchedSource::Remote:
        te.fetchedSource_ref() = FetchedSource::REMOTE;
        break;
      case ObjectFetchContext::FetchedSource::Unknown:
        te.fetchedSource_ref() = FetchedSource::UNKNOWN;
        break;
    }
  } else {
    te.fetchedSource_ref() = FetchedSource::NOT_AVAILABLE_YET;
  }
  te.unique_ref() = event.unique;
  te.manifestNodeId_ref() = event.manifestNodeId.toString();
  te.path_ref() = event.getPath();
  // requestInfo is only populated when the event recorded a requesting pid.
  if (auto pid = event.pid) {
    te.requestInfo_ref() =
        thriftRequestInfo(pid.value().get(), processInfoCache);
  }
}
/**
 * Streams live Sapling (hg) import trace events for the given mount.
 *
 * Resolves the mount's SaplingBackingStore (possibly unwrapping a
 * FilteredBackingStore) and subscribes to its trace bus, converting each
 * HgImportTraceEvent into a Thrift HgEvent on the returned stream.
 */
apache::thrift::ServerStream<HgEvent> EdenServiceHandler::traceHgEvents(
    std::unique_ptr<std::string> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  auto backingStore = mountHandle.getObjectStore().getBackingStore();
  // Throws if this mount is not backed by a SaplingBackingStore.
  std::shared_ptr<SaplingBackingStore> saplingBackingStore =
      castToSaplingBackingStore(
          backingStore, mountHandle.getEdenMount().getPath());
  // Owns the TraceBus subscription; released by the publisher's
  // on-disconnect callback when the client goes away.
  struct Context {
    TraceSubscriptionHandle<HgImportTraceEvent> subHandle;
  };
  auto context = std::make_shared<Context>();
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<HgEvent>::createPublisher([context] {
        // on disconnect, release context and the TraceSubscriptionHandle
      });
  context->subHandle = saplingBackingStore->getTraceBus().subscribeFunction(
      fmt::format(
          "hgtrace-{}", mountHandle.getEdenMount().getPath().basename()),
      [publisher_2 = ThriftStreamPublisherOwner{std::move(publisher)},
       processInfoCache =
           mountHandle.getEdenMount().getServerState()->getProcessInfoCache()](
          const HgImportTraceEvent& event) {
        HgEvent thriftEvent;
        convertHgImportTraceEventToHgEvent(
            event, *processInfoCache, thriftEvent);
        publisher_2.next(thriftEvent);
      });
  return std::move(serverStream);
}
/**
* Helper function to convert an InodeTraceEvent to a thrift InodeEvent type.
* Used in EdenServiceHandler::traceInodeEvents and
* EdenServiceHandler::getRetroactiveInodeEvents. Note paths are not set here
* and are set by the calling functions. For traceInodeEvents full paths may
* need to be computed whereas for getRetroactiveInodeEvents full paths would
* have already been computed when the event gets added to the ActivityBuffer.
*/
void ConvertInodeTraceEventToThriftInodeEvent(
InodeTraceEvent traceEvent,
InodeEvent& thriftEvent) {
thriftEvent.times() = thriftTraceEventTimes(traceEvent);
thriftEvent.ino() = traceEvent.ino.getRawValue();
thriftEvent.inodeType() = traceEvent.inodeType;
thriftEvent.eventType() = traceEvent.eventType;
thriftEvent.progress() = traceEvent.progress;
thriftEvent.duration() = traceEvent.duration.count();
// TODO: trace requesting pid
// thriftEvent.requestInfo() = thriftRequestInfo(pid);
}
/**
 * Streams live inode trace events for the given mount.
 *
 * Subscribes to the mount's inode TraceBus and converts each InodeTraceEvent
 * into a Thrift InodeEvent. The event's full path is resolved here via the
 * InodeMap when possible, falling back to the path recorded on the event.
 */
apache::thrift::ServerStream<InodeEvent> EdenServiceHandler::traceInodeEvents(
    std::unique_ptr<std::string> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  auto inodeMap = mountHandle.getEdenMount().getInodeMap();
  // Owns the TraceBus subscription; released by the publisher's
  // on-disconnect callback when the client goes away.
  struct Context {
    TraceSubscriptionHandle<InodeTraceEvent> subHandle;
  };
  auto context = std::make_shared<Context>();
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<InodeEvent>::createPublisher([context] {
        // on disconnect, release context and the TraceSubscriptionHandle
      });
  context->subHandle =
      mountHandle.getEdenMount().getInodeTraceBus().subscribeFunction(
          fmt::format(
              "inodetrace-{}", mountHandle.getEdenMount().getPath().basename()),
          [publisher_2 = ThriftStreamPublisherOwner{std::move(publisher)},
           inodeMap](const InodeTraceEvent& event) {
            InodeEvent thriftEvent;
            ConvertInodeTraceEventToThriftInodeEvent(event, thriftEvent);
            // Prefer the path currently known to the InodeMap; fall back to
            // the path captured on the event if the lookup fails (e.g. the
            // inode has been unloaded).
            try {
              auto relativePath = inodeMap->getPathForInode(event.ino);
              thriftEvent.path() =
                  relativePath ? relativePath->asString() : event.getPath();
            } catch (const std::system_error& /* e */) {
              thriftEvent.path() = event.getPath();
            }
            publisher_2.next(thriftEvent);
          });
  return std::move(serverStream);
}
namespace {
// Throws MOUNT_GENERATION_CHANGED when the client's journal position was
// recorded against a different incarnation of this mount; sequence numbers
// from different generations are not comparable.
void checkMountGeneration(
    const JournalPosition& position,
    const EdenMount& mount,
    std::string_view fieldName) {
  const bool generationMatches =
      folly::to_unsigned(*position.mountGeneration()) ==
      mount.getMountGeneration();
  if (generationMatches) {
    return;
  }
  throw newEdenError(
      ERANGE,
      EdenErrorType::MOUNT_GENERATION_CHANGED,
      fieldName,
      ".mountGeneration does not match the current "
      "mountGeneration. "
      "You need to compute a new basis for delta queries.");
}
// Builds a ChangedFileResult for one path and emits it on the stream
// publisher. Access to the publisher is serialized via its Synchronized
// wrapper (read lock is sufficient to call next()).
void publishFile(
    const folly::Synchronized<ThriftStreamPublisherOwner<ChangedFileResult>>&
        publisher,
    folly::StringPiece path,
    ScmFileStatus status,
    dtype_t type) {
  ChangedFileResult entry;
  entry.dtype() = static_cast<Dtype>(type);
  entry.status() = status;
  entry.name() = path.str();
  publisher.rlock()->next(std::move(entry));
}
/**
 * This method computes all uncommitted changes and publishes the results to
 * the given publisher.
 */
void sumUncommitedChanges(
    const JournalDeltaRange& range,
    const folly::Synchronized<ThriftStreamPublisherOwner<ChangedFileResult>>&
        publisher,
    std::optional<std::reference_wrapper<GlobFilter>> filter) {
  // Returns true when the path is recursively excluded by the filter (and a
  // filter was supplied). Shared by both loops below.
  // TODO(T167750650): This .get() will block Thrift threads and could lead
  // to Queue Timeouts. Instead of calling .get(), we should chain futures
  // together.
  auto isRecursivelyFiltered = [&filter](const auto& path) {
    if (!filter) {
      return false;
    }
    return filter->get()
               .getFilterCoverageForPath(path, folly::StringPiece(""))
               .get() == FilterCoverage::RECURSIVELY_FILTERED;
  };
  // Files whose content changed in the overlay: classify each as added,
  // removed, or modified based on its before/after existence.
  for (const auto& [path, changeInfo] : range.changedFilesInOverlay) {
    if (isRecursivelyFiltered(path)) {
      continue;
    }
    const ScmFileStatus status = changeInfo.existedBefore
        ? (changeInfo.existedAfter ? ScmFileStatus::MODIFIED
                                   : ScmFileStatus::REMOVED)
        : (changeInfo.existedAfter ? ScmFileStatus::ADDED
                                   : ScmFileStatus::MODIFIED);
    publishFile(publisher, path.asString(), status, dtype_t::Unknown);
  }
  // Unclean paths are always reported as modified.
  for (const auto& name : range.uncleanPaths) {
    if (isRecursivelyFiltered(name)) {
      continue;
    }
    publishFile(
        publisher, name.asString(), ScmFileStatus::MODIFIED, dtype_t::Unknown);
  }
}
/**
 * DiffCallback that forwards each diff entry to a ChangedFileResult stream
 * publisher as it is produced. The publisher is shared (and Synchronized)
 * because diff callbacks may run concurrently with other writers.
 */
class StreamingDiffCallback : public DiffCallback {
 public:
  explicit StreamingDiffCallback(
      std::shared_ptr<
          folly::Synchronized<ThriftStreamPublisherOwner<ChangedFileResult>>>
          publisher)
      : publisher_{std::move(publisher)} {}
  // Ignored paths are intentionally not streamed.
  void ignoredPath(RelativePathPiece, dtype_t) override {}
  void addedPath(RelativePathPiece path, dtype_t type) override {
    publishFile(*publisher_, path.view(), ScmFileStatus::ADDED, type);
  }
  void removedPath(RelativePathPiece path, dtype_t type) override {
    publishFile(*publisher_, path.view(), ScmFileStatus::REMOVED, type);
  }
  void modifiedPath(RelativePathPiece path, dtype_t type) override {
    publishFile(*publisher_, path.view(), ScmFileStatus::MODIFIED, type);
  }
  // On error, move the publisher out of the Synchronized wrapper and
  // terminate the stream with the error. Note this consumes the publisher:
  // no further events can be published after a diff error.
  void diffError(RelativePathPiece /*path*/, const folly::exception_wrapper& ew)
      override {
    auto publisher = std::move(*publisher_->wlock());
    std::move(publisher).next(newEdenError(ew));
  }
 private:
  std::shared_ptr<
      folly::Synchronized<ThriftStreamPublisherOwner<ChangedFileResult>>>
      publisher_;
};
/**
* Compute the difference between the passed in roots.
*
* The order of the roots matters: a file added in toRoot will be returned as
* ScmFileStatus::ADDED, while if the order of arguments were reversed, it
* would be returned as ScmFileStatus::REMOVED.
*/
ImmediateFuture<folly::Unit> diffBetweenRoots(
    const RootId& fromRoot,
    const RootId& toRoot,
    const CheckoutConfig& checkoutConfig,
    const std::shared_ptr<ObjectStore>& objectStore,
    folly::CancellationToken cancellation,
    const ObjectFetchContextPtr& fetchContext,
    DiffCallback* callback) {
  // NOTE(review): the literal `true` and trailing `nullptr` are positional
  // DiffContext constructor arguments whose meaning isn't visible from this
  // file — confirm against the DiffContext declaration before changing them.
  auto diffContext = std::make_unique<DiffContext>(
      callback,
      cancellation,
      fetchContext,
      true,
      checkoutConfig.getCaseSensitive(),
      checkoutConfig.getEnableWindowsSymlinks(),
      objectStore,
      nullptr);
  auto fut = diffRoots(diffContext.get(), fromRoot, toRoot);
  // Keep the DiffContext alive until the diff completes: diffRoots holds a
  // raw pointer into it, so the context is owned by the future's ensure().
  return std::move(fut).ensure([diffContext = std::move(diffContext)] {});
}
} // namespace
/**
 * Returns the journal changes since the given position as a response plus a
 * stream of ChangedFileResult entries.
 *
 * Uncommitted (overlay/unclean) changes are published synchronously; if the
 * range spans commit transitions, the commit-to-commit diffs are computed on
 * background threads and streamed as they complete. Throws
 * MOUNT_GENERATION_CHANGED for a stale position and JOURNAL_TRUNCATED when
 * the journal no longer covers the requested range.
 */
apache::thrift::ResponseAndServerStream<ChangesSinceResult, ChangedFileResult>
EdenServiceHandler::streamChangesSince(
    std::unique_ptr<StreamChangesSinceParams> params) {
  auto helper = INSTRUMENT_THRIFT_CALL_WITH_STAT(
      DBG3, &ThriftStats::streamChangesSince, *params->mountPoint_ref());
  auto mountHandle = lookupMount(params->mountPoint());
  const auto& fromPosition = *params->fromPosition_ref();
  auto& fetchContext = helper->getFetchContext();
  // Streaming in Thrift can be done via a Stream Generator, or via a Stream
  // Publisher. We're using the latter here as the former can only be used with
  // coroutines which EdenFS hasn't been converted to. Generators also have the
  // property to be driven by the client, internally, Thrift will wait for the
  // client to have consumed an element before requesting more from the server.
  // Publishers on the other hand are driven by the server and are publishing
  // as fast as possible.
  //
  // What this means is that in the case where EdenFS can publish elements
  // faster than the client can read them, EdenFS's memory usage can grow
  // potentially unbounded.
  checkMountGeneration(
      fromPosition, mountHandle.getEdenMount(), "fromPosition"sv);
  // The +1 is because the core merge stops at the item prior to
  // its limitSequence parameter and we want the changes *since*
  // the provided sequence number.
  auto summed = mountHandle.getJournal().accumulateRange(
      *fromPosition.sequenceNumber_ref() + 1);
  ChangesSinceResult result;
  if (!summed) {
    // No changes, just return the fromPosition and an empty stream.
    result.toPosition_ref() = fromPosition;
    return {
        std::move(result),
        apache::thrift::ServerStream<ChangedFileResult>::createEmpty()};
  }
  if (summed->isTruncated) {
    throw newEdenError(
        EDOM,
        EdenErrorType::JOURNAL_TRUNCATED,
        "Journal entry range has been truncated.");
  }
  // Client disconnect requests cancellation of any in-flight diffs.
  auto cancellationSource = std::make_shared<folly::CancellationSource>();
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<ChangedFileResult>::createPublisher(
          [cancellationSource] { cancellationSource->requestCancellation(); });
  // The publisher is shared between this thread and the background diff
  // callbacks, hence the Synchronized wrapper.
  auto sharedPublisherLock = std::make_shared<
      folly::Synchronized<ThriftStreamPublisherOwner<ChangedFileResult>>>(
      ThriftStreamPublisherOwner{std::move(publisher)});
  RootIdCodec& rootIdCodec = mountHandle.getObjectStore();
  // The response's toPosition reflects the end of the accumulated range.
  JournalPosition toPosition;
  toPosition.mountGeneration_ref() =
      mountHandle.getEdenMount().getMountGeneration();
  toPosition.sequenceNumber_ref() = summed->toSequence;
  toPosition.snapshotHash_ref() =
      rootIdCodec.renderRootId(summed->snapshotTransitions.back());
  result.toPosition_ref() = toPosition;
  // Publish overlay/unclean changes synchronously (no filter here).
  sumUncommitedChanges(*summed, *sharedPublisherLock, std::nullopt);
  if (summed->snapshotTransitions.size() > 1) {
    auto callback =
        std::make_shared<StreamingDiffCallback>(sharedPublisherLock);
    std::vector<ImmediateFuture<folly::Unit>> futures;
    // Diff each adjacent pair of snapshot roots in the transition list.
    for (auto rootIt = summed->snapshotTransitions.begin();
         std::next(rootIt) != summed->snapshotTransitions.end();
         ++rootIt) {
      const auto& from = *rootIt;
      const auto& to = *(rootIt + 1);
      // We want to make sure the diff is performed on a background thread so
      // the Thrift client can interrupt us whenever desired. To do this, let's
      // start from an not ready ImmediateFuture.
      futures.push_back(makeNotReadyImmediateFuture().thenValue(
          [from,
           to,
           mountHandle,
           token = cancellationSource->getToken(),
           fetchContext = fetchContext.copy(),
           callback = callback.get()](auto&&) {
            return diffBetweenRoots(
                from,
                to,
                *mountHandle.getEdenMount().getCheckoutConfig(),
                mountHandle.getObjectStorePtr(),
                token,
                fetchContext,
                callback);
          }));
    }
    folly::futures::detachOn(
        server_->getServerState()->getThreadPool().get(),
        collectAllSafe(std::move(futures))
            // Make sure that the edenMount, callback, helper and
            // cancellationSource lives for the duration of the stream by
            // copying them.
            .thenTry(
                [mountHandle,
                 sharedPublisherLock,
                 callback = std::move(callback),
                 helper = std::move(helper),
                 cancellationSource](
                    folly::Try<std::vector<folly::Unit>>&& result) mutable {
                  // On failure, consume the publisher and terminate the
                  // stream with the error.
                  if (result.hasException()) {
                    auto sharedPublisher =
                        std::move(*sharedPublisherLock->wlock());
                    std::move(sharedPublisher)
                        .next(newEdenError(std::move(result).exception()));
                  }
                })
            .semi());
  }
  return {std::move(result), std::move(serverStream)};
}
/**
 * Like streamChangesSince, but restricted to paths selected by the request's
 * globs: a GlobFilter is applied to uncommitted changes directly, and
 * commit-to-commit diffs are run through a temporary FilteredBackingStore /
 * ObjectStore pair so filtered-out paths never reach the stream.
 */
apache::thrift::ResponseAndServerStream<ChangesSinceResult, ChangedFileResult>
EdenServiceHandler::streamSelectedChangesSince(
    std::unique_ptr<StreamSelectedChangesSinceParams> params) {
  auto helper = INSTRUMENT_THRIFT_CALL_WITH_STAT(
      DBG3,
      &ThriftStats::streamSelectedChangesSince,
      *params->changesParams_ref()->mountPoint_ref());
  auto mountHandle = lookupMount(params->changesParams()->get_mountPoint());
  const auto& fromPosition = *params->changesParams()->fromPosition_ref();
  auto& fetchContext = helper->getFetchContext();
  checkMountGeneration(
      fromPosition, mountHandle.getEdenMount(), "fromPosition"sv);
  // +1: accumulateRange stops before its limit, and we want changes *since*
  // the given sequence number (same convention as streamChangesSince).
  auto summed = mountHandle.getJournal().accumulateRange(
      *fromPosition.sequenceNumber_ref() + 1);
  ChangesSinceResult result;
  if (!summed) {
    // No changes, just return the fromPosition and an empty stream.
    result.toPosition_ref() = fromPosition;
    return {
        std::move(result),
        apache::thrift::ServerStream<ChangedFileResult>::createEmpty()};
  }
  if (summed->isTruncated) {
    throw newEdenError(
        EDOM,
        EdenErrorType::JOURNAL_TRUNCATED,
        "Journal entry range has been truncated.");
  }
  // Client disconnect requests cancellation of in-flight diffs.
  auto cancellationSource = std::make_shared<folly::CancellationSource>();
  auto [serverStream, publisher] =
      apache::thrift::ServerStream<ChangedFileResult>::createPublisher(
          [cancellationSource] { cancellationSource->requestCancellation(); });
  auto sharedPublisherLock = std::make_shared<
      folly::Synchronized<ThriftStreamPublisherOwner<ChangedFileResult>>>(
      ThriftStreamPublisherOwner{std::move(publisher)});
  RootIdCodec& rootIdCodec = mountHandle.getObjectStore();
  JournalPosition toPosition;
  toPosition.mountGeneration_ref() =
      mountHandle.getEdenMount().getMountGeneration();
  toPosition.sequenceNumber_ref() = summed->toSequence;
  toPosition.snapshotHash_ref() =
      rootIdCodec.renderRootId(summed->snapshotTransitions.back());
  result.toPosition_ref() = toPosition;
  auto caseSensitivity =
      mountHandle.getEdenMount().getCheckoutConfig()->getCaseSensitive();
  // The filter is applied inline to uncommitted changes here, then moved
  // into the FilteredBackingStore below for the commit diffs.
  auto filter =
      std::make_unique<GlobFilter>(params->get_globs(), caseSensitivity);
  sumUncommitedChanges(
      *summed, *sharedPublisherLock, std::reference_wrapper(*filter));
  if (summed->snapshotTransitions.size() > 1) {
    // create filtered backing store
    std::shared_ptr<FilteredBackingStore> backingStore =
        std::make_shared<FilteredBackingStore>(
            mountHandle.getEdenMountPtr()->getObjectStore()->getBackingStore(),
            std::move(filter));
    // pass filtered backing store to object store
    auto objectStore = ObjectStore::create(
        backingStore,
        server_->getLocalStore(),
        server_->getTreeCache(),
        server_->getServerState()->getStats().copy(),
        server_->getServerState()->getProcessInfoCache(),
        server_->getServerState()->getStructuredLogger(),
        server_->getServerState()->getEdenConfig(),
        mountHandle.getEdenMount()
            .getCheckoutConfig()
            ->getEnableWindowsSymlinks(),
        caseSensitivity);
    auto callback =
        std::make_shared<StreamingDiffCallback>(sharedPublisherLock);
    std::vector<ImmediateFuture<folly::Unit>> futures;
    // now iterate all commits
    for (auto rootIt = summed->snapshotTransitions.begin();
         std::next(rootIt) != summed->snapshotTransitions.end();
         ++rootIt) {
      // Wrap each plain root id in a filtered root id so the filtered
      // object store can resolve it.
      const auto from =
          backingStore->createFilteredRootId(rootIt->value(), rootIt->value());
      const auto& toRootId = *(rootIt + 1);
      const auto to = backingStore->createFilteredRootId(
          toRootId.value(), toRootId.value());
      // Start from a not-ready future so the diff runs on a background
      // thread and remains cancellable.
      futures.push_back(makeNotReadyImmediateFuture().thenValue(
          [from,
           to,
           mountHandle,
           objectStore,
           token = cancellationSource->getToken(),
           fetchContext = fetchContext.copy(),
           callback = callback.get()](auto&&) {
            return diffBetweenRoots(
                RootId{from},
                RootId{to},
                *mountHandle.getEdenMount().getCheckoutConfig(),
                objectStore,
                token,
                fetchContext,
                callback);
          }));
    }
    folly::futures::detachOn(
        server_->getServerState()->getThreadPool().get(),
        collectAllSafe(std::move(futures))
            // Keep mountHandle, callback, helper, and cancellationSource
            // alive for the duration of the stream by capturing them.
            .thenTry(
                [mountHandle,
                 sharedPublisherLock,
                 callback = std::move(callback),
                 helper = std::move(helper),
                 cancellationSource](
                    folly::Try<std::vector<folly::Unit>>&& result) mutable {
                  // On failure, consume the publisher and terminate the
                  // stream with the error.
                  if (result.hasException()) {
                    auto sharedPublisher =
                        std::move(*sharedPublisherLock->wlock());
                    std::move(sharedPublisher)
                        .next(newEdenError(std::move(result).exception()));
                  }
                })
            .semi());
  }
  return {std::move(result), std::move(serverStream)};
}
/**
 * Fills `out` with the journal changes since `fromPosition`: created,
 * changed, and unclean paths plus the snapshot transitions covering the
 * range. Throws MOUNT_GENERATION_CHANGED for a stale position and
 * JOURNAL_TRUNCATED when the journal no longer covers the range.
 */
void EdenServiceHandler::getFilesChangedSince(
    FileDelta& out,
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<JournalPosition> fromPosition) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  checkMountGeneration(
      *fromPosition, mountHandle.getEdenMount(), "fromPosition"sv);
  // The +1 is because the core merge stops at the item prior to
  // its limitSequence parameter and we want the changes *since*
  // the provided sequence number.
  auto summed = mountHandle.getJournal().accumulateRange(
      *fromPosition->sequenceNumber_ref() + 1);
  // We set the default toPosition to be where we were if summed is null
  out.toPosition_ref()->sequenceNumber_ref() =
      *fromPosition->sequenceNumber_ref();
  out.toPosition_ref()->snapshotHash_ref() = *fromPosition->snapshotHash_ref();
  out.toPosition_ref()->mountGeneration_ref() =
      mountHandle.getEdenMount().getMountGeneration();
  out.fromPosition_ref() = *out.toPosition_ref();
  if (summed) {
    if (summed->isTruncated) {
      throw newEdenError(
          EDOM,
          EdenErrorType::JOURNAL_TRUNCATED,
          "Journal entry range has been truncated.");
    }
    RootIdCodec& rootIdCodec = mountHandle.getObjectStore();
    // Overwrite the defaults with the actual accumulated range boundaries.
    out.toPosition_ref()->sequenceNumber_ref() = summed->toSequence;
    out.toPosition_ref()->snapshotHash_ref() =
        rootIdCodec.renderRootId(summed->snapshotTransitions.back());
    out.toPosition_ref()->mountGeneration_ref() =
        mountHandle.getEdenMount().getMountGeneration();
    out.fromPosition_ref()->sequenceNumber_ref() = summed->fromSequence;
    out.fromPosition_ref()->snapshotHash_ref() =
        rootIdCodec.renderRootId(summed->snapshotTransitions.front());
    out.fromPosition_ref()->mountGeneration_ref() =
        *out.toPosition_ref()->mountGeneration_ref();
    // Classify overlay changes as created vs changed.
    for (const auto& entry : summed->changedFilesInOverlay) {
      auto& path = entry.first;
      auto& changeInfo = entry.second;
      if (changeInfo.isNew()) {
        out.createdPaths_ref()->emplace_back(path.asString());
      } else {
        out.changedPaths_ref()->emplace_back(path.asString());
      }
    }
    for (auto& path : summed->uncleanPaths) {
      out.uncleanPaths_ref()->emplace_back(path.asString());
    }
    out.snapshotTransitions_ref()->reserve(summed->snapshotTransitions.size());
    for (auto& hash : summed->snapshotTransitions) {
      out.snapshotTransitions_ref()->push_back(rootIdCodec.renderRootId(hash));
    }
  }
}
/**
 * Sets the journal's in-memory size limit for the given mount. A negative
 * limit is rejected with ARGUMENT_ERROR.
 */
void EdenServiceHandler::setJournalMemoryLimit(
    std::unique_ptr<PathString> mountPoint,
    int64_t limit) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, *mountPoint);
  // Resolve the mount first so an unknown mount reports its own error,
  // matching the original ordering.
  auto mountHandle = lookupMount(mountPoint);
  if (limit >= 0) {
    mountHandle.getJournal().setMemoryLimit(static_cast<size_t>(limit));
    return;
  }
  throw newEdenError(
      EINVAL,
      EdenErrorType::ARGUMENT_ERROR,
      "memory limit must be non-negative");
}
/**
 * Returns the journal's current in-memory size limit for the given mount.
 */
int64_t EdenServiceHandler::getJournalMemoryLimit(
    std::unique_ptr<PathString> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  // The journal reports size_t; the Thrift API exposes int64_t.
  const auto memoryLimit = mountHandle.getJournal().getMemoryLimit();
  return static_cast<int64_t>(memoryLimit);
}
/**
 * Flushes the journal of the given mount.
 */
void EdenServiceHandler::flushJournal(std::unique_ptr<PathString> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, *mountPoint);
  auto mountHandle = lookupMount(mountPoint);
  auto& journal = mountHandle.getJournal();
  journal.flush();
}
/**
 * Fills `out` with raw journal deltas starting at the requested sequence
 * number, optionally capped by the request's limit.
 */
void EdenServiceHandler::debugGetRawJournal(
    DebugGetRawJournalResponse& out,
    std::unique_ptr<DebugGetRawJournalParams> params) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, *params->mountPoint());
  auto mountHandle = lookupMount(params->mountPoint());
  // The journal API takes a signed generation; the mount reports unsigned.
  auto generation =
      static_cast<ssize_t>(mountHandle.getEdenMount().getMountGeneration());
  // An absent limit means "no limit".
  std::optional<size_t> entryLimit;
  if (auto requestedLimit = params->limit_ref()) {
    entryLimit = static_cast<size_t>(*requestedLimit);
  }
  out.allDeltas_ref() = mountHandle.getJournal().getDebugRawJournalInfo(
      *params->fromSequenceNumber_ref(),
      entryLimit,
      generation,
      mountHandle.getObjectStore());
}
/**
 * Returns the dtype of each requested path, in request order. Each result is
 * independently either an EntryInformation or an error, so one bad path does
 * not fail the whole call. Waits for pending writes per `sync` first.
 */
folly::SemiFuture<std::unique_ptr<std::vector<EntryInformationOrError>>>
EdenServiceHandler::semifuture_getEntryInformation(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::vector<std::string>> paths,
    std::unique_ptr<SyncBehavior> sync) {
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3, *mountPoint, getSyncTimeout(*sync), toLogArg(*paths));
  auto mountHandle = lookupMount(mountPoint);
  auto& fetchContext = helper->getFetchContext();
  return wrapImmediateFuture(
             std::move(helper),
             waitForPendingWrites(mountHandle.getEdenMount(), *sync)
                 .thenValue([mountHandle,
                             paths = std::move(paths),
                             fetchContext = fetchContext.copy()](auto&&) {
                   bool windowsSymlinksEnabled =
                       mountHandle.getEdenMount()
                           .getCheckoutConfig()
                           ->getEnableWindowsSymlinks();
                   // Resolve each path to a VirtualInode and report its
                   // (symlink-filtered) dtype.
                   return applyToVirtualInode(
                       mountHandle.getRootInode(),
                       *paths,
                       [windowsSymlinksEnabled](
                           const VirtualInode& inode, RelativePath) {
                         return filteredEntryDtype(
                             inode.getDtype(), windowsSymlinksEnabled);
                       },
                       mountHandle.getObjectStorePtr(),
                       fetchContext);
                 })
                 .thenValue([](vector<Try<dtype_t>> done) {
                   // Convert each per-path Try into the Thrift union,
                   // preserving per-entry errors.
                   auto out =
                       std::make_unique<vector<EntryInformationOrError>>();
                   out->reserve(done.size());
                   for (auto& item : done) {
                     EntryInformationOrError result;
                     if (item.hasException()) {
                       result.error_ref() = newEdenError(item.exception());
                     } else {
                       EntryInformation info;
                       info.dtype_ref() = static_cast<Dtype>(item.value());
                       result.info_ref() = info;
                     }
                     out->emplace_back(std::move(result));
                   }
                   return out;
                 }))
      .semi();
}
/**
 * Returns stat-style information (size, mtime, mode) for each requested
 * path, in request order. Each result is independently either a
 * FileInformation or an error. Waits for pending writes per `sync` first.
 */
folly::SemiFuture<std::unique_ptr<std::vector<FileInformationOrError>>>
EdenServiceHandler::semifuture_getFileInformation(
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::vector<std::string>> paths,
    std::unique_ptr<SyncBehavior> sync) {
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3, *mountPoint, getSyncTimeout(*sync), toLogArg(*paths));
  auto mountHandle = lookupMount(mountPoint);
  auto& fetchContext = helper->getFetchContext();
  auto lastCheckoutTime =
      mountHandle.getEdenMount().getLastCheckoutTime().toTimespec();
  return wrapImmediateFuture(
             std::move(helper),
             waitForPendingWrites(mountHandle.getEdenMount(), *sync)
                 .thenValue([mountHandle,
                             paths = std::move(paths),
                             lastCheckoutTime,
                             fetchContext = fetchContext.copy()](auto&&) {
                   // stat each resolved inode and translate the result into
                   // the Thrift FileInformation shape.
                   return applyToVirtualInode(
                       mountHandle.getRootInode(),
                       *paths,
                       [mountHandle,
                        lastCheckoutTime,
                        fetchContext = fetchContext.copy()](
                           const VirtualInode& inode, RelativePath) {
                         return inode
                             .stat(
                                 lastCheckoutTime,
                                 mountHandle.getObjectStorePtr(),
                                 fetchContext)
                             .thenValue([](struct stat st) {
                               FileInformation info;
                               info.size_ref() = st.st_size;
                               auto ts = stMtime(st);
                               info.mtime_ref()->seconds_ref() = ts.tv_sec;
                               info.mtime_ref()->nanoSeconds_ref() = ts.tv_nsec;
                               info.mode_ref() = st.st_mode;
                               FileInformationOrError result;
                               result.info_ref() = info;
                               return result;
                             })
                             .semi();
                       },
                       mountHandle.getObjectStorePtr(),
                       fetchContext);
                 })
                 .thenValue([](vector<Try<FileInformationOrError>>&& done) {
                   // Per-path errors are reported in-place rather than
                   // failing the entire request.
                   auto out =
                       std::make_unique<vector<FileInformationOrError>>();
                   out->reserve(done.size());
                   for (auto& item : done) {
                     if (item.hasException()) {
                       FileInformationOrError result;
                       result.error_ref() = newEdenError(item.exception());
                       out->emplace_back(std::move(result));
                     } else {
                       out->emplace_back(item.value());
                     }
                   }
                   return out;
                 }))
      // Keep the mount handle alive until the future chain completes.
      .ensure([mountHandle] {})
      .semi();
}
namespace {
// Maps an in-memory TreeEntryType to its Thrift SourceControlType
// equivalent. A missing (nullopt) type maps to UNKNOWN; any enum value
// outside the known set is rejected with EINVAL.
SourceControlType entryTypeToThriftType(std::optional<TreeEntryType> type) {
  if (!type.has_value()) {
    return SourceControlType::UNKNOWN;
  }
  const auto entryType = *type;
  if (entryType == TreeEntryType::TREE) {
    return SourceControlType::TREE;
  }
  if (entryType == TreeEntryType::REGULAR_FILE) {
    return SourceControlType::REGULAR_FILE;
  }
  if (entryType == TreeEntryType::EXECUTABLE_FILE) {
    return SourceControlType::EXECUTABLE_FILE;
  }
  if (entryType == TreeEntryType::SYMLINK) {
    return SourceControlType::SYMLINK;
  }
  // Unrecognized enum value: reject rather than guess.
  throw std::system_error(EINVAL, std::generic_category());
}
// Fetches the requested attributes for every child of the directory named
// by `path`.
//
// Fails the whole future with an ARGUMENT_ERROR if `path` does not resolve
// to a directory. Per-child attribute failures are reported via the
// folly::Try in each returned pair rather than failing the future.
ImmediateFuture<
    std::vector<std::pair<PathComponent, folly::Try<EntryAttributes>>>>
getAllEntryAttributes(
    EntryAttributeFlags requestedAttributes,
    const EdenMount& edenMount,
    std::string path,
    const ObjectFetchContextPtr& fetchContext,
    bool shouldFetchTreeMetadata) {
  auto virtualInode =
      edenMount.getVirtualInode(RelativePathPiece{path}, fetchContext);
  return std::move(virtualInode)
      .thenValue([path = std::move(path),
                  requestedAttributes,
                  objectStore = edenMount.getObjectStore(),
                  fetchContext = fetchContext.copy(),
                  shouldFetchTreeMetadata](VirtualInode tree) mutable {
        if (!tree.isDirectory()) {
          return ImmediateFuture<std::vector<
              std::pair<PathComponent, folly::Try<EntryAttributes>>>>(
              newEdenError(
                  EINVAL,
                  EdenErrorType::ARGUMENT_ERROR,
                  fmt::format("{}: path must be a directory", path)));
        }
        return tree.getChildrenAttributes(
            requestedAttributes,
            RelativePath{path},
            objectStore,
            fetchContext,
            shouldFetchTreeMetadata);
      });
}
// Copies an attribute failure (if any) into `result`.
//
// Returns true when `result` now holds an error — either because the
// attribute was never produced (`rawResult` is empty) or because its
// computation failed (the Try holds an exception). Returns false when the
// caller should proceed to serialize the successful value.
template <typename SerializedT, typename T>
bool fillErrorRef(
    SerializedT& result,
    std::optional<folly::Try<T>> rawResult,
    folly::StringPiece path,
    folly::StringPiece attributeName) {
  if (rawResult.has_value()) {
    if (!rawResult->hasException()) {
      // Value is present and valid; nothing to record.
      return false;
    }
    result.error_ref() = newEdenError(rawResult->exception());
    return true;
  }
  // The attribute was requested but never produced at all.
  result.error_ref() = newEdenError(
      EdenErrorType::GENERIC_ERROR,
      fmt::format(
          "{}: {} requested, but no {} available",
          path,
          attributeName,
          attributeName));
  return true;
}
// Serializes one entry's computed attributes into the V2 Thrift shape.
//
// If the whole attribute computation failed, the error is returned at the
// top level. Otherwise each requested attribute is serialized
// independently: fillErrorRef() records a per-attribute error (missing or
// failed), and only on success is the actual value filled in.
FileAttributeDataOrErrorV2 serializeEntryAttributes(
    ObjectStore& objectStore,
    folly::StringPiece entryPath,
    const folly::Try<EntryAttributes>& attributes,
    EntryAttributeFlags requestedAttributes) {
  FileAttributeDataOrErrorV2 fileResult;
  if (attributes.hasException()) {
    fileResult.error_ref() = newEdenError(attributes.exception());
    return fileResult;
  }
  FileAttributeDataV2 fileData;
  if (requestedAttributes.contains(ENTRY_ATTRIBUTE_SHA1)) {
    Sha1OrError sha1;
    if (!fillErrorRef(sha1, attributes->sha1, entryPath, "sha1")) {
      sha1.sha1_ref() = thriftHash20(attributes->sha1.value().value());
    }
    fileData.sha1() = std::move(sha1);
  }
  if (requestedAttributes.contains(ENTRY_ATTRIBUTE_BLAKE3)) {
    Blake3OrError blake3;
    if (!fillErrorRef(blake3, attributes->blake3, entryPath, "blake3")) {
      blake3.blake3_ref() = thriftHash32(attributes->blake3.value().value());
    }
    fileData.blake3() = std::move(blake3);
  }
  if (requestedAttributes.contains(ENTRY_ATTRIBUTE_SIZE)) {
    SizeOrError size;
    if (!fillErrorRef(size, attributes->size, entryPath, "size")) {
      size.size_ref() = attributes->size.value().value();
    }
    fileData.size() = std::move(size);
  }
  if (requestedAttributes.contains(ENTRY_ATTRIBUTE_SOURCE_CONTROL_TYPE)) {
    SourceControlTypeOrError type;
    if (!fillErrorRef(type, attributes->type, entryPath, "type")) {
      type.sourceControlType_ref() =
          entryTypeToThriftType(attributes->type.value().value());
    }
    fileData.sourceControlType() = std::move(type);
  }
  if (requestedAttributes.contains(ENTRY_ATTRIBUTE_OBJECT_ID)) {
    ObjectIdOrError objectId;
    if (!fillErrorRef(objectId, attributes->objectId, entryPath, "objectid")) {
      // An absent ObjectId is not an error here: the objectId field is
      // simply left unset (e.g. for entries with no backing object).
      const std::optional<ObjectId>& oid = attributes->objectId.value().value();
      if (oid) {
        objectId.objectId_ref() = objectStore.renderObjectId(*oid);
      }
    }
    fileData.objectId() = std::move(objectId);
  }
  fileResult.fileAttributeData_ref() = fileData;
  return fileResult;
}
// Serializes the attribute results for an entire directory listing,
// keyed by child name. A failure computing the listing itself becomes a
// top-level error; per-child failures are serialized per entry.
DirListAttributeDataOrError serializeEntryAttributes(
    ObjectStore& objectStore,
    const folly::Try<
        std::vector<std::pair<PathComponent, folly::Try<EntryAttributes>>>>&
        entries,
    EntryAttributeFlags requestedAttributes) {
  DirListAttributeDataOrError serialized;
  if (entries.hasException()) {
    serialized.error_ref() = newEdenError(*entries.exception().get_exception());
    return serialized;
  }
  std::map<std::string, FileAttributeDataOrErrorV2> entryMap;
  for (const auto& [name, attrs] : entries.value()) {
    entryMap.emplace(
        name.asString(),
        serializeEntryAttributes(
            objectStore, name.piece().view(), attrs, requestedAttributes));
  }
  serialized.dirListAttributeData_ref() = std::move(entryMap);
  return serialized;
}
} // namespace
// Thrift handler: list each requested directory and return the requested
// attributes for every child, one DirListAttributeDataOrError per input
// path (same order as the request).
folly::SemiFuture<std::unique_ptr<ReaddirResult>>
EdenServiceHandler::semifuture_readdir(std::unique_ptr<ReaddirParams> params) {
  auto mountHandle = lookupMount(params->mountPoint());
  auto paths = *params->directoryPaths();
  // Get requested attributes for each path
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3,
      *params->mountPoint(),
      getSyncTimeout(*params->sync()),
      toLogArg(paths));
  auto& fetchContext = helper->getFetchContext();
  auto requestedAttributes =
      EntryAttributeFlags::raw(*params->requestedAttributes());
  auto shouldFetchTreeMetadata = server_->getServerState()
                                     ->getEdenConfig()
                                     ->shouldFetchTreeMetadata.getValue();
  return wrapImmediateFuture(
             std::move(helper),
             waitForPendingWrites(mountHandle.getEdenMount(), *params->sync())
                 .thenValue(
                     [mountHandle,
                      requestedAttributes,
                      paths = std::move(paths),
                      fetchContext = fetchContext.copy(),
                      shouldFetchTreeMetadata](auto&&) mutable
                     -> ImmediateFuture<
                         std::vector<DirListAttributeDataOrError>> {
                       // One future per requested directory; each one
                       // serializes its own success or failure.
                       std::vector<ImmediateFuture<DirListAttributeDataOrError>>
                           futures;
                       futures.reserve(paths.size());
                       for (auto& path : paths) {
                         futures.emplace_back(
                             getAllEntryAttributes(
                                 requestedAttributes,
                                 mountHandle.getEdenMount(),
                                 std::move(path),
                                 fetchContext,
                                 shouldFetchTreeMetadata)
                                 .thenTry([requestedAttributes, mountHandle](
                                              folly::Try<std::vector<std::pair<
                                                  PathComponent,
                                                  folly::Try<EntryAttributes>>>>
                                                  entries) {
                                   return serializeEntryAttributes(
                                       mountHandle.getObjectStore(),
                                       entries,
                                       requestedAttributes);
                                 })
                         );
                       }
                       // Collect all futures into a single tuple
                       return facebook::eden::collectAllSafe(
                           std::move(futures));
                     })
                 .thenValue(
                     [](std::vector<DirListAttributeDataOrError>&& allRes)
                         -> std::unique_ptr<ReaddirResult> {
                       auto res = std::make_unique<ReaddirResult>();
                       res->dirLists() = std::move(allRes);
                       return res;
                     })
                 // Keep the mount alive until the chain completes.
                 .ensure([mountHandle] {}))
      .semi();
}
// Fetches attributes for a batch of paths, one Try per input path (same
// order). Individual path failures are captured in the Trys; the outer
// future only fails if the pending-writes sync fails.
//
// NOTE: `paths` is captured by reference into the continuation — callers
// must keep the vector alive until the returned future completes (the
// Thrift handlers do this via `.ensure([params = std::move(params)] ...)`).
ImmediateFuture<std::vector<folly::Try<EntryAttributes>>>
EdenServiceHandler::getEntryAttributes(
    const EdenMount& edenMount,
    const std::vector<std::string>& paths,
    EntryAttributeFlags reqBitmask,
    AttributesRequestScope reqScope,
    SyncBehavior sync,
    const ObjectFetchContextPtr& fetchContext,
    bool shouldFetchTreeMetadata) {
  return waitForPendingWrites(edenMount, sync)
      .thenValue([this,
                  &edenMount,
                  &paths,
                  fetchContext = fetchContext.copy(),
                  reqBitmask,
                  reqScope,
                  shouldFetchTreeMetadata](auto&&) mutable {
        vector<ImmediateFuture<EntryAttributes>> futures;
        for (const auto& path : paths) {
          futures.emplace_back(getEntryAttributesForPath(
              edenMount,
              reqBitmask,
              reqScope,
              path,
              fetchContext,
              shouldFetchTreeMetadata));
        }
        // Collect all futures into a single tuple
        return facebook::eden::collectAll(std::move(futures));
      });
}
namespace {
bool dtypeMatchesRequestScope(
VirtualInode inode,
AttributesRequestScope reqScope) {
if (reqScope == AttributesRequestScope::TREES_AND_FILES) {
return true;
}
if (inode.isDirectory()) {
return reqScope == AttributesRequestScope::TREES;
} else {
return reqScope == AttributesRequestScope::FILES;
}
}
} // namespace
// Fetches the requested attributes for a single path.
//
// Returns an error future for an empty path, ENOTDIR/EISDIR when the
// resolved inode's kind is outside the requested scope, and
// ARGUMENT_ERROR if the path string fails RelativePathPiece validation
// (that constructor throws, hence the try/catch around the whole body).
ImmediateFuture<EntryAttributes> EdenServiceHandler::getEntryAttributesForPath(
    const EdenMount& edenMount,
    EntryAttributeFlags reqBitmask,
    AttributesRequestScope reqScope,
    std::string_view path,
    const ObjectFetchContextPtr& fetchContext,
    bool shouldFetchTreeMetadata) {
  if (path.empty()) {
    return ImmediateFuture<EntryAttributes>(newEdenError(
        EINVAL,
        EdenErrorType::ARGUMENT_ERROR,
        "path cannot be the empty string"));
  }
  try {
    // RelativePathPiece{} validates the string and may throw.
    RelativePathPiece relativePath{path};
    return edenMount.getVirtualInode(relativePath, fetchContext)
        .thenValue([&edenMount,
                    reqBitmask,
                    reqScope,
                    relativePath = relativePath.copy(),
                    fetchContext = fetchContext.copy(),
                    shouldFetchTreeMetadata](const VirtualInode& virtualInode) {
          if (dtypeMatchesRequestScope(virtualInode, reqScope)) {
            return virtualInode.getEntryAttributes(
                reqBitmask,
                relativePath,
                edenMount.getObjectStore(),
                fetchContext,
                shouldFetchTreeMetadata);
          }
          // Inode kind is outside the requested scope: report the
          // errno a POSIX caller would expect for the mismatch.
          return makeImmediateFuture<EntryAttributes>(PathError(
              reqScope == AttributesRequestScope::TREES ? ENOTDIR : EISDIR,
              relativePath));
        });
  } catch (const std::exception& e) {
    return ImmediateFuture<EntryAttributes>(
        newEdenError(EINVAL, EdenErrorType::ARGUMENT_ERROR, e.what()));
  }
}
// TODO(kmancini): we shouldn't need this for the long term, but needs to be
// updated if attributes are added.
// Every attribute the legacy getAttributesFromFiles endpoint serializes;
// that handler always fetches all of these regardless of what the caller
// requested (see semifuture_getAttributesFromFiles below).
constexpr EntryAttributeFlags kAllEntryAttributes = ENTRY_ATTRIBUTE_SIZE |
    ENTRY_ATTRIBUTE_SHA1 | ENTRY_ATTRIBUTE_SOURCE_CONTROL_TYPE;
// Legacy Thrift handler: fetch attributes for a batch of file paths.
//
// Always fetches ALL attributes (kAllEntryAttributes) and filters the
// response down to the requested bitmask afterwards, because Buck2
// depends on the exact top-level error behavior this produces. The
// `scope` parameter is only supported by the V2 endpoint.
folly::SemiFuture<std::unique_ptr<GetAttributesFromFilesResult>>
EdenServiceHandler::semifuture_getAttributesFromFiles(
    std::unique_ptr<GetAttributesFromFilesParams> params) {
  if (params->scope_ref().has_value()) {
    return folly::SemiFuture<
        std::unique_ptr<GetAttributesFromFilesResult>>(newEdenError(
        EINVAL,
        EdenErrorType::ARGUMENT_ERROR,
        "scope is not supported by legacy getAttributesFromFiles endpoint"));
  }
  auto mountPoint = *params->mountPoint();
  auto mountPath = absolutePathFromThrift(mountPoint);
  auto mountHandle = server_->getMount(mountPath);
  std::vector<std::string>& paths = params->paths_ref().value();
  auto reqBitmask = EntryAttributeFlags::raw(*params->requestedAttributes());
  // Get requested attributes for each path
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3, mountPoint, getSyncTimeout(*params->sync()), toLogArg(paths));
  auto& fetchContext = helper->getFetchContext();
  // Buck2 relies on getAttributesFromFiles returning certain
  // specific errors. So we need to preserve behavior of all
  // ways fetching all attributes.
  // TODO(kmancini): When Buck2 migrates to our
  // explicit type information, we can get shape up
  // this API better.
  auto config = server_->getServerState()->getEdenConfig();
  auto entryAttributesFuture = getEntryAttributes(
      mountHandle.getEdenMount(),
      paths,
      kAllEntryAttributes,
      AttributesRequestScope::FILES,
      *params->sync(),
      fetchContext,
      config->shouldFetchTreeMetadata.getValue());
  return wrapImmediateFuture(
             std::move(helper),
             std::move(entryAttributesFuture)
                 .thenValue([&paths, reqBitmask](
                                std::vector<folly::Try<EntryAttributes>>&&
                                    allRes) {
                   auto res = std::make_unique<GetAttributesFromFilesResult>();
                   size_t index = 0;
                   for (const auto& tryAttributes : allRes) {
                     FileAttributeDataOrError file_res;
                     // check for exceptions. if found, return EdenError
                     // early
                     if (tryAttributes.hasException()) {
                       file_res.error_ref() =
                           newEdenError(tryAttributes.exception());
                     } else { /* No exceptions, fill in data */
                       FileAttributeData file_data;
                       const auto& attributes = tryAttributes.value();
                       // clients rely on these top level exceptions to
                       // detect symlinks and directories.
                       // TODO(kmancini): When Buck2 migrates to our
                       // explicit type information, we can get shape up
                       // this API better.
                       // NOTE(review): the sha1/size messages below say
                       // "no type available" — this looks like a
                       // copy-paste of the type message, but since Buck2
                       // matches on these exact strings it must not be
                       // changed without coordinating with clients.
                       if (!attributes.sha1.has_value()) {
                         file_res.error_ref() = newEdenError(
                             EdenErrorType::GENERIC_ERROR,
                             fmt::format(
                                 "{}: sha1 requested, but no type available",
                                 paths.at(index)));
                       } else if (attributes.sha1.value().hasException()) {
                         file_res.error_ref() =
                             newEdenError(attributes.sha1.value().exception());
                       } else if (!attributes.size.has_value()) {
                         file_res.error_ref() = newEdenError(
                             EdenErrorType::GENERIC_ERROR,
                             fmt::format(
                                 "{}: size requested, but no type available",
                                 paths.at(index)));
                       } else if (attributes.size.value().hasException()) {
                         file_res.error_ref() =
                             newEdenError(attributes.size.value().exception());
                       } else if (!attributes.type.has_value()) {
                         file_res.error_ref() = newEdenError(
                             EdenErrorType::GENERIC_ERROR,
                             fmt::format(
                                 "{}: type requested, but no type available",
                                 paths.at(index)));
                       } else if (attributes.type.value().hasException()) {
                         file_res.error_ref() =
                             newEdenError(attributes.type.value().exception());
                       } else {
                         // Only fill in requested fields
                         if (reqBitmask.contains(ENTRY_ATTRIBUTE_SHA1)) {
                           file_data.sha1_ref() =
                               thriftHash20(attributes.sha1.value().value());
                         }
                         if (reqBitmask.contains(ENTRY_ATTRIBUTE_SIZE)) {
                           file_data.fileSize_ref() =
                               attributes.size.value().value();
                         }
                         if (reqBitmask.contains(
                                 ENTRY_ATTRIBUTE_SOURCE_CONTROL_TYPE)) {
                           file_data.type_ref() = entryTypeToThriftType(
                               attributes.type.value().value());
                         }
                         file_res.data_ref() = file_data;
                       }
                     }
                     res->res_ref()->emplace_back(file_res);
                     ++index;
                   }
                   return res;
                 }))
      .ensure([params = std::move(params), mountHandle]() {
        // keeps the params memory around for the duration of the thrift call,
        // so that we can safely use the paths by reference to avoid making
        // copies.
      })
      .semi();
}
// V2 Thrift handler: fetch attributes for a batch of paths.
//
// Unlike the legacy endpoint, this honors the caller's scope (defaulting
// to TREES_AND_FILES) and fetches only the requested attribute bitmask;
// per-attribute failures are serialized into the V2 result shape.
folly::SemiFuture<std::unique_ptr<GetAttributesFromFilesResultV2>>
EdenServiceHandler::semifuture_getAttributesFromFilesV2(
    std::unique_ptr<GetAttributesFromFilesParams> params) {
  auto mountHandle = lookupMount(params->mountPoint());
  auto reqScope =
      params->scope().value_or(AttributesRequestScope::TREES_AND_FILES);
  auto reqBitmask = EntryAttributeFlags::raw(*params->requestedAttributes());
  std::vector<std::string>& paths = params->paths().value();
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3,
      *params->mountPoint(),
      getSyncTimeout(*params->sync()),
      toLogArg(paths));
  auto& fetchContext = helper->getFetchContext();
  auto config = server_->getServerState()->getEdenConfig();
  auto entryAttributesFuture = getEntryAttributes(
      mountHandle.getEdenMount(),
      paths,
      reqBitmask,
      reqScope,
      *params->sync(),
      fetchContext,
      config->shouldFetchTreeMetadata.getValue());
  return wrapImmediateFuture(
             std::move(helper),
             std::move(entryAttributesFuture)
                 .thenValue(
                     [reqBitmask, mountHandle, &paths](
                         std::vector<folly::Try<EntryAttributes>>&& allRes) {
                       auto res =
                           std::make_unique<GetAttributesFromFilesResultV2>();
                       size_t index = 0;
                       // Results arrive in request order; pair each one
                       // with its input path for error messages.
                       for (const auto& tryAttributes : allRes) {
                         res->res_ref()->emplace_back(serializeEntryAttributes(
                             mountHandle.getObjectStore(),
                             basename(paths.at(index)),
                             tryAttributes,
                             reqBitmask));
                         ++index;
                       }
                       return res;
                     }))
      .ensure([mountHandle, params = std::move(params)]() {
        // keeps the params memory around for the duration of the thrift call,
        // so that we can safely use the paths by reference to avoid making
        // copies.
      })
      .semi();
}
// Thrift handler: bind source-control objects to paths in the mount.
// Not implemented on Windows.
//
// Accepts both the legacy single-object fields (path/objectId/type) and
// the batched `objects` list; legacy input is prepended to the batch.
folly::SemiFuture<std::unique_ptr<SetPathObjectIdResult>>
EdenServiceHandler::semifuture_setPathObjectId(
    std::unique_ptr<SetPathObjectIdParams> params) {
#ifndef _WIN32
  auto mountHandle = lookupMount(params->mountPoint());
  std::vector<SetPathObjectIdObjectAndPath> objects;
  std::vector<std::string> object_strings;
  // +1 leaves room for the possible legacy single-object entry.
  auto objectSize =
      params->objects().is_set() ? params->objects()->size() + 1 : 1;
  objects.reserve(objectSize);
  object_strings.reserve(objectSize);
  // TODO deprecate non-batch fields once all clients moves to the batch fields.
  // Rust clients might set to default and is_set() would return false negative
  if (params->objectId().is_set() && !params->objectId()->empty()) {
    SetPathObjectIdObjectAndPath objectAndPath;
    objectAndPath.path = RelativePath{*params->path()};
    objectAndPath.id =
        mountHandle.getObjectStore().parseObjectId(*params->objectId());
    objectAndPath.type = *params->type();
    object_strings.emplace_back(objectAndPath.toString());
    objects.emplace_back(std::move(objectAndPath));
  }
  // NOTE(review): this dereferences params->objects() without an is_set()
  // check — presumably deref of an unset optional field yields the (empty)
  // default here; confirm against the generated Thrift field_ref API.
  for (auto& object : *params->objects()) {
    SetPathObjectIdObjectAndPath objectAndPath;
    objectAndPath.path = RelativePath{*object.path()};
    objectAndPath.id =
        mountHandle.getObjectStore().parseObjectId(object.objectId().value());
    objectAndPath.type = *object.type();
    object_strings.emplace_back(objectAndPath.toString());
    objects.emplace_back(std::move(objectAndPath));
  }
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG1, *params->mountPoint(), toLogArg(object_strings));
  if (auto requestInfo = params->requestInfo_ref()) {
    helper->getThriftFetchContext().updateRequestInfo(std::move(*requestInfo));
  }
  ObjectFetchContextPtr context = helper->getFetchContext().copy();
  return wrapImmediateFuture(
             std::move(helper),
             mountHandle.getEdenMount()
                 .setPathsToObjectIds(
                     std::move(objects), (*params->mode()), context)
                 .thenValue([](auto&& resultAndTimes) {
                   return std::make_unique<SetPathObjectIdResult>(
                       std::move(resultAndTimes.result));
                 }))
      .ensure([mountHandle] {})
      .semi();
#else
  (void)params;
  NOT_IMPLEMENTED();
#endif
}
// Thrift handler: recursively remove the tree rooted at the given path.
//
// Resolves the target inode, then asks its parent to remove it by
// basename (with kernel cache invalidation).
folly::SemiFuture<folly::Unit> EdenServiceHandler::semifuture_removeRecursively(
    std::unique_ptr<RemoveRecursivelyParams> params) {
  auto mountPoint = *params->mountPoint();
  auto repoPath = *params->path();
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, mountPoint, repoPath);
  auto mountHandle = lookupMount(mountPoint);
  auto relativePath = RelativePath{repoPath};
  auto& fetchContext = helper->getFetchContext();
  return wrapImmediateFuture(
             std::move(helper),
             waitForPendingWrites(mountHandle.getEdenMount(), *params->sync())
                 .thenValue([mountHandle,
                             relativePath,
                             fetchContext = fetchContext.copy()](folly::Unit) {
                   return mountHandle.getEdenMount().getInodeSlow(
                       relativePath, fetchContext);
                 })
                 .thenValue([relativePath, fetchContext = fetchContext.copy()](
                                InodePtr inode) {
                   // NOTE: getParentRacy() — the parent pointer can change
                   // under concurrent renames; the "Racy" name is the
                   // code's own acknowledgment of that window.
                   return inode->getParentRacy()->removeRecursively(
                       relativePath.basename(),
                       InvalidationRequired::Yes,
                       fetchContext);
                 }))
      .ensure([mountHandle] {})
      .semi();
}
namespace {
// For a foreground request, hand the future back unchanged. For a
// background request, detach the pending work onto the server's thread
// pool and immediately return a default-constructed result so the client
// does not wait for completion.
template <typename ReturnType>
ImmediateFuture<std::unique_ptr<ReturnType>> detachIfBackgrounded(
    ImmediateFuture<std::unique_ptr<ReturnType>> future,
    const std::shared_ptr<ServerState>& serverState,
    bool background) {
  if (background) {
    // Fire and forget: the detached work runs to completion on the pool.
    folly::futures::detachOn(
        serverState->getThreadPool().get(), std::move(future).semi());
    return ImmediateFuture<std::unique_ptr<ReturnType>>(
        std::make_unique<ReturnType>());
  }
  return future;
}
// Like detachIfBackgrounded(), but runs the remaining work on a serial
// executor layered over the Thrift thread manager, to keep wide fan-out
// work (e.g. globs) from monopolizing the Thrift CPU pool.
template <typename ReturnType>
folly::SemiFuture<std::unique_ptr<ReturnType>> serialDetachIfBackgrounded(
    ImmediateFuture<std::unique_ptr<ReturnType>> future,
    EdenServer* const server,
    bool background) {
  // If we're already using serial execution across the board, just do a normal
  // detachIfBackgrounded
  if (server->usingThriftSerialExecution()) {
    return detachIfBackgrounded(
               std::move(future), server->getServerState(), background)
        .semi();
  }
  // Config chooses between the small and regular SerialExecutor variants.
  folly::Executor::KeepAlive<> serial;
  if (server->getServerState()
          ->getEdenConfig()
          ->thriftUseSmallSerialExecutor.getValue()) {
    serial = folly::SmallSerialExecutor::create(
        server->getServer()->getThreadManager().get());
  } else {
    serial = folly::SerialExecutor::create(
        server->getServer()->getThreadManager().get());
  }
  if (background) {
    // Detach onto the serial executor and hand back an empty result now.
    folly::futures::detachOn(serial, std::move(future).semi());
    future = ImmediateFuture<std::unique_ptr<ReturnType>>(
        std::make_unique<ReturnType>());
  }
  // Fast path: an already-completed future needs no executor hop.
  if (future.isReady()) {
    return std::move(future).semi();
  }
  return std::move(future).semi().via(serial);
}
// folly::Unit overload of detachIfBackgrounded(): background requests
// detach onto the server thread pool and complete immediately.
ImmediateFuture<folly::Unit> detachIfBackgrounded(
    ImmediateFuture<folly::Unit> future,
    const std::shared_ptr<ServerState>& serverState,
    bool background) {
  if (background) {
    // Fire and forget on the server's thread pool.
    folly::futures::detachOn(
        serverState->getThreadPool().get(), std::move(future).semi());
    return ImmediateFuture<folly::Unit>(folly::unit);
  }
  return future;
}
// folly::Unit overload of serialDetachIfBackgrounded(): same serial
// executor treatment, completing immediately for background requests.
folly::SemiFuture<folly::Unit> serialDetachIfBackgrounded(
    ImmediateFuture<folly::Unit> future,
    EdenServer* const server,
    bool background) {
  // If we're already using serial execution across the board, just do a normal
  // detachIfBackgrounded
  if (server->usingThriftSerialExecution()) {
    return detachIfBackgrounded(
               std::move(future), server->getServerState(), background)
        .semi();
  }
  // Config chooses between the small and regular SerialExecutor variants.
  folly::Executor::KeepAlive<> serial;
  if (server->getServerState()
          ->getEdenConfig()
          ->thriftUseSmallSerialExecutor.getValue()) {
    serial = folly::SmallSerialExecutor::create(
        server->getServer()->getThreadManager().get());
  } else {
    serial = folly::SerialExecutor::create(
        server->getServer()->getThreadManager().get());
  }
  if (background) {
    // Detach onto the serial executor and complete the caller right away.
    folly::futures::detachOn(serial, std::move(future).semi());
    future = ImmediateFuture<folly::Unit>(folly::unit);
  }
  // Fast path: an already-completed future needs no executor hop.
  if (future.isReady()) {
    return std::move(future).semi();
  }
  return std::move(future).semi().via(serial);
}
// Logs (locally and to the structured logger) when a glob query looks
// expensive: any `**`-prefixed pattern evaluated from the repo root.
// Best-effort only — never fails the request.
//
// Fix: the detection loop now breaks on the first `**` pattern instead of
// scanning the remaining globs after the flag is already set.
void maybeLogExpensiveGlob(
    const std::vector<std::string>& globs,
    const folly::StringPiece searchRoot,
    const ThriftGlobImpl& globber,
    const ObjectFetchContextPtr& context,
    const std::shared_ptr<ServerState>& serverState) {
  bool shouldLogExpensiveGlob = false;
  if (searchRoot.empty()) {
    for (const auto& glob : globs) {
      if (string_view{glob}.starts_with("**")) {
        shouldLogExpensiveGlob = true;
        break; // one recursive pattern is enough to flag the query
      }
    }
  }
  if (shouldLogExpensiveGlob) {
    auto logString = globber.logString(globs);
    std::string client_cmdline = "<unknown>";
    if (auto clientPid = context->getClientPid()) {
      // TODO: we should look up client scope here instead of command line
      // since it will give move context into the overarching process or
      // system producing the expensive query
      const ProcessInfo* processInfoPtr = serverState->getProcessInfoCache()
                                              ->lookup(clientPid.value().get())
                                              .get_optional();
      if (processInfoPtr) {
        client_cmdline = processInfoPtr->name;
        // Process names may contain embedded NULs; make them printable.
        std::replace(client_cmdline.begin(), client_cmdline.end(), '\0', ' ');
      }
    }
    XLOG(WARN) << "EdenFS asked to evaluate expensive glob by caller "
               << client_cmdline << " : " << logString;
    serverState->getStructuredLogger()->logEvent(
        StarGlob{std::move(logString), std::move(client_cmdline)});
  }
}
} // namespace
#ifndef _WIN32
namespace {
// Materializes each of the given repo paths: resolves the inode, then
// forces its contents to be loaded/materialized (optionally following
// symlinks). Returns a future that completes when every path is done.
ImmediateFuture<folly::Unit> ensureMaterializedImpl(
    std::shared_ptr<EdenMount> edenMount,
    const std::vector<std::string>& repoPaths,
    std::unique_ptr<ThriftRequestScope> helper,
    bool followSymlink) {
  std::vector<ImmediateFuture<folly::Unit>> futures;
  futures.reserve(repoPaths.size());
  auto& fetchContext = helper->getFetchContext();
  for (auto& path : repoPaths) {
    futures.emplace_back(
        // makeNotReadyImmediateFuture() defers the chain so no work
        // starts eagerly on this thread while we're still enqueueing.
        makeNotReadyImmediateFuture()
            .thenValue([edenMount = edenMount.get(),
                        path = RelativePath{path},
                        fetchContext = fetchContext.copy()](auto&&) {
              return edenMount->getInodeSlow(path, fetchContext);
            })
            .thenValue([fetchContext = fetchContext.copy(),
                        followSymlink](InodePtr inode) {
              // .ensure([inode]{}) keeps the inode alive until
              // materialization finishes.
              return inode->ensureMaterialized(fetchContext, followSymlink)
                  .ensure([inode]() {});
            }));
  }
  return wrapImmediateFuture(
      std::move(helper), collectAll(std::move(futures)).unit());
}
} // namespace
#endif
// Thrift handler: force the given paths to be materialized on disk.
// Not implemented on Windows.
folly::SemiFuture<folly::Unit>
EdenServiceHandler::semifuture_ensureMaterialized(
    std::unique_ptr<EnsureMaterializedParams> params) {
#ifndef _WIN32
  auto mountPoint = *params->mountPoint();
  auto helper =
      INSTRUMENT_THRIFT_CALL(DBG4, mountPoint, toLogArg(*params->paths()));
  auto mountHandle = lookupMount(mountPoint);
  // "Background" mode is not fully asynchronous: inode loading still
  // happens inline, and only the not-yet-ready materialization work is
  // detached to the background. This is the most efficient arrangement
  // for local execution of a virtualized buck-out: small random reads are
  // materialized up front (avoiding cache churn), while large file reads
  // complete in the background without blocking the start of execution.
  bool background = *params->background();
  auto waitForPendingWritesFuture =
      waitForPendingWrites(mountHandle.getEdenMount(), *params->sync());
  auto ensureMaterializedFuture =
      std::move(waitForPendingWritesFuture)
          .thenValue([params = std::move(params),
                      mountHandle,
                      helper = std::move(helper)](auto&&) mutable {
            return ensureMaterializedImpl(
                mountHandle.getEdenMountPtr(),
                (*params->paths()),
                std::move(helper),
                (*params->followSymlink()));
          })
          .ensure([mountHandle] {})
          .semi();
  if (background) {
    folly::futures::detachOn(
        server_->getServerState()->getThreadPool().get(),
        std::move(ensureMaterializedFuture));
    return folly::unit;
  } else {
    return ensureMaterializedFuture;
  }
#else
  (void)params;
  NOT_IMPLEMENTED();
#endif
}
// Thrift handler: glob against the directories the usage service predicts
// are most relevant for this user/repo/OS, rather than caller-supplied
// patterns. Каждый parameter falls back to a server-derived default when
// the request leaves it unset.
folly::SemiFuture<std::unique_ptr<Glob>>
EdenServiceHandler::semifuture_predictiveGlobFiles(
    std::unique_ptr<GlobParams> params) {
  auto mountHandle = lookupMount(params->mountPoint());
  if (!params->revisions_ref().value().empty()) {
    params->revisions_ref() = resolveRootsWithLastFilter(
        params->revisions_ref().value(), mountHandle);
  }
  ThriftGlobImpl globber{*params};
  auto helper =
      INSTRUMENT_THRIFT_CALL(DBG3, *params->mountPoint(), globber.logString());
  /* set predictive glob fetch parameters */
  // if numResults is not specified, use default predictivePrefetchProfileSize
  auto& serverState = server_->getServerState();
  auto numResults =
      serverState->getEdenConfig()->predictivePrefetchProfileSize.getValue();
  // if user is not specified, get user info from the server state
  auto user = folly::StringPiece{serverState->getUserInfo().getUsername()};
  auto backingStore = mountHandle.getObjectStore().getBackingStore();
  // if repo is not specified, get repository name from the backingstore
  auto repo_optional = backingStore->getRepoName();
  if (repo_optional == std::nullopt) {
    // typeid() does not evaluate expressions
    auto& r = *backingStore.get();
    throw std::runtime_error(folly::to<std::string>(
        "mount must use SaplingBackingStore, type is ", typeid(r).name()));
  }
  auto repo = repo_optional.value();
  auto os = getOperatingSystemName();
  // sandcastleAlias, startTime, and endTime are optional parameters
  std::optional<std::string> sandcastleAlias;
  std::optional<uint64_t> startTime;
  std::optional<uint64_t> endTime;
  // check if this is a sandcastle job (getenv will return nullptr if the env
  // variable is not set)
  auto scAliasEnv = std::getenv("SANDCASTLE_ALIAS");
  sandcastleAlias = scAliasEnv ? std::make_optional(std::string(scAliasEnv))
                               : sandcastleAlias;
  // check specified predictive parameters
  // Any field the caller did set overrides the defaults computed above.
  const auto& predictiveGlob = params->predictiveGlob_ref();
  if (predictiveGlob.has_value()) {
    numResults = predictiveGlob->numTopDirectories_ref().value_or(numResults);
    user = predictiveGlob->user_ref().has_value()
        ? predictiveGlob->user_ref().value()
        : user;
    repo = predictiveGlob->repo_ref().has_value()
        ? predictiveGlob->repo_ref().value()
        : repo;
    os = predictiveGlob->os_ref().has_value() ? predictiveGlob->os_ref().value()
                                              : os;
    startTime = predictiveGlob->startTime_ref().has_value()
        ? predictiveGlob->startTime_ref().value()
        : startTime;
    endTime = predictiveGlob->endTime_ref().has_value()
        ? predictiveGlob->endTime_ref().value()
        : endTime;
  }
  auto& fetchContext = helper->getPrefetchFetchContext();
  bool isBackground = *params->background();
  // Ask the usage service for the top directories, then glob over them.
  auto future =
      ImmediateFuture{
          usageService_->getTopUsedDirs(
              user, repo, numResults, os, startTime, endTime, sandcastleAlias)}
          .thenValue([globber = std::move(globber),
                      mountHandle,
                      serverState,
                      fetchContext = fetchContext.copy()](
                         std::vector<std::string>&& globs) mutable {
            return globber.glob(
                mountHandle.getEdenMountPtr(),
                serverState,
                globs,
                fetchContext);
          })
          .thenTry([mountHandle,
                    params = std::move(params),
                    helper = std::move(helper)](
                       folly::Try<std::unique_ptr<Glob>> tryGlob) {
            if (tryGlob.hasException()) {
              auto& ew = tryGlob.exception();
              XLOG(ERR) << "Error fetching predictive file globs: "
                        << folly::exceptionStr(ew);
            }
            return tryGlob;
          });
  if (server_->getServerState()
          ->getEdenConfig()
          ->runSerialPrefetch.getValue()) {
    // The glob code has a very large fan-out that can easily overload the
    // Thrift CPU worker pool. To combat with that, we limit the execution to a
    // single thread by using `folly::SerialExecutor` so the glob queries will
    // not overload the executor.
    return serialDetachIfBackgrounded<Glob>(
        std::move(future), server_, isBackground);
  } else {
    return detachIfBackgrounded<Glob>(
               std::move(future), serverState, isBackground)
        .semi();
  }
}
folly::SemiFuture<std::unique_ptr<Glob>>
EdenServiceHandler::semifuture_globFiles(std::unique_ptr<GlobParams> params) {
TaskTraceBlock block{"EdenServiceHandler::globFiles"};
auto mountHandle = lookupMount(params->mountPoint());
if (!params->revisions_ref().value().empty()) {
params->revisions_ref() = resolveRootsWithLastFilter(
params->revisions_ref().value(), mountHandle);
}
ThriftGlobImpl globber{*params};
auto helper = INSTRUMENT_THRIFT_CALL(
DBG3,
*params->mountPoint_ref(),
toLogArg(*params->globs_ref()),
globber.logString());
auto& context = helper->getFetchContext();
auto isBackground = *params->background();
ImmediateFuture<folly::Unit> backgroundFuture{std::in_place};
if (isBackground) {
backgroundFuture = makeNotReadyImmediateFuture();
}
maybeLogExpensiveGlob(
*params->globs(),
*params->searchRoot_ref(),
globber,
context,
server_->getServerState());
std::unique_ptr<SuffixGlobRequestScope> suffixGlobRequestScope;
auto edenConfig = server_->getServerState()->getEdenConfig();
ImmediateFuture<unique_ptr<Glob>> globFut{std::in_place};
// Offload suffix queries to EdenAPI
bool useSaplingRemoteAPISuffixes = shouldUseSaplingRemoteAPI(
edenConfig->enableEdenAPISuffixQuery.getValue(), *params);
// Matches **/*.suffix
// Captures the .suffix
static const re2::RE2 suffixRegex("\\*\\*/\\*(\\.[A-z0-9]+)");
std::vector<std::string> suffixGlobs;
std::vector<std::string> nonSuffixGlobs;
// Copying to new vectors, since we want to keep the original around
// in case we need to fall back to the legacy pathway
for (const auto& glob : *params->globs_ref()) {
std::string capture;
if (re2::RE2::FullMatch(glob, suffixRegex, &capture)) {
suffixGlobs.push_back(capture);
} else {
nonSuffixGlobs.push_back(glob);
}
}
bool requestIsOffloadable = !suffixGlobs.empty() && nonSuffixGlobs.empty() &&
(((*params->searchRoot()).empty()) || (*params->searchRoot() == "."));
auto globFilesRequestScope = std::make_shared<GlobFilesRequestScope>(
server_->getServerState(),
requestIsOffloadable,
globber.logString(*params->globs()),
context);
if (requestIsOffloadable) {
XLOG(DBG5)
<< "globFiles request is only suffix globs, can be offloaded to EdenAPI";
auto suffixGlobLogString = globber.logString(suffixGlobs);
suffixGlobRequestScope = std::make_unique<SuffixGlobRequestScope>(
suffixGlobLogString,
server_->getServerState(),
!useSaplingRemoteAPISuffixes,
context);
}
if (useSaplingRemoteAPISuffixes) {
if (requestIsOffloadable) {
XLOG(DBG5) << "globFiles request offloaded to EdenAPI";
// Only use BSSM if there are only suffix queries
globFilesRequestScope->setLocal(false);
// Attempt to resolve all EdenAPI futures. If any of
// them result in an error we will fall back to local lookup
auto combinedFuture =
std::move(backgroundFuture)
.thenValue([revisions = params->revisions_ref().value(),
mountHandle,
suffixGlobs = std::move(suffixGlobs),
serverState = server_->getServerState(),
includeDotfiles = *params->includeDotfiles(),
context = context.copy()](auto&&) mutable {
auto& store = mountHandle.getObjectStore();
const auto& edenMount = mountHandle.getEdenMountPtr();
const auto& rootInode = mountHandle.getRootInode();
if (revisions.empty()) {
return getLocalGlobResults(
edenMount,
serverState,
includeDotfiles,
suffixGlobs,
rootInode,
context);
}
std::vector<ImmediateFuture<BackingStore::GetGlobFilesResult>>
globFilesResultFutures;
for (auto& id : revisions) {
// ID is either a 20b binary hash or a 40b human readable
// text version globFiles takes as input the human readable
// version, so convert using the store's parse method
globFilesResultFutures.push_back(store.getGlobFiles(
store.parseRootId(id), suffixGlobs, context));
}
return collectAllSafe(std::move(globFilesResultFutures));
});
globFut =
std::move(combinedFuture)
.thenValue([mountHandle,
rootInode = mountHandle.getRootInode(),
wantDtype = params->wantDtype_ref().value(),
includeDotfiles =
params->includeDotfiles_ref().value(),
&context](auto&& globResults) mutable {
auto edenMount = mountHandle.getEdenMountPtr();
std::vector<ImmediateFuture<GlobEntry>> globEntryFuts;
for (auto& glob : globResults) {
std::string originHash =
mountHandle.getObjectStore().renderRootId(glob.rootId);
for (auto& entry : glob.globFiles) {
if (!includeDotfiles) {
bool skip_due_to_dotfile = false;
auto rp = RelativePath(std::string_view{entry});
for (auto component : rp.components()) {
// Use facebook::eden string_view
if (string_view{component.view()}.starts_with(".")) {
XLOG(DBG5) << "Skipping dotfile: " << component.view()
<< " in " << entry;
skip_due_to_dotfile = true;
break;
}
}
if (skip_due_to_dotfile) {
continue;
}
}
if (wantDtype) {
ImmediateFuture<GlobEntry> entryFuture{std::in_place};
if (glob.isLocal) {
entryFuture =
rootInode
->getChildRecursive(
RelativePathPiece{entry}, context)
.thenValue([entry, originHash](
InodePtr child) mutable {
return ImmediateFuture<GlobEntry>{GlobEntry{
std::move(entry),
static_cast<OsDtype>(child->getType()),
std::move(originHash)}};
});
} else {
// TODO(T192408118) get the root tree a single time per
// glob instead of per-entry
entryFuture =
edenMount->getObjectStore()
->getRootTree(
std::move(glob.rootId), context.copy())
.thenValue([entry,
edenMount,
context = context.copy()](
auto&& tree) mutable {
auto stringPiece = folly::StringPiece{entry};
return ::facebook::eden::getTreeOrTreeEntry(
std::move(tree.tree),
RelativePath{stringPiece},
edenMount->getObjectStore(),
std::move(context));
})
.thenValue([entry, originHash](
auto&& treeEntry) mutable {
if (TreeEntry* treeEntryPtr =
std::get_if<TreeEntry>(&treeEntry)) {
auto dtype = treeEntryPtr->getDtype();
return ImmediateFuture<GlobEntry>{GlobEntry{
std::move(entry),
static_cast<OsDtype>(dtype),
std::move(originHash)}};
} else {
EDEN_BUG()
<< "Received a Tree when expecting TreeEntry for path "
<< entry;
}
});
}
globEntryFuts.emplace_back(
std::move(entryFuture)
.thenError([entry,
originHash,
isLocal = glob.isLocal](
const folly::exception_wrapper&
ex) mutable {
XLOGF(
ERR,
"Error for getting file dtypes for {} file {}: {}",
isLocal ? "local" : "remote",
entry,
ex.what());
return ImmediateFuture<GlobEntry>{GlobEntry{
std::move(entry),
DT_UNKNOWN,
std::move(originHash)}};
}));
} else {
globEntryFuts.emplace_back(ImmediateFuture<GlobEntry>{
folly::Try<GlobEntry>{GlobEntry{
std::move(entry), DT_UNKNOWN, originHash}}});
}
}
}
return collectAllSafe(std::move(globEntryFuts))
.thenValue([wantDtype](auto&& globEntries) {
XLOG(DBG5) << "Building Glob";
auto glob = std::make_unique<Glob>();
std::sort(
globEntries.begin(),
globEntries.end(),
[](GlobEntry a, GlobEntry b) {
return a.file < b.file;
});
for (GlobEntry& globEntry : globEntries) {
glob->matchingFiles_ref().value().emplace_back(
globEntry.file);
if (wantDtype) {
// This can happen if a file is missing on disk but
// exists on the server. Instead of silently failing
// use the local lookup.
if (globEntry.dType == DT_UNKNOWN) {
throw newEdenError(
ENOENT,
EdenErrorType::POSIX_ERROR,
"could not get Dtype for file ",
globEntry.file);
}
glob->dtypes_ref().value().emplace_back(
globEntry.dType);
}
glob->originHashes_ref().value().emplace_back(
globEntry.originHash);
}
XLOG(DBG5)
<< "Glob successfuly created, returning SaplingRemoteAPI results";
return glob;
});
})
.thenError([mountHandle,
globFilesRequestScope,
serverState = server_->getServerState(),
globs = std::move(*params->globs()),
globber = std::move(globber),
&context](
const folly::exception_wrapper& ex) mutable {
// Fallback to local if an error was encountered while using the
// SaplingRemoteAPI method
XLOG(DBG3) << "Encountered error when evaluating globFiles: "
<< ex.what();
XLOG(DBG3) << "Using local globFiles";
globFilesRequestScope->setFallback(true);
return globber.glob(
mountHandle.getEdenMountPtr(),
serverState,
std::move(globs),
context);
});
} else {
globFut = std::move(backgroundFuture)
.thenValue([mountHandle,
serverState = server_->getServerState(),
globs = std::move(*params->globs()),
globber = std::move(globber),
&context](auto&&) mutable {
XLOG(DBG3)
<< "No suffixes, or mixed suffixes and non-suffixes";
XLOG(DBG3) << "Using local globFiles";
// TODO: Insert ODS log for globs here
return globber.glob(
mountHandle.getEdenMountPtr(),
serverState,
std::move(globs),
context);
});
}
} else {
globFut = std::move(backgroundFuture)
.thenValue([mountHandle,
serverState = server_->getServerState(),
globs = std::move(*params->globs()),
globber = std::move(globber),
&context](auto&&) mutable {
XLOG(DBG3) << "Using local globFiles";
// TODO: Insert ODS log for globs here
return globber.glob(
mountHandle.getEdenMountPtr(),
serverState,
std::move(globs),
context);
});
}
globFut = std::move(globFut).ensure(
[mountHandle,
helper = std::move(helper),
params = std::move(params),
suffixGlobRequestScope = std::move(suffixGlobRequestScope),
globFilesRequestScope = std::move(globFilesRequestScope)] {});
// The glob code has a very large fan-out that can easily overload the Thrift
// CPU worker pool. To combat with that, we limit the execution to a single
// thread by using `folly::SerialExecutor` so the glob queries will not
// overload the executor.
return serialDetachIfBackgrounded<Glob>(
std::move(globFut), server_, isBackground);
}
// DEPRECATED. Use semifuture_prefetchFilesV2 instead.
//
// Prefetch the files matching the requested glob patterns into the local
// cache. The Glob result itself is discarded; callers only observe completion
// (or, for background requests, immediate detachment).
folly::SemiFuture<folly::Unit> EdenServiceHandler::semifuture_prefetchFiles(
    std::unique_ptr<PrefetchParams> params) {
  auto mountHandle = lookupMount(params->mountPoint());
  if (!params->revisions_ref().value().empty()) {
    // Re-encode caller-supplied revisions with the mount's last active filter
    // so parseRootId can interpret them (see the note in getScmStatus below).
    params->revisions_ref() = resolveRootsWithLastFilter(
        params->revisions_ref().value(), mountHandle);
  }
  ThriftGlobImpl globber{*params};
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG2,
      *params->mountPoint_ref(),
      toLogArg(*params->globs_ref()),
      globber.logString());
  auto& context = helper->getFetchContext();
  auto isBackground = *params->background();
  // Background requests start from a not-ready future so all work is deferred
  // until after this handler has detached it.
  ImmediateFuture<folly::Unit> backgroundFuture{std::in_place};
  if (isBackground) {
    backgroundFuture = makeNotReadyImmediateFuture();
  }
  maybeLogExpensiveGlob(
      *params->globs(),
      *params->searchRoot_ref(),
      globber,
      context,
      server_->getServerState());
  // NB: globs is moved out of params here, but params itself stays alive and
  // is moved into the final ensure() below.
  auto globFut =
      std::move(backgroundFuture)
          .thenValue([mountHandle,
                      serverState = server_->getServerState(),
                      globs = std::move(*params->globs()),
                      globber = std::move(globber),
                      context = helper->getPrefetchFetchContext().copy()](
                         auto&&) mutable {
            return globber.glob(
                mountHandle.getEdenMountPtr(),
                serverState,
                std::move(globs),
                context);
          })
          .ensure([mountHandle] {})
          // The Glob is only needed for its prefetching side effect.
          .thenValue([](std::unique_ptr<Glob>) { return folly::unit; });
  // Keep the instrumentation helper and params alive until the glob finishes.
  globFut = std::move(globFut).ensure(
      [helper = std::move(helper), params = std::move(params)] {});
  if (server_->getServerState()
          ->getEdenConfig()
          ->runSerialPrefetch.getValue()) {
    // The glob code has a very large fan-out that can easily overload the
    // Thrift CPU worker pool. To combat with that, we limit the execution to a
    // single thread by using `folly::SerialExecutor` so the glob queries will
    // not overload the executor.
    return serialDetachIfBackgrounded(
        std::move(globFut), server_, isBackground);
  } else {
    return detachIfBackgrounded(
        std::move(globFut), server_->getServerState(), isBackground)
        .semi();
  }
}
// Prefetch the files matching the requested glob patterns. Unlike the
// deprecated semifuture_prefetchFiles, this version can optionally return the
// list of prefetched files (when params->returnPrefetchedFiles is set).
folly::SemiFuture<std::unique_ptr<PrefetchResult>>
EdenServiceHandler::semifuture_prefetchFilesV2(
    std::unique_ptr<PrefetchParams> params) {
  TaskTraceBlock block{"EdenServiceHandler::prefetchFilesV2"};
  auto mountHandle = lookupMount(params->mountPoint());
  if (!params->revisions_ref().value().empty()) {
    // Re-encode caller-supplied revisions with the mount's last active filter
    // so parseRootId can interpret them (see the note in getScmStatus below).
    params->revisions_ref() = resolveRootsWithLastFilter(
        params->revisions_ref().value(), mountHandle);
  }
  ThriftGlobImpl globber{*params};
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG2,
      *params->mountPoint_ref(),
      toLogArg(*params->globs_ref()),
      globber.logString());
  auto& context = helper->getFetchContext();
  auto isBackground = *params->background();
  auto returnPrefetchedFiles = *params->returnPrefetchedFiles();
  // Background requests start from a not-ready future so all work is deferred
  // until after this handler has detached it.
  ImmediateFuture<folly::Unit> backgroundFuture{std::in_place};
  if (isBackground) {
    backgroundFuture = makeNotReadyImmediateFuture();
  }
  maybeLogExpensiveGlob(
      *params->globs(),
      *params->searchRoot_ref(),
      globber,
      context,
      server_->getServerState());
  auto globFut =
      std::move(backgroundFuture)
          .thenValue([mountHandle,
                      serverState = server_->getServerState(),
                      globs = std::move(*params->globs()),
                      globber = std::move(globber),
                      context = helper->getPrefetchFetchContext().copy()](
                         auto&&) mutable {
            return globber.glob(
                mountHandle.getEdenMountPtr(),
                serverState,
                std::move(globs),
                context);
          });
  // If returnPrefetchedFiles is set then return the list of globs
  auto prefetchResult = std::move(globFut)
                            .thenValue([returnPrefetchedFiles](
                                           std::unique_ptr<Glob> glob) mutable {
                              std::unique_ptr<PrefetchResult> result =
                                  std::make_unique<PrefetchResult>();
                              if (!returnPrefetchedFiles) {
                                return result;
                              }
                              result->prefetchedFiles_ref() = std::move(*glob);
                              return result;
                            })
                            // Keep the mount handle, instrumentation helper,
                            // and params alive until the prefetch completes.
                            .ensure([mountHandle,
                                     helper = std::move(helper),
                                     params = std::move(params)] {});
  if (server_->getServerState()
          ->getEdenConfig()
          ->runSerialPrefetch.getValue()) {
    // The glob code has a very large fan-out that can easily overload the
    // Thrift CPU worker pool. To combat with that, we limit the execution to a
    // single thread by using `folly::SerialExecutor` so the glob queries will
    // not overload the executor.
    return serialDetachIfBackgrounded<PrefetchResult>(
        std::move(prefetchResult), server_, isBackground);
  } else {
    return detachIfBackgrounded<PrefetchResult>(
        std::move(prefetchResult),
        server_->getServerState(),
        isBackground)
        .semi();
  }
}
// Change the uid/gid that owns all files in this mount.
// Not implemented on Windows.
folly::SemiFuture<struct folly::Unit> EdenServiceHandler::semifuture_chown(
    [[maybe_unused]] std::unique_ptr<std::string> mountPoint,
    [[maybe_unused]] int32_t uid,
    [[maybe_unused]] int32_t gid) {
#ifndef _WIN32
  auto handle = lookupMount(mountPoint);
  auto chownFuture = handle.getEdenMount().chown(uid, gid);
  // Keep the mount handle alive until the chown has completed.
  return std::move(chownFuture).ensure([handle] {}).semi();
#else
  NOT_IMPLEMENTED();
#endif // !_WIN32
}
// Change the uid/gid that owns all files in this mount, returning a Thrift
// response object on success. Not implemented on Windows.
folly::SemiFuture<std::unique_ptr<ChangeOwnershipResponse>>
EdenServiceHandler::semifuture_changeOwnership(
    unique_ptr<ChangeOwnershipRequest> request) {
#ifndef _WIN32
  auto handle = lookupMount(*request->mountPoint_ref());
  auto chownFuture =
      handle.getEdenMount().chown(*request->uid(), *request->gid());
  // Keep the mount handle alive for the duration of the chown, then wrap
  // the completed result in a response struct.
  return std::move(chownFuture)
      .ensure([handle] {})
      .thenValue([](folly::Unit&&) {
        return std::make_unique<ChangeOwnershipResponse>();
      })
      .semi();
#else
  NOT_IMPLEMENTED();
#endif // !_WIN32
}
// Compute the working-copy status (diff) against the given commit.
// V2 honors RootIdOptions (FilterID support) and, depending on config, can
// enforce that the requested commit matches the current parent.
folly::SemiFuture<std::unique_ptr<GetScmStatusResult>>
EdenServiceHandler::semifuture_getScmStatusV2(
    unique_ptr<GetScmStatusParams> params) {
  auto* context = getRequestContext();
  auto rootIdOptions = params->rootIdOptions_ref().ensure();
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG3,
      *params->mountPoint_ref(),
      folly::to<string>("commitHash=", logHash(*params->commit_ref())),
      folly::to<string>("listIgnored=", *params->listIgnored_ref()),
      folly::to<string>(
          "filterId=",
          rootIdOptions.filterId_ref().has_value()
              ? *rootIdOptions.filterId_ref()
              : "(none)"));
  helper->getThriftFetchContext().fillClientRequestInfo(params->cri_ref());
  auto& fetchContext = helper->getFetchContext();
  auto mountHandle = lookupMount(params->mountPoint());
  // If we were passed a FilterID, create a RootID that contains the filter and
  // a varint that indicates the length of the original hash.
  std::string parsedCommit = resolveRootId(
      std::move(*params->commit_ref()), rootIdOptions, mountHandle);
  auto rootId = mountHandle.getObjectStore().parseRootId(parsedCommit);
  // Config knob: whether to reject status requests against a commit that is
  // not the mount's current parent.
  const auto& enforceParents = server_->getServerState()
                                   ->getReloadableConfig()
                                   ->getEdenConfig()
                                   ->enforceParents.getValue();
  return wrapImmediateFuture(
             std::move(helper),
             mountHandle.getEdenMount()
                 .diff(
                     mountHandle.getRootInode(),
                     rootId,
                     context->getConnectionContext()->getCancellationToken(),
                     fetchContext,
                     *params->listIgnored_ref(),
                     enforceParents)
                 // Keep the mount handle alive until the diff completes.
                 .ensure([mountHandle] {})
                 .thenValue([this](std::unique_ptr<ScmStatus>&& status) {
                   auto result = std::make_unique<GetScmStatusResult>();
                   result->status_ref() = std::move(*status);
                   result->version_ref() = server_->getVersion();
                   return result;
                 }))
      .semi();
}
// Legacy status (diff) call. Prefer getScmStatusV2: this variant neither
// enforces the current parent nor supports explicit RootIdOptions.
folly::SemiFuture<std::unique_ptr<ScmStatus>>
EdenServiceHandler::semifuture_getScmStatus(
    unique_ptr<string> mountPoint,
    bool listIgnored,
    unique_ptr<string> commitHash) {
  auto* context = getRequestContext();
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG2,
      *mountPoint,
      folly::to<string>("listIgnored=", listIgnored ? "true" : "false"),
      folly::to<string>("commitHash=", logHash(*commitHash)));
  auto& fetchContext = helper->getFetchContext();
  // Unlike getScmStatusV2(), this older getScmStatus() call does not enforce
  // that the caller specified the current commit. In the future we might
  // want to enforce that even for this call, if we confirm that all existing
  // callers of this method can deal with the error.
  auto mountHandle = lookupMount(mountPoint);
  // parseRootId assumes that the passed in hash will contain information about
  // the active filter. This legacy code path does not respect filters, so the
  // last active filter will always be passed in if it exists. For non-FFS
  // repos, the last filterID will be std::nullopt.
  std::string parsedCommit =
      resolveRootIdWithLastFilter(std::move(*commitHash), mountHandle);
  auto hash = mountHandle.getObjectStore().parseRootId(parsedCommit);
  return wrapImmediateFuture(
             std::move(helper),
             mountHandle.getEdenMount().diff(
                 mountHandle.getRootInode(),
                 hash,
                 context->getConnectionContext()->getCancellationToken(),
                 fetchContext,
                 listIgnored,
                 /*enforceCurrentParent=*/false))
      // Keep the mount handle alive until the diff completes.
      .ensure([mountHandle] {})
      .semi();
}
// Compute the status (diff) between two arbitrary commits, without reference
// to the working copy.
folly::SemiFuture<unique_ptr<ScmStatus>>
EdenServiceHandler::semifuture_getScmStatusBetweenRevisions(
    unique_ptr<string> mountPoint,
    unique_ptr<string> oldHash,
    unique_ptr<string> newHash) {
  auto* context = getRequestContext();
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG2,
      *mountPoint,
      folly::to<string>("oldHash=", logHash(*oldHash)),
      folly::to<string>("newHash=", logHash(*newHash)));
  auto mountHandle = lookupMount(mountPoint);
  auto& fetchContext = helper->getFetchContext();
  // parseRootId assumes that the passed in hash will contain information about
  // the active filter. This legacy code path does not respect filters, so the
  // last active filter will always be passed in if it exists. For non-FFS
  // repos, the last filterID will be std::nullopt.
  std::string resolvedOldHash =
      resolveRootIdWithLastFilter(std::move(*oldHash), mountHandle);
  std::string resolvedNewHash =
      resolveRootIdWithLastFilter(std::move(*newHash), mountHandle);
  // The diff reports into this callback via a raw pointer; ownership of the
  // callback is moved into the continuation below, which keeps it alive until
  // diffFuture has completed.
  auto callback = std::make_unique<ScmStatusDiffCallback>();
  auto diffFuture = diffBetweenRoots(
      mountHandle.getObjectStore().parseRootId(resolvedOldHash),
      mountHandle.getObjectStore().parseRootId(resolvedNewHash),
      *mountHandle.getEdenMount().getCheckoutConfig(),
      mountHandle.getObjectStorePtr(),
      context->getConnectionContext()->getCancellationToken(),
      fetchContext,
      callback.get());
  return wrapImmediateFuture(
             std::move(helper),
             std::move(diffFuture)
                 .thenValue([callback = std::move(callback)](auto&&) {
                   return std::make_unique<ScmStatus>(
                       callback->extractStatus());
                 }))
      .semi();
}
// For each requested path, force EdenFS's view of the file to match what is
// on the filesystem. Only meaningful for Windows PrjFS mounts; on all other
// platforms (or non-PrjFS mounts) this throws ENOTSUP.
folly::SemiFuture<std::unique_ptr<MatchFileSystemResponse>>
EdenServiceHandler::semifuture_matchFilesystem(
    std::unique_ptr<MatchFileSystemRequest> params) {
  auto helper =
      INSTRUMENT_THRIFT_CALL(DBG2, *params->mountPoint(), *params->paths());
#ifdef _WIN32
  auto mountHandle = lookupMount(params->mountPoint()->mountPoint());
  if (auto* prjfsChannel = mountHandle.getEdenMount().getPrjfsChannel()) {
    // Kick off one reconciliation future per requested path.
    std::vector<ImmediateFuture<folly::Unit>> results;
    results.reserve(params->paths()->size());
    for (auto& path : *params->paths()) {
      results.push_back(prjfsChannel->matchEdenViewOfFileToFS(
          relpathFromUserPath(path), helper->getFetchContext()));
    }
    // collectAll (not collectAllSafe): a failure on one path must not
    // discard the others — each per-path error is reported individually in
    // the response below.
    return wrapImmediateFuture(
               std::move(helper),
               ImmediateFuture{
                   collectAll(std::move(results))
                       .ensure([mountHandle]() {})
                       .thenValue([](std::vector<folly::Try<folly::Unit>>
                                         raw_results) {
                         std::vector<MatchFilesystemPathResult> results;
                         results.reserve(raw_results.size());
                         for (auto& raw_result : raw_results) {
                           MatchFilesystemPathResult result{};
                           if (raw_result.hasException()) {
                             result.error() =
                                 newEdenError(raw_result.exception());
                           }
                           results.push_back(std::move(result));
                         }
                         auto final_result =
                             std::make_unique<MatchFileSystemResponse>();
                         final_result->results() = std::move(results);
                         return final_result;
                       })})
        .semi();
  }
#endif
  throw newEdenError(
      ENOTSUP,
      EdenErrorType::POSIX_ERROR,
      "matchFilesystemStat only supported for PrjFs repos which {} is not",
      *params->mountPoint());
}
// Look up a source-control Tree by object id and report its entries.
// When localStoreOnly is set, only the local store cache is consulted;
// otherwise the ObjectStore (which may reach the backing store) is used.
void EdenServiceHandler::debugGetScmTree(
    vector<ScmTreeEntry>& entries,
    unique_ptr<string> mountPoint,
    unique_ptr<string> idStr,
    bool localStoreOnly) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, *mountPoint, logHash(*idStr));
  auto mountHandle = lookupMount(mountPoint);
  auto& store = mountHandle.getObjectStore();
  auto objectId = store.parseObjectId(*idStr);

  // NB: this blocks the current Thrift worker thread until the fetch is done.
  std::shared_ptr<const Tree> tree;
  if (localStoreOnly) {
    auto localStore = server_->getLocalStore();
    tree = localStore->getTree(objectId).get();
  } else {
    tree = store.getTree(objectId, helper->getFetchContext()).get();
  }

  if (!tree) {
    throw newEdenError(
        ENOENT,
        EdenErrorType::POSIX_ERROR,
        "no tree found for id ",
        store.renderObjectId(objectId));
  }

  for (const auto& [entryName, treeEntry] : *tree) {
    ScmTreeEntry converted;
    converted.name_ref() = entryName.asString();
    converted.mode_ref() = modeFromTreeEntryType(treeEntry.getType());
    converted.id_ref() = store.renderObjectId(treeEntry.getHash());
    entries.push_back(std::move(converted));
  }
}
// Debug helper: fetch a blob from each requested origin (memory cache, disk
// cache, local backing store, remote backing store, and/or "anywhere") and
// report what each origin returned. One entry is produced per requested
// origin flag.
folly::SemiFuture<std::unique_ptr<DebugGetScmBlobResponse>>
EdenServiceHandler::semifuture_debugGetBlob(
    std::unique_ptr<DebugGetScmBlobRequest> request) {
  const auto& mountid = request->mountId();
  const auto& idStr = request->id();
  const auto& origins = request->origins();
  auto helper =
      INSTRUMENT_THRIFT_CALL(DBG2, *mountid, logHash(*idStr), *origins);
  auto mountHandle = lookupMount(*mountid);
  auto edenMount = mountHandle.getEdenMountPtr();
  auto id = edenMount->getObjectStore()->parseObjectId(*idStr);
  auto originFlags = DataFetchOriginFlags::raw(*origins);
  auto store = edenMount->getObjectStore();
  std::vector<ImmediateFuture<ScmBlobWithOrigin>> blobFutures;
  if (originFlags.contains(FROMWHERE_MEMORY_CACHE)) {
    // In-memory blob cache lookup is synchronous; wrap the result directly.
    blobFutures.emplace_back(transformToBlobFromOrigin(
        edenMount,
        id,
        folly::Try<std::shared_ptr<const Blob>>{
            edenMount->getBlobCache()->get(id).object},
        DataFetchOrigin::MEMORY_CACHE));
  }
  if (originFlags.contains(FROMWHERE_DISK_CACHE)) {
    auto localStore = server_->getLocalStore();
    blobFutures.emplace_back(
        localStore->getBlob(id).thenTry([edenMount, id](auto&& blob) {
          return transformToBlobFromOrigin(
              edenMount, id, std::move(blob), DataFetchOrigin::DISK_CACHE);
        }));
  }
  if (originFlags.contains(FROMWHERE_LOCAL_BACKING_STORE)) {
    // Backing-store lookups need the proxy hash that maps our ObjectId onto
    // the underlying source-control identifiers.
    auto proxyHash = HgProxyHash::load(
        server_->getLocalStore().get(),
        id,
        "debugGetScmBlob",
        *server_->getServerState()->getStats());
    auto backingStore = edenMount->getObjectStore()->getBackingStore();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore =
        castToSaplingBackingStore(backingStore, edenMount->getPath());
    blobFutures.emplace_back(transformToBlobFromOrigin(
        edenMount,
        id,
        saplingBackingStore->getBlobLocal(proxyHash),
        DataFetchOrigin::LOCAL_BACKING_STORE));
  }
  if (originFlags.contains(FROMWHERE_REMOTE_BACKING_STORE)) {
    auto proxyHash = HgProxyHash::load(
        server_->getLocalStore().get(),
        id,
        "debugGetScmBlob",
        *server_->getServerState()->getStats());
    auto backingStore = edenMount->getObjectStore()->getBackingStore();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore =
        castToSaplingBackingStore(backingStore, edenMount->getPath());
    blobFutures.emplace_back(transformToBlobFromOrigin(
        edenMount,
        id,
        saplingBackingStore->getBlobRemote(proxyHash),
        DataFetchOrigin::REMOTE_BACKING_STORE));
  }
  if (originFlags.contains(FROMWHERE_ANYWHERE)) {
    // Normal fetch path: let the ObjectStore consult caches/stores in order.
    blobFutures.emplace_back(
        store->getBlob(id, helper->getFetchContext())
            .thenTry([edenMount, id](auto&& blob) {
              return transformToBlobFromOrigin(
                  edenMount, id, std::move(blob), DataFetchOrigin::ANYWHERE);
            }));
  }
  return wrapImmediateFuture(
             std::move(helper),
             collectAllSafe(std::move(blobFutures))
                 .thenValue([](std::vector<ScmBlobWithOrigin> blobs) {
                   auto response = std::make_unique<DebugGetScmBlobResponse>();
                   response->blobs() = std::move(blobs);
                   return response;
                 }))
      .semi();
}
// Debug helper: fetch a blob's metadata from each requested origin and report
// what each origin returned, mirroring semifuture_debugGetBlob above.
folly::SemiFuture<std::unique_ptr<DebugGetBlobMetadataResponse>>
EdenServiceHandler::semifuture_debugGetBlobMetadata(
    std::unique_ptr<DebugGetBlobMetadataRequest> request) {
  const auto& mountid = request->mountId();
  const auto& idStr = request->id();
  const auto& origins = request->origins();
  auto helper =
      INSTRUMENT_THRIFT_CALL(DBG2, *mountid, logHash(*idStr), *origins);
  auto mountHandle = lookupMount(*mountid);
  auto edenMount = mountHandle.getEdenMountPtr();
  auto id = edenMount->getObjectStore()->parseObjectId(*idStr);
  auto originFlags = DataFetchOriginFlags::raw(*origins);
  auto store = edenMount->getObjectStore();
  auto& fetchContext = helper->getFetchContext();
  std::vector<ImmediateFuture<BlobMetadataWithOrigin>> blobFutures;
  if (originFlags.contains(FROMWHERE_MEMORY_CACHE)) {
    auto metadata = store->getBlobMetadataFromInMemoryCache(id, fetchContext);
    blobFutures.emplace_back(transformToBlobMetadataFromOrigin(
        edenMount, id, metadata, DataFetchOrigin::MEMORY_CACHE));
  }
  if (originFlags.contains(FROMWHERE_DISK_CACHE)) {
    auto localStore = server_->getLocalStore();
    blobFutures.emplace_back(localStore->getBlobMetadata(id).thenTry(
        [edenMount, id](auto&& metadata) {
          return transformToBlobMetadataFromOrigin(
              edenMount,
              id,
              std::move(metadata.value()),
              DataFetchOrigin::DISK_CACHE);
        }));
  }
  if (originFlags.contains(FROMWHERE_LOCAL_BACKING_STORE)) {
    // NOTE(review): the load-context string below says "debugGetScmBlob" —
    // it looks copied from debugGetBlob; confirm whether it should name this
    // method instead. (It is only used for logging/stats attribution.)
    auto proxyHash = HgProxyHash::load(
        server_->getLocalStore().get(),
        id,
        "debugGetScmBlob",
        *server_->getServerState()->getStats());
    auto backingStore = edenMount->getObjectStore()->getBackingStore();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore =
        castToSaplingBackingStore(backingStore, edenMount->getPath());
    auto metadata =
        saplingBackingStore->getLocalBlobMetadata(proxyHash).value_or(nullptr);
    blobFutures.emplace_back(transformToBlobMetadataFromOrigin(
        edenMount,
        id,
        std::move(metadata),
        DataFetchOrigin::LOCAL_BACKING_STORE));
  }
  if (originFlags.contains(FROMWHERE_REMOTE_BACKING_STORE)) {
    auto proxyHash = HgProxyHash::load(
        server_->getLocalStore().get(),
        id,
        "debugGetScmBlob",
        *server_->getServerState()->getStats());
    auto backingStore = edenMount->getObjectStore()->getBackingStore();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore =
        castToSaplingBackingStore(backingStore, edenMount->getPath());
    blobFutures.emplace_back(
        ImmediateFuture{saplingBackingStore->getBlobMetadataEnqueue(
                            id, proxyHash, fetchContext)}
            .thenValue([edenMount, id](BackingStore::GetBlobMetaResult result) {
              return transformToBlobMetadataFromOrigin(
                  edenMount,
                  id,
                  std::move(result.blobMeta),
                  DataFetchOrigin::REMOTE_BACKING_STORE);
            }));
  }
  if (originFlags.contains(FROMWHERE_ANYWHERE)) {
    // NOTE(review): unlike the branches above, this call passes no
    // mount/id — presumably a different overload; the [edenMount, id]
    // captures appear unused except to keep the mount alive. Confirm.
    blobFutures.emplace_back(store->getBlobMetadata(id, fetchContext)
                                 .thenTry([edenMount, id](auto&& metadata) {
                                   return transformToBlobMetadataFromOrigin(
                                       std::move(metadata),
                                       DataFetchOrigin::ANYWHERE);
                                 }));
  }
  return wrapImmediateFuture(
             std::move(helper),
             collectAllSafe(std::move(blobFutures))
                 .thenValue([](std::vector<BlobMetadataWithOrigin> blobs) {
                   auto response =
                       std::make_unique<DebugGetBlobMetadataResponse>();
                   response->metadatas() = std::move(blobs);
                   return response;
                 }))
      .semi();
}
// Debug helper: fetch a tree from each requested origin (memory cache, disk
// cache, local backing store, remote backing store, and/or "anywhere") and
// report what each origin returned.
folly::SemiFuture<std::unique_ptr<DebugGetScmTreeResponse>>
EdenServiceHandler::semifuture_debugGetTree(
    std::unique_ptr<DebugGetScmTreeRequest> request) {
  const auto& mountid = request->mountId();
  const auto& idStr = request->id();
  const auto& origins = request->origins();
  auto helper =
      INSTRUMENT_THRIFT_CALL(DBG2, *mountid, logHash(*idStr), *origins);
  auto mountHandle = lookupMount(*mountid);
  auto edenMount = mountHandle.getEdenMountPtr();
  auto id = edenMount->getObjectStore()->parseObjectId(*idStr);
  auto originFlags = DataFetchOriginFlags::raw(*origins);
  auto store = edenMount->getObjectStore();
  std::vector<ImmediateFuture<ScmTreeWithOrigin>> treeFutures;
  if (originFlags.contains(FROMWHERE_MEMORY_CACHE)) {
    // In-memory tree cache lookup is synchronous; wrap the result directly.
    treeFutures.emplace_back(transformToTreeFromOrigin(
        edenMount,
        id,
        folly::Try<std::shared_ptr<const Tree>>{store->getTreeCache()->get(id)},
        DataFetchOrigin::MEMORY_CACHE));
  }
  if (originFlags.contains(FROMWHERE_DISK_CACHE)) {
    auto localStore = server_->getLocalStore();
    treeFutures.emplace_back(localStore->getTree(id).thenTry(
        [edenMount, id, store](auto&& tree) mutable {
          return transformToTreeFromOrigin(
              std::move(edenMount),
              id,
              std::move(tree),
              DataFetchOrigin::DISK_CACHE);
        }));
  }
  if (originFlags.contains(FROMWHERE_LOCAL_BACKING_STORE)) {
    // Backing-store lookups need the proxy hash that maps our ObjectId onto
    // the underlying source-control identifiers.
    auto proxyHash = HgProxyHash::load(
        server_->getLocalStore().get(),
        id,
        "debugGetTree",
        *server_->getServerState()->getStats());
    auto backingStore = edenMount->getObjectStore()->getBackingStore();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore =
        castToSaplingBackingStore(backingStore, edenMount->getPath());
    treeFutures.emplace_back(transformToTreeFromOrigin(
        edenMount,
        id,
        folly::Try<std::shared_ptr<const Tree>>{
            saplingBackingStore->getTreeLocal(id, proxyHash)},
        DataFetchOrigin::LOCAL_BACKING_STORE));
  }
  if (originFlags.contains(FROMWHERE_REMOTE_BACKING_STORE)) {
    auto proxyHash = HgProxyHash::load(
        server_->getLocalStore().get(),
        id,
        "debugGetTree",
        *server_->getServerState()->getStats());
    auto backingStore = edenMount->getObjectStore()->getBackingStore();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore =
        castToSaplingBackingStore(backingStore, edenMount->getPath());
    treeFutures.emplace_back(transformToTreeFromOrigin(
        edenMount,
        id,
        saplingBackingStore->getTreeRemote(
            proxyHash.path().copy(),
            proxyHash.revHash(),
            id,
            helper->getFetchContext()),
        DataFetchOrigin::REMOTE_BACKING_STORE));
  }
  if (originFlags.contains(FROMWHERE_ANYWHERE)) {
    // Normal fetch path: let the ObjectStore consult caches/stores in order.
    treeFutures.emplace_back(store->getTree(id, helper->getFetchContext())
                                 .thenTry([edenMount, id](auto&& tree) mutable {
                                   return transformToTreeFromOrigin(
                                       std::move(edenMount),
                                       id,
                                       std::move(tree),
                                       DataFetchOrigin::ANYWHERE);
                                 }));
  }
  return wrapImmediateFuture(
             std::move(helper),
             collectAllSafe(std::move(treeFutures))
                 .thenValue([](std::vector<ScmTreeWithOrigin> trees) {
                   auto response = std::make_unique<DebugGetScmTreeResponse>();
                   response->trees() = std::move(trees);
                   return response;
                 }))
      // Keep the mount handle alive until all fetches complete.
      .ensure([mountHandle] {})
      .semi();
}
namespace {
// Traversal callbacks used by debugInodeStatus to collect a
// TreeInodeDebugInfo for every tree inode visited. Results accumulate into
// the caller-owned `results` vector; blob-size lookups are deferred and
// resolved in bulk by fillBlobSizes().
class InodeStatusCallbacks : public TraversalCallbacks {
 public:
  explicit InodeStatusCallbacks(
      EdenMount* mount,
      int64_t flags,
      std::vector<TreeInodeDebugInfo>& results)
      : mount_{mount}, flags_{flags}, results_{results} {}
  // Record one TreeInodeDebugInfo for this tree inode and all of its entries.
  void visitTreeInode(
      RelativePathPiece path,
      InodeNumber ino,
      const std::optional<ObjectId>& hash,
      uint64_t fsRefcount,
      const std::vector<ChildEntry>& entries) override {
#ifndef _WIN32
    auto* inodeMetadataTable = mount_->getInodeMetadataTable();
#endif
    TreeInodeDebugInfo info;
    info.inodeNumber_ref() = ino.get();
    info.path_ref() = path.asString();
    // No source-control hash means the inode has been materialized locally.
    info.materialized_ref() = !hash.has_value();
    if (hash.has_value()) {
      info.treeHash_ref() =
          mount_->getObjectStore()->renderObjectId(hash.value());
    }
    info.refcount_ref() = fsRefcount;
    info.entries_ref()->reserve(entries.size());
    for (auto& entry : entries) {
      TreeInodeEntryDebugInfo entryInfo;
      entryInfo.name_ref() = entry.name.asString();
      entryInfo.inodeNumber_ref() = entry.ino.get();
      // This could be enabled on Windows if InodeMetadataTable was removed.
#ifndef _WIN32
      if (auto metadata = (flags_ & eden_constants::DIS_COMPUTE_ACCURATE_MODE_)
              ? inodeMetadataTable->getOptional(entry.ino)
              : std::nullopt) {
        entryInfo.mode_ref() = metadata->mode;
      } else {
        // Fall back to a mode synthesized from the directory-entry type.
        entryInfo.mode_ref() = dtype_to_mode(entry.dtype);
      }
#else
      entryInfo.mode_ref() = dtype_to_mode(entry.dtype);
#endif
      entryInfo.loaded_ref() = entry.loadedChild != nullptr;
      entryInfo.materialized_ref() = !entry.hash.has_value();
      if (entry.hash.has_value()) {
        entryInfo.hash_ref() =
            mount_->getObjectStore()->renderObjectId(entry.hash.value());
      }
      if ((flags_ & eden_constants::DIS_COMPUTE_BLOB_SIZES_) &&
          dtype_t::Dir != entry.dtype) {
        if (entry.hash.has_value()) {
          // schedule fetching size from ObjectStore::getBlobSize
          requestedSizes_.push_back(RequestedSize{
              results_.size(), info.entries_ref()->size(), entry.hash.value()});
        } else {
#ifndef _WIN32
          entryInfo.fileSize_ref() =
              mount_->getOverlayFileAccess()->getFileSize(
                  entry.ino, entry.loadedChild.get());
#else
          // This following code ends up doing a stat in the working directory.
          // This is safe to do as Windows works very differently from
          // Linux/macOS when dealing with materialized files. In this code, we
          // know that the file is materialized because we do not have a hash
          // for it, and every materialized file is present on disk and
          // reading/stating it is guaranteed to be done without EdenFS
          // involvement. If somehow EdenFS is wrong, and this ends up
          // triggering a recursive call into EdenFS, we are detecting this and
          // simply bailing out very early in the callback.
          auto filePath = mount_->getPath() + path + entry.name;
          struct stat fileStat;
          if (::stat(filePath.c_str(), &fileStat) == 0) {
            entryInfo.fileSize_ref() = fileStat.st_size;
          } else {
            // Couldn't read the file, let's pretend it has a size of 0.
            entryInfo.fileSize_ref() = 0;
          }
#endif
        }
      }
      info.entries_ref()->push_back(entryInfo);
    }
    results_.push_back(std::move(info));
  }
  // Decide whether the traversal should descend into this child, based on
  // the DIS_* flags supplied by the caller.
  bool shouldRecurse(const ChildEntry& entry) override {
    if (flags_ & eden_constants::DIS_NOT_RECURSIVE_) {
      return false;
    }
    if ((flags_ & eden_constants::DIS_REQUIRE_LOADED_) && !entry.loadedChild) {
      return false;
    }
    if ((flags_ & eden_constants::DIS_REQUIRE_MATERIALIZED_) &&
        entry.hash.has_value()) {
      return false;
    }
    return true;
  }
  // Resolve every size request queued by visitTreeInode, writing the sizes
  // back into results_. Blocks the calling thread until all lookups finish.
  void fillBlobSizes(const ObjectFetchContextPtr& fetchContext) {
    std::vector<ImmediateFuture<folly::Unit>> futures;
    futures.reserve(requestedSizes_.size());
    for (auto& request : requestedSizes_) {
      futures.push_back(mount_->getObjectStore()
                            ->getBlobSize(request.hash, fetchContext)
                            .thenValue([this, request](uint64_t blobSize) {
                              results_.at(request.resultIndex)
                                  .entries_ref()
                                  ->at(request.entryIndex)
                                  .fileSize_ref() = blobSize;
                            }));
    }
    collectAll(std::move(futures)).get();
  }

 private:
  // Deferred blob-size lookup: indices locate the entry inside results_.
  struct RequestedSize {
    size_t resultIndex;
    size_t entryIndex;
    ObjectId hash;
  };
  EdenMount* mount_;
  int64_t flags_;
  std::vector<TreeInodeDebugInfo>& results_;
  std::vector<RequestedSize> requestedSizes_;
};
} // namespace
// Walk the loaded inode tree under `path` and report debug information for
// each tree inode, filtered/augmented according to the DIS_* `flags`.
// This call is synchronous: it blocks until the traversal completes.
void EdenServiceHandler::debugInodeStatus(
    vector<TreeInodeDebugInfo>& inodeInfo,
    unique_ptr<string> mountPoint,
    unique_ptr<std::string> path,
    int64_t flags,
    std::unique_ptr<SyncBehavior> sync) {
  // Default flag set when the caller passes 0.
  if (0 == flags) {
    flags = eden_constants::DIS_REQUIRE_LOADED_ |
        eden_constants::DIS_COMPUTE_BLOB_SIZES_;
  }
  auto helper = INSTRUMENT_THRIFT_CALL(
      DBG2, *mountPoint, *path, flags, getSyncTimeout(*sync));
  auto mountHandle = lookupMount(mountPoint);
  // Capturing inodeInfo by reference is safe here because the trailing
  // .get() blocks this thread until the whole chain has completed.
  waitForPendingWrites(mountHandle.getEdenMount(), *sync)
      .thenValue([mountHandle,
                  &inodeInfo,
                  path = std::move(path),
                  flags,
                  helper = std::move(helper)](auto&&) mutable {
        auto inode =
            inodeFromUserPath(
                mountHandle.getEdenMount(), *path, helper->getFetchContext())
                .asTreePtr();
        auto inodePath = inode->getPath().value();
        InodeStatusCallbacks callbacks{
            &mountHandle.getEdenMount(), flags, inodeInfo};
        traverseObservedInodes(*inode, inodePath, callbacks);
        // Resolve the deferred blob-size lookups queued during traversal.
        callbacks.fillBlobSizes(helper->getFetchContext());
      })
      .ensure([mountHandle] {})
      .get();
}
// Report the FUSE requests currently in flight for this mount.
// FUSE only exists on POSIX platforms; Windows builds are not implemented.
void EdenServiceHandler::debugOutstandingFuseCalls(
    [[maybe_unused]] std::vector<FuseCall>& outstandingCalls,
    [[maybe_unused]] std::unique_ptr<std::string> mountPoint) {
#ifndef _WIN32
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto mountHandle = lookupMount(mountPoint);
  auto* fuseChannel = mountHandle.getEdenMount().getFuseChannel();
  if (fuseChannel == nullptr) {
    // This mount is not served over FUSE; nothing to report.
    return;
  }
  for (const auto& call : fuseChannel->getOutstandingRequests()) {
    outstandingCalls.push_back(populateFuseCall(
        call.unique,
        call.request,
        *server_->getServerState()->getProcessInfoCache()));
  }
#else
  NOT_IMPLEMENTED();
#endif // !_WIN32
}
// Report the NFS requests currently in flight for this mount.
// Mounts not served over NFS simply produce an empty list.
void EdenServiceHandler::debugOutstandingNfsCalls(
    std::vector<NfsCall>& outstandingCalls,
    std::unique_ptr<std::string> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto mountHandle = lookupMount(mountPoint);
  auto* nfsdChannel = mountHandle.getEdenMount().getNfsdChannel();
  if (nfsdChannel == nullptr) {
    return;
  }
  for (const auto& call : nfsdChannel->getOutstandingRequests()) {
    NfsCall nfsCall;
    nfsCall.xid_ref() = call.xid;
    outstandingCalls.push_back(std::move(nfsCall));
  }
}
// Report the PrjFS requests currently in flight for this mount.
// PrjFS only exists on Windows; other platforms are not implemented.
void EdenServiceHandler::debugOutstandingPrjfsCalls(
    [[maybe_unused]] std::vector<PrjfsCall>& outstandingCalls,
    [[maybe_unused]] std::unique_ptr<std::string> mountPoint) {
#ifdef _WIN32
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto mountHandle = lookupMount(mountPoint);
  auto* prjfsChannel = mountHandle.getEdenMount().getPrjfsChannel();
  if (prjfsChannel == nullptr) {
    // This mount is not served over PrjFS; nothing to report.
    return;
  }
  for (const auto& call :
       prjfsChannel->getInner()->getOutstandingRequests()) {
    outstandingCalls.push_back(populatePrjfsCall(call.type, call.data));
  }
#else
  NOT_IMPLEMENTED();
#endif // _WIN32
}
// Snapshot metadata for every Thrift request this handler is currently
// serving.
void EdenServiceHandler::debugOutstandingThriftRequests(
    std::vector<ThriftRequestMetadata>& outstandingRequests) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  // Hold the read lock only for the duration of the copy.
  const auto requests = outstandingThriftRequests_.rlock();
  for (const auto& [requestId, request] : *requests) {
    outstandingRequests.emplace_back(populateThriftRequestMetadata(request));
  }
}
// Report the hg (Sapling) import events currently outstanding in the mount's
// SaplingBackingStore, converted to their Thrift representation.
void EdenServiceHandler::debugOutstandingHgEvents(
    std::vector<HgEvent>& outstandingEvents,
    std::unique_ptr<std::string> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto mountHandle = lookupMount(mountPoint);
  auto backingStore = mountHandle.getObjectStore().getBackingStore();
  // NOTE(review): castToSaplingBackingStore presumably raises if the mount's
  // backing store is not Sapling-backed — confirm against its definition.
  std::shared_ptr<SaplingBackingStore> saplingBackingStore =
      castToSaplingBackingStore(
          backingStore, mountHandle.getEdenMount().getPath());
  const auto hgEvents = saplingBackingStore->getOutstandingHgEvents();
  auto processInfoCache =
      mountHandle.getEdenMount().getServerState()->getProcessInfoCache();
  for (const auto& event : hgEvents) {
    HgEvent thriftEvent;
    convertHgImportTraceEventToHgEvent(event, *processInfoCache, thriftEvent);
    outstandingEvents.emplace_back(thriftEvent);
  }
}
// Start recording filesystem activity for the given mount into outputDir.
// Lazily creates the mount's ActivityRecorder on first use and returns the
// subscription id in `result`.
void EdenServiceHandler::debugStartRecordingActivity(
    ActivityRecorderResult& result,
    std::unique_ptr<std::string> mountPoint,
    std::unique_ptr<std::string> outputDir) {
  AbsolutePathPiece path;
  try {
    path = absolutePathFromThrift(*outputDir);
  } catch (const std::exception&) {
    // Surface malformed paths as an argument error rather than leaking the
    // parse exception to the client.
    throw newEdenError(
        EINVAL,
        EdenErrorType::ARGUMENT_ERROR,
        "path for output directory is invalid");
  }
  auto mountHandle = lookupMount(mountPoint);
  auto lockedPtr = mountHandle.getEdenMount().getActivityRecorder().wlock();
  // bool check on the wrapped pointer as lockedPtr is truthy as long
  // as we have the lock
  if (!lockedPtr->get()) {
    // First subscriber: create the recorder and install it under the lock.
    auto recorder =
        server_->makeActivityRecorder(mountHandle.getEdenMountPtr());
    lockedPtr->swap(recorder);
  }
  uint64_t unique = lockedPtr->get()->addSubscriber(path);
  // unique_ref is signed but overflow is very unlikely because unique is UNIX
  // timestamp in seconds.
  result.unique_ref() = unique;
}
// Stop the activity-recording subscription identified by `unique` on the
// given mount. On success `result` echoes the id and the output file path.
// Destroys the recorder when the last subscriber is removed.
void EdenServiceHandler::debugStopRecordingActivity(
    ActivityRecorderResult& result,
    std::unique_ptr<std::string> mountPoint,
    int64_t unique) {
  auto mountHandle = lookupMount(mountPoint);
  auto lockedPtr = mountHandle.getEdenMount().getActivityRecorder().wlock();
  auto* activityRecorder = lockedPtr->get();
  if (!activityRecorder) {
    // Nothing was ever recorded on this mount; leave result unset.
    return;
  }
  auto outputPath = activityRecorder->removeSubscriber(unique);
  if (outputPath.has_value()) {
    result.unique_ref() = unique;
    result.path_ref() = outputPath.value();
  }
  if (activityRecorder->getSubscribers().empty()) {
    // Last subscriber gone: tear down the recorder while still holding the
    // write lock.
    lockedPtr->reset();
  }
}
// List the id and output path of every active activity-recording
// subscription on the given mount.
void EdenServiceHandler::debugListActivityRecordings(
    ListActivityRecordingsResult& result,
    std::unique_ptr<std::string> mountPoint) {
  auto mountHandle = lookupMount(mountPoint);
  // A read lock suffices; we only copy subscriber data out.
  auto lockedPtr = mountHandle.getEdenMount().getActivityRecorder().rlock();
  auto* recorder = lockedPtr->get();
  if (recorder == nullptr) {
    // No recording has ever been started on this mount.
    return;
  }
  const auto subscribers = recorder->getSubscribers();
  std::vector<ActivityRecorderResult> recordings;
  recordings.reserve(subscribers.size());
  for (const auto& subscriber : subscribers) {
    ActivityRecorderResult entry;
    entry.unique_ref() = std::get<0>(subscriber);
    entry.path_ref() = std::get<1>(subscriber);
    recordings.push_back(std::move(entry));
  }
  result.recordings_ref() = std::move(recordings);
}
// Report debug info for one inode: whether it is loaded, whether it is still
// linked, and its current path (empty when unlinked).
void EdenServiceHandler::debugGetInodePath(
    InodePathDebugInfo& info,
    std::unique_ptr<std::string> mountPoint,
    int64_t inodeNumber) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  auto mountHandle = lookupMount(mountPoint);
  auto inodeMap = mountHandle.getEdenMount().getInodeMap();
  const auto inodeNum = static_cast<InodeNumber>(inodeNumber);
  const auto relativePath = inodeMap->getPathForInode(inodeNum);
  // "Loaded" means the InodeMap still holds a live object for this number.
  info.loaded_ref() = inodeMap->lookupLoadedInode(inodeNum) != nullptr;
  // getPathForInode() returns nullopt for unlinked inodes.
  info.linked_ref() = relativePath.has_value();
  info.path_ref() = relativePath.has_value() ? relativePath->asString() : "";
}
// Reset the fetch counters on the object store of every mounted checkout.
void EdenServiceHandler::clearFetchCounts() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  for (auto& mountHandle : server_->getMountPoints()) {
    mountHandle.getObjectStore().clearFetchCounts();
  }
}
// Reset the fetch counters on the object store of a single mount.
void EdenServiceHandler::clearFetchCountsByMount(
    std::unique_ptr<std::string> mountPoint) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  auto mountHandle = lookupMount(mountPoint);
  mountHandle.getObjectStore().clearFetchCounts();
}
// Begin recording fetched file paths on every backing store the server owns.
void EdenServiceHandler::startRecordingBackingStoreFetch() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  for (const auto& store : server_->getBackingStores()) {
    store->startRecordingFetch();
  }
}
// Stop recording backing-store fetches and return the accumulated file
// paths, merged across all stores under the "SaplingBackingStore" key.
void EdenServiceHandler::stopRecordingBackingStoreFetch(
    GetFetchedFilesResult& results) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  for (const auto& backingStore : server_->getBackingStores()) {
    auto filePaths = backingStore->stopRecordingFetch();
    std::shared_ptr<SaplingBackingStore> saplingBackingStore{nullptr};
    // If FilteredFS is enabled, we'll see a FilteredBackingStore first
    auto filteredBackingStore =
        std::dynamic_pointer_cast<FilteredBackingStore>(backingStore);
    if (filteredBackingStore) {
      // FilteredBackingStore -> SaplingBackingStore
      saplingBackingStore = std::dynamic_pointer_cast<SaplingBackingStore>(
          filteredBackingStore->getBackingStore());
    } else {
      // BackingStore -> SaplingBackingStore
      saplingBackingStore =
          std::dynamic_pointer_cast<SaplingBackingStore>(backingStore);
    }
    // recording is only implemented for SaplingBackingStore at the moment
    if (saplingBackingStore) {
      (*results.fetchedFilePaths_ref())["SaplingBackingStore"].insert(
          filePaths.begin(), filePaths.end());
    }
  }
} // NOTE(review): original trailing comment said "namespace eden", but this
  // brace closes the function, not a namespace.
// Report per-process access and fetch counts for every mount, covering the
// last `duration` seconds of access-log data.
void EdenServiceHandler::getAccessCounts(
    GetAccessCountsResult& result,
    int64_t duration) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  // Map of pid -> process name, so clients can label the counts below.
  result.cmdsByPid_ref() =
      server_->getServerState()->getProcessInfoCache()->getAllProcessNames();
  auto seconds = std::chrono::seconds{duration};
  for (auto& handle : server_->getMountPoints()) {
    auto& mount = handle.getEdenMount();
    auto& mountStr = mount.getPath().value();
    auto& pal = mount.getProcessAccessLog();
    auto& pidFetches = mount.getObjectStore()->getPidFetches();
    MountAccesses& ma = result.accessesByMount_ref()[mountStr];
    for (auto& [pid, accessCounts] : pal.getAccessCounts(seconds)) {
      ma.accessCountsByPid_ref()[pid] = accessCounts;
    }
    // Fetch counts live behind a lock; hold it only while copying them out.
    auto pidFetchesLockedPtr = pidFetches.rlock();
    for (auto& [pid, fetchCount] : *pidFetchesLockedPtr) {
      ma.fetchCountsByPid_ref()[pid.get()] = fetchCount;
    }
  }
}
// Drop all cached data in the local store, then compact its storage.
void EdenServiceHandler::clearAndCompactLocalStore() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG1);
  const auto localStore = server_->getLocalStore();
  localStore->clearCachesAndCompactAll();
}
// Drop all cached data in the local store without compacting.
void EdenServiceHandler::debugClearLocalStoreCaches() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG1);
  const auto localStore = server_->getLocalStore();
  localStore->clearCaches();
}
// Compact the local store's on-disk storage without clearing caches.
void EdenServiceHandler::debugCompactLocalStorage() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG1);
  const auto localStore = server_->getLocalStore();
  localStore->compactStorage();
}
// TODO(T119221752): add more BackingStore subclasses to this command. We
// currently only support SaplingBackingStores
// Drop every queued (not yet started) request across all Sapling backing
// stores and return how many were dropped.
int64_t EdenServiceHandler::debugDropAllPendingRequests() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG1);
  int64_t dropped = 0;
  for (const auto& store : server_->getSaplingBackingStores()) {
    dropped += store->dropAllPendingRequestsFromQueue();
  }
  return dropped;
}
// Unload children of the tree at `path` that were last accessed before
// now - age, returning the number of inodes unloaded. POSIX-only.
int64_t EdenServiceHandler::unloadInodeForPath(
    [[maybe_unused]] unique_ptr<string> mountPoint,
    [[maybe_unused]] std::unique_ptr<std::string> path,
    [[maybe_unused]] std::unique_ptr<TimeSpec> age) {
#ifndef _WIN32
  auto helper = INSTRUMENT_THRIFT_CALL(DBG1, *mountPoint, *path);
  auto mountHandle = lookupMount(mountPoint);
  TreeInodePtr inode =
      inodeFromUserPath(
          mountHandle.getEdenMount(), *path, helper->getFetchContext())
          .asTreePtr();
  // Cutoff is "age" before now; convert to a timespec for the inode API.
  auto cutoff = std::chrono::system_clock::now() -
      std::chrono::seconds(*age->seconds_ref()) -
      std::chrono::nanoseconds(*age->nanoSeconds_ref());
  auto cutoff_ts = folly::to<timespec>(cutoff);
  return inode->unloadChildrenLastAccessedBefore(cutoff_ts);
#else
  NOT_IMPLEMENTED();
#endif
}
// Invalidate (and possibly unload) non-materialized entries under the given
// path. Optionally runs in the background, in which case the client receives
// an immediate empty response.
folly::SemiFuture<std::unique_ptr<DebugInvalidateResponse>>
EdenServiceHandler::semifuture_debugInvalidateNonMaterialized(
    std::unique_ptr<DebugInvalidateRequest> params) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG1, *params->mount()->mountPoint());
  auto mountHandle = lookupMount(params->mount()->mountPoint());
  auto& fetchContext = helper->getFetchContext();
  if (!folly::kIsWindows) {
    // An age-based cutoff is only honored on Windows; reject non-zero ages
    // everywhere else.
    if (!(params->age()->seconds() == 0 && params->age()->nanoSeconds() == 0)) {
      throw newEdenError(
          EINVAL,
          EdenErrorType::ARGUMENT_ERROR,
          "Non-zero age is not supported on non-Windows platforms");
    }
  } else {
    // TODO: We may need to restrict 0s age on Windows as that can lead to
    // weird behavior where files are invalidated while being read causing the
    // read to fail.
  }
  // Zero age means "no cutoff": time_point::max() qualifies everything.
  auto cutoff = std::chrono::system_clock::time_point::max();
  if (*params->age()->seconds() != 0) {
    cutoff = std::chrono::system_clock::now() -
        std::chrono::seconds(*params->age()->seconds());
  }
  // For background runs, gate the chain on a never-ready future and detach
  // it below so the work proceeds off the request thread.
  ImmediateFuture<folly::Unit> backgroundFuture{std::in_place};
  if (*params->background()) {
    backgroundFuture = makeNotReadyImmediateFuture();
  }
  auto invalFut =
      std::move(backgroundFuture)
          .thenValue([mountHandle, sync = *params->sync()](auto&&) {
            return waitForPendingWrites(mountHandle.getEdenMount(), sync);
          })
          .thenValue(
              [mountHandle, path = *params->path(), &fetchContext](auto&&) {
                return inodeFromUserPath(
                           mountHandle.getEdenMount(), path, fetchContext)
                    .asTreePtr();
              })
          .thenValue([this, mountHandle, cutoff, &fetchContext](
                         TreeInodePtr inode) mutable {
            // Root invalidation is a full working-copy GC; a subtree gets a
            // targeted invalidate followed by unloading unreferenced children.
            if (inode == mountHandle.getRootInode()) {
              return server_->garbageCollectWorkingCopy(
                  mountHandle.getEdenMount(),
                  mountHandle.getRootInode(),
                  cutoff,
                  fetchContext);
            } else {
              return inode
                  ->invalidateChildrenNotMaterialized(cutoff, fetchContext)
                  .ensure(
                      [inode]() { inode->unloadChildrenUnreferencedByFs(); });
            }
          })
          .thenValue([](uint64_t numInvalidated) {
            auto ret = std::make_unique<DebugInvalidateResponse>();
            ret->numInvalidated() = numInvalidated;
            return ret;
          })
          // Keep helper (and its fetchContext, referenced above) plus the
          // mount handle alive for the whole chain.
          .ensure([helper = std::move(helper), mountHandle] {});
  if (!*params->background()) {
    return std::move(invalFut).semi();
  } else {
    folly::futures::detachOn(
        server_->getServerState()->getThreadPool().get(),
        std::move(invalFut).semi());
    return std::make_unique<DebugInvalidateResponse>();
  }
}
// Gather internal daemon statistics, filtered by the bitmask in params
// (mount stats, counters, memory figures, smaps, cache stats).
void EdenServiceHandler::getStatInfo(
    InternalStats& result,
    std::unique_ptr<GetStatInfoParams> params) {
  int64_t statsMask = *params->statsMask();
  // return all stats when mask not provided
  // TODO: remove when no old clients exists
  if (0 == statsMask) {
    statsMask = ~0;
  }
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  if (statsMask & eden_constants::STATS_MOUNTS_STATS_) {
    auto mountList = server_->getMountPoints();
    std::map<PathString, MountInodeInfo> mountPointInfo = {};
    std::map<PathString, JournalInfo> mountPointJournalInfo = {};
    for (auto& handle : mountList) {
      auto& mount = handle.getEdenMount();
      auto inodeMap = mount.getInodeMap();
      // Set loaded inode count and unloaded inode count for the mountPoint.
      MountInodeInfo mountInodeInfo;
      auto counts = inodeMap->getInodeCounts();
      mountInodeInfo.unloadedInodeCount_ref() = counts.unloadedInodeCount;
      mountInodeInfo.loadedFileCount_ref() = counts.fileCount;
      mountInodeInfo.loadedTreeCount_ref() = counts.treeCount;
      JournalInfo journalThrift;
      if (auto journalStats = mount.getJournal().getStats()) {
        journalThrift.entryCount_ref() = journalStats->entryCount;
        journalThrift.durationSeconds_ref() =
            journalStats->getDurationInSeconds();
      } else {
        // No journal stats available: report zeros rather than omitting.
        journalThrift.entryCount_ref() = 0;
        journalThrift.durationSeconds_ref() = 0;
      }
      journalThrift.memoryUsage_ref() =
          mount.getJournal().estimateMemoryUsage();
      auto mountPath = absolutePathToThrift(mount.getPath());
      mountPointJournalInfo[mountPath] = journalThrift;
      mountPointInfo[mountPath] = mountInodeInfo;
    }
    result.mountPointInfo_ref() = mountPointInfo;
    result.mountPointJournalInfo_ref() = mountPointJournalInfo;
  }
  // Counters are fetched unconditionally: the cache-stats section below also
  // reads them.
  auto counters = fb303::ServiceData::get()->getCounters();
  if (statsMask & eden_constants::STATS_COUNTERS_) {
    // Get the counters and set number of inodes unloaded by periodic unload
    // job.
    result.counters_ref() = counters;
    size_t periodicUnloadCount{0};
    for (auto& handle : server_->getMountPoints()) {
      auto& mount = handle.getEdenMount();
      periodicUnloadCount +=
          counters[mount.getCounterName(CounterName::PERIODIC_INODE_UNLOAD)];
    }
    result.periodicUnloadCount_ref() = periodicUnloadCount;
  }
  if (statsMask & eden_constants::STATS_PRIVATE_BYTES_) {
    auto privateDirtyBytes = facebook::eden::proc_util::calculatePrivateBytes();
    if (privateDirtyBytes) {
      result.privateBytes_ref() = privateDirtyBytes.value();
    }
  }
  if (statsMask & eden_constants::STATS_RSS_BYTES_) {
    auto memoryStats = facebook::eden::proc_util::readMemoryStats();
    if (memoryStats) {
      result.vmRSSBytes_ref() = memoryStats->resident;
    }
  }
  if (statsMask & eden_constants::STATS_SMAPS_) {
    // Note: this will be removed in a subsequent commit.
    // We now report periodically via ServiceData
    std::string smaps;
    if (folly::readFile("/proc/self/smaps", smaps)) {
      result.smaps_ref() = std::move(smaps);
    }
  }
  if (statsMask & eden_constants::STATS_CACHE_STATS_) {
    const auto blobCacheStats = server_->getBlobCache()->getStats(counters);
    result.blobCacheStats_ref() = CacheStats{};
    result.blobCacheStats_ref()->entryCount_ref() = blobCacheStats.objectCount;
    result.blobCacheStats_ref()->totalSizeInBytes_ref() =
        blobCacheStats.totalSizeInBytes;
    result.blobCacheStats_ref()->hitCount_ref() = blobCacheStats.hitCount;
    result.blobCacheStats_ref()->missCount_ref() = blobCacheStats.missCount;
    result.blobCacheStats_ref()->evictionCount_ref() =
        blobCacheStats.evictionCount;
    result.blobCacheStats_ref()->dropCount_ref() = blobCacheStats.dropCount;
    const auto treeCacheStats = server_->getTreeCache()->getStats(counters);
    result.treeCacheStats_ref() = CacheStats{};
    result.treeCacheStats_ref()->entryCount_ref() = treeCacheStats.objectCount;
    result.treeCacheStats_ref()->totalSizeInBytes_ref() =
        treeCacheStats.totalSizeInBytes;
    result.treeCacheStats_ref()->hitCount_ref() = treeCacheStats.hitCount;
    result.treeCacheStats_ref()->missCount_ref() = treeCacheStats.missCount;
    result.treeCacheStats_ref()->evictionCount_ref() =
        treeCacheStats.evictionCount;
    result.treeCacheStats_ref()->dropCount_ref() = treeCacheStats.dropCount;
  }
}
// Force an immediate flush of the server's aggregated stats.
void EdenServiceHandler::flushStatsNow() {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG3);
  server_->flushStatsNow();
}
// Manually invalidate kernel caches for the inode at `path`, dispatching on
// the mount's channel type (FUSE / NFS on POSIX, PrjFS on Windows).
folly::SemiFuture<Unit>
EdenServiceHandler::semifuture_invalidateKernelInodeCache(
    [[maybe_unused]] std::unique_ptr<std::string> mountPoint,
    [[maybe_unused]] std::unique_ptr<std::string> path) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2, *mountPoint, *path);
  auto mountHandle = lookupMount(mountPoint);
#ifndef _WIN32
  InodePtr inode = inodeFromUserPath(
      mountHandle.getEdenMount(), *path, helper->getFetchContext());
  if (auto* fuseChannel = mountHandle.getEdenMount().getFuseChannel()) {
    // Invalidate cached pages and attributes
    fuseChannel->invalidateInode(inode->getNodeId(), 0, 0);
    const auto treePtr = inode.asTreePtrOrNull();
    // Invalidate all parent/child relationships potentially cached.
    if (treePtr != nullptr) {
      const auto& dir = treePtr->getContents().rlock();
      for (const auto& entry : dir->entries) {
        fuseChannel->invalidateEntry(inode->getNodeId(), entry.first);
      }
    }
    // Wait for all of the invalidations to complete
    return fuseChannel->completeInvalidations().semi();
  }
  if (auto* nfsChannel = mountHandle.getEdenMount().getNfsdChannel()) {
    inode->forceMetadataUpdate();
    auto& fetchContext = helper->getFetchContext();
    auto rawInodePtr = inode.get();
    // NFS invalidation needs the inode's stat data, so the work continues
    // asynchronously after stat() resolves.
    return wrapImmediateFuture(
               std::move(helper),
               rawInodePtr->stat(fetchContext)
                   .thenValue(
                       [nfsChannel,
                        canonicalMountPoint =
                            absolutePathFromThrift(*mountPoint),
                        inode = std::move(inode),
                        path = std::move(path),
                        mountHandle,
                        fetchContext =
                            fetchContext.copy()](struct stat&& stat) mutable
                       -> ImmediateFuture<folly::Unit> {
                     nfsChannel->invalidate(
                         canonicalMountPoint + RelativePath{*path},
                         stat.st_mode);
                     const auto treePtr = inode.asTreePtrOrNull();
                     // Invalidate all children as well. There isn't really
                     // a way to invalidate the entry cache for nfs so we
                     // settle for invalidating the children themselves.
                     if (treePtr != nullptr) {
                       const auto& dir = treePtr->getContents().rlock();
                       std::vector<ImmediateFuture<folly::Unit>>
                           childInvalidations{};
                       for (const auto& entry : dir->entries) {
                         auto childPath = RelativePath{*path} + entry.first;
                         auto childInode = inodeFromUserPath(
                             mountHandle.getEdenMount(),
                             childPath.asString(),
                             fetchContext);
                         childInode->forceMetadataUpdate();
                         childInvalidations.push_back(
                             childInode->stat(fetchContext)
                                 .thenValue(
                                     [nfsChannel,
                                      canonicalMountPoint,
                                      childPath](struct stat&& stat) {
                                       nfsChannel->invalidate(
                                           canonicalMountPoint + childPath,
                                           stat.st_mode);
                                       return folly::Unit();
                                     }));
                       }
                       return collectAll(std::move(childInvalidations))
                           .unit();
                     }
                     return folly::unit;
                   })
                   // Propagate the original result (success or error) once
                   // all invalidations have been flushed.
                   .thenTry([nfsChannel](folly::Try<folly::Unit> res) {
                     return nfsChannel->completeInvalidations().thenTry(
                         [res = std::move(res)](auto&&) mutable {
                           return res;
                         });
                   }))
        .semi();
  }
#else
  auto toInvalidate = relpathFromUserPath(*path);
  XLOG(WARN) << "Manually invalidating \"" << toInvalidate
             << "\". This is unsupported and may lead to strange behavior.";
  if (auto* prjfsChannel = mountHandle.getEdenMount().getPrjfsChannel()) {
    return makeImmediateFutureWith(
               [&] { return prjfsChannel->removeCachedFile(toInvalidate); })
        .semi();
  }
#endif // !_WIN32
  // No recognized channel on this mount: report a bug rather than silently
  // succeeding.
  return EDEN_BUG_FUTURE(folly::Unit) << "Unsupported Channel type.";
}
// Turn tracing on. The eden:: qualification is required: an unqualified call
// would resolve to this member function and recurse.
void EdenServiceHandler::enableTracing() {
  XLOG(INFO) << "Enabling tracing";
  eden::enableTracing();
}
// Turn tracing off. The eden:: qualification is required: an unqualified
// call would resolve to this member function and recurse.
void EdenServiceHandler::disableTracing() {
  XLOG(INFO) << "Disabling tracing";
  eden::disableTracing();
}
void EdenServiceHandler::getTracePoints(std::vector<TracePoint>& result) {
auto compactTracePoints = getAllTracepoints();
for (auto& point : compactTracePoints) {
TracePoint tp;
tp.timestamp_ref() = point.timestamp.count();
tp.traceId_ref() = point.traceId;
tp.blockId_ref() = point.blockId;
tp.parentBlockId_ref() = point.parentBlockId;
if (point.name) {
tp.name_ref() = std::string(point.name);
}
if (point.start) {
tp.event_ref() = TracePointEvent::START;
} else if (point.stop) {
tp.event_ref() = TracePointEvent::STOP;
}
result.emplace_back(std::move(tp));
}
}
// Return the Thrift-request trace events retained in the server's activity
// buffer. Throws ENOTSUP if the buffer was never configured.
void EdenServiceHandler::getRetroactiveThriftRequestEvents(
    GetRetroactiveThriftRequestEventsResult& result) {
  if (!thriftRequestActivityBuffer_.has_value()) {
    throw newEdenError(
        ENOTSUP,
        EdenErrorType::POSIX_ERROR,
        "ActivityBuffer not initialized in thrift server.");
  }
  std::vector<ThriftRequestEvent> thriftEvents;
  auto bufferEvents = thriftRequestActivityBuffer_->getAllEvents();
  thriftEvents.reserve(bufferEvents.size());
  for (auto const& event : bufferEvents) {
    ThriftRequestEvent thriftEvent;
    convertThriftRequestTraceEventToThriftRequestEvent(event, thriftEvent);
    thriftEvents.emplace_back(std::move(thriftEvent));
  }
  result.events() = std::move(thriftEvents);
}
// Return the hg (Sapling) import trace events retained in the mount's
// SaplingBackingStore activity buffer.
void EdenServiceHandler::getRetroactiveHgEvents(
    GetRetroactiveHgEventsResult& result,
    std::unique_ptr<GetRetroactiveHgEventsParams> params) {
  auto mountHandle = lookupMount(params->mountPoint());
  auto backingStore = mountHandle.getObjectStore().getBackingStore();
  // NOTE(review): castToSaplingBackingStore presumably raises when the mount
  // is not Sapling-backed — confirm against its definition.
  std::shared_ptr<SaplingBackingStore> saplingBackingStore =
      castToSaplingBackingStore(
          backingStore, mountHandle.getEdenMount().getPath());
  std::vector<HgEvent> thriftEvents;
  auto bufferEvents = saplingBackingStore->getActivityBuffer().getAllEvents();
  thriftEvents.reserve(bufferEvents.size());
  for (auto const& event : bufferEvents) {
    HgEvent thriftEvent{};
    convertHgImportTraceEventToHgEvent(
        event, *server_->getServerState()->getProcessInfoCache(), thriftEvent);
    thriftEvents.emplace_back(std::move(thriftEvent));
  }
  result.events() = std::move(thriftEvents);
}
// Return the inode trace events retained in the mount's activity buffer.
// Throws ENOTSUP when the mount was started without one.
void EdenServiceHandler::getRetroactiveInodeEvents(
    GetRetroactiveInodeEventsResult& result,
    std::unique_ptr<GetRetroactiveInodeEventsParams> params) {
  auto mountHandle = lookupMount(params->mountPoint());
  if (!mountHandle.getEdenMount().getActivityBuffer().has_value()) {
    throw newEdenError(
        ENOTSUP,
        EdenErrorType::POSIX_ERROR,
        "ActivityBuffer not initialized in EdenFS mount.");
  }
  std::vector<InodeEvent> thriftEvents;
  auto bufferEvents =
      mountHandle.getEdenMount().getActivityBuffer()->getAllEvents();
  thriftEvents.reserve(bufferEvents.size());
  for (auto const& event : bufferEvents) {
    InodeEvent thriftEvent{};
    ConvertInodeTraceEventToThriftInodeEvent(event, thriftEvent);
    // The path is not part of the generic conversion; attach it explicitly.
    thriftEvent.path() = event.getPath();
    thriftEvents.emplace_back(std::move(thriftEvent));
  }
  result.events() = std::move(thriftEvents);
}
namespace {
// Translate the optional errorType/errorMessage fields of a fault-injection
// request into the exception to inject. Returns nullopt when neither field
// is set (the fault should not produce an error). Unknown error types are
// rejected with a GENERIC_ERROR.
std::optional<folly::exception_wrapper> getFaultError(
    apache::thrift::optional_field_ref<std::string&> errorType,
    apache::thrift::optional_field_ref<std::string&> errorMessage) {
  if (!errorType.has_value() && !errorMessage.has_value()) {
    return std::nullopt;
  }
  auto createException =
      [](StringPiece type, const std::string& msg) -> folly::exception_wrapper {
    if (type == "runtime_error") {
      return std::runtime_error(msg);
    } else if (type.startsWith("errno:")) {
      // "errno:<N>" maps to std::system_error with errno value N.
      auto errnum = folly::to<int>(type.subpiece(6));
      return std::system_error(errnum, std::generic_category(), msg);
    } else if (type == "quiet") {
      return QuietFault(msg);
    }
    // If we want to support other error types in the future they should
    // be added here.
    throw newEdenError(
        EdenErrorType::GENERIC_ERROR, "unknown error type ", type);
  };
  // Each field falls back to a default when only the other was supplied.
  return createException(
      errorType.value_or("runtime_error"),
      errorMessage.value_or("injected error"));
}
} // namespace
// Install a fault in the server's FaultInjector. The fault kind is chosen by
// precedence: block > kill > (error and/or delay) > noop.
void EdenServiceHandler::injectFault(unique_ptr<FaultDefinition> fault) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto& injector = server_->getServerState()->getFaultInjector();
  if (*fault->block_ref()) {
    injector.injectBlock(
        *fault->keyClass_ref(),
        *fault->keyValueRegex_ref(),
        *fault->count_ref());
    return;
  }
  if (*fault->kill()) {
    injector.injectKill(
        *fault->keyClass(), *fault->keyValueRegex(), *fault->count());
    return;
  }
  auto error = getFaultError(fault->errorType_ref(), fault->errorMessage_ref());
  std::chrono::milliseconds delay(*fault->delayMilliseconds_ref());
  if (error.has_value()) {
    if (delay.count() > 0) {
      // Delay first, then raise the injected error.
      injector.injectDelayedError(
          *fault->keyClass_ref(),
          *fault->keyValueRegex_ref(),
          delay,
          error.value(),
          *fault->count_ref());
    } else {
      injector.injectError(
          *fault->keyClass_ref(),
          *fault->keyValueRegex_ref(),
          error.value(),
          *fault->count_ref());
    }
  } else {
    if (delay.count() > 0) {
      injector.injectDelay(
          *fault->keyClass_ref(),
          *fault->keyValueRegex_ref(),
          delay,
          *fault->count_ref());
    } else {
      // Neither error nor delay requested: install a no-op fault (useful for
      // counting matches).
      injector.injectNoop(
          *fault->keyClass_ref(),
          *fault->keyValueRegex_ref(),
          *fault->count_ref());
    }
  }
}
// Remove a previously injected fault; returns whether one was removed.
bool EdenServiceHandler::removeFault(unique_ptr<RemoveFaultArg> fault) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto& injector = server_->getServerState()->getFaultInjector();
  const auto& keyClass = *fault->keyClass_ref();
  const auto& keyValueRegex = *fault->keyValueRegex_ref();
  return injector.removeFault(keyClass, keyValueRegex);
}
// Unblock requests currently parked on a "block" fault, optionally failing
// them with an injected error. With no key class, all blocked faults are
// released. Returns the number of unblocked requests.
int64_t EdenServiceHandler::unblockFault(unique_ptr<UnblockFaultArg> info) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto& injector = server_->getServerState()->getFaultInjector();
  auto error = getFaultError(info->errorType_ref(), info->errorMessage_ref());
  if (!info->keyClass_ref().has_value()) {
    // A key-value regex is meaningless without a key class to scope it.
    if (info->keyValueRegex_ref().has_value()) {
      throw newEdenError(
          EINVAL,
          EdenErrorType::ARGUMENT_ERROR,
          "cannot specify a key value regex without a key class");
    }
    if (error.has_value()) {
      return injector.unblockAllWithError(error.value());
    } else {
      return injector.unblockAll();
    }
  }
  const auto& keyClass = info->keyClass_ref().value();
  // Default regex matches every key value in the class.
  std::string keyValueRegex = info->keyValueRegex_ref().value_or(".*");
  if (error.has_value()) {
    return injector.unblockWithError(keyClass, keyValueRegex, error.value());
  } else {
    return injector.unblock(keyClass, keyValueRegex);
  }
}
// List the key values currently blocked under the requested key class.
void EdenServiceHandler::getBlockedFaults(
    GetBlockedFaultsResponse& out,
    std::unique_ptr<GetBlockedFaultsRequest> request) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG2);
  auto& injector = server_->getServerState()->getFaultInjector();
  out.keyValues() = injector.getBlockedFaults(*request->keyclass_ref());
}
// Ask the server to re-read its configuration.
void EdenServiceHandler::reloadConfig() {
  auto helper = INSTRUMENT_THRIFT_CALL(INFO);
  server_->reloadConfig();
}
// Populate the shared DaemonInfo payload: pid, original command line,
// fb303-style status, and uptime in (fractional) seconds.
void EdenServiceHandler::fillDaemonInfo(DaemonInfo& info) {
  fb303::cpp2::fb303_status status = [&] {
    switch (server_->getStatus()) {
      case EdenServer::RunState::STARTING:
        return facebook::fb303::cpp2::fb303_status::STARTING;
      case EdenServer::RunState::RUNNING:
        return facebook::fb303::cpp2::fb303_status::ALIVE;
      case EdenServer::RunState::SHUTTING_DOWN:
        return facebook::fb303::cpp2::fb303_status::STOPPING;
    }
    // Unreachable for valid enum values; guards against corruption or a new
    // RunState being added without updating this switch.
    EDEN_BUG() << "unexpected EdenServer status "
               << enumValue(server_->getStatus());
  }();
  info.pid_ref() = ProcessId::current().get();
  info.commandLine_ref() = originalCommandLine_;
  info.status_ref() = status;
  auto now = std::chrono::steady_clock::now();
  // float-seconds duration gives sub-second uptime resolution.
  std::chrono::duration<float> uptime = now - server_->getStartTime();
  info.uptime_ref() = uptime.count();
}
// Thrift entry point: report daemon pid/status/uptime.
void EdenServiceHandler::getDaemonInfo(DaemonInfo& result) {
  auto helper = INSTRUMENT_THRIFT_CALL(DBG4);
  fillDaemonInfo(result);
}
// Return the current daemon info plus, while the daemon is still starting, a
// stream of startup-status updates. Once started the stream is empty.
apache::thrift::ResponseAndServerStream<DaemonInfo, std::string>
EdenServiceHandler::streamStartStatus() {
  DaemonInfo result;
  fillDaemonInfo(result);
  if (result.status() != facebook::fb303::cpp2::fb303_status::STARTING) {
    // Already past startup: nothing to stream.
    return {
        result,
        apache::thrift::ServerStream<EdenStartStatusUpdate>::createEmpty()};
  }
  try {
    auto serverStream = server_->createStartupStatusThriftStream();
    return {std::move(result), std::move(serverStream)};
  } catch (EdenError& error) {
    if (error.errorType() == EdenErrorType::POSIX_ERROR &&
        error.errorCode() == EALREADY) {
      // We raced with eden start completing. Let's re-collect the status and
      // return as if EdenFS has completed. The EdenFS status should be set
      // before the startup logger completes, so at this point the status
      // should be something other than starting. Client should not necessarily
      // rely on this though.
      fillDaemonInfo(result);
      return {
          result,
          apache::thrift::ServerStream<EdenStartStatusUpdate>::createEmpty()};
    }
    throw;
  }
}
// Report whether the privileged-helper process connection is alive.
void EdenServiceHandler::checkPrivHelper(PrivHelperInfo& result) {
  const auto privhelper = server_->getServerState()->getPrivHelper();
  result.connected_ref() = privhelper->checkConnection();
}
// Report the daemon's own process id.
int64_t EdenServiceHandler::getPid() {
  const auto pid = ProcessId::current().get();
  return pid;
}
// Report progress of an in-flight checkout on the given mount, or noProgress
// when no checkout is running.
void EdenServiceHandler::getCheckoutProgressInfo(
    CheckoutProgressInfoResponse& ret,
    unique_ptr<CheckoutProgressInfoRequest> params) {
  auto mountPath = absolutePathFromThrift(*params->mountPoint());
  auto mountHandle = server_->getMount(mountPath);
  auto checkoutProgress = mountHandle.getEdenMount().getCheckoutProgress();
  if (!checkoutProgress.has_value()) {
    ret.set_noProgress();
    return;
  }
  CheckoutProgressInfo progressInfo;
  progressInfo.updatedInodes_ref() = std::move(checkoutProgress).value();
  ret.checkoutProgressInfo_ref() = std::move(progressInfo);
}
// Begin a graceful daemon shutdown, logging the caller-supplied reason.
void EdenServiceHandler::initiateShutdown(std::unique_ptr<std::string> reason) {
  auto helper = INSTRUMENT_THRIFT_CALL(INFO);
  XLOG(INFO) << "initiateShutdown requested, reason: " << *reason;
  server_->stop();
}
// Return the effective EdenFS configuration, optionally reloading it from
// disk first when the request asks for it.
void EdenServiceHandler::getConfig(
    EdenConfigData& result,
    unique_ptr<GetConfigParams> params) {
  auto serverState = server_->getServerState();
  const auto config = serverState->getEdenConfig(*params->reload_ref());
  result = config->toThriftConfigData();
}
// Look up the pid of the Thrift client on the other end of the current
// request's socket, register it with the ProcessInfoCache, and return it.
// Returns nullopt when credentials are unavailable (async context, Windows).
OptionalProcessId EdenServiceHandler::getAndRegisterClientPid() {
#ifndef _WIN32
  // The Cpp2RequestContext for a thrift request is kept in a thread local
  // on the thread which the request originates. This means this must be run
  // on the Thread in which a thrift request originates.
  auto connectionContext = getRequestContext();
  // connectionContext will be a null pointer in an async method, so we need
  // to check for this.
  if (connectionContext) {
    if (auto peerCreds = connectionContext->getConnectionContext()
                             ->getPeerEffectiveCreds()) {
      pid_t clientPid = peerCreds->pid;
      server_->getServerState()->getProcessInfoCache()->add(clientPid);
      return ProcessId(clientPid);
    }
  }
  return std::nullopt;
#else
  // Unix domain sockets on Windows don't support peer credentials.
  return std::nullopt;
#endif
}
} // namespace facebook::eden
```
|
```xml
import * as fs from 'fs-extra';
import * as path from 'path';
import '../../common/extensions';
import { ICommandManager } from '../../common/application/types';
import { IDisposable, Resource } from '../../common/types';
import { debounceSync } from '../../common/utils/decorators';
import { EXTENSION_ROOT_DIR } from '../../constants';
import { IServiceContainer } from '../../ioc/types';
import { PythonEnvironment } from '../../pythonEnvironments/info';
import { captureTelemetry } from '../../telemetry';
import { EventName } from '../../telemetry/constants';
import { Commands } from '../commands';
import { JediLanguageClientMiddleware } from './languageClientMiddleware';
import { ILanguageServerAnalysisOptions, ILanguageServerManager, ILanguageServerProxy } from '../types';
import { traceDecoratorError, traceDecoratorVerbose, traceVerbose } from '../../logging';
export class JediLanguageServerManager implements ILanguageServerManager {
private resource!: Resource;
private interpreter: PythonEnvironment | undefined;
private middleware: JediLanguageClientMiddleware | undefined;
private disposables: IDisposable[] = [];
private static commandDispose: IDisposable;
private connected = false;
private lsVersion: string | undefined;
constructor(
private readonly serviceContainer: IServiceContainer,
private readonly analysisOptions: ILanguageServerAnalysisOptions,
private readonly languageServerProxy: ILanguageServerProxy,
commandManager: ICommandManager,
) {
if (JediLanguageServerManager.commandDispose) {
JediLanguageServerManager.commandDispose.dispose();
}
JediLanguageServerManager.commandDispose = commandManager.registerCommand(Commands.RestartLS, () => {
this.restartLanguageServer().ignoreErrors();
});
}
private static versionTelemetryProps(instance: JediLanguageServerManager) {
return {
lsVersion: instance.lsVersion,
};
}
public dispose(): void {
this.stopLanguageServer().ignoreErrors();
JediLanguageServerManager.commandDispose.dispose();
this.disposables.forEach((d) => d.dispose());
}
@traceDecoratorError('Failed to start language server')
public async start(resource: Resource, interpreter: PythonEnvironment | undefined): Promise<void> {
this.resource = resource;
this.interpreter = interpreter;
this.analysisOptions.onDidChange(this.restartLanguageServerDebounced, this, this.disposables);
try {
// Version is actually hardcoded in our requirements.txt.
const requirementsTxt = await fs.readFile(
path.join(EXTENSION_ROOT_DIR, 'python_files', 'jedilsp_requirements', 'requirements.txt'),
'utf-8',
);
// Search using a regex in the text
const match = /jedi-language-server==([0-9\.]*)/.exec(requirementsTxt);
if (match && match.length === 2) {
[, this.lsVersion] = match;
}
} catch (ex) {
// Getting version here is best effort and does not affect how LS works and
// failing to get version should not stop LS from working.
traceVerbose('Failed to get jedi-language-server version: ', ex);
}
await this.analysisOptions.initialize(resource, interpreter);
await this.startLanguageServer();
}
public connect(): void {
if (!this.connected) {
this.connected = true;
this.middleware?.connect();
}
}
public disconnect(): void {
if (this.connected) {
this.connected = false;
this.middleware?.disconnect();
}
}
@debounceSync(1000)
protected restartLanguageServerDebounced(): void {
this.restartLanguageServer().ignoreErrors();
}
    @traceDecoratorError('Failed to restart language server')
    @traceDecoratorVerbose('Restarting language server')
    protected async restartLanguageServer(): Promise<void> {
        // Full stop/start cycle; the resource/interpreter recorded by start() are reused.
        await this.stopLanguageServer();
        await this.startLanguageServer();
    }
    @captureTelemetry(
        EventName.JEDI_LANGUAGE_SERVER_STARTUP,
        undefined,
        true,
        undefined,
        JediLanguageServerManager.versionTelemetryProps,
    )
    @traceDecoratorVerbose('Starting language server')
    protected async startLanguageServer(): Promise<void> {
        const options = await this.analysisOptions.getAnalysisOptions();
        // Fresh middleware per (re)start so it reflects the current LS version.
        this.middleware = new JediLanguageClientMiddleware(this.serviceContainer, this.lsVersion);
        options.middleware = this.middleware;
        // Make sure the middleware is connected if we restart and we were already connected.
        if (this.connected) {
            this.middleware.connect();
        }
        // Then use this middleware to start a new language client.
        await this.languageServerProxy.start(this.resource, this.interpreter, options);
    }
    @traceDecoratorVerbose('Stopping language server')
    protected async stopLanguageServer(): Promise<void> {
        // Proxy is constructor-injected, so this guard is defensive only.
        if (this.languageServerProxy) {
            await this.languageServerProxy.stop();
        }
    }
}
```
|
```objective-c
/*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RunLoopTimer_h
#define RunLoopTimer_h
#include <wtf/SchedulePair.h>
#include <wtf/RetainPtr.h>
namespace WTF {
// Time intervals are all in seconds.
// Non-template base for run-loop-driven timers; subclasses implement fired().
class WTF_EXPORT_PRIVATE RunLoopTimerBase {
    WTF_MAKE_NONCOPYABLE(RunLoopTimerBase);
public:
    RunLoopTimerBase() { }
    WTF_EXPORT_PRIVATE virtual ~RunLoopTimerBase();
    // Attach the timer to the run loop / mode described by the pair(s).
    WTF_EXPORT_PRIVATE void schedule(const SchedulePair*);
    WTF_EXPORT_PRIVATE void schedule(const SchedulePairHashSet&);
    // Arm the timer: first fire after nextFireInterval seconds, then every
    // repeatInterval seconds (intervals are in seconds, per the file comment).
    WTF_EXPORT_PRIVATE void start(double nextFireInterval, double repeatInterval);
    void startRepeating(double repeatInterval) { start(repeatInterval, repeatInterval); }
    // A repeat interval of 0 makes the timer fire once.
    void startOneShot(double interval) { start(interval, 0); }
    WTF_EXPORT_PRIVATE void stop();
    bool isActive() const;
    // Invoked when the timer fires; must be provided by subclasses.
    virtual void fired() = 0;
private:
#if USE(CF)
    // Underlying CoreFoundation timer; only present in CF builds.
    RetainPtr<CFRunLoopTimerRef> m_timer;
#endif
};
// FIXME: This doesn't have to be a class template.
// Convenience timer that dispatches fired() to a bound member function.
// The timer does NOT own the target object: the caller must ensure the
// object outlives the timer, or stop the timer first.
template <typename TimerFiredClass> class RunLoopTimer : public RunLoopTimerBase {
public:
    typedef void (TimerFiredClass::*TimerFiredFunction)();

    RunLoopTimer(TimerFiredClass& o, TimerFiredFunction f)
        : m_object(&o), m_function(f) { }

    // Marked 'override' so a signature drift against the base class's pure
    // virtual is caught at compile time.
    void fired() override { (m_object->*m_function)(); }

private:
    TimerFiredClass* m_object;
    TimerFiredFunction m_function;
};
} // namespace WTF
using WTF::RunLoopTimer;
#endif
```
|
McKenzie College, also called McKenzie's College, was a private college located on the plantation of Reverend John W. P. McKenzie, a Methodist minister, in Clarksville, Texas, United States. Starting in 1841, the school grew from 16 students educated in a log cabin to over 300 students and 9 faculty members occupying four large buildings in 1854. It was the largest institution of higher education in Texas during the 1850s and 1860s. Before the American Civil War began, it trained almost all of the new Methodist ministers in the state. Unable to retain financial support after the War, Rev. McKenzie closed the school in June, 1868. He served for a year as the first president of another Methodist school, Marvin College in Waxahachie, Texas, then completely retired from church-related work. He died in Clarksville on June 20, 1881.
History
John Witherspoon Pettigrew McKenzie (most often abbreviated to John W. P. McKenzie) was a native of North Carolina who was admitted to the Tennessee Conference of the Methodist Episcopal Church in 1836. He was soon transferred into the Arkansas Conference, which had been established on the Texas frontier. This conference assigned John and his wife Matilda as missionaries to the people of the Choctaw Nation, who had just been removed from their homes in various Southeastern states to the Indian Territory. In 1839, the conference appointed John and Matilda to the Clarksville Circuit inside the Republic of Texas. In 1836, Matilda became a teacher in the first school for girls opened in the Choctaw Nation of Indian Territory (near Shawneetown in present-day McCurtain County, Oklahoma).
Suffering from poor health, Reverend John W. P. McKenzie retired from missionary work in 1841 and moved to a plantation southwest of Clarksville, Texas. He began offering classes for local boys in his home, which he named Itinerant's Retreat. As the school grew, a separate log cabin was built. Most of the first students were receiving primary and secondary education, although the school awarded its first bachelor's degree in 1844. By 1845, the school was divided into three departments: preparatory, collegiate, and female. Four buildings were built in 1853 to accommodate the school's rapid growth.
McKenzie and most of the school's other instructors were Methodists, but they did not pressure students who adhered to other denominations to become Methodists. For example, B. F. Fuller, a Baptist, admitted later that he had been nervous about enrolling in McKenzie College because of the difficulty he anticipated having there on account of doctrinal differences. The experience allayed his fear. He not only continued his education there, but a few years later, still a Baptist, became a teacher at McKenzie.
Always considered a Methodist institution, McKenzie's school was actually controlled by the Methodist Conference for only one year. After 1860, the college formed a military department that organized all male students into companies, in which they performed daily drills. In June 1861, McKenzie cancelled the final examinations and the graduation ceremonies that normally ended the school year.
By 1863, the enrollment had dropped to 33, and rose to an average of 74 for the period 1864–67. Unable to keep the institution financially independent, McKenzie and his son-in-law, Smith Ragsdale, closed it on June 25, 1868.
Giving up on restoring his school, McKenzie accepted an appointment as the first president of the Church-owned Marvin College in Waxahachie, Texas, but served for only one year, 1868–9. (Marvin College closed in 1878.) McKenzie returned to Clarksville and retired. He died there early in the morning of Monday, June 20, 1881, just after his school's regular morning prayers.
Student life
The Handbook of Texas states that about half of the students came from the Red River area, 40 percent came from other parts of Texas and 10 percent came from Arkansas and Louisiana. The school year was 10 months long, and the cost for 10 months tuition, board, room and laundry was $180.
Initially, the student body was all male. It is unclear when the first females were admitted, but Jordan notes that during the prosperous 1845-1860 period the school had two competing newspapers: The Bee, which was run by young women, and The Owl, run by young men. A third paper, The School Monthly, began publication somewhat later.
The college was home to two literary societies, the Philologian Society and Dialectic Society.
Female students were enrolled in the Female Department, which was supervised by the founder's wife. Their academic curriculum was substantially the same as that required for the young men, but upon graduation, they were awarded diplomas, rather than degrees.
Facilities
McKenzie College first opened in the fall of 1841, with 16 to 18 students at the high school or elementary levels. Classes met in a single log cabin. By 1853–4, the school had expanded into what was described by one student as "... four large, multi-story buildings." The McKenzies' home also served as a dormitory for young women. Two of the new buildings, named Grant and Duke to honor their donors, became dormitories for male students. Two floors of the Main Building contained the chapel, offices, recitation rooms, laboratory, and library, as well as a book store. The third floor was used by the literary societies for their meetings, debates, and oratory.
Notable people
Martha E. Whitten (1842–1917), author
Notes
References
Defunct private universities and colleges in Texas
Educational institutions established in 1841
Southwestern University
Education in Red River County, Texas
1841 establishments in the Republic of Texas
1868 disestablishments in Texas
|
The 1950 Michigan State Normal Hurons football team represented Michigan State Normal College (later renamed Eastern Michigan University) in the Interstate Intercollegiate Athletic Conference (IIAC) during the 1950 college football season. In their second season under head coach Harry Ockerman, the Hurons compiled a 3–6 record (0–4 against IIAC opponents) and were outscored by their opponents, 194 to 123. Dr. James R. Wichterman was the team captain. Harry Mail was selected as a first-team player on the All-IIAC team.
Schedule
See also
1950 in Michigan
References
Michigan State Normal
Eastern Michigan Eagles football seasons
Michigan State Normal Hurons football
|
```smalltalk
using System.Collections.Immutable;
using System.Diagnostics;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
using Microsoft.CodeAnalysis.Diagnostics;
using Roslynator.CSharp.Syntax;
namespace Roslynator.CSharp.Analysis;
[DiagnosticAnalyzer(LanguageNames.CSharp)]
public sealed class RemoveRedundantSealedModifierAnalyzer : BaseDiagnosticAnalyzer
{
    // Backing field for SupportedDiagnostics; initialized lazily and at most
    // once via InterlockedInitialize.
    private static ImmutableArray<DiagnosticDescriptor> _supportedDiagnostics;
    public override ImmutableArray<DiagnosticDescriptor> SupportedDiagnostics
    {
        get
        {
            if (_supportedDiagnostics.IsDefault)
                Immutable.InterlockedInitialize(ref _supportedDiagnostics, DiagnosticRules.RemoveRedundantSealedModifier);
            return _supportedDiagnostics;
        }
    }
    public override void Initialize(AnalysisContext context)
    {
        base.Initialize(context);
        // Methods and properties are analyzed; indexers arrive through the
        // Property path (see the IndexerDeclaration assert below).
        context.RegisterSymbolAction(f => AnalyzeMethod(f), SymbolKind.Method);
        context.RegisterSymbolAction(f => AnalyzeProperty(f), SymbolKind.Property);
    }
    private static void AnalyzeMethod(SymbolAnalysisContext context)
    {
        ISymbol symbol = context.Symbol;
        // Only ordinary methods are of interest (skips accessors, operators, etc.).
        if (((IMethodSymbol)symbol).MethodKind != MethodKind.Ordinary)
            return;
        Analyze(context, symbol);
    }
    private static void AnalyzeProperty(SymbolAnalysisContext context)
    {
        Analyze(context, context.Symbol);
    }
    // Reports the 'sealed' keyword as redundant when a user-declared member is
    // marked sealed inside a type that is itself sealed.
    private static void Analyze(SymbolAnalysisContext context, ISymbol symbol)
    {
        if (symbol.IsImplicitlyDeclared)
            return;
        if (!symbol.IsSealed)
            return;
        // 'sealed' on a member is redundant only when the containing type is sealed.
        if (symbol.ContainingType?.IsSealed != true)
            return;
        Debug.Assert(symbol.ContainingType.TypeKind == TypeKind.Class, symbol.ContainingType.TypeKind.ToString());
        SyntaxNode node = symbol.GetSyntax(context.CancellationToken);
        SyntaxDebug.Assert(node.IsKind(SyntaxKind.MethodDeclaration, SyntaxKind.PropertyDeclaration, SyntaxKind.IndexerDeclaration), node);
        ModifierListInfo info = SyntaxInfo.ModifierListInfo(node);
        Debug.Assert(info.IsSealed, info.Modifiers.ToString());
        if (!info.IsSealed)
            return;
        // Report the diagnostic directly on the 'sealed' token so the code fix
        // can remove exactly that modifier.
        SyntaxToken sealedKeyword = info.Modifiers.Find(SyntaxKind.SealedKeyword);
        DiagnosticHelpers.ReportDiagnostic(context, DiagnosticRules.RemoveRedundantSealedModifier, sealedKeyword);
    }
}
```
|
```python
import cv2
import numpy as np
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
def region_of_interest(img):
    """
    Applies an image mask. From Udacity Self-Driving Car Nanodegree project 1

    Keeps only the pixels inside a hard-coded six-vertex polygon (coordinates
    suggest a roughly 640x480 frame -- TODO confirm against the camera setup);
    everything outside the polygon is zeroed.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)
    # Polygon vertices as (x, y) pixel coordinates; fillPoly expects an array
    # of polygons, hence the extra nesting.
    vert=np.array([[(0,0), (0,340),(260,250), (530,250),(640,290), (640,0)]], dtype=np.int32)
    # White fill for all three channels (assumes a 3-channel image).
    ignore_mask_color = (255,) * 3
    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vert, ignore_mask_color)
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
    """Preprocess a camera frame for the model.

    Equalizes the histogram of each color channel, masks the region of
    interest, crops to the top 340 rows, and resizes the result.

    Args:
        img: 3-channel uint8 image (cv2.equalizeHist requires 8-bit input).
            Assumed to be at least 340 rows tall -- TODO confirm callers
            always pass full-size frames.
        img_width: output width in pixels.
        img_height: output height in pixels.

    Returns:
        The transformed image of shape (img_height, img_width, 3). The
        caller's array is left unmodified.
    """
    # Work on a copy: the previous implementation wrote the equalized
    # channels back into the caller's array, silently mutating the input.
    img = img.copy()
    for channel in range(3):
        img[:, :, channel] = cv2.equalizeHist(img[:, :, channel])
    img = region_of_interest(img)
    # Use only the top part of the image (rows 0..339).
    img = img[0:340, :, :]
    return cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC)
```
|
Arno Breker (19 July 1900 – 13 February 1991) was a German architect and sculptor who is best known for his public works in Nazi Germany, where they were endorsed by the authorities as the antithesis of degenerate art. He was made official state sculptor, and exempted from military service. One of his better known statues is Die Partei, representing the spirit of the Nazi Party that flanked one side of the carriage entrance to Albert Speer's new Reich Chancellery.
After the fall of Nazi Germany in 1945 Breker continued to thrive professionally as a sculptor in the new West Germany.
Life
Breker was born in Elberfeld, in the west of Germany, the son of stonemason Arnold Breker. He began to study architecture, along with stone-carving and anatomy. At age 20 he entered the Düsseldorf Academy of Arts where he concentrated on sculpture, studying under Hubert Netzer and Wilhelm Kreis. He first visited Paris in 1924, shortly before finishing his studies. There he met with Jean Cocteau, Jean Renoir, Pablo Picasso, Daniel-Henry Kahnweiler, and Alfred Flechtheim. In 1927 he moved to Paris, which he thereafter considered to be his home, in the same year he had an exhibition with Alf Bayrle. Breker was quickly accepted by the art dealer Alfred Flechtheim. He also established close relationships with important figures in the art world, including Charles Despiau, Isamu Noguchi, Maurice de Vlaminck and André Dunoyer de Segonzac, all of whom he later portrayed. He travelled to North Africa, producing lithographs which he published under the title "Tunisian Journey". He also visited Aristide Maillol, who was later to describe Breker as "Germany's Michelangelo".
In 1932, he was awarded a prize by the Prussian Ministry of Culture, which allowed him to stay in Rome for a year. In 1934 he returned to Germany on the advice of Max Liebermann. At this time Alfred Rosenberg, editor of the Nazi newspaper Völkischer Beobachter, actually denounced some of Breker's work as degenerate art. However, Breker was supported by many Nazi leaders, especially Adolf Hitler. Even Rosenberg later hailed his sculptures as expressions of the "mighty momentum and will power" ("Wucht und Willenhaftigkeit") of Nazi Germany. He took commissions from the Nazis from 1933 through 1942, for example participating in a show of his work in occupied Paris in 1942, where he met Jean Cocteau, who appreciated his work. He maintained personal relationships with Albert Speer and with Hitler. In 1936 he won the commission for two sculptures representing athletic prowess, to be entered in the 1936 Olympic games arts competition in Berlin, one representing a Decathlete ("Zehnkämpfer"), which won the silver medal for statues, and the other The Victress ("Die Siegerin"). In 1937 he married Demetra Messala (Δήμητρα Μεσσάλα), a Greek model. The same year, Breker joined the Nazi Party and was made "official state sculptor" by Hitler, given a large property and provided a studio with forty-three assistants. Breker was on a list of 378 "Gottbegnadeten" (divinely gifted) artists exempted from wartime military duty by Hitler and chief propagandist Joseph Goebbels. His twin sculptures The Party and The Army held a prominent position at the entrance to Albert Speer's new Reich Chancellery, as well as Josef Thorak's "Striding Horses" (1939), which until 1945 flanked the entrance stairs on the garden front of Adolf Hitler's Reich Chancellery in Berlin.
The neoclassical nature of his work, with titles like Comradeship, Torchbearer, and Sacrifice, typified Nazi ideals, and suited the characteristics of Nazi architecture. On closer inspection, though, the proportions of his figures, the highly colouristic treatment of his surfaces (the strong contrasts between dark and light accents), and the melodramatic tension of their musculatures perhaps invites comparison with the Italian Mannerist sculptors of the 16th century. This Mannerist tendency to Breker's neoclassicism may suggest closer affinities to concurrent expressionist tendencies in German Modernism than is acknowledged.
Until the fall of the Third Reich, Breker was a professor of visual arts in Berlin.
Post-Nazi career
Ninety percent of Breker's public works were destroyed during the bombings of Germany toward the end of the war. In 1946, Breker was offered a commission by Soviet leader Joseph Stalin, but he refused, saying "One dictatorship is sufficient for me". In 1948 Breker was designated as a "fellow traveller" of the Nazis and fired, despite which he continued to thrive professionally. He returned to Düsseldorf, now in the new West Germany, which remained his base, with periods of residence in Paris. During this time he worked as an architect. However, he continued to receive commissions for sculptures, producing a number of works in his familiar classical style, working for businesses and individual patrons. He also produced many portrait busts. In 1970 he was commissioned by the king of Morocco to produce work for the United Nations Building in Casablanca, but the work was destroyed. Many other works followed, including sculptures for Dusseldorf's city hall, portraits of Anwar Sadat and Konrad Adenauer, and a statue of Pallas Athene, helmeted and throwing a spear in the same bombastic style as his Nazi-era work. Breker's rehabilitation continued, culminating in the creation of a Breker museum, funded by the Bodenstein family, who set aside Schloss Nörvenich (between Aachen and Cologne) for the purpose. The Arno Breker Museum was inaugurated in 1985, and still open in 2021.
Breker's rehabilitation led to backlashes from anti-Nazi activists, including controversy in Paris when some of his works were exhibited at the Centre Georges Pompidou in 1981. In the same year anti-Breker demonstrations accompanied an exhibition in Berlin. Breker's admirers insisted that he had never been a supporter of Nazi ideology (despite being a member of the Nazi Party), but had simply accepted their patronage.
Breker's last major work was a monumental sculpture of Alexander the Great intended to be located in Greece.
Marriages and family
Arno Breker was married twice. His first wife, Demetra Messala, was a Greek model. She died in 1956 in a car accident. He remarried in 1958 to Charlotte Kluge. They had two children, Gerhart (1959) and Carola (1962). Breker remained married to Kluge until his death in 1991.
Portraits (mostly in bronze)
Baron von Mirbach, 1920
Friedrich Ebert, Berlin 1924 (erster Staatsauftrag)
Walter Kaesbach, Düsseldorf, 1925
Artur Kaufmann, 1925
Herbert Eulenberg, 1925/26
Otto Dix, Paris 1926/27
Isamu Noguchi, Paris 1927
Hermann Kesser, 1927
Moissey Kogan, Paris 1927/28
Inge Davemann, 1928
Albert Lindgens, 1928
Walter Lindgens, 1928
Illa Fudickar, 1929
Robert Gerling, 1929
Arnold von Guilleaume, 1929
Jean Marchand, 1929
Mossey Kogan, 1929
H. R. von Langen, 1929
Alberto Giacometti
Isolde von Conta, 1930
Abraham Frohwein, 1930
Heinrich Heine, 1930
Edith Arnthal, 1930/31
Demetra Breker, 1931
Nico Mazaraki, 1931
Robert Valancey, Paris 1931
Prince Georg of Bavaria, 1932
Andreas von Siemens, Berlin 1932
Nina Bausch, 1933
Demetra Breker, 1933
Olga von Dahlgreen, 1933
Arthur Kampf, 1933
Victor Manheimer, 1933
Nora von Schnitzler, 1933
Robert de Valencay, 1933
Max Liebermann, 1934
Gottfried Bermann Fischer, 1934
Max Baldner, 1934
Kurt Edzard, 1934
Graf von Luckner, 1934
Anne-Marie Merkel, 1934/35
Pütze von Siemens, 1934/35
Kurt Edzard, 1935
Anne-Marie Merkel, 1935
Pütze von Siemens, 1935/36
Carl Friedrich von Siemens, 1936
Leo von König, 1936
Joseph Goebbels, 1937
Paul von Hindenburg, 1937
Wolfgang Reindl, 1938
Adolf Hitler, 1938
Richard Wagner, 1939
Gerda Bormann (wife of Martin Bormann), 1940
Edda Göring (daughter of Hermann Göring), 1941
Albert Speer, 1941
Margarete Speer (wife of Albert Speer), 1941
Bernhard Rust
Erika Baeumker (wife of Adolf Baeumker), approx 1941
Gerhart Hauptmann, 1942
Serge Lifar, 1942/43
Aristide Maillol, 1942/43
Alfred Cortot, 1942/43
Abel Bonnard, 1943
Wilhelm Kreis, 1943
Maurice de Vlaminck, 1943
Claude Flammarion, 1944
Gottfried Ude-Bernays, 1945
Johannes Bork, 1946
Lothar Albano Müller, 1950
Ludwig Hoelscher, 1952
Gustav Lindemann, 1952
Wilhelm Kempff, 1953
Emperor Haile Selassie I of Ethiopia, 1955
Rolf Gerling, 1956
Hans Gerling
Friedrich Sieburg, 1961
Jean Cocteau, 1963
Jean Marais, 1963
Henry de Montherlant, 1964
Marcel Pagnol, 1964
Roger Peyrefitte, 1964
Jeanne Castel, 1964
Paul Morand, 1965
Jacques Benoist-Méchin, 1965
Henry Picker
André Dunoyer de Segonzac, 1966
Marcel Midy
Ezra Pound, 1967
King Mohammed V of Morocco
Princess Ira von Fürstenberg
Louis-Ferdinand Céline, 1970
Salvador Dalí, 1974/75
Ernst Fuchs, 1976/77
Leopold Sedar Senghor, 1978
Anwar El Sadat, 1980
Ernst Jünger, 1981/82
Richard Wagner, Cosima Wagner, Franz Liszt, 1982
Heinrich Heine, 1983
Peter und Irene Ludwig, 1986/1987
Gerhard Hauptmann, 1988
Arno Breker, Selfportrait, 1991
Sculptures 1935–1945
Prometheus (1935)
Relief am Gebäude der Lebensversicherung Nordstern, Berlin (1936)
Der Zehnkämpfer fürs Olympia-Stadion, Berlin (1936, Silver medal)
Die Siegerin fürs Olympia-Stadion, Berlin (1936)
Dionysos fürs Olympia-Dorf, Berlin (1936)
Der Verwundete (1938)
Der Rosseführer (1938)
Anmut (1938)
Fackelträger („Die Partei") im Hof der Neuen Reichskanzlei (1939)
Schwertträger („Die Wehrmacht") im Hof der Neuen Reichskanzlei (1939)
Schreitende Pferde, Gartenfront, Neue Reichskanzlei (1939)
Der Künder (1939)
Der Wäger (1939)
Bereitschaft (1939)
Der Rächer (1940)
Kameraden (1940), Breker-Museum
Bannerträger (1940)
Abschied (1940)
Vernichtung (1940)
Opfer (1940)
Schreitende (1940)
Der Wächter (1941)
Psyche (1941)
Berufung (1941)
Der Sieger (1942)
Kniende (1942)
Eos (1942)
Flora (1943)
Heros (1943)
Reliefs
Der Genius (1938)
Der Kämpfer (1938)
Apollo und Daphne
Auszug zum Kampf (1941)
Aufbruch der Kämpfer (1940/41)
Der Rufer (1941)
Orpheus and Eurydice (1944, Breker-Museum)
Books by Breker
1983 – Schriften ("Writings") Bonn: Marco-Edition .
1987 – Begegnungen und Betrachtungen ("Encounters and Reflections") Bonn: Marco-Edition .
2000 – Über allem Schönheit ("Above All Beauty") Arnshaugk.
Films and videos
Arno Breker – Harte Zeit, starke Kunst, by Arnold Fanck, Hans Cürlis, Riefenstahl-Film GmbH, Berlin (1944)
Arno Breker – Skulpturen und Musik, by Marco J. Bodenstein, 20 minutes, Marco-Edition Bonn.
Arno Breker – Deutsche Lebensläufe, Farbfilm 60 minutes, Marco-VG, Bonn.
Paris-Rom-Berlin und Arno Breker, and Interview with Albert Speer. Farbfilm, 60 minutes, EKS Museum Europäische Kunst, Schloss 52388 Nörvenich.
Zeit der Götter (1992)
See also
Art of the Third Reich
Chantons sous l'Occupation (documentary film)
Conrad Hommel
Nazi architecture
Werner Peiner
Josef Thorak
Adolf Wissel
References
Notes
Further reading
Bodenstein, Joe F. (2016). Arno Breker – une biographie. Paris: Èditions Séguier Paris.
Despiau, Charles (1942). Arno Breker. Paris: Edition Flammarion.
Egret, Dominique (1997). Arno Breker: Ein Leben für das Schöne. Berlin: Grabert Verlag. .
Hirlé, Ronald (2010). Arno Breker – Sculpteur – Dessinateur – Architecte. Strasbourg and Paris: Editions Hirlè.
Klier, Hans (1978). Arno Breker – Form und Schönheit. Bonn: Salzburger Kulturvereinigung; Paris: Marco-Edition.
Leber, Hermann (1998). Rodin, Breker, Hrdlicka
Möller, Uwe (2000). Arno Breker – Zeichnungen-Drawings-Dessins 1927–1990. Bonn: Marco Edition
Peyrefitte, Roger (1980). Hommage an Arno Breker. Paris: Marco-Edition.
Probst, Volker G. (1981). Der Bildhauer Arno Breker – Eine Untersuchung. Paris: Marco-Edition .
Probst, Volker G. (1981). Das Bildnis des Menschen im Werk von Arno Breker Paris: Marco-Edition. .
Probst, Volker G. (1985). Das Pietà-Motiv bei Arno Breker. Paris: Marco-Edition.
Schilling, Rolf (1994). Eros und Ares – Begegnung mit Breker. Munich: Edition Arnshaugk
Trimborn, Jürgen (2011). Arno Breker. Der Künstler und die Macht. Berlin: Aufbau-Verlag
Zavrel, B. John (1985). Arno Breker – His Art and Life. New York: West Art.
Zavrel, B. John and Ludwig, Peter (1990). Arno Breker - The Collected Writings. New York: West Art; Paris: Marco-Edition.
Zavrel, B. John and Webb, Benjiman D. (1982). Arno Breker – The Divine Beauty in Art. New York: West Art.
External links
Web museum
Interview with Arno Breker conducted in 1979
Arno Breker Museum Official Site (in German)
Arno Breker Biography (in German)
Arno Breker Life, Work and Relationships with Modern Writers and Artists (in French)
Demetra Messala Article about Arno Breker's wife
Arno Breker Appreciation Group
1900 births
1991 deaths
People from Elberfeld
Nazi Party politicians
German sculptors
Modern sculptors
Artists from the Rhine Province
Olympic silver medalists in art competitions
20th-century German sculptors
20th-century German male artists
German male sculptors
Medalists at the 1936 Summer Olympics
Olympic competitors in art competitions
Architects from Wuppertal
|
```javascript
import assert from "node:assert"
import { setTimeout } from "node:timers/promises"
import { join } from "desm"
import { setup, teardown } from "../../../_testHelpers/index.js"
import { BASE_URL } from "../../../config.js"
describe("run mode with in-process", function desc() {
  // Boot the fixture service before each test and tear it down afterwards.
  beforeEach(() =>
    setup({
      servicePath: join(import.meta.url),
    }),
  )
  afterEach(() => teardown())
  it("does not create a new lambda instance, instead uses same", async () => {
    const url = new URL("/dev/foo", BASE_URL)
    // Fire 10 concurrent requests at the same endpoint.
    const responses = await Promise.all(
      Array.from(new Array(10).keys()).map(() => fetch(url)),
    )
    responses.forEach((response) => {
      assert.equal(response.status, 200)
    })
    const jsons = await Promise.all(
      responses.map((response) => response.json()),
    )
    // All responses report 10, implying one shared handler instance served
    // every request -- presumably the fixture handler increments a
    // module-level counter; verify against the handler source.
    jsons.forEach((json) => {
      assert.deepEqual(json, 10)
    })
  })
  it("re-uses existing lambda instance when idle", async () => {
    const url = new URL("/dev/foo", BASE_URL)
    const results = []
    // Issue 5 sequential requests spaced 2s apart so the instance sits idle
    // between calls.
    // eslint-disable-next-line no-unused-vars
    for (const _ of new Array(5)) {
      // eslint-disable-next-line no-await-in-loop
      await setTimeout(2000)
      results.push(fetch(url))
    }
    const responses = await Promise.all(results)
    responses.forEach((response) => {
      assert.equal(response.status, 200)
    })
    const jsons = await Promise.all(
      responses.map((response) => response.json()),
    )
    // Responses should be the consecutive values 1..5: the counter kept
    // incrementing, so the same instance survived the idle gaps.
    let sort = 0
    jsons.sort().forEach((json) => {
      sort += 1
      assert.deepEqual(json, sort)
    })
  })
})
```
|
```c++
//
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// path_to_url
//
// See path_to_url for most recent version including documentation.
#ifndef BOOST_TT_HAS_GREATER_EQUAL_HPP_INCLUDED
#define BOOST_TT_HAS_GREATER_EQUAL_HPP_INCLUDED
// Configuration for the shared has_binary_operator.hpp machinery: generate
// the has_greater_equal<Lhs, Rhs> trait for the built-in operator >=.
#define BOOST_TT_TRAIT_NAME has_greater_equal
#define BOOST_TT_TRAIT_OP >=
// Operand combinations for which instantiating "Lhs >= Rhs" would be
// ill-formed, so the trait must be forced to false instead of hard-erroring.
#define BOOST_TT_FORBIDDEN_IF\
   (\
      /* Lhs==pointer and Rhs==fundamental */\
      (\
         ::boost::is_pointer< Lhs_noref >::value && \
         ::boost::is_fundamental< Rhs_nocv >::value\
      ) || \
      /* Rhs==pointer and Lhs==fundamental */\
      (\
         ::boost::is_pointer< Rhs_noref >::value && \
         ::boost::is_fundamental< Lhs_nocv >::value\
      ) || \
      /* Lhs==pointer and Rhs==pointer and Lhs!=base(Rhs) and Rhs!=base(Lhs) and Lhs!=void* and Rhs!=void* */\
      (\
         ::boost::is_pointer< Lhs_noref >::value && \
         ::boost::is_pointer< Rhs_noref >::value && \
         (! \
            ( \
               ::boost::is_base_of< Lhs_noptr, Rhs_noptr >::value || \
               ::boost::is_base_of< Rhs_noptr, Lhs_noptr >::value || \
               ::boost::is_same< Lhs_noptr, Rhs_noptr >::value || \
               ::boost::is_void< Lhs_noptr >::value || \
               ::boost::is_void< Rhs_noptr >::value\
            )\
         )\
      ) || \
      /* Lhs is (likely) a stateless lambda, which defines no operator>= */\
      (\
         ::boost::type_traits_detail::is_likely_stateless_lambda<Lhs_noref>::value\
      )\
   )
// Instantiates the trait from the macros defined above.
#include <boost/type_traits/detail/has_binary_operator.hpp>
#undef BOOST_TT_TRAIT_NAME
#undef BOOST_TT_TRAIT_OP
#undef BOOST_TT_FORBIDDEN_IF
#endif
```
|
```smalltalk
/* ====================================================================
contributor license agreements. See the NOTICE file distributed with
this work for Additional information regarding copyright ownership.
path_to_url
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
==================================================================== */
/* ================================================================
* About NPOI
* Author: Tony Qu
* Author's email: tonyqus (at) gmail.com
* Author's Blog: tonyqus.wordpress.com.cn (wp.tonyqus.cn)
* HomePage: path_to_url
* Contributors:
*
* ==============================================================*/
using System;
using System.Text;
using System.Collections;
using System.IO;
using NUnit.Framework;
using NPOI.POIFS.Common;
using NPOI.POIFS.Storage;
using NPOI.POIFS.Properties;
using TestCases.POIFS.Storage;
namespace TestCases.POIFS.Properties
{
/**
* Class to Test DocumentProperty functionality
*
* @author Marc Johnson
*/
[TestFixture]
public class TestDocumentProperty
{
/**
* Constructor TestDocumentProperty
*
* @param name
*/
public TestDocumentProperty()
{
}
/**
* Test constructing DocumentPropertys
*
* @exception IOException
*/
[Test]
public void TestConstructor()
{
// Test with short name, small file
VerifyProperty("foo", 1234);
// Test with just long enough name, small file
VerifyProperty("A.really.long.long.long.name123", 2345);
// Test with longer name, just small enough file
VerifyProperty("A.really.long.long.long.name1234", 4095);
// Test with just long enough file
VerifyProperty("A.really.long.long.long.name123", 4096);
}
/**
* Test Reading constructor
*
* @exception IOException
*/
[Test]
public void TestReadingConstructor()
{
string[] hexData = {
"52 00 6F 00 6F 00 74 00 20 00 45 00 6E 00 74 00 72 00 79 00 00 00 00 00 00 00 00 00 00 00 00 00",
"00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"16 00 05 01 FF FF FF FF FF FF FF FF 02 00 00 00 20 08 02 00 00 00 00 00 C0 00 00 00 00 00 00 46",
"00 00 00 00 00 00 00 00 00 00 00 00 C0 5C E8 23 9E 6B C1 01 FE FF FF FF 00 00 00 00 00 00 00 00",
"57 00 6F 00 72 00 6B 00 62 00 6F 00 6F 00 6B 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"12 00 02 01 FF FF FF FF FF FF FF FF FF FF FF FF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10 00 00 00 00 00 00",
"05 00 53 00 75 00 6D 00 6D 00 61 00 72 00 79 00 49 00 6E 00 66 00 6F 00 72 00 6D 00 61 00 74 00", //SummaryInformation
"69 00 6F 00 6E 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"28 00 02 01 01 00 00 00 03 00 00 00 FF FF FF FF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 00 00 10 00 00 00 00 00 00",
"05 00 44 00 6F 00 63 00 75 00 6D 00 65 00 6E 00 74 00 53 00 75 00 6D 00 6D 00 61 00 72 00 79 00", //DocumentSummaryInformation
"49 00 6E 00 66 00 6F 00 72 00 6D 00 61 00 74 00 69 00 6F 00 6E 00 00 00 00 00 00 00 00 00 00 00",
"38 00 02 01 FF FF FF FF FF FF FF FF FF FF FF FF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00",
"00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10 00 00 00 00 10 00 00 00 00 00 00",
};
byte[] input = RawDataUtil.Decode(hexData);
VerifyReadingProperty(1, input, 128, "Workbook");
VerifyReadingProperty(2, input, 256, "\x0005SummaryInformation");
VerifyReadingProperty(3, input, 384, "\x0005DocumentSummaryInformation");
}
/// <summary>
/// Reads one 128-byte directory entry from <paramref name="input"/> at
/// <paramref name="offset"/>, checks it serialises back byte-for-byte, and
/// that its index and name were parsed as expected.
/// </summary>
private void VerifyReadingProperty(int index, byte[] input, int offset, string name)
{
    DocumentProperty property = new DocumentProperty(index, input, offset);

    // Snapshot the original 128-byte entry for the round-trip comparison.
    byte[] expected = new byte[128];
    Array.Copy(input, offset, expected, 0, 128);

    MemoryStream stream = new MemoryStream(128);
    property.WriteData(stream);
    byte[] actual = stream.ToArray();

    Assert.AreEqual(128, actual.Length);
    for (int i = 0; i < expected.Length; i++)
    {
        Assert.AreEqual(expected[i],
            actual[i], "mismatch at offset " + i);
    }

    Assert.AreEqual(index, property.Index);
    Assert.AreEqual(name, property.Name);
}
/// <summary>
/// Builds the expected 128-byte POIFS directory entry for a stream with the
/// given name and size by hand, then checks that a DocumentProperty
/// serialises to exactly those bytes.
/// </summary>
private void VerifyProperty(String name, int size)
{
    DocumentProperty property = new DocumentProperty(name, size);

    // Streams smaller than 4096 bytes are stored in the small-block stream.
    if (size >= 4096)
    {
        Assert.IsTrue(!property.ShouldUseSmallBlocks);
    }
    else
    {
        Assert.IsTrue(property.ShouldUseSmallBlocks);
    }

    byte[] Testblock = new byte[128];
    int index = 0;

    // 0x00-0x3F: name field, zeroed here and filled in at the end.
    for (; index < 0x40; index++)
    {
        Testblock[index] = (byte)0;
    }

    // 0x40-0x41: name length in bytes = (char count + 1) * 2;
    // names are capped at 31 characters.
    int limit = Math.Min(31, name.Length);
    Testblock[index++] = (byte)(2 * (limit + 1));
    Testblock[index++] = (byte)0;

    // 0x42-0x43: two marker bytes (presumably entry type 2 = stream and
    // node colour 1 — verify against the POIFS property-table spec).
    Testblock[index++] = (byte)2;
    Testblock[index++] = (byte)1;

    // 0x44-0x4F: filled with 0xFF (unset sibling/child references).
    for (; index < 0x50; index++)
    {
        Testblock[index] = (byte)0xFF;
    }

    // 0x50-0x77: zeroed middle section of the entry.
    for (; index < 0x78; index++)
    {
        Testblock[index] = (byte)0;
    }

    // 0x78-0x7B: stream size, little-endian.
    int sz = size;
    Testblock[index++] = (byte)sz;
    sz /= 256;
    Testblock[index++] = (byte)sz;
    sz /= 256;
    Testblock[index++] = (byte)sz;
    sz /= 256;
    Testblock[index++] = (byte)sz;

    // 0x7C-0x7F: zeroed tail of the entry.
    for (; index < 0x80; index++)
    {
        Testblock[index] = (byte)0x0;
    }

    // Write the name into the name field, one byte per 16-bit slot
    // (UTF-16LE low bytes). NOTE(review): assumes the name is ASCII —
    // UTF-8 bytes are copied positionally; confirm tests never use
    // multi-byte names.
    byte[] name_bytes = Encoding.UTF8.GetBytes(name);
    for (index = 0; index < limit; index++)
    {
        Testblock[index * 2] = name_bytes[index];
    }

    // Serialise and compare byte-for-byte against the hand-built block.
    MemoryStream stream = new MemoryStream(512);
    property.WriteData(stream);
    byte[] output = stream.ToArray();
    Assert.AreEqual(Testblock.Length, output.Length);
    for (int j = 0; j < Testblock.Length; j++)
    {
        Assert.AreEqual(Testblock[j],
            output[j], "mismatch at offset " + j);
    }
}
}
}
```
|
Gregory Park railway station opened in 1845 and closed in 1992. It served the Gregory Park sugar estate on the Kingston to Montego Bay line, from the Kingston terminus. It was destroyed by fire sometime after closure.
The station was a two-story, timber building. The ground floor had timber doors and sash windows. The upper floor was partially cantilevered using a series of angled timber arms to the upper front elevation to form a canopy over the platform with a veranda above. The roof of the building was a T-shaped gable end.
Fares
In 1910 the third class fare from Gregory Park to Kingston was 6d (sixpence); first class was about double.
See also
Railway stations in Jamaica
External links
Aerial view of site.
Stamps and ticket: .
References
Railway stations in Jamaica
Buildings and structures in Saint Catherine Parish
Railway stations in Jamaica closed in 1992
Railway stations in Jamaica opened in 1845
|
```javascript
/*
* Project: Bootstrap Notify - v3.1.3
* Description: Turns standard Bootstrap alerts into "Growl-like" notifications.
* Author: Mouse0270 aka Robert McIntosh
* Website: path_to_url
*/
(function (factory) {
if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['jquery'], factory);
} else if (typeof exports === 'object') {
// Node/CommonJS
factory(require('jquery'));
} else {
// Browser globals
factory(jQuery);
}
}(function ($) {
// Create the defaults once
// Plugin-wide default settings; overridable globally via $.notifyDefaults()
// or per call via the options argument of $.notify().
var defaults = {
    element: 'body',        // container the notification is appended to
    position: null,         // explicit CSS position override (see placement())
    type: "info",           // bootstrap alert flavour: info/success/warning/danger
    allow_dismiss: true,    // show the close ("x") button
    newest_on_top: false,   // stack new notifications above existing ones
    showProgressbar: false, // show a progress bar while the delay counts down
    placement: {
        from: "top",        // "top" or "bottom"
        align: "right"      // "left", "right" or "center"
    },
    offset: 20,             // px from the container edge; number or {x, y}
    spacing: 10,            // px between stacked notifications
    z_index: 1031,
    delay: 5000,            // ms before auto-close; <= 0 disables auto-close
    timer: 1000,            // ms between countdown ticks
    url_target: '_blank',   // default target for the notification link
    mouse_over: null,       // "pause" freezes the countdown while hovered
    animate: {
        enter: 'animated fadeInDown',
        exit: 'animated fadeOutUp'
    },
    onShow: null,           // lifecycle callbacks
    onShown: null,
    onClose: null,
    onClosed: null,
    icon_type: 'class',     // 'class' (icon font) or anything else (img src)
    // Placeholders: {0}=type, {1}=title, {2}=message, {3}=url, {4}=target.
    template: '<div data-notify="container" class="col-xs-11 col-sm-4 alert alert-{0}" role="alert"><button type="button" aria-hidden="true" class="close" data-notify="dismiss">×</button><span data-notify="icon"></span> <span data-notify="title">{1}</span> <span data-notify="message">{2}</span><div class="progress" data-notify="progressbar"><div class="progress-bar progress-bar-{0}" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="width: 0%;"></div></div><a href="{3}" target="{4}" data-notify="url"></a></div>'
};
// Minimal "{n}" template formatter used to instantiate the notify template.
// Usage: String.format(tpl, arg0, arg1, ...) — every "{i}" occurrence in tpl
// is replaced (globally) by the corresponding argument.
String.format = function () {
    var args = Array.prototype.slice.call(arguments),
        template = args.shift();
    args.forEach(function (value, position) {
        template = template.replace(RegExp("\\{" + position + "\\}", "gm"), value);
    });
    return template;
};
function Notify ( element, content, options ) {
// Setup Content of Notify
var content = {
content: {
message: typeof content == 'object' ? content.message : content,
title: content.title ? content.title : '',
icon: content.icon ? content.icon : '',
url: content.url ? content.url : '#',
target: content.target ? content.target : '-'
}
};
options = $.extend(true, {}, content, options);
this.settings = $.extend(true, {}, defaults, options);
this._defaults = defaults;
if (this.settings.content.target == "-") {
this.settings.content.target = this.settings.url_target;
}
this.animations = {
start: 'webkitAnimationStart oanimationstart MSAnimationStart animationstart',
end: 'webkitAnimationEnd oanimationend MSAnimationEnd animationend'
}
if (typeof this.settings.offset == 'number') {
this.settings.offset = {
x: this.settings.offset,
y: this.settings.offset
};
}
this.init();
};
$.extend(Notify.prototype, {
init: function () {
var self = this;
this.buildNotify();
if (this.settings.content.icon) {
this.setIcon();
}
if (this.settings.content.url != "#") {
this.styleURL();
}
this.styleDismiss();
this.placement();
this.bind();
this.notify = {
$ele: this.$ele,
update: function(command, update) {
var commands = {};
if (typeof command == "string") {
commands[command] = update;
}else{
commands = command;
}
for (var command in commands) {
switch (command) {
case "type":
this.$ele.removeClass('alert-' + self.settings.type);
this.$ele.find('[data-notify="progressbar"] > .progress-bar').removeClass('progress-bar-' + self.settings.type);
self.settings.type = commands[command];
this.$ele.addClass('alert-' + commands[command]).find('[data-notify="progressbar"] > .progress-bar').addClass('progress-bar-' + commands[command]);
break;
case "icon":
var $icon = this.$ele.find('[data-notify="icon"]');
if (self.settings.icon_type.toLowerCase() == 'class') {
$icon.removeClass(self.settings.content.icon).addClass(commands[command]);
}else{
if (!$icon.is('img')) {
$icon.find('img');
}
$icon.attr('src', commands[command]);
}
break;
case "progress":
var newDelay = self.settings.delay - (self.settings.delay * (commands[command] / 100));
this.$ele.data('notify-delay', newDelay);
this.$ele.find('[data-notify="progressbar"] > div').attr('aria-valuenow', commands[command]).css('width', commands[command] + '%');
break;
case "url":
this.$ele.find('[data-notify="url"]').attr('href', commands[command]);
break;
case "target":
this.$ele.find('[data-notify="url"]').attr('target', commands[command]);
break;
default:
this.$ele.find('[data-notify="' + command +'"]').html(commands[command]);
};
}
var posX = this.$ele.outerHeight() + parseInt(self.settings.spacing) + parseInt(self.settings.offset.y);
self.reposition(posX);
},
close: function() {
self.close();
}
};
},
buildNotify: function () {
var content = this.settings.content;
this.$ele = $(String.format(this.settings.template, this.settings.type, content.title, content.message, content.url, content.target));
this.$ele.attr('data-notify-position', this.settings.placement.from + '-' + this.settings.placement.align);
if (!this.settings.allow_dismiss) {
this.$ele.find('[data-notify="dismiss"]').css('display', 'none');
}
if ((this.settings.delay <= 0 && !this.settings.showProgressbar) || !this.settings.showProgressbar) {
this.$ele.find('[data-notify="progressbar"]').remove();
}
},
setIcon: function() {
if (this.settings.icon_type.toLowerCase() == 'class') {
this.$ele.find('[data-notify="icon"]').addClass(this.settings.content.icon);
}else{
if (this.$ele.find('[data-notify="icon"]').is('img')) {
this.$ele.find('[data-notify="icon"]').attr('src', this.settings.content.icon);
}else{
this.$ele.find('[data-notify="icon"]').append('<img src="'+this.settings.content.icon+'" alt="Notify Icon" />');
}
}
},
styleDismiss: function() {
this.$ele.find('[data-notify="dismiss"]').css({
position: 'absolute',
right: '10px',
top: '5px',
zIndex: this.settings.z_index + 2
});
},
styleURL: function() {
this.$ele.find('[data-notify="url"]').css({
backgroundImage: 'url(data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7)',
height: '100%',
left: '0px',
position: 'absolute',
top: '0px',
width: '100%',
zIndex: this.settings.z_index + 1
});
},
// Computes the notification's CSS position, inserts it into the DOM, starts
// the enter animation and fires the onShow/onShown callbacks.
placement: function () {
    var self = this,
        offsetAmt = this.settings.offset.y,
        css = {
            display: 'inline-block',
            margin: '0px auto',
            // fixed when attached to <body>, absolute inside other containers,
            // unless an explicit position override was configured
            position: this.settings.position ? this.settings.position : (this.settings.element === 'body' ? 'fixed' : 'absolute'),
            transition: 'all .5s ease-in-out',
            zIndex: this.settings.z_index
        },
        hasAnimation = false,
        settings = this.settings;

    // Stack below the lowest open notification in the same corner,
    // ignoring any that are currently closing.
    $('[data-notify-position="' + this.settings.placement.from + '-' + this.settings.placement.align + '"]:not([data-closing="true"])').each(function () {
        return offsetAmt = Math.max(offsetAmt, parseInt($(this).css(settings.placement.from)) + parseInt($(this).outerHeight()) + parseInt(settings.spacing));
    });
    // newest_on_top: this one takes the corner slot; others get pushed below.
    if (this.settings.newest_on_top == true) {
        offsetAmt = this.settings.offset.y;
    }
    css[this.settings.placement.from] = offsetAmt + 'px';

    switch (this.settings.placement.align) {
        case "left":
        case "right":
            css[this.settings.placement.align] = this.settings.offset.x + 'px';
            break;
        case "center":
            css.left = 0;
            css.right = 0;
            break;
    }
    this.$ele.css(css).addClass(this.settings.animate.enter);
    // Run the enter animation exactly once, whatever the vendor prefix.
    $.each(Array('webkit-', 'moz-', 'o-', 'ms-', ''), function (index, prefix) {
        self.$ele[0].style[prefix + 'AnimationIterationCount'] = 1;
    });

    $(this.settings.element).append(this.$ele);

    // Push the previously-shown notifications down to make room.
    if (this.settings.newest_on_top == true) {
        offsetAmt = (parseInt(offsetAmt) + parseInt(this.settings.spacing)) + this.$ele.outerHeight();
        this.reposition(offsetAmt);
    }

    if ($.isFunction(self.settings.onShow)) {
        self.settings.onShow.call(this.$ele);
    }

    this.$ele.one(this.animations.start, function (event) {
        hasAnimation = true;
    }).one(this.animations.end, function (event) {
        if ($.isFunction(self.settings.onShown)) {
            self.settings.onShown.call(this);
        }
    });
    // Fallback: if no CSS animation ever started (e.g. animate.css absent),
    // still fire onShown after a grace period.
    setTimeout(function () {
        if (!hasAnimation) {
            if ($.isFunction(self.settings.onShown)) {
                self.settings.onShown.call(this);
            }
        }
    }, 600);
},
// Wires up dismiss-on-click, hover tracking and the auto-close countdown.
bind: function () {
    var self = this;

    this.$ele.find('[data-notify="dismiss"]').on('click', function () {
        self.close();
    })
    // Track hover state as a string flag; consulted below to pause the
    // countdown when mouse_over == "pause".
    this.$ele.mouseover(function (e) {
        $(this).data('data-hover', "true");
    }).mouseout(function (e) {
        $(this).data('data-hover', "false");
    });
    this.$ele.data('data-hover', "false");

    if (this.settings.delay > 0) {
        self.$ele.data('notify-delay', self.settings.delay);
        var timer = setInterval(function () {
            var delay = parseInt(self.$ele.data('notify-delay')) - self.settings.timer;
            // Tick (store new delay, advance the bar) unless paused by hover.
            if ((self.$ele.data('data-hover') === 'false' && self.settings.mouse_over == "pause") || self.settings.mouse_over != "pause") {
                var percent = ((self.settings.delay - delay) / self.settings.delay) * 100;
                self.$ele.data('notify-delay', delay);
                self.$ele.find('[data-notify="progressbar"] > div').attr('aria-valuenow', percent).css('width', percent + '%');
            }
            // Close one tick after the delay has fully elapsed.
            if (delay <= -(self.settings.timer)) {
                clearInterval(timer);
                self.close();
            }
        }, self.settings.timer);
    }
},
// Plays the exit animation, removes the element and fires onClose/onClosed.
close: function () {
    var self = this,
        $successors = null,
        posX = parseInt(this.$ele.css(this.settings.placement.from)),
        hasAnimation = false;

    // Mark as closing so placement()/reposition() skip this element, then
    // let the notifications below slide up into the freed slot.
    this.$ele.data('closing', 'true').addClass(this.settings.animate.exit);
    self.reposition(posX);

    if ($.isFunction(self.settings.onClose)) {
        self.settings.onClose.call(this.$ele);
    }

    this.$ele.one(this.animations.start, function (event) {
        hasAnimation = true;
    }).one(this.animations.end, function (event) {
        $(this).remove();
        if ($.isFunction(self.settings.onClosed)) {
            self.settings.onClosed.call(this);
        }
    });
    // Fallback removal when no CSS animation fires.
    setTimeout(function () {
        if (!hasAnimation) {
            self.$ele.remove();
            if (self.settings.onClosed) {
                self.settings.onClosed(self.$ele);
            }
        }
    }, 600);
},
// Re-stacks the notifications that follow this one in the same corner,
// starting at the given offset and spacing them out by their heights.
reposition: function (posX) {
    var self = this,
        notifies = '[data-notify-position="' + this.settings.placement.from + '-' + this.settings.placement.align + '"]:not([data-closing="true"])',
        $elements = this.$ele.nextAll(notifies);
    // With newest_on_top the DOM order of the stack is reversed.
    if (this.settings.newest_on_top == true) {
        $elements = this.$ele.prevAll(notifies);
    }
    $elements.each(function () {
        $(this).css(self.settings.placement.from, posX);
        posX = (parseInt(posX) + parseInt(self.settings.spacing)) + $(this).outerHeight();
    });
}
});
$.notify = function ( content, options ) {
var plugin = new Notify( this, content, options );
return plugin.notify;
};
// Merges the supplied overrides into the shared defaults and returns the
// merged object so callers can inspect the effective settings.
$.notifyDefaults = function (options) {
    return (defaults = $.extend(true, {}, defaults, options));
};
$.notifyClose = function( command ) {
if (typeof command === "undefined" || command == "all") {
$('[data-notify]').find('[data-notify="dismiss"]').trigger('click');
}else{
$('[data-notify-position="'+command+'"]').find('[data-notify="dismiss"]').trigger('click');
}
};
}));
```
|
```xml
import omit from 'lodash/omit'
// Two-level record: maps a parent key to a record of child keys to values.
export type RecordSet<Parent extends string, Child extends string, Value> =
  Record<Parent, Record<Child, Value>>
/**
 * Immutably sets `obj[parentKey][childKey] = value`, creating the parent
 * record if it does not exist yet. Returns a new object; `obj` is untouched.
 */
export function setChild<
  Parent extends string,
  Child extends string,
  Value,
> (
  obj: Record<Parent, Record<Child, Value>>,
  parentKey: Parent,
  childKey: Child,
  value: Value,
): Record<Parent, Record<Child, Value>> {
  // Fall back to an empty record when the parent entry is missing.
  const existingChildren = obj[parentKey] || {}
  const updatedChildren = { ...existingChildren, [childKey]: value }
  return { ...obj, [parentKey]: updatedChildren }
}
/**
 * Immutably removes `obj[parentKey][childKey]`. When that was the last child
 * of the parent, the (now empty) parent entry is dropped as well.
 */
export function removeChild<
  Parent extends string,
  Child extends string,
  Value,
> (
  obj: RecordSet<Parent, Child, Value>,
  parentKey: Parent,
  childKey: Child,
): Record<Parent, Record<Child, Value>> {
  // The casts appease TypeScript: lodash's omit widens the mapped type.
  const remaining = omit(
    (obj[parentKey] || {}) as Record<Child, Value>,
    childKey,
  ) as Record<Child, Value>
  if (Object.keys(remaining).length > 0) {
    return { ...obj, [parentKey]: remaining }
  }
  // Last child removed — drop the now-empty parent entirely.
  return removeParent(obj, parentKey)
}
/**
 * Immutably removes the whole `obj[parentKey]` entry and all its children.
 */
export function removeParent<
  Parent extends string,
  Child extends string,
  Value,
> (
  obj: RecordSet<Parent, Child, Value>,
  parentKey: Parent,
): Record<Parent, Record<Child, Value>> {
  // lodash's omit loses the precise mapped type, hence the cast back.
  const result = omit(obj, parentKey)
  return result as unknown as RecordSet<Parent, Child, Value>
}
```
|
Matías Malvino ( born 20 January 1992) is an Uruguayan professional footballer who plays as a centre back for Segunda División Amateur club Basáñez.
Honours
Club
Lugano
Swiss Challenge League: 2014–15
Nacional
Uruguayan Primera División: 2016
References
External links
1992 births
Living people
Uruguayan men's footballers
Uruguayan expatriate men's footballers
Uruguayan Primera División players
Swiss Super League players
Swiss Challenge League players
Liga 1 (Indonesia) players
Liga Nacional de Fútbol de Guatemala players
Uruguayan Segunda División players
Defensor Sporting players
FC Lugano players
Club Nacional de Football players
Racing Club de Montevideo players
C.S.D. Municipal players
FC Chiasso players
Arema F.C. players
Club Sportivo Cerrito players
Miramar Misiones players
C.S.D. Villa Española players
Men's association football defenders
Uruguayan expatriate sportspeople in Switzerland
Uruguayan expatriate sportspeople in Guatemala
Uruguayan expatriate sportspeople in Indonesia
Expatriate men's footballers in Switzerland
Expatriate men's footballers in Guatemala
Expatriate men's footballers in Indonesia
|
```php
<?php

namespace variables\globals_003;

// Fixture demonstrating how the $GLOBALS superglobal mirrors top-level
// variables; the echoed output is the expected behavior under test.

// Set variable and read it via $GLOBALS
$a = 5;
echo $GLOBALS["a"]."<br>";

// Take reference of $GLOBALS
// NOTE(review): acquiring $GLOBALS by reference is not permitted as of
// PHP 8.1 — confirm which PHP version this fixture targets.
$a = array(1=>"hello", "two"=>"world");
$b =& $GLOBALS;
echo $b["a"][1]." ".$b["a"]["two"]."<br>";

// Set $GLOBALS (writes through to the global $a)
$GLOBALS["a"] = "Set via GLOBALS"."<br>";
echo $a;
|
The 1958 Louisiana Tech Bulldogs football team was an American football team that represented the Louisiana Polytechnic Institute (now known as Louisiana Tech University) as a member of the Gulf States Conference during the 1958 NCAA College Division football season. In their eighteenth year under head coach Joe Aillet, the team compiled a 7–3 record and finished as Gulf States Conference co-champion.
Schedule
References
Louisiana Tech
Louisiana Tech Bulldogs football seasons
Louisiana Tech Bulldogs football
|
Grande Ecaille is an island located in the parish of Plaquemines, Louisiana, United States. It is in Lake Grand Ecaille. It was the site of a sulphur factory operated by Freeport Sulphur Company built in 1933.
Geography
The island is located at , approximately west of Port Sulfur and 71 kilometers from New Orleans. It is only above sea level, making it an area prone to flooding.
References
Unincorporated communities in New Orleans metropolitan area
Unincorporated communities in Louisiana
|
Dwayne Shattuck (born June 1, 1959 in Atlanta, Georgia, USA) is a television producer. He has worked for 4 seasons on the AMC drama series Mad Men and has won several awards.
Biography
He produced the first season of the new period drama "Magic City" for Starz Network. Dwayne has also worked for four seasons as a Producer on the AMC drama series "Mad Men," as well as "Push Nevada" and "Skin" for producers Jerry Bruckheimer, Ben Affleck, and Sean Bailey. Dwayne started in the film industry as a set medic, and has worked as an Assistant Director, Production Manager, and then as a Producer.
Dwayne lives in Los Angeles with his wife, Nicole (a still photographer) and his two sons.
Awards and nominations
2010 DGA Award for Outstanding Directorial Achievement in Dramatic Series Night for Mad Men episode "Guy Walks Into An Advertising Agency"
2010 Winner Golden Globe Award for producing "Mad Men"
2010 Winner Emmy Award for producing "Mad Men"
2011 DGA Nominations for producing "Mad Men"
2011 Golden Globe Nomination for producing "Mad Men"
2011 PGA Award for Television Producer of the Year Award in Episodic for Mad Men season 4
2011 Winner Emmy Award for producing "Mad Men"
2011 Winner AFI Producer of The Year Award for producing "Mad Men"
2012 PGA Award Nomination for producing "Mad Men"
References
External links
Television producers from Georgia (U.S. state)
Living people
1959 births
Businesspeople from Atlanta
|
```ruby
# frozen_string_literal: true
module Decidim
module Admin
# This module includes helpers to manage newsletters in admin layout
module NewslettersHelper
# Renders one selection block per participatory space type available in the
# given newsletter form.
#
# form_object - the newsletter recipients form being rendered.
#
# Returns an HTML-safe String.
def participatory_spaces_for_select(form_object)
  content_tag :div do
    # Use the form object we were handed instead of reaching for @form, so
    # the helper works with whichever form the caller passes in (callers
    # already supply it for the nested fields below).
    form_object.participatory_space_types.each do |space_type|
      concat participatory_space_types_form_object(form_object, space_type)
    end
  end
end
# Builds the hidden manifest_name field plus the spaces multi-select for a
# single participatory space type. Skips types for which the current user
# cannot administer any space.
#
# Returns an HTML-safe String, or nil when the type is skipped.
def participatory_space_types_form_object(form_object, space_type)
  return if spaces_user_can_admin[space_type.manifest_name.to_sym].blank?

  html = ""
  form_object.fields_for "participatory_space_types[#{space_type.manifest_name}]", space_type do |ff|
    html += ff.hidden_field :manifest_name, value: space_type.manifest_name
    html += select_tag_participatory_spaces(space_type.manifest_name, spaces_for_select(space_type.manifest_name.to_sym), ff)
  end
  html.html_safe
end
# Renders the multi-select of spaces for a manifest, sized to the number of
# options (capped at 10 visible rows).
#
# Returns an HTML-safe String, or nil when there are no spaces.
def select_tag_participatory_spaces(manifest_name, spaces, child_form)
  return unless spaces

  content_tag :div, class: "#{manifest_name}-block spaces-block-tag cell small-12 medium-6" do
    child_form.select :ids, options_for_select(spaces),
                      { prompt: t("select_recipients_to_deliver.none", scope: "decidim.admin.newsletters"),
                        label: t("activerecord.models.decidim/#{manifest_name.singularize}.other"),
                        include_hidden: false },
                      multiple: true, size: [spaces.size, 10].min, class: "chosen-select"
  end
end
# Options for the spaces select of the given manifest. Plain admins only get
# the spaces they can manage; organization admins additionally get an
# "all spaces" option prepended.
#
# Returns an Array of select options, or nil for unknown manifests.
def spaces_for_select(manifest_name)
  return unless Decidim.participatory_space_manifests.map(&:name).include?(manifest_name)
  return spaces_user_can_admin[manifest_name] unless current_user.admin?

  [[I18n.t("select_recipients_to_deliver.all_spaces", scope: "decidim.admin.newsletters"), "all"]] + spaces_user_can_admin[manifest_name]
end
# Human-readable summary of who a newsletter was (or will be) delivered to,
# shown in the admin newsletters index.
def selective_newsletter_to(newsletter)
  # Not sent yet: show a warning badge instead of recipients.
  return content_tag(:strong, t("index.not_sent", scope: "decidim.admin.newsletters"), class: "text-warning") unless newsletter.sent?
  # Sent without any segmentation data: it went to every user.
  return content_tag(:strong, t("index.all_users", scope: "decidim.admin.newsletters"), class: "text-success") if newsletter.sent? && newsletter.extended_data.blank?

  content_tag :div do
    concat sent_to_users newsletter
    concat sent_to_spaces newsletter
    concat sent_to_scopes newsletter
  end
end
# Sentence describing which user segments (all users, followers and/or
# participants) received the newsletter.
def sent_to_users(newsletter)
  content_tag :p, style: "margin-bottom:0;" do
    concat content_tag(:strong, t("index.has_been_sent_to", scope: "decidim.admin.newsletters"), class: "text-success")
    concat content_tag(:strong, t("index.all_users", scope: "decidim.admin.newsletters")) if newsletter.sended_to_all_users?
    concat content_tag(:strong, t("index.followers", scope: "decidim.admin.newsletters")) if newsletter.sended_to_followers?
    concat t("index.and", scope: "decidim.admin.newsletters") if newsletter.sended_to_followers? && newsletter.sended_to_participants?
    concat content_tag(:strong, t("index.participants", scope: "decidim.admin.newsletters")) if newsletter.sended_to_participants?
  end
end
# Paragraph listing, per participatory space type, which spaces the
# newsletter was segmented to ("all" or the individual space titles).
def sent_to_spaces(newsletter)
  html = "<p style='margin-bottom:0;'> "
  newsletter.sent_to_participatory_spaces.try(:each) do |type|
    next if type["ids"].blank?

    html += t("index.segmented_to", scope: "decidim.admin.newsletters", subject: t("activerecord.models.decidim/#{type["manifest_name"].singularize}.other"))
    if type["ids"].include?("all")
      html += "<strong> #{t("index.all", scope: "decidim.admin.newsletters")} </strong>"
    else
      # Resolve the stored ids back to spaces of the current organization
      # and print their (escaped) titles.
      Decidim.find_participatory_space_manifest(type["manifest_name"].to_sym)
             .participatory_spaces.call(current_organization).where(id: type["ids"]).each do |space|
        html += "<strong>#{decidim_escape_translated(space.title)}</strong>"
      end
    end
    html += "<br/>"
  end
  html += "</p>"
  html.html_safe
end
# Paragraph listing the scopes the newsletter was segmented to, or a
# "no scopes" marker when it was not scoped.
def sent_to_scopes(newsletter)
  content_tag :p, style: "margin-bottom:0;" do
    concat t("index.segmented_to", scope: "decidim.admin.newsletters", subject: nil)
    if newsletter.sent_scopes.any?
      newsletter.sent_scopes.each do |scope|
        concat content_tag(:strong, decidim_escape_translated(scope.name).to_s)
      end
    else
      concat content_tag(:strong, t("index.no_scopes", scope: "decidim.admin.newsletters"))
    end
  end
end
# Published spaces of the given manifest for the current organization,
# memoized per manifest name. Open spaces sort before closed ones, then by
# title in the current locale.
def organization_participatory_space(manifest_name)
  @organization_participatory_spaces ||= {}
  @organization_participatory_spaces[manifest_name] ||= Decidim
                                                        .find_participatory_space_manifest(manifest_name)
                                                        .participatory_spaces.call(current_organization)
                                                        .published
                                                        .sort_by { |space| [space.try(:closed?) ? 1 : 0, space.title[current_locale]] }
end
# Spaces the current user administers, grouped by manifest name, each as a
# select option ([title, id, html_options]). Memoized for the request.
def spaces_user_can_admin
  @spaces_user_can_admin ||= {}
  Decidim.participatory_space_manifests.each do |manifest|
    organization_participatory_space(manifest.name)&.each do |space|
      next unless space.admins.exists?(id: current_user.id)

      @spaces_user_can_admin[manifest.name] ||= []
      space_as_option_for_select_data = space_as_option_for_select(space)
      # `include?` replaces the original `detect { |x| x == data }`: same
      # `==`-based membership check, clearer intent.
      @spaces_user_can_admin[manifest.name] << space_as_option_for_select_data unless @spaces_user_can_admin[manifest.name].include?(space_as_option_for_select_data)
    end
  end
  @spaces_user_can_admin
end
# Maps a space to a select option triple: [title, id, html_options]. Closed
# spaces are tinted red, open ones green; the title doubles as the tooltip.
def space_as_option_for_select(space)
  return unless space

  title = translated_attribute(space.title)
  [
    title,
    space.id,
    { class: space.try(:closed?) ? "red" : "green", title: title.to_s }
  ]
end
# Announcement payload for the warning callout shown above the recipients
# selection form.
def newsletter_attention_callout_announcement
  {
    body: t("warning", scope: "decidim.admin.newsletters.select_recipients_to_deliver").html_safe
  }
end
# Announcement payload showing the current recipients count, with a hidden
# spinner element that JS reveals while the count is being recomputed.
def newsletter_recipients_count_callout_announcement
  spinner = "<span id='recipients_count_spinner' class='loading-spinner hide'></span>"
  body = "#{t("recipients_count", scope: "decidim.admin.newsletters.select_recipients_to_deliver", count: recipients_count_query)} #{spinner}"
  {
    body:
  }
end
end
end
end
```
|
```kotlin
package mega.privacy.android.app.presentation.settings.startscreen
import androidx.lifecycle.ViewModel
import androidx.lifecycle.viewModelScope
import dagger.hilt.android.lifecycle.HiltViewModel
import kotlinx.coroutines.flow.MutableStateFlow
import kotlinx.coroutines.flow.StateFlow
import kotlinx.coroutines.flow.asStateFlow
import kotlinx.coroutines.flow.update
import kotlinx.coroutines.launch
import mega.privacy.android.app.presentation.settings.startscreen.model.StartScreenOptionMapper
import mega.privacy.android.app.presentation.settings.startscreen.model.StartScreenSettingsState
import mega.privacy.android.domain.entity.preference.StartScreen
import mega.privacy.android.domain.usecase.MonitorStartScreenPreference
import mega.privacy.android.domain.usecase.SetStartScreenPreference
import javax.inject.Inject
/**
 * ViewModel backing the start screen preference setting.
 *
 * Exposes the selectable start screen options together with the currently
 * selected one, and persists the user's choice.
 */
@HiltViewModel
class StartScreenViewModel @Inject constructor(
    private val monitorStartScreenPreference: MonitorStartScreenPreference,
    private val setStartScreenPreference: SetStartScreenPreference,
    startScreenOptionMapper: StartScreenOptionMapper,
) : ViewModel() {

    private val _state = MutableStateFlow(
        StartScreenSettingsState(
            options = StartScreen.entries
                .mapNotNull(startScreenOptionMapper),
            selectedScreen = null,
        )
    )

    // asStateFlow() hides the mutable type from consumers, so the state can
    // only be updated from inside this ViewModel.
    val state: StateFlow<StartScreenSettingsState> = _state.asStateFlow()

    init {
        // Keep the selected screen in sync with the persisted preference.
        viewModelScope.launch {
            monitorStartScreenPreference()
                .collect { screen ->
                    _state.update { it.copy(selectedScreen = screen) }
                }
        }
    }

    /**
     * Persists [newScreen] as the new start screen preference.
     */
    fun newScreenClicked(newScreen: StartScreen) {
        viewModelScope.launch {
            setStartScreenPreference(newScreen)
        }
    }
}
```
|
Necm-i Şevket (Ottoman Turkish: Star of Majesty) was the second of two central battery ships built for the Ottoman Navy in the 1860s. Originally ordered by the Khedivate of Egypt but confiscated by the Ottoman Empire while under construction, the vessel was initially named Muzaffer. The ship was laid down at the French Forges et Chantiers de la Gironde shipyard in 1867, was launched in 1868, and was commissioned into the Ottoman fleet in March 1870. Necm-i Şevket was armed with a battery of four Armstrong guns in a central casemate and one Armstrong gun in a revolving barbette.
The ship saw action in the Russo-Turkish War in 1877–1878, where she supported Ottoman forces in the Caucasus, and later helped to defend the port of Sulina on the Danube. She was laid up for twenty years, until the outbreak of the Greco-Turkish War in 1897, which highlighted the badly deteriorated state of the Ottoman fleet. Necm-i Şevket was one of just two ironclads that was still in serviceable condition at the time of the war, though she was not included in the large fleet modernization program. Instead, she became a stationary ship and later a barracks ship. During the First Balkan War in 1912, Necm-i Şevket was reactivated to help stop the Bulgarian advance on Constantinople. Thoroughly obsolete by that point, she saw little action and returned to barracks duties after the war. The ship remained in the fleet's inventory through the 1920s, being decommissioned in 1929 and broken up thereafter.
Design
Necm-i Şevket was long overall, with a beam of and a draft of . The hull was constructed with iron, incorporated a ram bow and a partial double bottom. She displaced normally. She had a crew of 170 officers and enlisted men.
The ship was powered by a single horizontal compound steam engine which drove a single screw propeller. Steam was provided by four coal-fired box boilers that were trunked into a single funnel amidships. The engine was rated at and produced a top speed of , though by 1877 she was only capable of . Necm-i Şevket carried of coal. A supplementary brig rig was also fitted.
Necm-i Şevket was armed with a battery of one muzzle loading Armstrong gun and four Armstrong guns. The 178 mm guns were mounted in a central, armored battery, with the 229 mm gun on top in an open barbette mount. The ship's armored belt consisted of wrought iron that was thick and was reduced to toward the bow and stern. Above the main belt, a strake of armor 114 mm thick protected the central battery, and the same thickness was used for the barbette.
Service history
Necm-i Şevket, meaning "Star of Majesty", was originally ordered by the Khedivate of Egypt in 1866 from the French Forges et Chantiers de la Gironde shipyard in Bordeaux under the name Muzaffer. Her keel was laid down in 1867, and she was launched the following year. On 29 August 1868, the Ottoman Empire forced Egypt to surrender the ship, which was then renamed Necm-i Şevket and commissioned into the Ottoman Navy on 3 March 1870. Upon completion, Necm-i Şevket and the other ironclads then being built in Britain and France were sent to Crete to assist in stabilizing the island in the aftermath of the Cretan Revolt of 1866–1869. During this period, the Ottoman fleet, under Hobart Pasha, remained largely inactive, with training confined to reading translated British instruction manuals. Necm-i Şevket was assigned to the I Squadron of the Asiatic Fleet, along with her sister ship and the ironclads and . Early in the ship's career, the Ottoman ironclad fleet was activated every summer for short cruises from the Golden Horn to the Bosporus to ensure their propulsion systems were in operable condition.
Russo-Turkish War
The Ottoman fleet began mobilizing in September 1876 to prepare for a conflict with Russia, as tensions with the country had been growing for several years, an insurrection had begun in Ottoman Bosnia in mid-1875, and Serbia had declared war on the Ottoman Empire in July 1876. The Russo-Turkish War began on 24 April 1877 with a Russian declaration of war. Necm-i Şevket spent the war in the Black Sea squadron, with the bulk of the Ottoman ironclad fleet. The Ottoman fleet, commanded by Hobart Pasha, was vastly superior to the Russian Black Sea Fleet; the only ironclads the Russians possessed there were and , circular vessels that had proved to be useless in service. The presence of the fleet did force the Russians to keep two corps in reserve for coastal defense, but the Ottoman high command failed to make use of its naval superiority in a more meaningful way, particularly to hinder the Russian advance into the Balkans. Hobart Pasha took the fleet to the eastern Black Sea, where he was able to make a more aggressive use of it to support the Ottoman forces battling the Russians in the Caucasus. The fleet bombarded Poti and assisted in the defense of Batumi.
On 14 May 1877, an Ottoman squadron consisting of Necm-i Şevket and the ironclads , , , , and bombarded Russian positions around the Black Sea port of Sokhumi before landing infantry and arming the local populace to start an uprising against the Russians. The Ottomans captured Sokhumi two days later. Over the course of the war, Russian torpedo boats made several attacks on the vessels stationed in Batumi, but Necm-i Şevket was not damaged in any of them. The Ottoman fleet continued to support the Ottoman garrison at Batumi, which held out against constant Russian attacks to the end of the war.
Later career
On 7 March 1878, Necm-i Şevket ran into the British steamship John Middleton at Tophane. John Middleton was driven into and then sank. HMS Antelope, which was severely damaged herself, rescued the crew of John Middleton. After the end of the war in 1878, Necm-i Şevket was laid up in Constantinople. This was in part due to chronically low budgets, and in part due to the fact that the Sultan, Abdul Hamid II, who had come to power after a coup deposed Murad V that involved senior members of the Navy, distrusted the Navy. The annual summer cruises to the Bosporus ended. By the mid-1880s, the Ottoman ironclad fleet was in poor condition, and Necm-i Şevket was unable to go to sea. Many of the ships' engines were unusable, having seized up from rust, and their hulls were badly fouled. The British naval attaché to the Ottoman Empire at the time estimated that the Imperial Arsenal would take six months to get just five of the ironclads ready to go to sea. Throughout this period, the ship's crew was limited to about one-third the normal figure. During a period of tension with Greece in 1886, the fleet was brought to full crews and the ships were prepared to go to sea, but none actually left the Golden Horn, and they were quickly laid up again. By that time, most of the ships were capable of little more than . In 1890, the ship was taken to the Imperial Arsenal for refitting, and new boilers were installed. The ship also received a battery of light guns, including two Krupp guns, two Krupp guns, two Hotchkiss revolver cannon, and one Nordenfelt gun. The ship returned to service on 12 February 1892.
At the start of the Greco-Turkish War in February 1897, Necm-i Şevket was assigned to the II Squadron. The Ottomans inspected the fleet and found almost all of the vessels, including Necm-i Şevket, to be completely unfit for combat against the Greek Navy. Many of the ships had rotted hulls and their crews were poorly trained. Necm-i Şevket was one of two ironclads found to be in usable condition, the other being . In April and May, the ship escorted troopships transporting infantry from western Anatolia to Gelibolu, and while conducting these operations, she took part in gunnery exercises. On 15 May, Necm-i Şevket and the ironclads Mesudiye, , , and , along with several other vessels conducted a major training exercise, where severe deficiencies in the level of training were revealed, particularly with the men's ability to operate the ships' guns. In September 1897, the war came to an end, and the Ottoman fleet returned to Constantinople.
The condition of the Ottoman fleet could not be concealed from foreign observers, which proved to be an embarrassment for the government and finally forced Abdul Hamid II to authorize a modernization program, which recommended that the ironclads be modernized in foreign shipyards. German firms, including Krupp, Schichau-Werke, and AG Vulcan, were to rebuild the ships, but after having surveyed the ships, withdrew from the project in December 1897 owing to the impracticality of modernizing the ships and the inability of the Ottoman government to pay for the work. By 1900, the contracts were finally awarded, and Necm-i Şevket was not included in the program. Instead, the ship was employed as a stationary ship based in Selanik from 1899 to 1909, at which point she was converted into a barracks ship in Constantinople.
On 30 October 1912, during the First Balkan War, Necm-i Şevket was reactivated to stop the Bulgarian advance against the Ottoman defenders at Çatalca. She was joined by the ironclad Iclaliye; both vessels had to be towed into place, and they remained in their firing positions for only a few days. The two ships, joined by the pre-dreadnought battleships and and the modernized Mesudiye and Asar-i Tevfik, were towed to Büyükçekmece, where they remained from 15 to 20 November, though they made little contact with Bulgarian forces. The ship resumed her barracks ship duties after the war and was decommissioned in 1929 and was thereafter broken up.
Notes
References
Asar-i Şevket-class ironclads
1868 ships
Maritime incidents in March 1878
Ships built in France
|
V-Partei3, known officially as V-Partei³ – Party for Change, Vegetarians and Vegans (), is a German political party that was founded in April 2016 in Munich, Bavaria. The principal focus of the party is animal rights and environmentalism. It is the sole political party in Germany devoted to encouraging the adoption of a plant-based diet. The party took part in the North Rhine-Westphalia state elections in 2017, and received 10,013 votes or 0.12% of the vote, far below the 5% threshold required to enter the State Landtag. Notable members of the party were actress Barbara Rütting and Axel Ritt, guitarist of the band Grave Digger, who joined in May 2017.
Party program
The party manifesto created for the 2017 Bundestag elections focuses on improving animal welfare conditions in Germany and changing existing government policies so that Germany reduces its consumption of meat. The manifesto covers 10 key areas, some of which are noted below.
Animal testing
The party opposes the testing of animals and wants to end the practice entirely. The platform instead advocates for cruelty-free medical research, which includes the use of human simulation.
Transport policy
The party supports projects that increase the use of bicycles, as an eco-friendly way of travelling. V-Partei³ also wants to introduce speed limits on roads in an attempt to reduce air pollution. Their manifesto advocates a speed limit on highways and general speed limits elsewhere.
Energy policy
The party wants to see an end to nuclear power, a reduction in the number of coal-fired and biogas power plants and greater subsidies for renewable energy, with the aim of reaching 100% renewable in the next few decades.
Economic policy
The V-Partei³ opposes any genetic engineering in agriculture. The party opposes TTIP, which they say softens labour rights and causes climate destruction. They advocate a basic income on the following grounds:
It must be a living wage.
There must be an individual entitlement.
The income must not be means-tested.
It must be made available without any work obligation.
Health policy
The V-Partei³ supports the legalization of cannabis.
Election results
References
External links
Official website (in German)
V-Partei3 on the website of the Federal Agency for Civic Education
Animal advocacy parties
Political parties established in 2016
2016 establishments in Germany
Universal basic income in Germany
Political parties supporting universal basic income
|
```csharp
// ==========================================================================
// Squidex Headless CMS
// ==========================================================================
// ==========================================================================
using System.Text.RegularExpressions;
using Squidex.Domain.Apps.Core.Apps;
using Squidex.Domain.Apps.Entities.Contents;
using Squidex.Domain.Apps.Entities.Contents.Text;
using Squidex.Hosting;
using Squidex.Infrastructure;
using Squidex.Infrastructure.Json;
namespace Squidex.Extensions.Text.ElasticSearch;
/// <summary>
/// Full-text and geo search implementation backed by an ElasticSearch index.
/// Index commands are translated into bulk requests; queries are translated
/// into the ElasticSearch query DSL via anonymous objects.
/// </summary>
public sealed partial class ElasticSearchTextIndex : ITextIndex, IInitializable
{
    // Presumably used to detect "<language>:" field prefixes in query text;
    // neither regex is referenced in this part of the class.
    private static readonly Regex RegexLanguageNormal = BuildLanguageRegexNormal();

    // NOTE(review): the "$^" anchor pair in this pattern can only match an
    // empty input. If the intent is "language prefix at start of string",
    // the "$" looks unintentional — confirm before changing.
    private static readonly Regex RegexLanguageStart = BuildLanguageRegexStart();

    // Retained for diagnostics/serialization needs; a dead debug
    // serialization of the query was removed from SearchAsync below.
    private readonly IJsonSerializer jsonSerializer;
    private readonly IElasticSearchClient elasticClient;
    private readonly QueryParser queryParser = new QueryParser(ElasticSearchIndexDefinition.GetFieldPath);
    private readonly string indexName;

    public ElasticSearchTextIndex(IElasticSearchClient elasticClient, string indexName, IJsonSerializer jsonSerializer)
    {
        this.elasticClient = elasticClient;
        this.indexName = indexName;
        this.jsonSerializer = jsonSerializer;
    }

    /// <summary>Applies the index definition (mapping) to the target index.</summary>
    public Task InitializeAsync(
        CancellationToken ct)
    {
        return ElasticSearchIndexDefinition.ApplyAsync(elasticClient, indexName, ct);
    }

    /// <summary>Intentionally a no-op: the index is not cleared from here.</summary>
    public Task ClearAsync(
        CancellationToken ct = default)
    {
        return Task.CompletedTask;
    }

    /// <summary>
    /// Translates the given index commands into one bulk request and sends
    /// it. Skips the round-trip entirely when no operations are produced.
    /// </summary>
    public Task ExecuteAsync(IndexCommand[] commands,
        CancellationToken ct = default)
    {
        var args = new List<object>();

        foreach (var command in commands)
        {
            CommandFactory.CreateCommands(command, args, indexName);
        }

        if (args.Count == 0)
        {
            return Task.CompletedTask;
        }

        return elasticClient.BulkAsync(args, ct);
    }

    /// <summary>
    /// Runs a geo-distance search, filtered to the query's schema, the
    /// query's geo field, and contents served in the given scope.
    /// Returns the matching content ids.
    /// </summary>
    public async Task<List<DomainId>?> SearchAsync(App app, GeoQuery query, SearchScope scope,
        CancellationToken ct = default)
    {
        Guard.NotNull(app);
        Guard.NotNull(query);

        var serveField = GetServeField(scope);

        var elasticQuery = new
        {
            query = new
            {
                @bool = new
                {
                    filter = new object[]
                    {
                        new
                        {
                            term = new Dictionary<string, object>
                            {
                                ["schemaId.keyword"] = query.SchemaId.ToString()
                            }
                        },
                        new
                        {
                            term = new Dictionary<string, string>
                            {
                                ["geoField.keyword"] = query.Field
                            }
                        },
                        new
                        {
                            term = new Dictionary<string, string>
                            {
                                [serveField] = "true"
                            }
                        },
                        new
                        {
                            geo_distance = new
                            {
                                geoObject = new
                                {
                                    lat = query.Latitude,
                                    lon = query.Longitude
                                },
                                distance = $"{query.Radius}m"
                            }
                        }
                    }
                }
            },
            _source = new[]
            {
                "contentId"
            },
            size = query.Take
        };

        return await SearchAsync(elasticQuery, ct);
    }

    /// <summary>
    /// Runs a full-text search scoped to the app and serve flag. Required
    /// schemas are added as a hard filter; a preferred schema only boosts
    /// matching documents via a "should" clause. Returns null when the text
    /// cannot be parsed into a query.
    /// </summary>
    public async Task<List<DomainId>?> SearchAsync(App app, TextQuery query, SearchScope scope,
        CancellationToken ct = default)
    {
        Guard.NotNull(app);
        Guard.NotNull(query);

        var parsed = queryParser.Parse(query.Text);

        if (parsed == null)
        {
            return null;
        }

        var serveField = GetServeField(scope);

        var elasticQuery = new
        {
            query = new
            {
                @bool = new
                {
                    filter = new List<object>
                    {
                        new
                        {
                            term = new Dictionary<string, object>
                            {
                                ["appId.keyword"] = app.Id.ToString()
                            }
                        },
                        new
                        {
                            term = new Dictionary<string, string>
                            {
                                [serveField] = "true"
                            }
                        }
                    },
                    must = new
                    {
                        query_string = new
                        {
                            query = parsed.Text
                        }
                    },
                    should = new List<object>()
                }
            },
            _source = new[]
            {
                "contentId"
            },
            size = query.Take
        };

        if (query.RequiredSchemaIds?.Count > 0)
        {
            var bySchema = new
            {
                terms = new Dictionary<string, object>
                {
                    ["schemaId.keyword"] = query.RequiredSchemaIds.Select(x => x.ToString()).ToArray()
                }
            };

            elasticQuery.query.@bool.filter.Add(bySchema);
        }
        else if (query.PreferredSchemaId.HasValue)
        {
            var bySchema = new
            {
                terms = new Dictionary<string, object?>
                {
                    ["schemaId.keyword"] = query.PreferredSchemaId.ToString()
                }
            };

            elasticQuery.query.@bool.should.Add(bySchema);
        }

        // Fix: a leftover debug statement serialized the query here on every
        // call and discarded the result; it has been removed.
        return await SearchAsync(elasticQuery, ct);
    }

    // Executes the prepared query and projects each hit's contentId into a
    // DomainId list.
    private async Task<List<DomainId>> SearchAsync(object query,
        CancellationToken ct)
    {
        var hits = await elasticClient.SearchAsync(indexName, query, ct);

        var ids = new List<DomainId>();

        foreach (var item in hits)
        {
            ids.Add(DomainId.Create(item["_source"]["contentId"]));
        }

        return ids;
    }

    // Maps the search scope to the index field that flags whether a content
    // document is served in that scope.
    private static string GetServeField(SearchScope scope)
    {
        return scope == SearchScope.Published ? "servePublished" : "serveAll";
    }

    [GeneratedRegex("[^\\w]+([a-z\\-_]{2,}):", RegexOptions.ExplicitCapture | RegexOptions.Compiled)]
    private static partial Regex BuildLanguageRegexNormal();

    [GeneratedRegex("$^([a-z\\-_]{2,}):", RegexOptions.ExplicitCapture | RegexOptions.Compiled)]
    private static partial Regex BuildLanguageRegexStart();
}
```
|
```html
<!DOCTYPE html>
<html>
<head>
    <title>England | Flags</title>
    <style>
        /* Flag of England (St George's Cross) drawn in pure CSS.
           The white field is the element's background colour; the red
           cross is two stacked linear-gradient layers (the vertical bar
           listed first, so it paints on top of the horizontal bar). */
        #england
        {
            width: 160px;
            height: 96px;
            background-color: #FFFFFF;
            background-image:
                /* Vertical red bar: x 72px..88px, full height. */
                linear-gradient(90deg, transparent, transparent 72px,
                                #CE1124 72px, #CE1124 88px,
                                transparent 88px),
                /* Horizontal red bar: y 40px..56px, full width.
                   Fix: the original contained redundant stops — a white
                   24-40px band over the white background and a zero-width
                   white stop at 56px — which changed nothing visually. */
                linear-gradient(180deg, transparent, transparent 40px,
                                #CE1124 40px, #CE1124 56px,
                                transparent 56px);
            box-shadow: 0 0 16px 3px #E1E1E1;
            position: relative;
        }
    </style>
</head>
<body>
    <div id="england"></div>
</body>
</html>
```
|
```qml
import QtQuick 2.4
import Qt.labs.folderlistmodel 2.1
import QtGraphicalEffects 1.0
import QtMultimedia 5.6
import QtGamepad 1.0
import "Oculus.js" as Oculus
Rectangle {
    width: 1920
    height: 1080
    color: "#183755"
    id: mainWindow

    // Build the dynamic objects defined in Oculus.js once the scene loads.
    Component.onCompleted: Oculus.createObjects();

    FolderListModel {
        id: assetsModel
        folder: Revive.BaseURL + 'CoreData/Manifests/'
        nameFilters: ["*.json"]
        showDirs: false
        onCountChanged: {
            // If an application was previously installed, its assets bundle remains.
            // Check if the corresponding applications are still installed.
            for (var i = 0; i < assetsModel.count; i++)
            {
                var key = assetsModel.get(i, "fileName");
                console.log("Found assets bundle " + key);
                var appManifest = key.substring(0, key.indexOf("_assets"));
                // Verify only assets files (names containing "_assets").
                if (appManifest.length !=0)
                    Oculus.verifyAppManifest(appManifest);
            }
        }
    }

    // Shown instead of the grid when no Oculus library was detected.
    Text {
        id: emptyText
        visible: !Revive.LibraryFound
        x: 644
        y: 363
        width: 1052
        height: 134
        color: "#1cc4f7"
        text: qsTr("No Oculus Store games found, please make sure the Oculus software is installed")
        horizontalAlignment: Text.AlignHCenter
        wrapMode: Text.WordWrap
        anchors.horizontalCenter: parent.horizontalCenter
        anchors.verticalCenter: parent.verticalCenter
        font.pixelSize: 56
    }

    // Built-in first-party apps; store-installed apps are presumably added
    // to this model at runtime by Oculus.js.
    ListModel {
        id: coverModel
        ListElement {
            coverURL: "SupportAssets/oculus-worlds/cover_square_image.jpg"
            libraryId: "0"
            appKey: "oculus-worlds"
            appId: "1112064135564993"
        }
        ListElement {
            coverURL: "SupportAssets/oculus-dreamdeck-nux/cover_square_image.jpg"
            libraryId: "0"
            appKey: "oculus-dreamdeck-nux"
            appId: "919445174798085"
        }
        ListElement {
            coverURL: "SupportAssets/oculus-touch-tutorial/cover_square_image.jpg"
            libraryId: "0"
            appKey: "oculus-touch-tutorial"
            appId: "1184903171584429"
        }
        ListElement {
            coverURL: "SupportAssets/oculus-first-contact/cover_square_image.jpg"
            libraryId: "0"
            appKey: "oculus-first-contact"
            appId: "1217155751659625"
        }
    }

    SoundEffect {
        id: activateSound
        source: OpenVR.URL + "content/panorama/sounds/activation.wav"
    }
    SoundEffect {
        id: failSound
        source: OpenVR.URL + "content/panorama/sounds/activation_change_fail.wav"
    }
    SoundEffect {
        id: moveSound
        source: OpenVR.URL + "content/panorama/sounds/focus_change.wav"
        volume: 0.6
    }

    // App id of the most recently launched application, used by the
    // heartbeat timer below.
    property var currentAppId: "0"

    Component {
        id: coverDelegate
        Item {
            width: coverGrid.cellWidth
            height: coverGrid.cellHeight
            // Fallback label, visible when the cover image does not load.
            Text {
                color: "#1cc4f7"
                id: coverText
                text: appKey
                font.pixelSize: 24
                width: parent.width - 25
                anchors.centerIn: parent
                wrapMode: Text.WordWrap
            }
            Image {
                id: coverImage
                anchors.fill: parent
                fillMode: Image.Pad
                source: coverURL
                MouseArea {
                    id: coverArea
                    hoverEnabled: true
                    anchors.fill: parent
                    onHoveredChanged: coverGrid.currentIndex = index
                    onPressed: {
                        if (Revive.launchApplication(appKey)) {
                            activateSound.play();
                            currentAppId = appId;
                            heartbeat.start();
                        } else {
                            failSound.play();
                        }
                    }
                }
            }
        }
    }

    // Periodically reports the running app while one is active.
    Timer {
        id: heartbeat
        interval: 10000
        repeat: true
        onTriggered: {
            Oculus.heartbeat(currentAppId)
        }
    }

    Component {
        id: coverHighlight
        Rectangle {
            id: coverRect
            x: coverGrid.currentItem.x + 5
            y: coverGrid.currentItem.y + 5
            width: coverGrid.cellWidth - 10
            height: coverGrid.cellHeight - 10
            border.color: "#b4dff7"
            border.width: 5
            radius: 5
            RectangularGlow {
                anchors.fill: parent
                glowRadius: 5
                spread: 0.2
                color: parent.border.color
                cornerRadius: parent.radius + glowRadius
            }
        }
    }

    GridView {
        id: coverGrid
        visible: Revive.LibraryFound
        focus: true
        cellHeight: 384
        cellWidth: 384
        anchors.fill: parent
        model: coverModel
        delegate: coverDelegate
        highlight: coverHighlight
        highlightFollowsCurrentItem: false
        keyNavigationWraps: true
    }

    // Gamepad navigation: left stick and D-pad move the selection, A
    // launches the selected app.
    Gamepad {
        property real lastX: 0.0
        property real lastY: 0.0
        onAxisLeftXChanged: {
            if (OpenVR.gamepadFocus) {
                if (axisLeftX > 0.5 && lastX < 0.5) {
                    moveSound.play();
                    coverGrid.moveCurrentIndexRight();
                }
                if (axisLeftX < -0.5 && lastX > -0.5) {
                    moveSound.play();
                    coverGrid.moveCurrentIndexLeft();
                }
            }
            lastX = axisLeftX;
        }
        onAxisLeftYChanged: {
            if (OpenVR.gamepadFocus) {
                if (axisLeftY > 0.5 && lastY < 0.5) {
                    moveSound.play();
                    coverGrid.moveCurrentIndexDown();
                }
                if (axisLeftY < -0.5 && lastY > -0.5) {
                    moveSound.play();
                    coverGrid.moveCurrentIndexUp();
                }
            }
            lastY = axisLeftY;
        }
        onButtonLeftChanged: {
            if (buttonLeft && OpenVR.gamepadFocus) {
                moveSound.play();
                coverGrid.moveCurrentIndexLeft();
            }
        }
        onButtonUpChanged: {
            if (buttonUp && OpenVR.gamepadFocus) {
                moveSound.play();
                coverGrid.moveCurrentIndexUp();
            }
        }
        onButtonRightChanged: {
            if (buttonRight && OpenVR.gamepadFocus) {
                moveSound.play();
                coverGrid.moveCurrentIndexRight();
            }
        }
        onButtonDownChanged: {
            if (buttonDown && OpenVR.gamepadFocus) {
                moveSound.play();
                coverGrid.moveCurrentIndexDown();
            }
        }
        onButtonAChanged: {
            if (buttonA && OpenVR.gamepadFocus && coverGrid.currentIndex != -1) {
                var cover = coverModel.get(coverGrid.currentIndex);
                // Fix: mirror the mouse-click launch path — only play the
                // activation sound on success, play the fail sound on
                // failure, and start the heartbeat for the launched app.
                // (Previously the sound always played and no heartbeat ran.)
                if (Revive.launchApplication(cover.appKey)) {
                    activateSound.play();
                    currentAppId = cover.appId;
                    heartbeat.start();
                } else {
                    failSound.play();
                }
            }
        }
    }

    // Dimming overlay with a spinner while OpenVR reports loading.
    Rectangle {
        id: loading
        color: "#80000000"
        visible: OpenVR.loading
        anchors.fill: parent
        z: 1
        AnimatedImage {
            id: loadingIcon
            width: 100
            height: 100
            anchors.horizontalCenter: parent.horizontalCenter
            anchors.verticalCenter: parent.verticalCenter
            source: OpenVR.URL + "content/vrmonitor/icons/icon_loading.gif"
        }
    }

    // "No link" indicator shown when the platform connection is down.
    Image {
        id: link
        x: 10
        y: 10
        width: 50
        height: 50
        opacity: 0.5
        source: "no-link.svg"
        fillMode: Image.PreserveAspectFit
        visible: !Platform.connected
    }
}
```
|
Paul Helbronner (24 April 1871 – 18 October 1938) was a French topographer, alpinist and geodesist who pioneered cartography of the French Alps. Pointe Helbronner in the Mont Blanc massif is named in his honor.
References
1871 births
1938 deaths
French topographers
Members of the French Academy of Sciences
Fould family
|
The Women's Windsurfing RS:X is a sailing event on the Sailing at the SEA Games programme at the National Sailing Centre.
Schedule
All times are Singapore Standard Time (UTC+08:00)
Results
Notes
If sailors are disqualified or do not complete the race, 7 points are assigned for that race with 6 boats, 6 points for race with 5 boats, and 5 points for race with 4 boats
Scoring abbreviations are defined as follows:
OCS – On course side of the starting line
DSQ – Disqualified
DNF – Did Not Finish
DNS – Did Not Start
References
Women's Windsurfing RS:X
Windsurfing at multi-sport events
Women's sports competitions in Singapore
RS:X class sailing competitions
South
|
OrbusNeich Medical Group Holdings Limited (OrbusNeich) ( ) is a company that designs, develops, manufactures and markets medical devices for the treatment of vascular diseases.
In 2013, OrbusNeich received CE mark for the world's first and only dual therapy stent, the COMBO Dual Therapy Stent. The COMBO Stent features active endothelial progenitor cell (EPC) capture technology, which promotes the accelerated natural healing of the vessel wall after the implantation of a stent. Several trials including a randomized study dedicated to patients with Acute Coronary Syndrome (ACS) and registries with a sizeable ACS patient subset demonstrated consistently low event rates and short DAPT option of 3 months, if needed.
History
Neich Medical, with its headquarters in Hong Kong, China, established its manufacturing facilities in Shenzhen, China, in 2000 to begin the research and development of interventional medical devices, including balloon catheters.
By 2001, the company had obtained license approval for the sale of its first balloon catheter product in China and the European Union markets. Other balloon catheters soon followed.
Neich Medical continued its international expansion in 2005 by acquiring its longstanding partner Orbus Medical Technologies Inc., a company with strong ties in the European medical community, whose main business was the research and development of interventional medical devices, pioneering the design of the dual helical coronary stent platform, thus forming OrbusNeich Medical.
Over the years, OrbusNeich has brought to market a range of stent and balloon devices, including a series of angioplasty balloon catheters for the management of the most complex lesions, including chronic total occlusion, for both the treatment of coronary and peripheral vascular disease.
In addition to PCI/PTA balloons and coronary stent products, the company is actively expanding into neuro-vascular intervention and structural heart disease areas.
Products
OrbusNeich owns more than 180 granted patents globally.
Locations
OrbusNeich is represented throughout the world. The Corporate Headquarters are located in Hong Kong, and direct sales teams are operating from Hong Kong, Singapore, Malaysia, Japan, PRC, Germany, Switzerland, Spain and France. A distributor network is covering more than 70 countries across six continents. The company has a production and research & development facility in Shenzhen, China, an advanced research & development facility in Fort Lauderdale, Florida, USA and further production facilities in Hoevelaken, The Netherlands.
References
Medical technology companies of China
Manufacturing companies of Hong Kong
|
```java
/*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Contributors:
* ohun@live.cn ()
*/
package com.mpush.api.spi.handler;
import com.mpush.api.spi.Plugin;
/**
 * Created by ohun on 16/10/19.
 *
 * SPI extension point, discovered as a {@link Plugin}. Judging by its name
 * and package, implementations presumably validate a client's bind-user
 * request before the session is bound — confirm against the caller.
 *
 * @author ohun@live.cn ()
 */
public interface BindValidator extends Plugin {

    /**
     * Validates a bind request.
     *
     * @param userId the user id supplied by the client
     * @param data   opaque payload accompanying the request; its semantics
     *               are defined by the implementation
     * @return true when the bind should be accepted, false to reject it
     */
    boolean validate(String userId, String data);
}
```
|
is a 2006 French comedy film directed by Charlotte de Turckheim.
Plot
The Arbac de Neuville family are penniless aristocrats who, in order to survive and go on living in their crumbling castle, resort to tricks such as selling fake antiques to tourists. One day a bailiff commissioned by the Treasury arrives to recover nearly two million euros in unpaid taxes; if the family defaults, all of its property will be seized. A race against time to find the money follows. Everything is tried — a visit to distant cousins who are still rich, a job search at the employment centre on the advice of Pauline, the postwoman and a family friend — but nothing works. Finally Charles-Antoine, the eldest son, is sent to a society gathering to angle for a young unmarried aristocrat, namely Marie-Astrid de Saumur-Chantilly de Fortemure, a wealthy heiress who is particularly repulsive and stupid. The marriage is about to be arranged, but Charles-Antoine falls in love with Pauline, against the interests of the family — for the heart has its reasons, of which reason knows nothing...
Cast
Charlotte de Turckheim as Countess Solange
Jacques Weber as Count Charles Valéran
Catherine Jacob as Duchess Marie-Claude Saumur Chantilly
Urbain Cancelier as Duke Reginald Saumur Chantilly
Armelle as Marie-Karoline
Julia Piaton as Pauline
Johanna Piaton as Marie-Charlotte
Gaëlle Lebert as Marie-Astrid
Vincent Desagnat as Charles-Edouard
Rudi Rosenberg as Charles-Antoine
Victoria Abril as Duquessa Pilar de Malaga i Benidorm
Rossy de Palma as Duquessa Maria de Malaga i Benidorm
Hélène de Fougerolles as Marie-Stéphanie Montcougnet
Catherine Hosmalin as Aristo hostess
Chantal Ladesou as Aristo hostess
Sébastien Cauet as Lawyer Convert
Edith Perret as Countess Marthe Ambroisine
Eric Le Roch as Stanislas Montcougnet
Benjamin Castera as Charles-Eric
Antoine de Turckheim as Charles-Victor
Arthur Derancourt as Charles-Hubert
Oscar Derancourt as Charles-Gustave
Sébastien Cotterot as Gonzague
Swann Arlaud as A good looking man
Stéphane Bern as TV Host
Alban Lenoir as Bad guy 2
References
External links
2006 films
2000s French-language films
French comedy films
Films directed by Charlotte de Turckheim
2006 comedy films
2000s French films
|
```javascript
module.exports =
/******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 12);
/******/ })
/************************************************************************/
/******/ ({
/***/ 12:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
// WeChat mini-program grid-column component (weui "col").
// Pairs with the parent "row" component, which supplies the shared `gutter`
// value through the component-relations API.
Component({
  properties: {
    // Number of grid units (out of 24) this column spans.
    span: {
      type: Number,
      value: 24
    },
    // Number of grid units of left offset.
    offset: {
      type: Number,
      value: 0
    },
    // Units to push the column right; -1 means "not set".
    push: {
      type: Number,
      value: -1
    },
    // Units to pull the column left; -1 means "not set".
    pull: {
      type: Number,
      value: -1
    },
    // Responsive breakpoint values: either a span number or an object with
    // numeric `span`/`offset` fields; -1 means "not set".
    xs: {
      type: Number,
      optionalTypes: [Object],
      value: -1
    },
    sm: {
      type: Number,
      optionalTypes: [Object],
      value: -1
    },
    md: {
      type: Number,
      optionalTypes: [Object],
      value: -1
    },
    lg: {
      type: Number,
      optionalTypes: [Object],
      value: -1
    },
    xl: {
      type: Number,
      optionalTypes: [Object],
      value: -1
    }
  },
  data: {
    classList: ['weui-col'],
    gutter: 0,
    paddingLeft: 0,
    paddingRight: 0
  },
  relations: {
    "../row/index": {
      type: 'parent',
      linked: function linked(target) {
        // Copy the parent row's gutter and translate it into padding.
        this.data.gutter = Number(target.data.gutter);
        this.updateGutter();
      },
      linkChanged: function linkChanged(target) {
        this.data.gutter = Number(target.data.gutter);
        this.updateGutter();
      }
    }
  },
  attached: function attached() {
    this.updateCol();
  },
  methods: {
    // Rebuild the CSS class list from the current property values.
    // Padding is handled by updateGutter(); the previous version computed
    // (and discarded) padding values here as dead code, now removed.
    updateCol: function updateCol() {
      var classList = ['weui-col'];
      classList.push('weui-col-' + this.data.span);
      classList.push('weui-col-offset-' + this.data.offset);
      if (this.data.push !== -1) {
        this.data.push && classList.push('weui-col-push-' + this.data.push);
      }
      if (this.data.pull !== -1) {
        this.data.pull && classList.push('weui-col-pull-' + this.data.pull);
      }
      this.screenSizeSet('xs', classList);
      this.screenSizeSet('sm', classList);
      this.screenSizeSet('md', classList);
      this.screenSizeSet('lg', classList);
      this.screenSizeSet('xl', classList);
      return this.setData({
        classList: classList
      });
    },
    // Convert the parent row's gutter into symmetric horizontal padding.
    updateGutter: function updateGutter() {
      var paddingLeft = void 0,
          paddingRight = 0;
      if (this.data.gutter) {
        paddingLeft = this.data.gutter / 2 + 'px';
        paddingRight = paddingLeft;
      }
      this.setData({
        paddingLeft: paddingLeft,
        paddingRight: paddingRight
      });
    },
    // Append responsive classes for one breakpoint. Accepts either a plain
    // span number or an object with numeric `span`/`offset` fields.
    screenSizeSet: function screenSizeSet(screen, classList) {
      if (typeof this.data[screen] === 'number' && this.data[screen] !== -1) {
        classList.push('weui-col-' + screen + '-' + this.data[screen]);
      } else if (_typeof(this.data[screen]) === 'object') {
        typeof this.data[screen].offset === 'number' && classList.push('weui-col-' + screen + '-offset-' + this.data[screen].offset);
        typeof this.data[screen].span === 'number' && classList.push('weui-col-' + screen + '-' + this.data[screen].span);
      }
    }
  }
});
/***/ })
/******/ });
```
|
```javascript
/**
* ag-grid - Advanced Data Grid / Data Table supporting Javascript / React / AngularJS / Web Components
* @version v6.2.1
* @link path_to_url
* @license MIT
*/
/**
 * Property-decorator factory: marks a class attribute to be populated from a
 * `querySelector(selector)` lookup when the component is wired up.
 * The selector is recorded by querySelectorFunc via the decorator protocol.
 */
function QuerySelector(selector) {
    return querySelectorFunc.bind(this, selector);
}
exports.QuerySelector = QuerySelector;
/**
 * Decorator implementation backing @QuerySelector. Records the
 * (attributeName, selector) pair in the class's ag-Grid component metadata so
 * the component wiring can later assign the matching element.
 *
 * @param selector CSS selector to look up.
 * @param classPrototype prototype of the decorated class.
 * @param methodOrAttributeName name of the decorated attribute.
 * @param index defined (as a number) only when misused as a parameter decorator.
 */
function querySelectorFunc(selector, classPrototype, methodOrAttributeName, index) {
    // `== null` also rejects `undefined`, e.g. @QuerySelector() with no args,
    // which the previous strict-null check silently let through.
    if (selector == null) {
        console.error('ag-Grid: QuerySelector selector should not be null');
        return;
    }
    if (typeof index === 'number') {
        // A numeric index means the decorator was applied to a method
        // parameter rather than a class attribute.
        console.error('ag-Grid: QuerySelector should be on an attribute');
        return;
    }
    // it's an attribute on the class
    var props = getOrCreateProps(classPrototype);
    if (!props.querySelectors) {
        props.querySelectors = [];
    }
    props.querySelectors.push({
        attributeName: methodOrAttributeName,
        querySelector: selector
    });
}
/**
 * Method-decorator factory: marks a class method as a listener for the given
 * component event. The pair is recorded by listenerFunc via the decorator
 * protocol.
 */
function Listener(eventName) {
    return listenerFunc.bind(this, eventName);
}
exports.Listener = Listener;
/**
 * Decorator implementation backing @Listener. Records the
 * (methodName, eventName) pair in the class's ag-Grid component metadata so
 * the wiring code can subscribe the method to the event.
 *
 * @param eventName event to listen for.
 * @param target prototype of the decorated class.
 * @param methodName name of the decorated method.
 * @param descriptor property descriptor (unused).
 */
function listenerFunc(eventName, target, methodName, descriptor) {
    // `== null` also rejects `undefined`, e.g. @Listener() with no args,
    // matching the guard in querySelectorFunc.
    if (eventName == null) {
        console.error('ag-Grid: EventListener eventName should not be null');
        return;
    }
    // it's an attribute on the class
    var props = getOrCreateProps(target);
    if (!props.listenerMethods) {
        props.listenerMethods = [];
    }
    props.listenerMethods.push({
        methodName: methodName,
        eventName: eventName
    });
}
/**
 * Returns the ag-Grid component metadata object stored on `target`,
 * lazily creating and attaching an empty one on first use.
 */
function getOrCreateProps(target) {
    if (!target.__agComponentMetaData) {
        target.__agComponentMetaData = {};
    }
    return target.__agComponentMetaData;
}
```
|
```php
<?php
/*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
*/
namespace Google\Service\GoogleAnalyticsAdmin;
/**
 * Response model for a Google Analytics Admin (v1alpha) access-report run.
 *
 * Holds the report's dimension and metric column headers, the data rows,
 * the row count, and the quota state for the request.
 *
 * Generated API client class; property names mirror the JSON wire format.
 */
class GoogleAnalyticsAdminV1alphaRunAccessReportResponse extends \Google\Collection
{
  protected $collection_key = 'rows';
  protected $dimensionHeadersType = GoogleAnalyticsAdminV1alphaAccessDimensionHeader::class;
  protected $dimensionHeadersDataType = 'array';
  protected $metricHeadersType = GoogleAnalyticsAdminV1alphaAccessMetricHeader::class;
  protected $metricHeadersDataType = 'array';
  protected $quotaType = GoogleAnalyticsAdminV1alphaAccessQuota::class;
  protected $quotaDataType = '';
  /**
   * Row count of the access report result.
   * NOTE(review): presumably the total independent of paging — confirm
   * against the Analytics Admin API reference.
   *
   * @var int
   */
  public $rowCount;
  protected $rowsType = GoogleAnalyticsAdminV1alphaAccessRow::class;
  protected $rowsDataType = 'array';
  /**
   * @param GoogleAnalyticsAdminV1alphaAccessDimensionHeader[] $dimensionHeaders
   */
  public function setDimensionHeaders($dimensionHeaders)
  {
    $this->dimensionHeaders = $dimensionHeaders;
  }
  /**
   * @return GoogleAnalyticsAdminV1alphaAccessDimensionHeader[]
   */
  public function getDimensionHeaders()
  {
    return $this->dimensionHeaders;
  }
  /**
   * @param GoogleAnalyticsAdminV1alphaAccessMetricHeader[] $metricHeaders
   */
  public function setMetricHeaders($metricHeaders)
  {
    $this->metricHeaders = $metricHeaders;
  }
  /**
   * @return GoogleAnalyticsAdminV1alphaAccessMetricHeader[]
   */
  public function getMetricHeaders()
  {
    return $this->metricHeaders;
  }
  /**
   * @param GoogleAnalyticsAdminV1alphaAccessQuota $quota
   */
  public function setQuota(GoogleAnalyticsAdminV1alphaAccessQuota $quota)
  {
    $this->quota = $quota;
  }
  /**
   * @return GoogleAnalyticsAdminV1alphaAccessQuota
   */
  public function getQuota()
  {
    return $this->quota;
  }
  /**
   * @param int $rowCount
   */
  public function setRowCount($rowCount)
  {
    $this->rowCount = $rowCount;
  }
  /**
   * @return int
   */
  public function getRowCount()
  {
    return $this->rowCount;
  }
  /**
   * @param GoogleAnalyticsAdminV1alphaAccessRow[] $rows
   */
  public function setRows($rows)
  {
    $this->rows = $rows;
  }
  /**
   * @return GoogleAnalyticsAdminV1alphaAccessRow[]
   */
  public function getRows()
  {
    return $this->rows;
  }
}
// Adding a class alias for backwards compatibility with the previous class name.
// NOTE: the previous line was syntactically invalid (garbled alias string with a
// missing opening quote); restored to the generator's conventional legacy name.
class_alias(GoogleAnalyticsAdminV1alphaRunAccessReportResponse::class, 'Google_Service_GoogleAnalyticsAdmin_GoogleAnalyticsAdminV1alphaRunAccessReportResponse');
```
|
The sport of Australian rules football has been played in Fiji since at least 1963, when an exhibition was held in Suva.
Fiji has produced several professional players for the Australian Football League; however, organised Australian rules football did not begin there until 2009.
The governing body for the sport on the islands is AFL Fiji, created in 2009.
History
An Australian rules exhibition was held at Suva in 1963. In 1965, St Kilda Football Club returned to Fiji as part of a proposed biennial end of season trip, but declined to further promote the game there.
The first television broadcasts of matches by the Australia Network began in 2002. In the period 2004–2008, a proposal for a serious ongoing Australian Rules football competition was mooted by former Fiji Police Commissioner, Andrew Hughes. Hughes had played footy at various amateur levels in Australia and was keen to promote it when he arrived in Fiji to take up his post as the nation's top cop. In mid-2004, Hughes, based in Suva, called a meeting within the Fiji police force of officers interested in assisting with his vision of using the force itself as the basis for developing and promoting a local footy competition. According to one who was present at that initial meeting in the Police Officers Mess, ‘Hughes himself was to be the trainer and teacher’ of the new code. A second meeting was held in Lautoka for interested officers based in the West. According to the Police Public Relations Officer at the time, Mesake Koroi, ‘many top rugby players joined in and it started off well, but there was no one except Hughes to run the [programme], so it was hard because, of course, he had other police work to attend to’. This then was the first effort to establish the sport in Fiji. The Fijian Australian Football Association was formed as a governing body but the sheer weight of workload for Hughes meant the FAFA went into recess without creating an organised league. (A failed bid was made for inclusion of the sport into the 2007 South Pacific Games held in Fiji.). Meanwhile, the Western Bulldogs became the first AFL club to actively scouting for recruits in Fiji. This project was later announced to begin with open trials in Suva and Labasa in October of that year 2004. 16 year-old Solomon Loki and 19 year-old Inoke Ratu were added to the Western Bulldogs International scholarship lists. They were not able to take up their offers regrettably because of visa obstructions arising out of Fiji's 2006 coup.
Additionally, in early 2005, representatives of the Fiji Daily Post (FDP) newspaper (Alan Hckling, publisher; Mesake Koroi, then general manager; and editor Robert Wolfgramm) met with Police Commissioner Hughes to alert him of their interest in promoting the code and to work with other like-minded locals and expats by giving publicity to footy through the newspaper. As the 2005 pre-season began, Melbourne-based footy enthusiast, Lex Neander, began filing match summaries for the FDP. His column soon turned into a weekly full page run every Tuesday which continued until the end of the 2008 Australian footy season.
In 2008 meanwhile, pushing footy in Fiji then fell to Fiji-born, David Rodan jnr, the professional player who began with Richmond but was then with Port Adelaide football club. David had long expressed an interest in bringing the code to Fiji and was anointed the AFL's official ambassador for the code. In 2008 his plans were shared with fellow Port Adelaide recruit, Alipate Carlile, and physical education teacher and wife-to-be, Carla Di Pasquale. Together they explored the possibility of Port Adelaide football club joining with him to re-establish the code here in Fiji by means of a new body - ‘AFL Fiji’. In a historic presentation to the PA football club, the Rodan-led trio outlined a proposed logo, a playing strip, and a timeline for establishing the code in Fiji. The trio also outlined where the Hughes initiative failed, and proposed a strategy for future success. The AFL accepted the grand design of his vision and partnered Rodan's initiative with their representative, Andrew Cadzow, who was overseeing "AFL Asia-Pacific" in Brisbane.
In July 2009, Andrew arrived at Suva's Holiday Inn on behalf of the AFL with a brief to develop a plan and establish a steering committee to drive the Rodan-inspired vision into reality. The chief question because Rodan was still playing footy in Australia, was who on the ground would take the interest in footy to concretise it, give it substance, make it work. In response to a newspaper advertisement, fifteen Fiji locals turned up on the July 10th and 11th to hear what Andrew cadzow proposed. Apologies to the meeting were received from David Rodan snr, David Rodan jnr, Carla Di Pasquale, John and Marilyn (from Levuka homestay), and Mareta (from Iwala Events). But among those present were Peter Fulcher, Damian Ames and Pam, Nemani and Biri Rokobuli, Lia Ratu and son Inoke, Vasenai Loki and son Solo, Caroline Narruhn and son Joseph, and Lupe Wolfgramm and her sons, Max and Dylan (who had playing experience as juniors in Melbourne beforehand). From this formative group, Damian Ames was appointed inaugural Chairman. Pam, the Rokobuli and Loki families, as well as Caroline Narruhn were nominated to the Steering Committee.
During the meeting, Cadzow also outlined a future for an "AFL Fiji" that could potentially involve a calendar of events packed with school programmes, talent searches, Auskick/Fijikick rollouts, the presence of a youth ambassador, corporate sponsorships, and participation in Oceania and International Cup championships – all to be overseen by a structured AFL Fiji steering committee and advisory board. The immediate aim was to get up and running, organised and operational, by September 2009, with an official AFL Fiji launch in October as part of the inaugural David Rodan Cup Under 16s competition. That inter-school competition would serve as a filter for local players to join a new national team to compete in December 2009, in an inaugural Under 16s Oceania Cup championships hosted in Fiji.
After the AFL creation of the "AFL Oceania" in Australia in 2008, Fiji was identified as a major target for Australian rules football development in the region. AFL Fiji's formation in mid-2009, by the creation of national committee. In August (2009), Dylan Wolfgramm was selected to be Fiji's representative in an Oceania Under 23 team to play exhibition matches in Cairns on the occasion of the sitting of the Pacific Islands Forum there. Kevin Rudd and other leaders were present to witness the Mal Michael-coached Oceania team win a match against a North Queensland representative side. In September (2009) a newly formed AFL Fiji Steering Committee met at the Holiday Inn and comprised: Tony Moore, Jordan Moore, Lupe Wolfgramm, Max Wolfgramm, Dylan Wolfgramm, Inoke Ratu, Lia Ratu, Solo Loki, Vasenai Loki, Damian Ames, Alex Hales (from the Fiji Daily Post), Caroline Narruhn, Joseph Narruhn, David Rodan Snr, David Rodan Jnr, and Carla Di Pasquale. The meeting settled dates for a forthcoming official launch of AFL Fiji and to set dates for and inaugural "David Rodan Cup" and inter-Pacific "Oceania Cup". ‘Super-clinics’ were also planned at Marist, Grammar, Dudley, Cathedral, International, Laucala Bay and Gospel secondary schools, along with training of development officers necessary for completing the preparatory skilling tasks. Volunteers were assigned for finding sponsorships, game venues, and maintaining the media profile of AFL Fiji.
In the second week of October 2009, the AFL's Andrew Cadzow arrived and co-launched "AFL Fiji". Coinciding with this, Chris Maple and others from the Western Bulldogs Football Club had begun running footy clinics and searches at various locations around Fiji. “With their rugby background they’re used to physical contact, they’re athletic, and from what I’ve seen today they’re very hard working – all the traits needed for AFL football,” Maple told Dale Carruthers, a Canadian journalist attached to the Fiji Daily Post newspaper. New AFL Fiji chairman, Damian Ames, also told the newspaper that Fiji was an ideal ground for introducing the code - “It’s an untapped source of athletes,” Ames said. But the major challenges of introducing the code Damian admitted was that “rugby is so entrenched here [in Fiji].” Ames, Cadzow and other supporters attended and co-supervised clinics held at Suva Grammar School on 14 October, and later at Marist Brothers, while Bulldog representatives Maple and crew were at Yat Sen School doing their talent search.
Following these first tentative steps to gauge local interest, on Friday 16 October 2009, AFL Fiji was modestly, officially and publicly launched with about thirty in attendance in the Banyan Room at Suva's Holiday Inn. Australia's High Commissioner, James Batley, kindly accepted my invitation to do the honours with best wishes for the future. Three days later, 19 October, the Diwali Day public holiday, Fiji "first footy carnival" leading to the "David Rodan Cup" was held. At the end of round-robin matches, the inaugural cup was awarded to Laucala Bay Secondary College, with Runners Up Suva Grammar School; and 3rd Place to Cathedral Secondary School, with 4th Place: Gospel High School. From these four teams, a "Fiji Power" squad was picked to contest the forthcoming inaugural "Oceania Cup" to be held in December (2009). The squad was named as: Laijiasa Bolenaivalu, Fuata Silisoma, Ropate Tobe, Darryll Arthur-Valentine, Jonathan Chongkit, Jiuta Vateitei, Viliame Tuni, Esekia Gibbons, Joeli Logavatu, Gabriel Ledua, Eroni Niumataiwalu, Anasa Yabaki, Wilson Kacivi, Penisoni Tuiova, Richard Niulevu, Semisi Apakuki, Sisa Qarikau, Kinivuwai Nanovo, Paula Rokotuiloma, Samuela Delai, Semi Tikoitoga, Titus Raihman, Ledua Tuberi, Mesake Dakai, Yabaki Cakautini, John Tuivanuakula, Solo Ratu, Dylan Wolfgramm, and Luke Gucake.
The inaugural ‘Under 16 Oceania Cup’ was held at Cathedral Secondary School grounds, 11–13 December (2009). Nauru, New Zealand, Samoa, Tonga and Fiji teams competed. In blustery weather, the fiercely but fairly contested Cup went to Tonga. All Pacific teams would head to Tonga in 2010. In the meantime, footy clinics were conducted in schools around the Suva-Nausori area with training sessions organised weekly at Albert Park in downtown Suva.
In 2010, a schools tournament was held, with more than 80 students from 14 different schools in Suva and Nausori represented in six teams that played round robin matches. The "Fiji Power" national team made its first appearance at the Under 16 Oceania Cup in Tonga in December 2010.
A senior team entered the 2011 Australian Football International Cup and were highly successful, taking the Division 2 title after dominating the Grand Final against France.
A local league with four teams (Nausori Cats, Raiwaqa Bulldogs, Suva City Swans and Suva Lions) has since commenced.
In October 2022, the Australian government announced its commitment to a support package for the sport in Fiji.
National team
Men's team
The Fiji men's national Australian rules football team has competed in the Australian Football International Cup three times, finishing 13th in 2011, 10th in 2014 and 8th in 2017.
Results
Women's team
The Fiji women's national Australian rules football team has competed in the Australian Football Women's International Cup twice, finishing 5th in 2014 and 6th in 2017.
Results
Notable players
Men's
Women's
References
Fiji
Fiji
Sport in Fiji
|
```objective-c
/**
* @license Apache-2.0
*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/*
* The following is auto-generated. Do not manually edit. See scripts/loops.js.
*/
#ifndef STDLIB_STRIDED_BASE_UNARY_S_Z_AS_Z_Z_H
#define STDLIB_STRIDED_BASE_UNARY_S_Z_AS_Z_Z_H
#include <stdint.h>
/*
* If C++, prevent name mangling so that the compiler emits a binary file having undecorated names, thus mirroring the behavior of a C compiler.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Applies a unary callback to strided input array elements and assigns results to elements in a strided output array.
 *
 * ## Notes
 *
 * -   Per the `s_z_as_z_z` loop-naming convention, this appears to read signed 8-bit integer input values, invoke the callback with double-precision complex arguments, and write double-precision complex results — TODO confirm against scripts/loops.js.
 *
 * @param arrays   array whose first element is a pointer to a strided input array and whose second element is a pointer to a strided output array
 * @param shape    array whose only element is the number of elements over which to iterate
 * @param strides  array containing strides (in bytes) for each strided array
 * @param fcn      callback to apply to each input element
 */
void stdlib_strided_s_z_as_z_z( uint8_t *arrays[], const int64_t *shape, const int64_t *strides, void *fcn );
#ifdef __cplusplus
}
#endif
#endif // !STDLIB_STRIDED_BASE_UNARY_S_Z_AS_Z_Z_H
```
|
Dirk Rupnow (born 1972 in Berlin, Germany) is a German historian. Since 2009 he has taught as assistant professor, since 2013 as associate professor at the University of Innsbruck, Austria, since 2010 he has been head of the institute for contemporary history there.
Life
Rupnow studied history, German literature, philosophy and art history at the Free University Berlin and the University of Vienna. He completed his studies in Vienna in 1999. In 2002 he received his PhD from the University of Klagenfurt, Austria. In 2009 he completed his Habilitation at the University of Vienna. In 1999/2000 he worked as a research associate for the Historians' Commission of the Republic of Austria. In 2000/01 he was a junior fellow at the International Research Center for Cultural Studies (IFK), Vienna, in 2004–07 a postdoc fellow in the framework of the Austrian Programme for Advanced Research and Technology (APART) of the Austrian Academy of Sciences (ÖAW), and in 2007–09 a visiting fellow at the Institute for Human Sciences (IWM), Vienna. Since 2007 he has been a lecturer, since 2009 senior lecturer, at the Department of Contemporary History at the University of Vienna. In 2008 he was elected a member of the Junge Kurie of the Austrian Academy of Sciences (ÖAW).
Rupnow taught as a visiting assistant professor in the Jewish studies program at Dartmouth College, at the University of Bielefeld and was invited for fellowships at the history department of Duke University, the Simon Dubnow Institute for Jewish History and Culture at Leipzig University and the Center for Advanced Holocaust Studies at the United States Holocaust Memorial Museum in Washington, DC.
Awards
For his work Rupnow received numerous international awards, e.g. 2009 the Fraenkel Prize in Contemporary History of the Wiener Library, London, and 2011 the „Humanities International“ award of the German Publishers and Booksellers Association.
Professional memberships
Junge Kurie, Austrian Academy of Sciences ÖAW (elected 2008), American Historical Association AHA (since 2003), German Studies Association GSA (since 2003), Austrian Scientists and Scholars in North America ASCINA (since 2004),
Society for History of Science GWG (elected 2007). Since 2017, he is also member of the International Academic Advisory Board of the Vienna Wiesenthal Institute for Holocaust Studies VWI.
Selected publications
(ed., along with Marcus Gräser) Österreichische Zeitgeschichte. Zeitgeschichte in Österreich. Eine Standortbestimmung in Zeiten des Umbruchs (Böhlaus Zeitgeschichtliche Bibliothek 41). Wien 2021, .
Judenforschung im Dritten Reich: Wissenschaft zwischen Politik, Propaganda und Ideologie (Historische Grundlagen der Moderne, Autoritäre Regime und Diktaturen 4). Nomos, Baden-Baden 2011, .
Zeitgeschichte ausstellen in Österreich. Museen – Gedenkstätten – Ausstellungen, Böhlau, Wien u.a. 2011, (hrsg. mit H. Uhl).
Pseudowissenschaft. Konzeptionen von Nichtwissenschaftlichkeit in der Wissenschaftsgeschichte (Suhrkamp Taschenbuch Wissenschaft 1897). Suhrkamp, Frankfurt a.M. 2008, (hrsg. mit V. Lipphardt/J. Thiel/Ch. Wessely).
Aporien des Gedenkens. Reflexionen über ‚Holocaust’ und Erinnerung (Edition Parabasen Bd. 5). Rombach Wissenschaften, Freiburg/Br.–Berlin 2006, .
Vernichten und Erinnern. Spuren nationalsozialistischer Gedächtnispolitik. Wallstein-Verlag, Göttingen 2005, .
Die „Zentralstelle für jüdische Auswanderung“ als Beraubungsinstitution. Veröffentlichungen der Österreichischen Historikerkommission. Vermögensentzug während der NS-Zeit sowie Rückstellungen und Entschädigungen seit 1945 in Österreich (Nationalsozialistische Institutionen des Vermögensentzuges, Bd. 20. 1. T.). Oldenbourg, München u.a. 2004, (mit G. Anderl).
Täter-Gedächtnis-Opfer. Das „Jüdische Zentralmuseum“ in Prag 1942–1945. Picus Verlag, Wien 2000, .
References
External links
Dirk Rupnow at the Homepage of the Instituts of Contemporary History at the University of Innsbruck
Dirk Rupnow, Fellow at the United States Holocaust Memorial Museum
Historians of the Holocaust
Contemporary historians
1972 births
Living people
|
```kotlin
package mega.privacy.android.app.presentation.audiosection
import androidx.compose.foundation.layout.Box
import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.foundation.layout.padding
import androidx.compose.foundation.layout.size
import androidx.compose.foundation.lazy.grid.rememberLazyGridState
import androidx.compose.foundation.lazy.rememberLazyListState
import androidx.compose.runtime.Composable
import androidx.compose.runtime.LaunchedEffect
import androidx.compose.runtime.getValue
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.res.painterResource
import androidx.compose.ui.res.stringResource
import androidx.compose.ui.unit.dp
import androidx.lifecycle.compose.collectAsStateWithLifecycle
import mega.privacy.android.app.R
import mega.privacy.android.app.fragments.homepage.SortByHeaderViewModel
import mega.privacy.android.app.presentation.audiosection.model.AudioUiEntity
import mega.privacy.android.domain.entity.preference.ViewType
import mega.privacy.android.legacy.core.ui.controls.LegacyMegaEmptyView
import mega.privacy.android.shared.original.core.ui.controls.progressindicator.MegaCircularProgressIndicator
/**
 * Compose entry point for the audio section screen.
 *
 * Renders one of three states: a loading spinner while audios are being
 * fetched, an empty-state placeholder when there are none, or the audio
 * list/grid.
 *
 * @param viewModel source of UI state for the audio section
 * @param modifier modifier applied to the root container
 * @param onChangeViewTypeClick invoked when the list/grid toggle is tapped
 * @param onSortOrderClick invoked when the sort-order header is tapped
 * @param onMenuClick invoked when an item's overflow menu is tapped
 * @param onLongClick invoked when an item is long-pressed
 */
@Composable
fun AudioSectionComposeView(
    viewModel: AudioSectionViewModel,
    modifier: Modifier = Modifier,
    onChangeViewTypeClick: () -> Unit = {},
    onSortOrderClick: () -> Unit = {},
    onMenuClick: (AudioUiEntity) -> Unit = {},
    onLongClick: (item: AudioUiEntity, index: Int) -> Unit = { _, _ -> },
) {
    val state by viewModel.state.collectAsStateWithLifecycle()
    val lazyListState = rememberLazyListState()
    val lazyGridState = rememberLazyGridState()
    val audioItems = state.allAudios
    val isListView = state.currentViewType == ViewType.LIST
    val shouldScrollToTop = state.scrollToTop

    // Jump back to the top of whichever view is active whenever the item set
    // changes and the state requests a scroll reset.
    LaunchedEffect(audioItems) {
        if (shouldScrollToTop) {
            if (isListView) {
                lazyListState.scrollToItem(0)
            } else {
                lazyGridState.scrollToItem(0)
            }
        }
    }

    Box(modifier = modifier) {
        if (state.progressBarShowing) {
            // Loading: spinner centred horizontally near the top.
            Box(
                modifier = Modifier
                    .fillMaxSize()
                    .padding(top = 20.dp),
                contentAlignment = Alignment.TopCenter,
            ) {
                MegaCircularProgressIndicator(
                    modifier = Modifier.size(50.dp),
                    strokeWidth = 4.dp,
                )
            }
        } else if (audioItems.isEmpty()) {
            // No audios: empty-state artwork and hint text.
            LegacyMegaEmptyView(
                modifier = Modifier,
                text = stringResource(id = R.string.homepage_empty_hint_audio),
                imagePainter = painterResource(id = R.drawable.ic_homepage_empty_audio)
            )
        } else {
            AudiosView(
                items = audioItems,
                accountType = state.accountDetail?.levelDetail?.accountType,
                isListView = isListView,
                listState = lazyListState,
                gridState = lazyGridState,
                sortOrder = stringResource(
                    id = SortByHeaderViewModel.orderNameMap[state.sortOrder]
                        ?: R.string.sortby_name
                ),
                modifier = Modifier,
                onChangeViewTypeClick = onChangeViewTypeClick,
                onSortOrderClick = onSortOrderClick,
                onClick = viewModel::onItemClicked,
                onLongClick = onLongClick,
                onMenuClick = onMenuClick,
                inSelectionMode = state.isInSelection
            )
        }
    }
}
```
|
Ein Vered () is a moshav in central Israel. Located in the Sharon plain, it falls under the jurisdiction of Lev HaSharon Regional Council. In it had a population of .
History
Before the 20th century the area formed part of the Forest of Sharon. It was an open woodland dominated by Mount Tabor Oak, which extended from Kfar Yona in the north to Ra'anana in the south. The local Arab inhabitants traditionally used the area for pasture, firewood and intermittent cultivation. The intensification of settlement and agriculture in the coastal plain during the 19th century led to deforestation and subsequent environmental degradation.
Ein Vered was established in the southern Sharon in 1930, by South African Jewry on land purchased by the Jewish National Fund, and named after 'Ayun el Werdat (Arabic for "the Springs of the Female Water Drawers"), the wells serving the area. It was originally an intensive farming community. In 1947 it had a population of 450.
Citrus groves, field crops, beehives and flowers were the principal branches of agriculture.
Landmarks
The Israel Tractor Museum is located in Ein Vered.
References
Moshavim
Populated places established in 1930
Jewish villages in Mandatory Palestine
Populated places in Central District (Israel)
1930 establishments in Mandatory Palestine
South African-Jewish culture in Israel
|
```go
package flux
import (
"net/http"
"net/url"
"strings"
"time"
"github.com/influxdata/chronograf/util"
)
// Shared transports for all clients to prevent leaking connections.
var (
	// Transport that skips TLS certificate verification.
	skipVerifyTransport = util.CreateTransport(true)
	// Transport with standard TLS certificate verification.
	defaultTransport = util.CreateTransport(false)
)

// Client is how we interact with Flux.
type Client struct {
	// URL is the base URL of the InfluxDB server.
	URL *url.URL
	// InsecureSkipVerify disables TLS certificate verification when true.
	InsecureSkipVerify bool
	// Timeout is applied to each HTTP request; zero means no timeout.
	Timeout time.Duration
}
// FluxEnabled returns true if the server has flux querying enabled.
//
// It POSTs an empty request to /api/v2/query and inspects the response
// Content-Type:
//   - 1.x with Flux enabled answers 'application/json' with a body of
//     `{"error":"mime: no media type"}`; with Flux disabled it answers
//     'text/plain; charset=utf-8' with `Flux query service disabled.`.
//   - 2.x always has Flux enabled; a 401 with
//     'application/json; charset=utf-8' and body
//     {"code":"unauthorized","message":"unauthorized access"} is received.
func (c *Client) FluxEnabled() (bool, error) {
	// Named `u` so the local does not shadow the imported net/url package.
	u := util.AppendPath(c.URL, "/api/v2/query")
	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
	if err != nil {
		return false, err
	}
	hc := &http.Client{
		Timeout: c.Timeout,
	}
	if c.InsecureSkipVerify {
		hc.Transport = skipVerifyTransport
	} else {
		hc.Transport = defaultTransport
	}
	resp, err := hc.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	contentType := resp.Header.Get("Content-Type")
	return strings.HasPrefix(contentType, "application/json"), nil
}
```
|
Robert Owen Keohane (born October 3, 1941) is an American academic working within the fields of international relations and international political economy. Following the publication of his influential book After Hegemony (1984), he has become widely associated with the theory of neoliberal institutionalism in international relations, as well as transnational relations and world politics in international relations in the 1970s.
He is Professor Emeritus of International Affairs at the Princeton School of Public and International Affairs, and has also taught at Swarthmore College, Duke University, Harvard University and Stanford University. A 2011 survey of International Relations scholars placed Keohane second in terms of influence and quality of scholarship in the last twenty years. According to the Open Syllabus Project, Keohane is the most frequently cited author on college syllabi for political science courses.
Early life
Keohane was born at the University of Chicago Hospitals. His education through the fifth grade was at the University of Chicago Laboratory Schools. When he was 10, the family moved to Mount Carroll, Illinois, where he attended public school and his parents taught at Shimer College. After the 10th grade, Keohane enrolled at Shimer through the school's early entrance program, which since 1950 has allowed selected high school students to enter college before completing high school. When later asked to compare his undergraduate education as an early entrant at Shimer with his graduate work at Harvard, Keohane remarked "it is not clear to me that I have ever been with a brighter set of people than those early entrants." Keohane currently serves on the Board of Trustees of Shimer College.
He earned a BA, with honors, from Shimer College in 1961. He obtained his PhD from Harvard in 1966, one year after he joined the faculty of Swarthmore College. He was the student of Harvard University Professor Stanley Hoffmann. He described Judith Shklar as his strongest intellectual mentor during his graduate studies. He has also described Kenneth Waltz and Karl Polanyi as influences.
Career
Keohane has taught at Swarthmore, Stanford, Brandeis, Harvard, and Duke. At Harvard he was Stanfield Professor of International Peace, and at Duke he was the James B. Duke Professor of Political Science.
He is the author of many works, including After Hegemony: Cooperation and Discord in the World Political Economy (Princeton University Press, 1984), for which he was awarded the second annual University of Louisville Grawemeyer Award in 1989 for "Ideas Improving World Order". Keohane describes the process of forming the theoretical insights of After Hegemony as follows during the late 1970s,
Keohane has been characterized as a key figure in the development of a discipline of International Political Economy in the United States. Along with Joseph Nye, Keohane coined the concept of complex interdependence to capture the ways in which power had been fragmented and diffused in economic affairs. Robert Keohane coined the term Hegemonic stability theory in a 1980 article for the notion that the international system is more likely to remain stable when a single nation-state is the dominant world power, or hegemon. Keohane's 1984 book After Hegemony used insights from the new institutional economics to argue that the international system could remain stable in the absence of a hegemon, thus rebutting hegemonic stability theory. Keohane showed that international cooperation could be sustained through repeated interactions, transparency, and monitoring.
Keohane played an important role in steering the focus of the journal International Organization from scholarship focused on international organizations to a general IR journal; it is now the leading journal in the field of IR. He joined the journal in 1968. Between 1974 and 1980, he was editor of the journal.
He has been president of the International Studies Association, 1988–1989, and of the American Political Science Association, 1999–2000.
Keohane is a fellow of the American Academy of Arts and Sciences, the American Academy of Political and Social Science and has held a Guggenheim Fellowship and fellowships at the Center for Advanced Study in the Behavioral Sciences and the National Humanities Center. He was awarded the Johan Skytte Prize in Political Science in 2005, and elected to the National Academy of Sciences that same year. In 2007, he was elected to the American Philosophical Society. He was listed as the most influential scholar of international relations in a 2005 Foreign Policy poll.
Political scientists he has taught include Lisa Martin, Andrew Moravcsik, Layna Mosley, Beth Simmons, Ronald Mitchell, and Helen V. Milner. Other students include Fareed Zakaria.
In 2012, Keohane received the Harvard Centennial Medal.
In fall 2013 he was the Allianz Distinguished Visitor at the American Academy in Berlin.
In 2014, he was awarded the James Madison Award of the American Political Science Association.
He was awarded the 2016 Balzan Prize for International Relations: History and Theory.
Personal life
While he was an assistant professor at Swarthmore College, he was an activist against the Vietnam War, and also campaigned for 1968 presidential candidate Eugene McCarthy. Keohane is married to Nannerl O. Keohane, former president of Duke University and Wellesley College and herself a noted political scientist. They have four grown children: Sarah, Stephan, Jonathan, and Nathaniel.
Books
Transnational Relations and World Politics, co-authored with Joseph S. Nye, Jr. (Harvard University Press, 1972)
Power and Interdependence: World Politics in Transition (Little, Brown, 1977); with Joseph Nye
After Hegemony: Cooperation and Discord in the World Political Economy (Princeton University Press, 1984)
Neorealism and Its Critics (Columbia University Press, 1986)
International Institutions and State Power: Essays in International Relations Theory (Westview, 1989)
Designing Social Inquiry: Scientific Inference in Qualitative Research (Princeton, 1994); with Gary King and Sidney Verba
Power and Governance in a Partially Globalized World (Routledge, New York, 2002)
Humanitarian Intervention: Ethical, Legal, and Political Dilemmas (Cambridge University Press, 2003); with J. L. Holzgrefe
The Regime Complex for Climate Change with David G. Victor (2010)
References
External links
Robert Keohane's Faculty Profile at Princeton
Interview with Robert Keohane by Theory Talks (May 2008)
Robert O. Keohane as the Allianz Distinguished Visitor at the American Academy in Berlin
1941 births
Living people
American political scientists
American male writers
Brandeis University faculty
Duke University faculty
Harvard University alumni
Harvard University faculty
American international relations scholars
People from Mount Carroll, Illinois
Political liberals (international relations)
Princeton University faculty
Shimer College alumni
Stanford University Department of Political Science faculty
Swarthmore College faculty
University of Chicago Laboratory Schools alumni
Members of the United States National Academy of Sciences
Corresponding Fellows of the British Academy
Presidents of the International Studies Association
|
```javascript
/**
 * Registry and renderer for per-file actions (download, rename,
 * delete, …) in the files app. Actions are registered per mime type
 * (or 'all'/'dir'/'file'/mime prefix) and rendered either inline next
 * to the file name or inside the "…" dropdown menu.
 */
(function() {
	/**
	 * Construct a new FileActions instance
	 * @constructs FileActions
	 * @memberof OCA.Files
	 */
	var FileActions = function() {
		this.initialize();
	};
	// Rendering types: TYPE_DROPDOWN actions are shown in the "…" menu,
	// TYPE_INLINE actions are rendered directly in the file row.
	FileActions.TYPE_DROPDOWN = 0;
	FileActions.TYPE_INLINE = 1;
	FileActions.prototype = {
		/** @lends FileActions.prototype */
		// Map of mime key ('all', 'dir', 'file', a mime prefix such as
		// 'image', or a full mime type) to a map of action name -> spec.
		actions: {},
		// Map of mime key to the name of its default action.
		defaults: {},
		// Map of action name to its icon (path string or function).
		icons: {},

		/**
		 * @deprecated
		 */
		currentFile: null,

		/**
		 * Dummy jquery element, for events
		 */
		$el: null,

		_fileActionTriggerTemplate: null,

		/**
		 * @private
		 */
		initialize: function() {
			this.clear();
			// abusing jquery for events until we get a real event lib
			this.$el = $('<div class="dummy-fileactions hidden"></div>');
			$('body').append(this.$el);

			// bound once so the same function reference can be reused as
			// the menu-trigger handler
			this._showMenuClosure = _.bind(this._showMenu, this);
		},

		/**
		 * Adds an event handler
		 *
		 * @param {String} eventName event name
		 * @param {Function} callback
		 */
		on: function(eventName, callback) {
			this.$el.on(eventName, callback);
		},

		/**
		 * Removes an event handler
		 *
		 * @param {String} eventName event name
		 * @param {Function} callback
		 */
		off: function(eventName, callback) {
			this.$el.off(eventName, callback);
		},

		/**
		 * Notifies the event handlers
		 *
		 * @param {String} eventName event name
		 * @param {Object} data data
		 */
		_notifyUpdateListeners: function(eventName, data) {
			this.$el.trigger(new $.Event(eventName, data));
		},

		/**
		 * Merges the actions from the given fileActions into
		 * this instance.
		 *
		 * @param {OCA.Files.FileActions} fileActions instance of OCA.Files.FileActions
		 */
		merge: function(fileActions) {
			var self = this;
			// merge first level to avoid unintended overwriting
			_.each(fileActions.actions, function(sourceMimeData, mime) {
				var targetMimeData = self.actions[mime];
				if (!targetMimeData) {
					targetMimeData = {};
				}
				self.actions[mime] = _.extend(targetMimeData, sourceMimeData);
			});

			this.defaults = _.extend(this.defaults, fileActions.defaults);
			this.icons = _.extend(this.icons, fileActions.icons);
		},

		/**
		 * @deprecated use #registerAction() instead
		 */
		register: function(mime, name, permissions, icon, action, displayName) {
			return this.registerAction({
				name: name,
				mime: mime,
				permissions: permissions,
				icon: icon,
				actionHandler: action,
				displayName: displayName || name
			});
		},

		/**
		 * Register action
		 *
		 * @param {OCA.Files.FileAction} action object
		 */
		registerAction: function (action) {
			var mime = action.mime;
			var name = action.name;
			// The stored spec wraps the registered handler so that
			// before/after events are fired around every invocation.
			var actionSpec = {
				action: function(fileName, context) {
					// Actions registered in one FileAction may be executed on a
					// different one (for example, due to the "merge" function),
					// so the listeners have to be updated on the FileActions
					// from the context instead of on the one in which it was
					// originally registered.
					if (context && context.fileActions) {
						context.fileActions._notifyUpdateListeners('beforeTriggerAction', {action: actionSpec, fileName: fileName, context: context});
					}

					action.actionHandler(fileName, context);

					if (context && context.fileActions) {
						context.fileActions._notifyUpdateListeners('afterTriggerAction', {action: actionSpec, fileName: fileName, context: context});
					}
				},
				name: name,
				displayName: action.displayName,
				mime: mime,
				filename: action.filename,
				order: action.order || 0,
				icon: action.icon,
				iconClass: action.iconClass,
				permissions: action.permissions,
				type: action.type || FileActions.TYPE_DROPDOWN,
				altText: action.altText || ''
			};
			// fall back to a translated version of the action name
			if (_.isUndefined(action.displayName)) {
				actionSpec.displayName = t('files', name);
			}
			if (_.isFunction(action.render)) {
				actionSpec.render = action.render;
			}
			if (_.isFunction(action.shouldRender)) {
				actionSpec.shouldRender = action.shouldRender;
			}
			if (!this.actions[mime]) {
				this.actions[mime] = {};
			}
			this.actions[mime][name] = actionSpec;
			this.icons[name] = action.icon;
			this._notifyUpdateListeners('registerAction', {action: action});
		},

		/**
		 * Clears all registered file actions.
		 */
		clear: function() {
			this.actions = {};
			this.defaults = {};
			this.icons = {};
			this.currentFile = null;
		},

		/**
		 * Sets the default action for a given mime type.
		 *
		 * @param {String} mime mime type
		 * @param {String} name action name
		 */
		setDefault: function (mime, name) {
			this.defaults[mime] = name;
			this._notifyUpdateListeners('setDefault', {defaultAction: {mime: mime, name: name}});
		},

		/**
		 * Returns a map of file actions handlers matching the given conditions
		 *
		 * @param {string} mime mime type
		 * @param {string} type "dir" or "file"
		 * @param {number} permissions permissions
		 * @param {string} filename filename
		 *
		 * @return {Object.<string,OCA.Files.FileActions~actionHandler>} map of action name to action spec
		 */
		get: function(mime, type, permissions, filename) {
			var actions = this.getActions(mime, type, permissions, filename);
			var filteredActions = {};
			// reduce the full specs down to just their handler functions
			$.each(actions, function (name, action) {
				filteredActions[name] = action.action;
			});
			return filteredActions;
		},

		/**
		 * Returns an array of file actions matching the given conditions
		 *
		 * @param {string} mime mime type
		 * @param {string} type "dir" or "file"
		 * @param {number} permissions permissions
		 * @param {string} filename filename
		 *
		 * @return {Array.<OCA.Files.FileAction>} array of action specs
		 */
		getActions: function(mime, type, permissions, filename) {
			// collect candidates in increasing specificity: 'all', then the
			// row type, then the mime prefix, then the full mime type —
			// later matches overwrite earlier ones with the same name
			var actions = {};
			if (this.actions.all) {
				actions = $.extend(actions, this.actions.all);
			}
			if (type) {//type is 'dir' or 'file'
				if (this.actions[type]) {
					actions = $.extend(actions, this.actions[type]);
				}
			}
			if (mime) {
				var mimePart = mime.substr(0, mime.indexOf('/'));
				if (this.actions[mimePart]) {
					actions = $.extend(actions, this.actions[mimePart]);
				}
				if (this.actions[mime]) {
					actions = $.extend(actions, this.actions[mime]);
				}
			}
			// drop candidates the current permissions/filename don't allow
			var filteredActions = {};
			var self = this;
			$.each(actions, function(name, action) {
				if (self.allowedPermissions(action.permissions, permissions) &&
					self.allowedFilename(action.filename, filename)) {
					filteredActions[name] = action;
				}
			});
			return filteredActions;
		},

		// An action with PERMISSION_NONE is always allowed; otherwise any
		// overlap of the permission bitmasks allows it.
		allowedPermissions: function(actionPermissions, permissions) {
			return (actionPermissions === OC.PERMISSION_NONE || (actionPermissions & permissions));
		},

		// A filename-restricted action only matches its exact filename;
		// an action without a filename restriction matches everything.
		allowedFilename: function(actionFilename, filename) {
			return (!filename || filename === '' || !actionFilename
				|| actionFilename === '' || actionFilename === filename);
		},

		/**
		 * Returns the default file action handler for the given conditions
		 *
		 * @param {string} mime mime type
		 * @param {string} type "dir" or "file"
		 * @param {number} permissions permissions
		 *
		 * @return {OCA.Files.FileActions~actionHandler} action handler
		 *
		 * @deprecated use getDefaultFileAction instead
		 */
		getDefault: function (mime, type, permissions) {
			var defaultActionSpec = this.getDefaultFileAction(mime, type, permissions);
			if (defaultActionSpec) {
				return defaultActionSpec.action;
			}
			return undefined;
		},

		/**
		 * Returns the default file action handler for the current file
		 *
		 * @return {OCA.Files.FileActions~actionSpec} action spec
		 * @since 8.2
		 */
		getCurrentDefaultFileAction: function() {
			var mime = this.getCurrentMimeType();
			var type = this.getCurrentType();
			var permissions = this.getCurrentPermissions();
			return this.getDefaultFileAction(mime, type, permissions);
		},

		/**
		 * Returns the default file action handler for the given conditions
		 *
		 * @param {string} mime mime type
		 * @param {string} type "dir" or "file"
		 * @param {number} permissions permissions
		 *
		 * @return {OCA.Files.FileActions~actionSpec} action spec
		 * @since 8.2
		 */
		getDefaultFileAction: function(mime, type, permissions) {
			var mimePart;
			if (mime) {
				mimePart = mime.substr(0, mime.indexOf('/'));
			}
			// default lookup order: exact mime type, then mime prefix,
			// then the row type, then the 'all' fallback
			var name = false;
			if (mime && this.defaults[mime]) {
				name = this.defaults[mime];
			} else if (mime && this.defaults[mimePart]) {
				name = this.defaults[mimePart];
			} else if (type && this.defaults[type]) {
				name = this.defaults[type];
			} else {
				name = this.defaults.all;
			}
			// may be undefined if the chosen name is filtered out by
			// the permissions check in getActions()
			var actions = this.getActions(mime, type, permissions);
			return actions[name];
		},

		/**
		 * Default function to render actions
		 *
		 * @param {OCA.Files.FileAction} actionSpec file action spec
		 * @param {boolean} isDefault true if the action is a default one,
		 * false otherwise
		 * @param {OCA.Files.FileActionContext} context action context
		 */
		_defaultRenderAction: function(actionSpec, isDefault, context) {
			// the default action is triggered via the file name link, so
			// the default renderer produces no extra element for it
			if (!isDefault) {
				var params = {
					name: actionSpec.name,
					nameLowerCase: actionSpec.name.toLowerCase(),
					displayName: actionSpec.displayName,
					icon: actionSpec.icon,
					iconClass: actionSpec.iconClass,
					altText: actionSpec.altText,
					hasDisplayName: !!actionSpec.displayName
				};
				// icon/iconClass may be functions evaluated per file
				if (_.isFunction(actionSpec.icon)) {
					params.icon = actionSpec.icon(context.$file.attr('data-file'), context);
				}
				if (_.isFunction(actionSpec.iconClass)) {
					params.iconClass = actionSpec.iconClass(context.$file.attr('data-file'), context);
				}

				var $actionLink = this._makeActionLink(params, context);
				context.$file.find('a.name>span.fileactions').append($actionLink);
				$actionLink.addClass('permanent');
				return $actionLink;
			}
		},

		/**
		 * Renders the action link element
		 *
		 * @param {Object} params action params
		 */
		_makeActionLink: function(params) {
			return $(OCA.Files.Templates['file_action_trigger'](params));
		},

		/**
		 * Displays the file actions dropdown menu
		 *
		 * @param {string} fileName file name
		 * @param {OCA.Files.FileActionContext} context rendering context
		 */
		_showMenu: function(fileName, context) {
			var menu;
			var $trigger = context.$file.closest('tr').find('.fileactions .action-menu');
			$trigger.addClass('open');
			$trigger.attr('aria-expanded', 'true');

			menu = new OCA.Files.FileActionsMenu();

			context.$file.find('td.filename').append(menu.$el);

			// undo the open/hover state once the menu closes itself
			menu.$el.on('afterHide', function() {
				context.$file.removeClass('mouseOver');
				$trigger.removeClass('open');
				$trigger.attr('aria-expanded', 'false');
				menu.remove();
			});

			context.$file.addClass('mouseOver');
			menu.show(context);
		},

		/**
		 * Renders the menu trigger on the given file list row
		 *
		 * @param {Object} $tr file list row element
		 * @param {OCA.Files.FileActionContext} context rendering context
		 */
		_renderMenuTrigger: function($tr, context) {
			// remove previous
			$tr.find('.action-menu').remove();

			var $el = this._renderInlineAction({
				name: 'menu',
				displayName: '',
				iconClass: 'icon-more',
				altText: t('files', 'Actions'),
				action: this._showMenuClosure
			}, false, context);

			$el.addClass('permanent');
			$el.attr('aria-expanded', 'false');
		},

		/**
		 * Renders the action element by calling actionSpec.render() and
		 * registers the click event to process the action.
		 *
		 * @param {OCA.Files.FileAction} actionSpec file action to render
		 * @param {boolean} isDefault true if the action is a default action,
		 * false otherwise
		 * @param {OCA.Files.FileActionContext} context rendering context
		 */
		_renderInlineAction: function(actionSpec, isDefault, context) {
			// an action may opt out of rendering for this context
			if (actionSpec.shouldRender) {
				if (!actionSpec.shouldRender(context)) {
					return;
				}
			}
			var renderFunc = actionSpec.render || _.bind(this._defaultRenderAction, this);
			var $actionEl = renderFunc(actionSpec, isDefault, context);
			if (!$actionEl || !$actionEl.length) {
				return;
			}
			$actionEl.on(
				'click', {
					a: null
				},
				function(event) {
					event.stopPropagation();
					event.preventDefault();

					// ignore clicks while the action's menu is open
					if ($actionEl.hasClass('open')) {
						return;
					}

					var $file = $(event.target).closest('tr');
					// ignore clicks on rows with an operation in progress
					if ($file.hasClass('busy')) {
						return;
					}
					var currentFile = $file.find('td.filename');
					var fileName = $file.attr('data-file');

					context.fileActions.currentFile = currentFile;

					// extend the context with live row data before invoking
					var callContext = _.extend({}, context);

					if (!context.dir && context.fileList) {
						callContext.dir = $file.attr('data-path') || context.fileList.getCurrentDirectory();
					}

					if (!context.fileInfoModel && context.fileList) {
						callContext.fileInfoModel = context.fileList.getModelForFile(fileName);
						if (!callContext.fileInfoModel) {
							console.warn('No file info model found for file "' + fileName + '"');
						}
					}

					actionSpec.action(
						fileName,
						callContext
					);
				}
			);

			return $actionEl;
		},

		/**
		 * Trigger the given action on the given file.
		 *
		 * @param {string} actionName action name
		 * @param {OCA.Files.FileInfoModel} fileInfoModel file info model
		 * @param {OCA.Files.FileList} [fileList] file list, for compatibility with older action handlers [DEPRECATED]
		 *
		 * @return {boolean} true if the action handler was called, false otherwise
		 *
		 * @since 8.2
		 */
		triggerAction: function(actionName, fileInfoModel, fileList) {
			var actionFunc;
			var actions = this.get(
				fileInfoModel.get('mimetype'),
				fileInfoModel.isDirectory() ? 'dir' : 'file',
				fileInfoModel.get('permissions'),
				fileInfoModel.get('name')
			);

			if (actionName) {
				actionFunc = actions[actionName];
			} else {
				actionFunc = this.getDefault(
					fileInfoModel.get('mimetype'),
					fileInfoModel.isDirectory() ? 'dir' : 'file',
					fileInfoModel.get('permissions')
				);
			}

			// fall back to the "Download" action if the requested or
			// default action is unavailable
			if (!actionFunc) {
				actionFunc = actions['Download'];
			}

			if (!actionFunc) {
				return false;
			}

			var context = {
				fileActions: this,
				fileInfoModel: fileInfoModel,
				dir: fileInfoModel.get('path')
			};

			var fileName = fileInfoModel.get('name');
			this.currentFile = fileName;
			// TODO: get rid of the currentFile hack

			if (fileList) {
				// compatibility with action handlers that expect these
				context.fileList = fileList;
				context.$file = fileList.findFileEl(fileName);
			}

			actionFunc(fileName, context);
		},

		/**
		 * Display file actions for the given element
		 * @param parent "td" element of the file for which to display actions
		 * @param triggerEvent if true, triggers the fileActionsReady on the file
		 * list afterwards (false by default)
		 * @param fileList OCA.Files.FileList instance on which the action is
		 * done, defaults to OCA.Files.App.fileList
		 */
		display: function (parent, triggerEvent, fileList) {
			if (!fileList) {
				console.warn('FileActions.display() MUST be called with a OCA.Files.FileList instance');
				return;
			}
			this.currentFile = parent;
			var self = this;
			var $tr = parent.closest('tr');
			var actions = this.getActions(
				this.getCurrentMimeType(),
				this.getCurrentType(),
				this.getCurrentPermissions(),
				this.getCurrentFile()
			);
			var nameLinks;
			if ($tr.data('renaming')) {
				return;
			}

			// recreate fileactions container
			nameLinks = parent.children('a.name');
			nameLinks.find('.fileactions, .nametext .action').remove();
			nameLinks.append('<span class="fileactions"></span>');
			var defaultAction = this.getDefaultFileAction(
				this.getCurrentMimeType(),
				this.getCurrentType(),
				this.getCurrentPermissions()
			);

			var context = {
				$file: $tr,
				fileActions: this,
				fileList: fileList
			};

			$.each(actions, function (name, actionSpec) {
				if (actionSpec.type === FileActions.TYPE_INLINE) {
					self._renderInlineAction(
						actionSpec,
						defaultAction && actionSpec.name === defaultAction.name,
						context
					);
				}
			});

			function objectValues(obj) {
				var res = [];
				for (var i in obj) {
					if (obj.hasOwnProperty(i)) {
						res.push(obj[i]);
					}
				}
				return res;
			}
			// polyfill
			// NOTE(review): this installs a global Object.values polyfill
			// as a side effect of rendering
			if (!Object.values) {
				Object.values = objectValues;
			}

			// everything that is neither inline nor the default action
			// goes into the dropdown menu
			var menuActions = Object.values(actions).filter(function (action) {
				return action.type !== OCA.Files.FileActions.TYPE_INLINE && (!defaultAction || action.name !== defaultAction.name)
			});
			// do not render the menu if nothing is in it
			if (menuActions.length > 0) {
				this._renderMenuTrigger($tr, context);
			}

			if (triggerEvent){
				fileList.$fileList.trigger(jQuery.Event("fileActionsReady", {fileList: fileList, $files: $tr}));
			}
		},

		// The getters below read row attributes from the parent of the
		// element stored in currentFile (set by display()).
		getCurrentFile: function () {
			return this.currentFile.parent().attr('data-file');
		},
		getCurrentMimeType: function () {
			return this.currentFile.parent().attr('data-mime');
		},
		getCurrentType: function () {
			return this.currentFile.parent().attr('data-type');
		},
		getCurrentPermissions: function () {
			return this.currentFile.parent().data('permissions');
		},

		/**
		 * Register the actions that are used by default for the files app.
		 */
		registerDefaultActions: function() {
			this.registerAction({
				name: 'Download',
				displayName: t('files', 'Download'),
				order: -20,
				mime: 'all',
				permissions: OC.PERMISSION_READ,
				iconClass: 'icon-download',
				actionHandler: function (filename, context) {
					var dir = context.dir || context.fileList.getCurrentDirectory();
					var isDir = context.$file.attr('data-type') === 'dir';
					var url = context.fileList.getDownloadUrl(filename, dir, isDir);

					var downloadFileaction = $(context.$file).find('.fileactions .action-download');

					// don't allow a second click on the download action
					if(downloadFileaction.hasClass('disabled')) {
						return;
					}

					if (url) {
						var disableLoadingState = function() {
							context.fileList.showFileBusyState(filename, false);
						};

						context.fileList.showFileBusyState(filename, true);
						OCA.Files.Files.handleDownload(url, disableLoadingState);
					}
				}
			});

			this.registerAction({
				name: 'Rename',
				displayName: t('files', 'Rename'),
				mime: 'all',
				order: -30,
				permissions: OC.PERMISSION_UPDATE,
				iconClass: 'icon-rename',
				actionHandler: function (filename, context) {
					context.fileList.rename(filename);
				}
			});

			this.registerAction({
				name: 'MoveCopy',
				// display name depends on what the user may do with the file
				displayName: function(context) {
					var permissions = context.fileInfoModel.attributes.permissions;
					if (permissions & OC.PERMISSION_UPDATE) {
						if (!context.fileInfoModel.canDownload()) {
							return t('files', 'Move');
						}
						return t('files', 'Move or copy');
					}
					return t('files', 'Copy');
				},
				mime: 'all',
				order: -25,
				permissions: $('#isPublic').val() ? OC.PERMISSION_UPDATE : OC.PERMISSION_READ,
				iconClass: 'icon-external',
				actionHandler: function (filename, context) {
					var permissions = context.fileInfoModel.attributes.permissions;
					var actions = OC.dialogs.FILEPICKER_TYPE_COPY;
					if (permissions & OC.PERMISSION_UPDATE) {
						if (!context.fileInfoModel.canDownload()) {
							actions = OC.dialogs.FILEPICKER_TYPE_MOVE;
						} else {
							actions = OC.dialogs.FILEPICKER_TYPE_COPY_MOVE;
						}
					}
					// open the picker in the last copy/move target, if any
					var dialogDir = context.dir;
					if (typeof context.fileList.dirInfo.dirLastCopiedTo !== 'undefined') {
						dialogDir = context.fileList.dirInfo.dirLastCopiedTo;
					}
					OC.dialogs.filepicker(t('files', 'Choose target folder'), function(targetPath, type) {
						if (type === OC.dialogs.FILEPICKER_TYPE_COPY) {
							context.fileList.copy(filename, targetPath, false, context.dir);
						}
						if (type === OC.dialogs.FILEPICKER_TYPE_MOVE) {
							context.fileList.move(filename, targetPath, false, context.dir);
						}
						context.fileList.dirInfo.dirLastCopiedTo = targetPath;
					}, false, "httpd/unix-directory", true, actions, dialogDir);
				}
			});

			// only offered when both required apps are installed
			if (Boolean(OC.appswebroots.files_reminders) && Boolean(OC.appswebroots.notifications)) {
				this.registerAction({
					name: 'SetReminder',
					displayName: function(_context) {
						return t('files', 'Set reminder');
					},
					mime: 'all',
					order: -24,
					icon: function(_filename, _context) {
						return OC.imagePath('files_reminders', 'alarm.svg')
					},
					permissions: $('#isPublic').val() ? null : OC.PERMISSION_READ,
					// NOTE(review): handler is a no-op here — presumably the
					// files_reminders app replaces it; confirm
					actionHandler: function(_filename, _context) {},
				});
			}

			// not offered on mobile platforms or for anonymous users
			if (!/Android|iPhone|iPad|iPod/i.test(navigator.userAgent) && !!window.oc_current_user) {
				this.registerAction({
					name: 'EditLocally',
					displayName: function(context) {
						var locked = context.$file.data('locked');
						if (!locked) {
							return t('files', 'Edit locally');
						}
					},
					mime: 'all',
					order: -23,
					icon: function(filename, context) {
						var locked = context.$file.data('locked');
						if (!locked) {
							return OC.imagePath('files', 'computer.svg')
						}
					},
					permissions: OC.PERMISSION_UPDATE,
					actionHandler: function (filename, context) {
						var dir = context.dir || context.fileList.getCurrentDirectory();
						var path = dir === '/' ? dir + filename : dir + '/' + filename;
						context.fileList.openLocalClient(path);
					},
				});
			}

			this.registerAction({
				name: 'Open',
				mime: 'dir',
				permissions: OC.PERMISSION_READ,
				icon: '',
				actionHandler: function (filename, context) {
					let dir, id
					if (context.$file) {
						dir = context.$file.attr('data-path')
						id = context.$file.attr('data-id')
					} else {
						dir = context.fileList.getCurrentDirectory()
						id = context.fileId
					}
					// switch back to the plain files view first if needed
					if (OCA.Files.App && OCA.Files.App.getActiveView() !== 'files') {
						OCA.Files.App.setActiveView('files', {silent: true});
						OCA.Files.App.fileList.changeDirectory(OC.joinPaths(dir, filename), true, true);
					} else {
						context.fileList.changeDirectory(OC.joinPaths(dir, filename), true, false, parseInt(id, 10));
					}
				},
				displayName: t('files', 'Open')
			});

			this.registerAction({
				name: 'Delete',
				displayName: function(context) {
					var mountType = context.$file.attr('data-mounttype');
					var type = context.$file.attr('data-type');
					var deleteTitle = (type && type === 'file')
						? t('files', 'Delete file')
						: t('files', 'Delete folder')
					if (mountType === 'external-root') {
						deleteTitle = t('files', 'Disconnect storage');
					} else if (mountType === 'shared-root') {
						deleteTitle = t('files', 'Leave this share');
					}
					return deleteTitle;
				},
				mime: 'all',
				order: 1000,
				// permission is READ because we show a hint instead if there is no permission
				permissions: OC.PERMISSION_DELETE,
				iconClass: 'icon-delete',
				actionHandler: function(fileName, context) {
					// if there is no permission to delete do nothing
					if((context.$file.data('permissions') & OC.PERMISSION_DELETE) === 0) {
						return;
					}
					context.fileList.do_delete(fileName, context.dir);
					$('.tipsy').remove();

					// close sidebar on delete
					const path = context.dir + '/' + fileName
					if (OCA.Files.Sidebar && OCA.Files.Sidebar.file === path) {
						OCA.Files.Sidebar.close()
					}
				}
			});

			this.setDefault('dir', 'Open');
		}
	};

	OCA.Files.FileActions = FileActions;

	/**
	 * Replaces the button icon with a loading spinner and vice versa
	 * - also adds the class disabled to the passed in element
	 *
	 * @param {jQuery} $buttonElement The button element
	 * @param {boolean} showIt whether to show the spinner(true) or to hide it(false)
	 */
	OCA.Files.FileActions.updateFileActionSpinner = function($buttonElement, showIt) {
		var $icon = $buttonElement.find('.icon');
		if (showIt) {
			var $loadingIcon = $('<span class="icon icon-loading-small"></span>');
			$icon.after($loadingIcon);
			$icon.addClass('hidden');
		} else {
			$buttonElement.find('.icon-loading-small').remove();
			$buttonElement.find('.icon').removeClass('hidden');
		}
	};

	/**
	 * File action attributes.
	 *
	 * @todo make this a real class in the future
	 * @typedef {Object} OCA.Files.FileAction
	 *
	 * @property {String} name identifier of the action
	 * @property {(String|OCA.Files.FileActions~displayNameFunction)} displayName
	 * display name string for the action, or function that returns the display name.
	 * Defaults to the name given in name property
	 * @property {String} mime mime type
	 * @property {String} filename filename
	 * @property {number} permissions permissions
	 * @property {(Function|String)} icon icon path to the icon or function that returns it (deprecated, use iconClass instead)
	 * @property {(String|OCA.Files.FileActions~iconClassFunction)} iconClass class name of the icon (recommended for theming)
	 * @property {OCA.Files.FileActions~renderActionFunction} [render] optional rendering function
	 * @property {OCA.Files.FileActions~actionHandler} actionHandler action handler function
	 */

	/**
	 * File action context attributes.
	 *
	 * @typedef {Object} OCA.Files.FileActionContext
	 *
	 * @property {Object} $file jQuery file row element
	 * @property {OCA.Files.FileActions} fileActions file actions object
	 * @property {OCA.Files.FileList} fileList file list object
	 */

	/**
	 * Render function for actions.
	 * The function must render a link element somewhere in the DOM
	 * and return it. The function should NOT register the event handler
	 * as this will be done after the link was returned.
	 *
	 * @callback OCA.Files.FileActions~renderActionFunction
	 * @param {OCA.Files.FileAction} actionSpec action definition
	 * @param {Object} $row row container
	 * @param {boolean} isDefault true if the action is the default one,
	 * false otherwise
	 * @return {Object} jQuery link object
	 */

	/**
	 * Display name function for actions.
	 * The function returns the display name of the action using
	 * the given context information..
	 *
	 * @callback OCA.Files.FileActions~displayNameFunction
	 * @param {OCA.Files.FileActionContext} context action context
	 * @return {String} display name
	 */

	/**
	 * Icon class function for actions.
	 * The function returns the icon class of the action using
	 * the given context information.
	 *
	 * @callback OCA.Files.FileActions~iconClassFunction
	 * @param {String} fileName name of the file on which the action must be performed
	 * @param {OCA.Files.FileActionContext} context action context
	 * @return {String} icon class
	 */

	/**
	 * Action handler function for file actions
	 *
	 * @callback OCA.Files.FileActions~actionHandler
	 * @param {String} fileName name of the file on which the action must be performed
	 * @param context context
	 * @param {String} context.dir directory of the file
	 * @param {OCA.Files.FileInfoModel} fileInfoModel file info model
	 * @param {Object} [context.$file] jQuery element of the file [DEPRECATED]
	 * @param {OCA.Files.FileList} [context.fileList] the FileList instance on which the action occurred [DEPRECATED]
	 * @param {OCA.Files.FileActions} context.fileActions the FileActions instance on which the action occurred
	 */

	// global file actions to be used by all lists
	OCA.Files.fileActions = new OCA.Files.FileActions();
})();
```
|
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* Voodoo Graphics, 2, Banshee, 3 emulation.
*
*
*
* Authors: Sarah Walker, <path_to_url
* leilei
*
*/
#ifndef VIDEO_VOODOO_SETUP_H
#define VIDEO_VOODOO_SETUP_H
/* Runs triangle setup for the given Voodoo instance. The exact side
   effects on `voodoo_t` are defined in the implementation file --
   presumably computing per-triangle rasterizer state; confirm there. */
void voodoo_triangle_setup(voodoo_t *voodoo);
#endif /*VIDEO_VOODOO_SETUP_H*/
```
|
```java
/**
 * Provides classes related to type mapping.
 *
 * <p>NOTE(review): judging by the package name, this covers mapping between
 * Java types and message type information; confirm against the classes in
 * this package.
 */
package org.springframework.kafka.support.mapping;
```
|
```xml
//
//
// Microsoft Bot Framework: path_to_url
//
// Bot Framework Emulator Github:
// path_to_url
//
// All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
import { Attachments } from './attachments';
import { ConversationSet } from './conversationSet';
import { EndpointSet } from './endpointSet';
/**
 * Aggregated in-memory state for the emulator server: attachment storage,
 * the set of known conversations, the configured endpoints and an optional
 * locale.
 */
export class ServerState {
  /** Attachment storage. */
  public attachments = new Attachments();
  /** Collection of conversations. */
  public conversations = new ConversationSet();
  /** Endpoint registry; built with the injected fetch implementation. */
  public endpoints: EndpointSet;
  /** Optional locale identifier (format defined by callers — confirm). */
  public locale?: string;

  /**
   * @param fetch fetch-style HTTP implementation handed to the EndpointSet
   *              (presumably used for endpoint network calls — confirm in
   *              EndpointSet).
   */
  constructor(fetch: (url: string, options?: any) => Promise<any>) {
    this.endpoints = new EndpointSet(fetch);
  }
}
```
|
```kotlin
package kotlinx.coroutines.lincheck
import kotlinx.coroutines.*
import kotlinx.coroutines.internal.*
import org.jetbrains.kotlinx.lincheck.annotations.*
import org.jetbrains.kotlinx.lincheck.paramgen.*
// Lincheck concurrency test for ResizableAtomicArray.
// `index` is generated in 0..4 so operations hit slots both below and above
// the initial capacity of 2, exercising the resize path; `value` in 1..5
// keeps stored values distinct from the null returned for unset slots
// (get returns Int?).
@Param(name = "index", gen = IntGen::class, conf = "0:4")
@Param(name = "value", gen = IntGen::class, conf = "1:5")
class ResizableAtomicArrayLincheckTest : AbstractLincheckTest() {
    private val a = ResizableAtomicArray<Int>(2)

    // Reads may run fully in parallel with each other and with the writer.
    @Operation
    fun get(@Param(name = "index") index: Int): Int? = a[index]

    // Writes are confined to a single non-parallel group — presumably
    // setSynchronized is only safe with one writer at a time; confirm
    // against ResizableAtomicArray's contract.
    @Operation(nonParallelGroup = "writer")
    fun set(@Param(name = "index") index: Int, @Param(name = "value") value: Int) {
        a.setSynchronized(index, value)
    }
}
```
|
```sqlpl
-- Exercises ClickHouse Decimal/Numeric/Fixed arithmetic across the three
-- underlying widths (precision <= 9, <= 18, <= 38) and a range of scales,
-- including expected overflow and division-by-zero errors (see the
-- `-- { serverError ... }` annotations).
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
(
a DECIMAL(9,0),
b DECIMAL(18,0),
c DECIMAL(38,0),
d DECIMAL(9, 9),
e DEC(18, 18),
f dec(38, 38),
g Decimal(9, 3),
h decimal(18, 9),
i deciMAL(38, 18),
j dec(4, 2),
k NumEriC(23, 4),
l numeric(9, 3),
m NUMEric(18, 9),
n FixED(12, 6),
o fixed(8, 6)
) ENGINE = Memory;
-- Seed with zero, positive and negative values.
INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42);
INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42);
-- Column op column. Max-scale columns (e, f) and high-scale h overflow on
-- multiply/divide, per the annotated expected errors.
SELECT a + a, a - a, a * a, a / a, intDiv(a, a), intDivOrZero(a, a) FROM decimal WHERE a = 42;
SELECT b + b, b - b, b * b, b / b, intDiv(b, b), intDivOrZero(b, b) FROM decimal WHERE b = 42;
SELECT c + c, c - c, c * c, c / c, intDiv(c, c), intDivOrZero(c, c) FROM decimal WHERE c = 42;
SELECT e + e, e - e, e * e, e / e, intDiv(e, e), intDivOrZero(e, e) FROM decimal WHERE e > 0; -- { serverError ARGUMENT_OUT_OF_BOUND }
SELECT f + f, f - f, f * f, f / f, intDiv(f, f), intDivOrZero(f, f) FROM decimal WHERE f > 0; -- { serverError ARGUMENT_OUT_OF_BOUND }
SELECT g + g, g - g, g * g, g / g, intDiv(g, g), intDivOrZero(g, g) FROM decimal WHERE g > 0;
SELECT h + h, h - h, h * h, h / h, intDiv(h, h), intDivOrZero(h, h) FROM decimal WHERE h > 0; -- { serverError DECIMAL_OVERFLOW }
SELECT h + h, h - h FROM decimal WHERE h > 0;
SELECT i + i, i - i, i * i, i / i, intDiv(i, i), intDivOrZero(i, i) FROM decimal WHERE i > 0;
SELECT i + i, i - i FROM decimal WHERE i > 0;
SELECT j + j, j - j, j * j, j / j, intDiv(j, j), intDivOrZero(j, j) FROM decimal WHERE j > 0;
-- Column op integer literal.
SELECT a + 21, a - 21, a - 84, a * 21, a * -21, a / 21, a / 84, intDiv(a, 21), intDivOrZero(a, 84) FROM decimal WHERE a = 42;
SELECT b + 21, b - 21, b - 84, b * 21, b * -21, b / 21, b / 84, intDiv(b, 21), intDivOrZero(b, 84) FROM decimal WHERE b = 42;
SELECT c + 21, c - 21, c - 84, c * 21, c * -21, c / 21, c / 84, intDiv(c, 21), intDivOrZero(c, 84) FROM decimal WHERE c = 42;
SELECT e + 21, e - 21, e - 84, e * 21, e * -21, e / 21, e / 84 FROM decimal WHERE e > 0; -- { serverError DECIMAL_OVERFLOW }
SELECT f + 21, f - 21, f - 84, f * 21, f * -21, f / 21, f / 84 FROM decimal WHERE f > 0;
SELECT g + 21, g - 21, g - 84, g * 21, g * -21, g / 21, g / 84, intDiv(g, 21), intDivOrZero(g, 84) FROM decimal WHERE g > 0;
SELECT h + 21, h - 21, h - 84, h * 21, h * -21, h / 21, h / 84, intDiv(h, 21), intDivOrZero(h, 84) FROM decimal WHERE h > 0;
SELECT i + 21, i - 21, i - 84, i * 21, i * -21, i / 21, i / 84, intDiv(i, 21), intDivOrZero(i, 84) FROM decimal WHERE i > 0;
SELECT j + 21, j - 21, j - 84, j * 21, j * -21, j / 21, j / 84, intDiv(j, 21), intDivOrZero(j, 84) FROM decimal WHERE j > 0;
-- Integer literal op column (checks commuted overload resolution too).
SELECT 21 + a, 21 - a, 84 - a, 21 * a, -21 * a, 21 / a, 84 / a, intDiv(21, a), intDivOrZero(84, a) FROM decimal WHERE a = 42;
SELECT 21 + b, 21 - b, 84 - b, 21 * b, -21 * b, 21 / b, 84 / b, intDiv(21, b), intDivOrZero(84, b) FROM decimal WHERE b = 42;
SELECT 21 + c, 21 - c, 84 - c, 21 * c, -21 * c, 21 / c, 84 / c, intDiv(21, c), intDivOrZero(84, c) FROM decimal WHERE c = 42;
SELECT 21 + e, 21 - e, 84 - e, 21 * e, -21 * e, 21 / e, 84 / e FROM decimal WHERE e > 0; -- { serverError DECIMAL_OVERFLOW }
SELECT 21 + f, 21 - f, 84 - f, 21 * f, -21 * f, 21 / f, 84 / f FROM decimal WHERE f > 0;
SELECT 21 + g, 21 - g, 84 - g, 21 * g, -21 * g, 21 / g, 84 / g, intDiv(21, g), intDivOrZero(84, g) FROM decimal WHERE g > 0;
SELECT 21 + h, 21 - h, 84 - h, 21 * h, -21 * h, 21 / h, 84 / h FROM decimal WHERE h > 0; -- { serverError DECIMAL_OVERFLOW }
SELECT 21 + h, 21 - h, 84 - h, 21 * h, -21 * h FROM decimal WHERE h > 0;
SELECT 21 + i, 21 - i, 84 - i, 21 * i, -21 * i, 21 / i, 84 / i, intDiv(21, i), intDivOrZero(84, i) FROM decimal WHERE i > 0;
SELECT 21 + j, 21 - j, 84 - j, 21 * j, -21 * j, 21 / j, 84 / j, intDiv(21, j), intDivOrZero(84, j) FROM decimal WHERE j > 0;
-- Unary minus and abs.
SELECT a, -a, -b, -c, -d, -e, -f, -g, -h, -j from decimal ORDER BY a;
SELECT abs(a), abs(b), abs(c), abs(d), abs(e), abs(f), abs(g), abs(h), abs(j) from decimal ORDER BY a;
-- With overflow checks disabled the previously-failing expressions run
-- (results may wrap, hence the inequality-style checks).
SET decimal_check_overflow = 0;
SELECT (h * h) != 0, (h / h) != 1 FROM decimal WHERE h > 0;
SELECT (i * i) != 0, (i / i) = 1 FROM decimal WHERE i > 0;
SELECT e + 1 > e, e + 10 > e, 1 + e > e, 10 + e > e FROM decimal WHERE e > 0;
SELECT f + 1 > f, f + 10 > f, 1 + f > f, 10 + f > f FROM decimal WHERE f > 0;
-- Division by zero is an error for every decimal width and operand order.
SELECT 1 / toDecimal32(0, 0); -- { serverError ILLEGAL_DIVISION }
SELECT 1 / toDecimal64(0, 1); -- { serverError ILLEGAL_DIVISION }
SELECT 1 / toDecimal128(0, 2); -- { serverError ILLEGAL_DIVISION }
SELECT 0 / toDecimal32(0, 3); -- { serverError ILLEGAL_DIVISION }
SELECT 0 / toDecimal64(0, 4); -- { serverError ILLEGAL_DIVISION }
SELECT 0 / toDecimal128(0, 5); -- { serverError ILLEGAL_DIVISION }
SELECT toDecimal32(0, 0) / toInt8(0); -- { serverError ILLEGAL_DIVISION }
SELECT toDecimal64(0, 1) / toInt32(0); -- { serverError ILLEGAL_DIVISION }
SELECT toDecimal128(0, 2) / toInt64(0); -- { serverError ILLEGAL_DIVISION }
-- multiIf guards make the intDivOrZero branches unreachable, so no error.
SELECT toDecimal32(0, 4) AS x, multiIf(x = 0, NULL, intDivOrZero(1, x)), multiIf(x = 0, NULL, intDivOrZero(x, 0));
SELECT toDecimal64(0, 8) AS x, multiIf(x = 0, NULL, intDivOrZero(1, x)), multiIf(x = 0, NULL, intDivOrZero(x, 0));
SELECT toDecimal64(0, 18) AS x, multiIf(x = 0, NULL, intDivOrZero(1, x)), multiIf(x = 0, NULL, intDivOrZero(x, 0));
-- { echoOn }
SELECT toDecimal128(1, 38) / toDecimal128(1, 0) SETTINGS decimal_check_overflow=1;
SELECT toDecimal128(1, 38) / toDecimal128(1, 1) SETTINGS decimal_check_overflow=1; -- { serverError DECIMAL_OVERFLOW }
SELECT toDecimal128(1, 38) / toDecimal128(1, 1) SETTINGS decimal_check_overflow=0;
SELECT toDecimal128(1, 37) / toDecimal128(1, 1) SETTINGS decimal_check_overflow=1;
SELECT toDecimal128(1, 19) / toDecimal128(1, 19) SETTINGS decimal_check_overflow=1;
SELECT toDecimal128(1, 20) / toDecimal128(1, 19) SETTINGS decimal_check_overflow=1; -- { serverError DECIMAL_OVERFLOW }
-- { echoOff }
DROP TABLE IF EXISTS decimal;
```
|
```go
// Package handler is the highest level module of the macro package which makes use the rest of the macro package,
// it is mainly used, internally, by the router package.
package handler
import (
"fmt"
"github.com/kataras/iris/v12/context"
"github.com/kataras/iris/v12/core/memstore"
"github.com/kataras/iris/v12/macro"
)
// ParamErrorHandler is a special type of Iris handler which receives
// any error produced by a path type parameter evaluator and lets developers
// customize the output instead of the
// provided error code 404 or any other status code given on the `else` literal.
//
// Note that the builtin macros return errors too, but they're handled
// by the `else` literal (error code). To change this behavior
// and send a custom error response you have to register it:
//
//	app.Macros().Get("uuid").HandleError(func(ctx iris.Context, paramIndex int, err error)).
//
// You can also set custom macros by `app.Macros().Register`.
//
// See macro.HandleError to set it.
type ParamErrorHandler = func(*context.Context, int, error) // alias.
// CanMakeHandler reports whether a macro template needs a special macro's evaluator handler to be validated
// before proceeding to the next handler(s).
// If the template does not contain any dynamic attributes and a special handler is NOT required
// then it returns false.
func CanMakeHandler(tmpl macro.Template) (needsMacroHandler bool) {
	if len(tmpl.Params) == 0 {
		return
	}

	// A handler is only worth registering when at least one parameter can be
	// evaluated at serve time; plain params like {name} without an `else`
	// keyword or any functions need no extra work before the main handler.
	for _, param := range tmpl.Params {
		if !param.CanEval() {
			continue
		}

		if param.HandleError != nil {
			// A custom error handler must have the expected signature.
			if _, ok := param.HandleError.(ParamErrorHandler); !ok {
				panic(fmt.Sprintf("HandleError input argument must be a type of func(iris.Context, int, error) but got: %T", param.HandleError))
			}
		}

		// One evaluable parameter is enough to require the handler.
		return true
	}

	return
}
// MakeHandler creates and returns a handler from a macro template, the handler evaluates each of the parameters if necessary at all.
// If the template does not contain any dynamic attributes and a special handler is NOT required
// then it returns a nil handler.
func MakeHandler(tmpl macro.Template) context.Handler {
	filter := MakeFilter(tmpl)
	// MakeFilter returns nil when no serve-time validation is needed; honor
	// the documented "returns a nil handler" contract instead of returning a
	// closure that would panic when calling a nil filter.
	if filter == nil {
		return nil
	}

	return func(ctx *context.Context) {
		if !filter(ctx) {
			// Validation failed: continue only when the next handler is the
			// route's registered error handler for the current status code,
			// otherwise stop the chain here.
			if ctx.GetCurrentRoute().StatusErrorCode() == ctx.GetStatusCode() {
				ctx.Next()
			} else {
				ctx.StopExecution()
			}

			return
		}

		// if all passed or the next is the registered error handler to handle this status code,
		// just continue.
		ctx.Next()
	}
}
// MakeFilter returns a Filter which reports whether a specific macro template
// and its parameters pass the serve-time validation.
// Returns nil when the template requires no validation (see CanMakeHandler).
func MakeFilter(tmpl macro.Template) context.Filter {
	if !CanMakeHandler(tmpl) {
		return nil
	}
	return func(ctx *context.Context) bool {
		for i := range tmpl.Params {
			p := tmpl.Params[i]
			if !p.CanEval() {
				continue // allow.
			}
			// 07-29-2019
			// changed to retrieve by param index in order to support
			// different parameter names for routes with
			// different param types (and probably different param names i.e {name:string}, {id:uint64})
			// in the exact same path pattern.
			//
			// Same parameter names are not allowed, different param types in the same path
			// should have different name e.g. {name} {id:uint64};
			// something like {name} and {name:uint64}
			// is bad API design and we do NOT allow it by-design.
			entry, found := ctx.Params().Store.GetEntryAt(p.Index)
			if !found {
				// should never happen.
				ctx.StatusCode(p.ErrCode) // status code can change from an error handler, set it here.
				return false
			}
			value, passed := p.Eval(entry.String())
			if !passed {
				ctx.StatusCode(p.ErrCode) // status code can change from an error handler, set it here.
				if value != nil && p.HandleError != nil {
					// The "value" is an error here, always (see template.Eval).
					// This is always a type of ParamErrorHandler at this state (see CanMakeHandler).
					p.HandleError.(ParamErrorHandler)(ctx, p.Index, value.(error))
				}
				return false
			}
			// Fixes binding different path parameters names,
			//
			// app.Get("/{fullname:string}", strHandler)
			// app.Get("/{id:int}", idHandler)
			//
			// before that user didn't see anything
			// but under the hoods the set-ed value was a type of string instead of type of int,
			// because store contained both "fullname" (which set-ed by the router itself on its string representation)
			// and "id" by the param evaluator (see core/router/handler.go and bindMultiParamTypesHandler->MakeFilter)
			// and the MVC get by index (e.g. 0) therefore
			// it got the "fullname" of type string instead of "id" int if /{int} requested.
			// which is critical for faster type assertion in the upcoming, new iris dependency injection (20 Feb 2020).
			ctx.Params().Store[p.Index] = memstore.Entry{
				Key:      p.Name,
				ValueRaw: value,
			}
			// for i, v := range ctx.Params().Store {
			// 	fmt.Printf("[%d:%s] macro/handler/handler.go: param passed: %s(%v of type: %T)\n", i, v.Key,
			// 		p.Src, v.ValueRaw, v.ValueRaw)
			// }
		}
		return true
	}
}
```
|
"Ferrari Horses" is a song by British hip hop collective D-Block Europe featuring British singer Raye. It was released as a single in 2021 from D-Block Europe's 2020 album The Blue Print: Us vs. Them, reaching number 14 on the UK Singles Chart and being certified platinum by the British Phonographic Industry (BPI). In 2023, it was remixed by British DJ and producer Cassö and retitled "Prada", which charted higher in the UK and internationally.
Charts
Weekly charts
Year-end charts
Certifications
Cassö remix
Cassö posted an unofficial remix of "Ferrari Horses" on his TikTok in May 2023, which gained traction and went viral on the app. It was officially titled "Prada" and was released as a single on 11 August 2023. The single has so far topped the charts in Germany, Ireland, Sweden and the Netherlands, and entered the top ten in Austria, Finland, Latvia, Lithuania, Norway, Switzerland, Australia and the United Kingdom.
Charts
Certifications
References
2021 singles
2021 songs
2023 singles
2023 songs
D-Block Europe songs
Irish Singles Chart number-one singles
Ministry of Sound singles
Number-one singles in Germany
Number-one singles in Sweden
Raye (singer) songs
Songs written by Raye (singer)
|
```javascript
Registry user accounts for npm
Combining script commands in npm
Tab completion in `npm`
Current Lifecycle Event
Lock down dependency versions by shrinkwrapping
```
|
```yaml
swagger: '2.0'
paths:
/pathA:
get:
responses:
200:
<caret>
```
|
```elixir
defmodule Commanded.Aggregates.Aggregate do
use TelemetryRegistry
use GenServer, restart: :temporary
use Commanded.Registration
# Telemetry span events emitted around command execution: :start when a
# command begins, :stop when it completes, and :exception when it raises,
# throws or exits. Rendered into @moduledoc via telemetry_docs().
telemetry_event(%{
  event: [:commanded, :aggregate, :execute, :start],
  description: "Emitted when an aggregate starts executing a command",
  measurements: "%{system_time: integer()}",
  metadata: """
  %{application: Commanded.Application.t(),
  aggregate_uuid: String.t(),
  aggregate_state: struct(),
  aggregate_version: non_neg_integer(),
  caller: pid(),
  execution_context: Commanded.Aggregates.ExecutionContext.t()}
  """
})

telemetry_event(%{
  event: [:commanded, :aggregate, :execute, :stop],
  description: "Emitted when an aggregate stops executing a command",
  measurements: "%{duration: non_neg_integer()}",
  metadata: """
  %{application: Commanded.Application.t(),
  aggregate_uuid: String.t(),
  aggregate_state: struct(),
  aggregate_version: non_neg_integer(),
  caller: pid(),
  execution_context: Commanded.Aggregates.ExecutionContext.t(),
  events: [map()],
  error: nil | any()}
  """
})

telemetry_event(%{
  event: [:commanded, :aggregate, :execute, :exception],
  description: "Emitted when an aggregate raises an exception",
  measurements: "%{duration: non_neg_integer()}",
  metadata: """
  %{application: Commanded.Application.t(),
  aggregate_uuid: String.t(),
  aggregate_state: struct(),
  aggregate_version: non_neg_integer(),
  caller: pid(),
  execution_context: Commanded.Aggregates.ExecutionContext.t(),
  kind: :throw | :error | :exit,
  reason: any(),
  stacktrace: list()}
  """
})
@moduledoc """
Aggregate is a `GenServer` process used to provide access to an
instance of an event sourced aggregate.
It allows execution of commands against an aggregate instance, and handles
persistence of created events to the configured event store. Concurrent
commands sent to an aggregate instance are serialized and executed in the
order received.
The `Commanded.Commands.Router` module will locate, or start, an aggregate
instance when a command is dispatched. By default, an aggregate process will
run indefinitely once started. Its lifespan may be controlled by using the
`Commanded.Aggregates.AggregateLifespan` behaviour.
## Snapshotting
You can configure state snapshots for an aggregate in config. By default
snapshots are *not* taken for an aggregate. The following options are
available to enable snapshots:
- `snapshot_every` - snapshot aggregate state every so many events. Use
`nil` to disable snapshotting, or exclude the configuration entirely.
- `snapshot_version` - a non-negative integer indicating the version of
the aggregate state snapshot. Incrementing this version forces any
earlier recorded snapshots to be ignored when rebuilding aggregate
state.
### Example
In `config/config.exs` enable snapshots for `MyApp.ExampleAggregate` after
every ten events:
config :my_app, MyApp.Application,
snapshotting: %{
MyApp.ExampleAggregate => [
snapshot_every: 10,
snapshot_version: 1
]
}
## Telemetry
#{telemetry_docs()}
"""
require Logger
alias Commanded.Aggregate.Multi
alias Commanded.Aggregates.Aggregate
alias Commanded.Aggregates.AggregateStateBuilder
alias Commanded.Aggregates.ExecutionContext
alias Commanded.Application.Config
alias Commanded.Event.Mapper
alias Commanded.Event.Upcast
alias Commanded.EventStore
alias Commanded.EventStore.RecordedEvent
alias Commanded.Registration
alias Commanded.Snapshotting
alias Commanded.Telemetry
# The aggregate's domain state is whatever struct its module defines.
@type state :: struct()
# Aggregate identities are strings.
@type uuid :: String.t()

# GenServer process state:
#   :application       - the Commanded application module
#   :aggregate_module  - module implementing the aggregate
#   :aggregate_uuid    - identity of this aggregate instance
#   :aggregate_state   - current event-sourced domain state
#   :snapshotting      - Commanded.Snapshotting configuration/state
#   aggregate_version  - version of the last applied event (0 = none yet)
#   lifespan_timeout   - idle timeout controlling process shutdown
defstruct [
  :application,
  :aggregate_module,
  :aggregate_uuid,
  :aggregate_state,
  :snapshotting,
  aggregate_version: 0,
  lifespan_timeout: :infinity
]
# Starts an aggregate process.
#
# `config` must contain the `:application` (and optionally `:snapshotting`);
# `opts` carries the `:aggregate_module` and `:aggregate_uuid` plus any
# standard GenServer start options, which are split off and forwarded.
def start_link(config, opts) do
  # Separate GenServer start options from aggregate-specific options.
  {start_opts, aggregate_opts} =
    Keyword.split(opts, [:debug, :name, :timeout, :spawn_opt, :hibernate_after])

  aggregate_module = Keyword.fetch!(aggregate_opts, :aggregate_module)
  aggregate_uuid = Keyword.fetch!(aggregate_opts, :aggregate_uuid)

  # Fail fast on invalid identity/module rather than inside callbacks.
  unless is_atom(aggregate_module),
    do: raise(ArgumentError, message: "aggregate module must be an atom")

  unless is_binary(aggregate_uuid),
    do: raise(ArgumentError, message: "aggregate identity must be a string")

  application = Keyword.fetch!(config, :application)
  snapshotting = Keyword.get(config, :snapshotting, %{})
  # Snapshot settings are configured per aggregate module; default: disabled.
  snapshot_options = Map.get(snapshotting, aggregate_module, [])

  state = %Aggregate{
    application: application,
    aggregate_module: aggregate_module,
    aggregate_uuid: aggregate_uuid,
    snapshotting: Snapshotting.new(application, aggregate_uuid, snapshot_options)
  }

  GenServer.start_link(__MODULE__, state, start_opts)
end
@doc false
# Registry name of an aggregate process: the {application, module, uuid}
# triple uniquely identifies one aggregate instance.
def name(application, aggregate_module, aggregate_uuid)
    when is_atom(application) and is_atom(aggregate_module) and is_binary(aggregate_uuid) do
  {application, aggregate_module, aggregate_uuid}
end
@doc """
Execute the given command against the aggregate.

- `aggregate_module` - the aggregate's module (e.g. `BankAccount`).
- `aggregate_uuid` - uniquely identifies an instance of the aggregate.
- `context` - includes command execution arguments
  (see `Commanded.Aggregates.ExecutionContext` for details).
- `timeout` - a non-negative integer which specifies how many milliseconds
  to wait for a reply, or the atom :infinity to wait indefinitely.
  The default value is five seconds (5,000ms).

## Return values

Returns `{:ok, aggregate_version, events}` on success, or `{:error, error}`
on failure.

- `aggregate_version` - the updated version of the aggregate after executing
  the command.
- `events` - events produced by the command, can be an empty list.
"""
def execute(
      application,
      aggregate_module,
      aggregate_uuid,
      %ExecutionContext{} = context,
      timeout \\ 5_000
    )
    when is_atom(aggregate_module) and is_binary(aggregate_uuid) and
           (is_number(timeout) or timeout == :infinity) do
  name = via_name(application, aggregate_module, aggregate_uuid)

  try do
    GenServer.call(name, {:execute_command, context}, timeout)
  catch
    # The aggregate process exited before (:noproc) or while (:normal)
    # handling the call — e.g. stopped by its lifespan. Normalize both to a
    # single :aggregate_stopped result for callers to handle.
    :exit, {:noproc, {GenServer, :call, [^name, {:execute_command, ^context}, ^timeout]}} ->
      {:exit, {:normal, :aggregate_stopped}}

    :exit, {:normal, {GenServer, :call, [^name, {:execute_command, ^context}, ^timeout]}} ->
      {:exit, {:normal, :aggregate_stopped}}
  end
end
@doc false
# Fetch the aggregate's current domain state.
#
# Normally asks the running process; when the process has already stopped
# (`:normal`) or never started (`:noproc`), the state is rebuilt from the
# event store in a temporary task instead, bounded by the same `timeout`.
def aggregate_state(application, aggregate_module, aggregate_uuid, timeout \\ 5_000) do
  name = via_name(application, aggregate_module, aggregate_uuid)

  try do
    GenServer.call(name, :aggregate_state, timeout)
  catch
    :exit, {reason, {GenServer, :call, [^name, :aggregate_state, ^timeout]}}
    when reason in [:normal, :noproc] ->
      task =
        Task.async(fn ->
          # Mirror the snapshot configuration used by start_link/2.
          snapshot_options =
            application
            |> Config.get(:snapshotting)
            |> Kernel.||(%{})
            |> Map.get(aggregate_module, [])

          %Aggregate{
            application: application,
            aggregate_module: aggregate_module,
            aggregate_uuid: aggregate_uuid,
            snapshotting: Snapshotting.new(application, aggregate_uuid, snapshot_options)
          }
          |> AggregateStateBuilder.populate()
          |> Map.fetch!(:aggregate_state)
        end)

      # Enforce the caller's timeout on the rebuild as well; exit with the
      # same shape a timed-out GenServer.call would produce.
      case Task.yield(task, timeout) || Task.shutdown(task) do
        {:ok, result} ->
          result

        nil ->
          exit({:timeout, {GenServer, :call, [name, :aggregate_state, timeout]}})
      end
  end
end
@doc false
# Current version of the running aggregate instance.
def aggregate_version(application, aggregate_module, aggregate_uuid, timeout \\ 5_000) do
  application
  |> via_name(aggregate_module, aggregate_uuid)
  |> GenServer.call(:aggregate_version, timeout)
end

@doc false
# Ask the running aggregate instance to snapshot its state.
def take_snapshot(application, aggregate_module, aggregate_uuid) do
  application
  |> via_name(aggregate_module, aggregate_uuid)
  |> GenServer.cast(:take_snapshot)
end

@doc false
# Stop the running aggregate instance (normal shutdown).
def shutdown(application, aggregate_module, aggregate_uuid) do
  application
  |> via_name(aggregate_module, aggregate_uuid)
  |> GenServer.stop()
end
@doc false
@impl GenServer
def init(%Aggregate{} = state) do
  # Initial aggregate state is populated by loading its state snapshot and/or
  # events from the event store. Deferred via handle_continue so init returns
  # immediately and does not block the caller.
  {:ok, state, {:continue, :populate_aggregate_state}}
end

@doc false
@impl GenServer
def handle_continue(:populate_aggregate_state, %Aggregate{} = state) do
  state = AggregateStateBuilder.populate(state)

  # Subscribe to aggregate's events to catch any events appended to its stream
  # by another process, such as directly appended to the event store.
  {:noreply, state, {:continue, :subscribe_to_events}}
end

@doc false
@impl GenServer
def handle_continue(:subscribe_to_events, %Aggregate{} = state) do
  %Aggregate{application: application, aggregate_uuid: aggregate_uuid} = state

  # Delivered to this process as {:events, events} messages (see handle_info).
  :ok = EventStore.subscribe(application, aggregate_uuid)

  {:noreply, state}
end
@doc false
@impl GenServer
# On-demand snapshot (see take_snapshot/3); keeps the current lifespan timeout.
def handle_cast(:take_snapshot, %Aggregate{} = state), do: do_take_snapshot(state)

@impl GenServer
# Snapshot scheduled internally after command execution; also records the
# lifespan timeout computed for that command (see handle_call/3).
def handle_cast({:take_snapshot, lifespan_timeout}, %Aggregate{} = state) do
  do_take_snapshot(%Aggregate{state | lifespan_timeout: lifespan_timeout})
end
@doc false
@impl GenServer
# Executes a command, emits telemetry around it, computes the process's next
# lifespan timeout from the outcome, and schedules a snapshot when required.
def handle_call({:execute_command, %ExecutionContext{} = context}, from, %Aggregate{} = state) do
  %ExecutionContext{lifespan: lifespan, command: command} = context

  telemetry_metadata = telemetry_metadata(context, from, state)
  start_time = telemetry_start(telemetry_metadata)

  {result, state} = execute_command(context, state)

  # Ask the configured lifespan which timeout applies, based on the outcome:
  # no events -> after_command, events -> after_event, failure -> after_error.
  lifespan_timeout =
    case result do
      {:ok, []} ->
        aggregate_lifespan_timeout(lifespan, :after_command, command)

      {:ok, events} ->
        aggregate_lifespan_timeout(lifespan, :after_event, events)

      {:error, error} ->
        aggregate_lifespan_timeout(lifespan, :after_error, error)

      {:error, error, _stacktrace} ->
        aggregate_lifespan_timeout(lifespan, :after_error, error)
    end

  formatted_reply = ExecutionContext.format_reply(result, context, state)

  %Aggregate{aggregate_version: aggregate_version, snapshotting: snapshotting} = state

  response =
    if Snapshotting.snapshot_required?(snapshotting, aggregate_version) do
      # Snapshot asynchronously (cast to self) so the caller is not blocked;
      # the lifespan timeout is applied after the snapshot is taken.
      :ok = GenServer.cast(self(), {:take_snapshot, lifespan_timeout})

      {:reply, formatted_reply, state}
    else
      state = %Aggregate{state | lifespan_timeout: lifespan_timeout}

      case lifespan_timeout do
        {:stop, reason} -> {:stop, reason, formatted_reply, state}
        lifespan_timeout -> {:reply, formatted_reply, state, lifespan_timeout}
      end
    end

  # Rebuild metadata so the :stop event reflects the post-execution state.
  telemetry_metadata = telemetry_metadata(context, from, state)
  telemetry_stop(start_time, telemetry_metadata, result)

  response
end
@doc false
@impl GenServer
# Return the aggregate's current domain state.
def handle_call(:aggregate_state, _from, %Aggregate{aggregate_state: aggregate_state} = state),
  do: {:reply, aggregate_state, state}

@doc false
@impl GenServer
# Return the version of the last applied event.
def handle_call(:aggregate_version, _from, %Aggregate{aggregate_version: version} = state),
  do: {:reply, version, state}
@doc false
@impl GenServer
# Events appended to this aggregate's stream, delivered by the event store
# subscription (see handle_continue(:subscribe_to_events, ...)). Already-seen
# events are ignored; the rest are upcast and applied in order.
def handle_info({:events, events}, %Aggregate{} = state) do
  %Aggregate{application: application, lifespan_timeout: lifespan_timeout} = state

  Logger.debug(describe(state) <> " received events: " <> inspect(events))

  try do
    state =
      events
      |> Enum.reject(&event_already_seen?(&1, state))
      |> Upcast.upcast_event_stream(additional_metadata: %{application: application})
      |> Enum.reduce(state, &handle_event/2)

    # Re-apply the previously computed lifespan timeout after handling.
    case lifespan_timeout do
      {:stop, reason} -> {:stop, reason, state}
      lifespan_timeout -> {:noreply, state, lifespan_timeout}
    end
  catch
    # Thrown by handle_event/2 on an out-of-order event.
    {:error, error} ->
      Logger.debug(describe(state) <> " stopping due to: " <> inspect(error))

      # Stop after event handling returned an error
      {:stop, error, state}
  end
end

@doc false
@impl GenServer
# Idle lifespan timeout expired; shut the process down normally.
def handle_info(:timeout, %Aggregate{} = state) do
  Logger.debug(describe(state) <> " stopping due to inactivity timeout")

  {:stop, :normal, state}
end
# An event has already been seen (applied) when its stream version is at or
# below the aggregate's current version.
defp event_already_seen?(
       %RecordedEvent{stream_version: stream_version},
       %Aggregate{aggregate_version: aggregate_version}
     ) do
  stream_version <= aggregate_version
end
# Handle events appended to the aggregate's stream, received by its
# event store subscription, by applying any missed events to its state.
#
# Throws `{:error, :unexpected_event_received}` — caught by the `handle_info`
# `{:events, events}` clause — when the event is not the next expected
# stream version (i.e. a gap in the event sequence was detected).
defp handle_event(%RecordedEvent{} = event, %Aggregate{} = state) do
  %RecordedEvent{data: data, stream_version: stream_version} = event

  %Aggregate{
    aggregate_module: aggregate_module,
    aggregate_state: aggregate_state,
    aggregate_version: aggregate_version
  } = state

  if stream_version == aggregate_version + 1 do
    # Apply event to aggregate's state
    %Aggregate{
      state
      | aggregate_version: stream_version,
        aggregate_state: aggregate_module.apply(aggregate_state, data)
    }
  else
    Logger.debug(describe(state) <> " received an unexpected event: " <> inspect(event))

    # Throw an error when an unexpected event is received
    throw({:error, :unexpected_event_received})
  end
end
# Ask the aggregate lifespan module for the next timeout by invoking the given
# callback, normalising the reply. Invalid values are logged and fall back to
# `:infinity`.
defp aggregate_lifespan_timeout(lifespan, function_name, args) do
  # Take the last event or the command or error
  arguments = args |> List.wrap() |> Enum.take(-1)

  lifespan
  |> apply(function_name, arguments)
  |> case do
    timeout when timeout in [:infinity, :hibernate] ->
      timeout

    :stop ->
      {:stop, :normal}

    {:stop, _reason} = reply ->
      reply

    timeout when is_integer(timeout) and timeout >= 0 ->
      timeout

    other ->
      Logger.warning(
        "Invalid timeout for aggregate lifespan " <>
          inspect(lifespan) <>
          ", expected a non-negative integer, `:infinity`, `:hibernate`, `:stop`, or `{:stop, reason}` but got: " <>
          inspect(other)
      )

      :infinity
  end
end
# Run the execution context's optional `before_execute` callback on the
# handler module; succeeds immediately when no callback is configured.
defp before_execute_command(_aggregate_state, %ExecutionContext{before_execute: nil}), do: :ok

defp before_execute_command(
       aggregate_state,
       %ExecutionContext{handler: handler, before_execute: before_execute} = context
     ) do
  apply(handler, before_execute, [aggregate_state, context])
end
# Dispatch the command to its handler module/function and return
# `{reply, state}`.
#
# Handler return values are normalised: an `{:error, _}` tuple is passed
# through; `:ok`, `nil` or `[]` mean "no events produced"; a `Multi` is run
# to produce events; `{:ok, events}` or a bare event (list) is applied to the
# aggregate state and persisted. Exceptions raised by the handler are rescued
# and surfaced as `{:error, error, stacktrace}` rather than crashing the
# aggregate process.
defp execute_command(%ExecutionContext{} = context, %Aggregate{} = state) do
  %ExecutionContext{command: command, handler: handler, function: function} = context
  %Aggregate{aggregate_state: aggregate_state} = state

  Logger.debug(describe(state) <> " executing command: " <> inspect(command))

  with :ok <- before_execute_command(aggregate_state, context) do
    case Kernel.apply(handler, function, [aggregate_state, command]) do
      {:error, _error} = reply ->
        {reply, state}

      none when none in [:ok, nil, []] ->
        {{:ok, []}, state}

      %Multi{} = multi ->
        case Multi.run(multi) do
          {:error, _error} = reply ->
            {reply, state}

          {aggregate_state, pending_events} ->
            persist_events(pending_events, aggregate_state, context, state)
        end

      {:ok, pending_events} ->
        apply_and_persist_events(pending_events, context, state)

      pending_events ->
        apply_and_persist_events(pending_events, context, state)
    end
  else
    {:error, _error} = reply ->
      {reply, state}
  end
rescue
  error ->
    stacktrace = __STACKTRACE__
    Logger.error(Exception.format(:error, error, stacktrace))

    {{:error, error, stacktrace}, state}
end
# Apply the pending events to the aggregate's state, then append them to its
# event stream.
defp apply_and_persist_events(pending_events, context, %Aggregate{} = state) do
  %Aggregate{aggregate_module: aggregate_module, aggregate_state: aggregate_state} = state

  events = List.wrap(pending_events)
  updated_aggregate_state = apply_events(aggregate_module, aggregate_state, events)

  persist_events(events, updated_aggregate_state, context, state)
end
# Fold each event into the aggregate state via the module's `apply/2`.
defp apply_events(aggregate_module, aggregate_state, events) do
  Enum.reduce(events, aggregate_state, fn event, acc ->
    aggregate_module.apply(acc, event)
  end)
end
# Append the pending events to the aggregate's stream using optimistic
# concurrency (`expected_version`). On success, advance the in-memory state
# and version. On a `:wrong_expected_version` conflict, rebuild the aggregate
# from the event store and retry the command while the execution context has
# retry attempts left; any other error is returned as-is.
defp persist_events(pending_events, aggregate_state, context, %Aggregate{} = state) do
  %Aggregate{aggregate_version: expected_version} = state

  with :ok <- append_to_stream(pending_events, context, state) do
    aggregate_version = expected_version + length(pending_events)

    state = %Aggregate{
      state
      | aggregate_state: aggregate_state,
        aggregate_version: aggregate_version
    }

    {{:ok, pending_events}, state}
  else
    {:error, :wrong_expected_version} ->
      # Fetch missing events from event store
      state = AggregateStateBuilder.rebuild_from_events(state)

      # Retry command if there are any attempts left
      case ExecutionContext.retry(context) do
        {:ok, context} ->
          Logger.debug(describe(state) <> " wrong expected version, retrying command")

          execute_command(context, state)

        reply ->
          Logger.debug(describe(state) <> " wrong expected version, but not retrying command")

          {reply, state}
      end

    {:error, _error} = reply ->
      {reply, state}
  end
end
# Append the pending events to the aggregate's stream; appending no events is
# a no-op. The aggregate's current version is used as the expected version
# for optimistic concurrency.
defp append_to_stream([], _context, _state), do: :ok

defp append_to_stream(pending_events, %ExecutionContext{} = context, %Aggregate{} = state) do
  %Aggregate{
    application: application,
    aggregate_uuid: aggregate_uuid,
    aggregate_version: expected_version
  } = state

  %ExecutionContext{
    causation_id: causation_id,
    correlation_id: correlation_id,
    metadata: metadata
  } = context

  opts = [
    causation_id: causation_id,
    correlation_id: correlation_id,
    metadata: metadata
  ]

  event_data = Mapper.map_to_event_data(pending_events, opts)

  EventStore.append_to_stream(application, aggregate_uuid, expected_version, event_data)
end
# Record a snapshot of the current aggregate state, then either stop the
# process or (re)arm its inactivity timeout according to `lifespan_timeout`.
# A failed snapshot is logged as a warning but does not crash the aggregate.
defp do_take_snapshot(%Aggregate{} = state) do
  %Aggregate{
    aggregate_state: aggregate_state,
    aggregate_version: aggregate_version,
    lifespan_timeout: lifespan_timeout,
    snapshotting: snapshotting
  } = state

  Logger.debug(describe(state) <> " recording snapshot")

  state =
    case Snapshotting.take_snapshot(snapshotting, aggregate_version, aggregate_state) do
      {:ok, snapshotting} ->
        # Keep the updated snapshotting bookkeeping (e.g. last snapshot version).
        %Aggregate{state | snapshotting: snapshotting}

      {:error, error} ->
        Logger.warning(describe(state) <> " snapshot failed due to: " <> inspect(error))

        state
    end

  case lifespan_timeout do
    {:stop, reason} -> {:stop, reason, state}
    lifespan_timeout -> {:noreply, state, lifespan_timeout}
  end
end
# Emit the `[:commanded, :aggregate, :execute]` telemetry start event and
# return the start time consumed later by `telemetry_stop/3`.
defp telemetry_start(telemetry_metadata) do
  Telemetry.start([:commanded, :aggregate, :execute], telemetry_metadata)
end
# Emit the matching telemetry stop (or exception) event for a command
# execution, enriching the metadata with the produced events or the error.
defp telemetry_stop(start_time, telemetry_metadata, result) do
  event_prefix = [:commanded, :aggregate, :execute]

  case result do
    {:ok, events} ->
      metadata = Map.put(telemetry_metadata, :events, events)

      Telemetry.stop(event_prefix, start_time, metadata)

    {:error, error} ->
      metadata = Map.put(telemetry_metadata, :error, error)

      Telemetry.stop(event_prefix, start_time, metadata)

    {:error, error, stacktrace} ->
      # A raised exception is reported as a telemetry exception event.
      Telemetry.exception(
        event_prefix,
        start_time,
        :error,
        error,
        stacktrace,
        telemetry_metadata
      )
  end
end
# Build the metadata map attached to aggregate execution telemetry events.
defp telemetry_metadata(%ExecutionContext{} = context, from, %Aggregate{} = state) do
  # `from` is the GenServer caller tuple; only the pid is reported.
  {caller_pid, _ref} = from

  %Aggregate{
    application: application,
    aggregate_uuid: aggregate_uuid,
    aggregate_state: aggregate_state,
    aggregate_version: aggregate_version
  } = state

  %{
    application: application,
    aggregate_uuid: aggregate_uuid,
    aggregate_state: aggregate_state,
    aggregate_version: aggregate_version,
    caller: caller_pid,
    execution_context: context
  }
end
# Registry via-tuple used to locate (or register) this aggregate's process
# under the application's configured registry.
defp via_name(application, aggregate_module, aggregate_uuid) do
  name = name(application, aggregate_module, aggregate_uuid)

  Registration.via_tuple(application, name)
end
# Human-readable identifier used in log messages: `Module<uuid@version>`.
defp describe(%Aggregate{
       aggregate_module: aggregate_module,
       aggregate_uuid: aggregate_uuid,
       aggregate_version: aggregate_version
     }) do
  "#{inspect(aggregate_module)}<#{aggregate_uuid}@#{aggregate_version}>"
end
end
```
|
```css
`inline` element characteristics
Make text unselectable
Hide the scrollbar in WebKit browsers
Use pseudo-elements to style specific parts of an element
Highlight input forms using `:focus` pseudo-class
```
|
Câmpeni (German: Topesdorf; Hungarian: Topánfalva) is a town in Alba County, Transylvania, Romania. The town administers 21 villages: Boncești, Borlești, Botești (Botesbánya), Certege (Csertés), Coasta Vâscului, Dănduț, Dealu Bistrii, Dealu Capsei, Dric, Fața Abrudului, Florești, Furduiești, Mihoești, Motorăști, Peste Valea Bistrii, Poduri, Sorlița, Tomușești, Valea Bistrii, Valea Caselor, and Vârși (Virs).
History
The town has historical significance as the capital of the "Țara Moților" region. It is believed to be the site where the Revolt of Horea, Cloșca and Crișan (1784–1785) started. Horea was born near Câmpeni in the village that used to be called Arada (since renamed to Horea). His cellar is a tourist attraction in the town.
During the Transylvanian revolution of 1848, Câmpeni was the political and military stronghold of Avram Iancu, a revolutionary leader of the Transylvanian Romanians' national movement. The Avram Iancu Museum is located in the town.
Economy
The town is a regional center for lumber exploitation and the furniture industry. Even though the town is located in a mining region the mining industry is not part of its industrial heritage. Câmpeni is growing in popularity as a tourist center.
Demographics
At the 2021 census, Câmpeni had a population of 6,569. According to the census from 2011, the town had a total population of 6,942; of those, 96.52% were ethnic Romanians, 3.35% ethnic Romani, and 0.08% ethnic Hungarians.
Natives
Valeriu Moldovan (1875–1954), lawyer and politician
Rubin Patiția (1841–1918), lawyer and political activist
Iosif Trifa (1888–1938), Romanian Orthodox priest and evangelist
Valerian Trifa (1914–1987), archbishop of the Romanian Orthodox Church in America and Canada, former fascist political activist
Climate
Câmpeni has a warm-summer humid continental climate (Dfb in the Köppen climate classification).
References
Populated places in Alba County
Localities in Transylvania
Towns in Romania
|
Drążewo is a village in the administrative district of Gmina Sońsk, within Ciechanów County, Masovian Voivodeship, in east-central Poland. It lies west of Sońsk, south of Ciechanów, and north of Warsaw.
References
Villages in Ciechanów County
|
```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.arrow.driver.jdbc;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.sql.Connection;
import javax.sql.PooledConnection;
import org.apache.arrow.driver.jdbc.authentication.UserPasswordAuthentication;
import org.apache.arrow.driver.jdbc.utils.ConnectionWrapper;
import org.apache.arrow.driver.jdbc.utils.MockFlightSqlProducer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
/** Tests for {@code ArrowFlightJdbcConnectionPoolDataSource} connection pooling behavior. */
public class ArrowFlightJdbcConnectionPoolDataSourceTest {
  @RegisterExtension public static final FlightServerTestExtension FLIGHT_SERVER_TEST_EXTENSION;
  private static final MockFlightSqlProducer PRODUCER = new MockFlightSqlProducer();

  static {
    // Shared Flight server configured with two distinct credential pairs so
    // per-user pooling can be exercised.
    UserPasswordAuthentication authentication =
        new UserPasswordAuthentication.Builder()
            .user("user1", "pass1")
            .user("user2", "pass2")
            .build();

    FLIGHT_SERVER_TEST_EXTENSION =
        new FlightServerTestExtension.Builder()
            .authentication(authentication)
            .producer(PRODUCER)
            .build();
  }

  private ArrowFlightJdbcConnectionPoolDataSource dataSource;

  @BeforeEach
  public void setUp() {
    dataSource = FLIGHT_SERVER_TEST_EXTENSION.createConnectionPoolDataSource(false);
  }

  @AfterEach
  public void tearDown() throws Exception {
    dataSource.close();
  }

  /** Closing the logical connection flips its {@code isClosed} flag. */
  @Test
  public void testShouldInnerConnectionIsClosedReturnCorrectly() throws Exception {
    PooledConnection pooledConnection = dataSource.getPooledConnection();
    Connection connection = pooledConnection.getConnection();
    assertFalse(connection.isClosed());
    connection.close();
    assertTrue(connection.isClosed());
  }

  /**
   * Closing an already-closed logical connection must be a no-op (JDBC
   * contract). Previously this test only closed the connection once, making
   * it a duplicate of the test above.
   */
  @Test
  public void testShouldInnerConnectionShouldIgnoreDoubleClose() throws Exception {
    PooledConnection pooledConnection = dataSource.getPooledConnection();
    Connection connection = pooledConnection.getConnection();
    assertFalse(connection.isClosed());
    connection.close();
    assertTrue(connection.isClosed());
    // Second close on the same logical connection must not throw.
    connection.close();
    assertTrue(connection.isClosed());
  }

  /**
   * Closing the pooled (physical) connection closes its logical connection.
   * Renamed from a corrupted placeholder method name.
   */
  @Test
  public void testShouldCloseInnerConnectionWhenPooledConnectionCloses() throws Exception {
    PooledConnection pooledConnection = dataSource.getPooledConnection();
    Connection connection = pooledConnection.getConnection();
    assertFalse(connection.isClosed());
    pooledConnection.close();
    assertTrue(connection.isClosed());
  }

  /**
   * Requesting a connection for the same credentials reuses the pooled
   * physical connection while handing out distinct logical wrappers.
   */
  @Test
  public void testShouldReuseConnectionsOnPool() throws Exception {
    PooledConnection pooledConnection = dataSource.getPooledConnection("user1", "pass1");
    ConnectionWrapper connection = ((ConnectionWrapper) pooledConnection.getConnection());
    assertFalse(connection.isClosed());
    connection.close();
    assertTrue(connection.isClosed());
    // The underlying physical connection stays open for reuse.
    assertFalse(connection.unwrap(ArrowFlightConnection.class).isClosed());

    PooledConnection pooledConnection2 = dataSource.getPooledConnection("user1", "pass1");
    ConnectionWrapper connection2 = ((ConnectionWrapper) pooledConnection2.getConnection());
    assertFalse(connection2.isClosed());
    connection2.close();
    assertTrue(connection2.isClosed());
    assertFalse(connection2.unwrap(ArrowFlightConnection.class).isClosed());

    assertSame(pooledConnection, pooledConnection2);
    assertNotSame(connection, connection2);
    assertSame(
        connection.unwrap(ArrowFlightConnection.class),
        connection2.unwrap(ArrowFlightConnection.class));
  }

  /** Different credentials must never share pooled physical connections. */
  @Test
  public void testShouldNotMixConnectionsForDifferentUsers() throws Exception {
    PooledConnection pooledConnection = dataSource.getPooledConnection("user1", "pass1");
    ConnectionWrapper connection = ((ConnectionWrapper) pooledConnection.getConnection());
    assertFalse(connection.isClosed());
    connection.close();
    assertTrue(connection.isClosed());
    assertFalse(connection.unwrap(ArrowFlightConnection.class).isClosed());

    PooledConnection pooledConnection2 = dataSource.getPooledConnection("user2", "pass2");
    ConnectionWrapper connection2 = ((ConnectionWrapper) pooledConnection2.getConnection());
    assertFalse(connection2.isClosed());
    connection2.close();
    assertTrue(connection2.isClosed());
    assertFalse(connection2.unwrap(ArrowFlightConnection.class).isClosed());

    assertNotSame(pooledConnection, pooledConnection2);
    assertNotSame(connection, connection2);
    assertNotSame(
        connection.unwrap(ArrowFlightConnection.class),
        connection2.unwrap(ArrowFlightConnection.class));
  }
}
```
|
Younis Mubarak Obaid Al Mahaijri (; born 12 March 1987), commonly known as Younis Mubarak, is an Omani footballer who plays for Al-Orouba SC.
Club career
On 7 July 2014, he signed a one-year contract extension with Al-Orouba SC.
Club career statistics
International career
Younis was part of the first team squad of the Oman national football team till 2008. He was selected for the national team for the first time in 2006. He has made appearances in the 2007 AFC Asian Cup and the 2010 FIFA World Cup qualification.
References
External links
Younis Mubarak Al-Mahaijri at Goal.com
1987 births
Living people
Omani men's footballers
Oman men's international footballers
Men's association football midfielders
2007 AFC Asian Cup players
Al-Orouba SC players
Sur SC players
Al-Nahda Club (Oman) players
Oman Professional League players
People from Sur, Oman
|
David Wright (born 1964) is an American writer.
Early life and education
Wright grew up in Borger, Texas. His mother is a white Jewish woman who survived the Nazi occupation of Paris. Her parents were affluent, assimilated French Jews. His mother was a member of the French Communist Party; she immigrated to the US in the 1950s as the GI bride of an African-American soldier. He holds a BA from Carleton College and an MFA from the MFA Program for Poets & Writers at the University of Massachusetts Amherst. He also studied at the École des Hautes Études en Sciences Sociales. Before he started teaching creative writing, he was a player/coach on various American football teams in Paris and London. He teaches at the University of Illinois at Urbana-Champaign.
He has also published under the name "David Wright Faladé", in honor of his biological father. His father Maximien Faladé was a devout Catholic from Porto-Novo in Benin, the grandson of Béhanzin, the last King of Dahomey.
Works
Books
Black Cloud Rising, Atlantic Monthly Press, February 2022.
Short stories
"The Sand Banks, 1861" (2020)
Documentary film
Rescue Men: The Story of the Pea Island Lifesavers (2010).
Television journalism
"The Pea Island Story", co-written and co-produced with Stephanie Frederic and David Zoby. Aired on BET Tonight, February 1999.
Awards
2017: International Board on Books for Young People, grades 9–12, Away Running
2011: Fulbright U.S. Scholar Program, Universidade de São Paulo, Brazil.
2009: North Carolina Humanities Council, Large Grant, for production of Rescue Men: The Story of the Pea Island Lifesavers.
2005: Dobie-Paisano Fellowship, University of Texas and the Texas Institute of Letters.
2004: Tennessee Williams Scholar, Sewanee Writers’ Conference.
1999: National Endowment for the Humanities, Summer Institute for College and University Faculty Fellow, W. E. B. Du Bois Institute for Afro-American Research, Harvard University, "The Civil Rights Movement: History and Consequences".
1997–1998: Chancellor’s Minority Postdoctoral Fellowship, Afro-American Studies and Research Program, University of Illinois at Urbana-Champaign.
1994: Zora Neale Hurston/Richard Wright Award, the Zora Neale Hurston/Richard Wright Foundation, Fairfax, VA.
1993: Paul Cuffe Memorial Fellowship, Munson Institute of American Maritime Studies, Mystic, Connecticut.
References
External links
African-American Jews
African-American novelists
American people of Beninese descent
American people of French-Jewish descent
University of Illinois Urbana-Champaign faculty
Harvard Fellows
1964 births
Living people
University of Massachusetts Amherst MFA Program for Poets & Writers alumni
Carleton College alumni
Frank C. Munson Institute of American Maritime History alumni
People from Borger, Texas
21st-century American novelists
21st-century African-American writers
20th-century African-American people
Fulbright alumni
|
Leksvik Church () is a parish church of the Church of Norway in Indre Fosen municipality in Trøndelag county, Norway. It is located in the village of Leksvik. It is the church for the Leksvik parish which is part of the Fosen prosti (deanery) in the Diocese of Nidaros. The white, wooden church was built in a long church style in 1668 using plans drawn up by the architects Ole Jonsen Hindrum and Nils Olufsen. The church seats about 300 people.
History
The earliest existing historical records of the church date back to the year 1533, but the church was likely built during the 12th century. The church was built on the Røstad farm in Leksvik, so historically it was known as Røstad Church. The original church had a choir. Around 1648, the church received a new roof and tower. Soon after in 1652-1654 a newly constructed choir and sacristy was built on the north side of the old building. In 1667, most of the church was torn down except for the relatively new choir and sacristy and a new timber-framed nave was built on the same site. At the same time, the choir roof was raised several meters higher. Therefore by 1670, the entire building had been rebuilt step by step and no more of the medieval church structure remained.
In 1814, this church served as an election church (). Together with more than 300 other parish churches across Norway, it was a polling station for elections to the 1814 Norwegian Constituent Assembly which wrote the Constitution of Norway. This was Norway's first national elections. Each church parish was a constituency that elected people called "electors" who later met together in each county to elect the representatives for the assembly that was to meet in Eidsvoll later that year.
During the reconstruction in the 1860s the church lost much of its older interior furniture, but during later reconstruction efforts in the 1950s and 1960s, much of the 17th century furniture was returned. Today the church has new seating, installed in the mid-1990s. The church also contains a very rare crucifix from the Middle Ages, and the sword of Anders Solli. Legend has it that he used the sword to defend himself from the wolves that attacked and killed him in the woods of Leksvik in 1612.
Priests
The following people have been priests of Leksvik Church:
See also
List of churches in Nidaros
References
External links
Indre Fosen
Churches in Trøndelag
Wooden churches in Norway
Long churches in Norway
17th-century Church of Norway church buildings
Churches completed in 1668
12th-century establishments in Norway
Norwegian election church
|
```go
/*
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
ginkgo -nodes=N
where N is the number of nodes you desire.
*/
package remote
import (
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters/stenographer"
"github.com/onsi/ginkgo/types"
)
// configAndSuite pairs a node's Ginkgo config with its suite summary, as
// delivered together by SpecSuiteWillBegin.
type configAndSuite struct {
	config  config.GinkgoConfigType
	summary *types.SuiteSummary
}
// Aggregator collates reporter callbacks from nodeCount parallel test nodes
// and replays them through a single stenographer. Every callback is funnelled
// through a channel into the mux goroutine, which serialises all access to
// the aggregated slices below.
type Aggregator struct {
	nodeCount    int
	config       config.DefaultReporterConfigType
	stenographer stenographer.Stenographer
	// result receives the combined pass/fail outcome once all suites end.
	result chan bool

	suiteBeginnings           chan configAndSuite
	aggregatedSuiteBeginnings []configAndSuite

	beforeSuites           chan *types.SetupSummary
	aggregatedBeforeSuites []*types.SetupSummary

	afterSuites           chan *types.SetupSummary
	aggregatedAfterSuites []*types.SetupSummary

	specCompletions chan *types.SpecSummary
	// completedSpecs buffers specs awaiting announcement; specs keeps every
	// spec for the final failure summary.
	completedSpecs []*types.SpecSummary

	suiteEndings           chan *types.SuiteSummary
	aggregatedSuiteEndings []*types.SuiteSummary
	specs                  []*types.SpecSummary

	// startTime is set when the first node's suite begins.
	startTime time.Time
}
// NewAggregator builds an Aggregator for nodeCount parallel nodes and starts
// its mux goroutine. The combined pass/fail outcome is sent on result once
// every node's suite has ended.
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
	agg := &Aggregator{
		nodeCount:    nodeCount,
		result:       result,
		config:       config,
		stenographer: stenographer,

		// Unbuffered channels: callers block until mux consumes each event.
		suiteBeginnings: make(chan configAndSuite),
		beforeSuites:    make(chan *types.SetupSummary),
		afterSuites:     make(chan *types.SetupSummary),
		specCompletions: make(chan *types.SpecSummary),
		suiteEndings:    make(chan *types.SuiteSummary),
	}

	go agg.mux()

	return agg
}
// The methods below implement Ginkgo's Reporter interface. Each forwards its
// payload onto the corresponding channel so mux() can process all events on
// a single goroutine.

func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	aggregator.suiteBeginnings <- configAndSuite{config, summary}
}

func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.beforeSuites <- setupSummary
}

func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.afterSuites <- setupSummary
}

// SpecWillRun is intentionally a no-op: per-spec "will run" announcements are
// only made after completion (see announceSpec) to keep parallel output coherent.
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
	//noop
}

func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
	aggregator.specCompletions <- specSummary
}

func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	aggregator.suiteEndings <- summary
}
// mux is the single goroutine that owns all aggregated state. It services the
// event channels until every node's suite has ended, then publishes the
// combined pass/fail result and exits.
func (aggregator *Aggregator) mux() {
loop:
	for {
		select {
		case configAndSuite := <-aggregator.suiteBeginnings:
			aggregator.registerSuiteBeginning(configAndSuite)
		case setupSummary := <-aggregator.beforeSuites:
			aggregator.registerBeforeSuite(setupSummary)
		case setupSummary := <-aggregator.afterSuites:
			aggregator.registerAfterSuite(setupSummary)
		case specSummary := <-aggregator.specCompletions:
			aggregator.registerSpecCompletion(specSummary)
		case suite := <-aggregator.suiteEndings:
			finished, passed := aggregator.registerSuiteEnding(suite)
			if finished {
				// All nodes are done: report and stop servicing events.
				aggregator.result <- passed
				break loop
			}
		}
	}
}
// registerSuiteBeginning records a node's suite start. The overall start time
// is taken from the first node to begin. Once every node has reported in, the
// suite, total spec count and parallel run are announced, and any spec output
// buffered in the meantime is flushed.
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
	aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)

	if len(aggregator.aggregatedSuiteBeginnings) == 1 {
		aggregator.startTime = time.Now()
	}

	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)

	// The early return above guarantees len(aggregatedSuiteBeginnings) ==
	// nodeCount (>= 1) here, so the former `if len(...) > 0` guard around this
	// assignment was dead code and has been removed.
	totalNumberOfSpecs := configAndSuite.summary.NumberOfSpecsBeforeParallelization

	aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
	aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
	aggregator.flushCompletedSpecs()
}
// registerBeforeSuite buffers a node's BeforeSuite result, then attempts a
// flush (a no-op until all nodes have begun).
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

// registerAfterSuite buffers a node's AfterSuite result, then attempts a flush.
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

// registerSpecCompletion buffers a completed spec for announcement and also
// records it in the full spec list used for the final failure summary.
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
	aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
	aggregator.specs = append(aggregator.specs, specSummary)
	aggregator.flushCompletedSpecs()
}
// flushCompletedSpecs announces everything buffered so far — BeforeSuite
// results, completed specs, then AfterSuite results — and clears each buffer.
// It does nothing until every node's suite has begun.
func (aggregator *Aggregator) flushCompletedSpecs() {
	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	for _, summary := range aggregator.aggregatedBeforeSuites {
		aggregator.announceBeforeSuite(summary)
	}
	aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}

	for _, spec := range aggregator.completedSpecs {
		aggregator.announceSpec(spec)
	}
	aggregator.completedSpecs = []*types.SpecSummary{}

	for _, summary := range aggregator.aggregatedAfterSuites {
		aggregator.announceAfterSuite(summary)
	}
	aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
}
// announceBeforeSuite replays a node's BeforeSuite captured output and
// announces a failure when the setup did not pass.
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

// announceAfterSuite replays a node's AfterSuite captured output and
// announces a failure when the teardown did not pass.
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}
// announceSpec reports a single completed spec through the stenographer,
// choosing the announcement by the spec's final state and the reporter
// config (verbosity, succinctness, slow-spec threshold).
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
	// In verbose mode, show the "will run" banner retroactively for specs
	// that actually ran (pending/skipped specs never ran).
	if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
		aggregator.stenographer.AnnounceSpecWillRun(specSummary)
	}

	aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)

	switch specSummary.State {
	case types.SpecStatePassed:
		if specSummary.IsMeasurement {
			aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
		} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
			aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
		} else {
			aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
		}

	case types.SpecStatePending:
		aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
	case types.SpecStateSkipped:
		aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
	case types.SpecStateTimedOut:
		aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStatePanicked:
		aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStateFailed:
		aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}
// registerSuiteEnding records one parallel node's suite summary. Until every
// node has reported it returns (false, false). Once the final node reports,
// it folds the per-node summaries into a single aggregate, announces the
// overall result, and returns (true, overall-success).
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
	aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
	if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
		// Still waiting on other nodes to finish.
		return false, false
	}

	aggregated := &types.SuiteSummary{SuiteSucceeded: true}
	for _, nodeSummary := range aggregator.aggregatedSuiteEndings {
		// The run succeeds only if every node succeeded.
		aggregated.SuiteSucceeded = aggregated.SuiteSucceeded && nodeSummary.SuiteSucceeded
		aggregated.NumberOfSpecsThatWillBeRun += nodeSummary.NumberOfSpecsThatWillBeRun
		aggregated.NumberOfTotalSpecs += nodeSummary.NumberOfTotalSpecs
		aggregated.NumberOfPassedSpecs += nodeSummary.NumberOfPassedSpecs
		aggregated.NumberOfFailedSpecs += nodeSummary.NumberOfFailedSpecs
		aggregated.NumberOfPendingSpecs += nodeSummary.NumberOfPendingSpecs
		aggregated.NumberOfSkippedSpecs += nodeSummary.NumberOfSkippedSpecs
		aggregated.NumberOfFlakedSpecs += nodeSummary.NumberOfFlakedSpecs
	}
	// Wall-clock time for the whole parallel run, not the sum of node times.
	aggregated.RunTime = time.Since(aggregator.startTime)

	aggregator.stenographer.SummarizeFailures(aggregator.specs)
	aggregator.stenographer.AnnounceSpecRunCompletion(aggregated, aggregator.config.Succinct)

	return true, aggregated.SuiteSucceeded
}
```
|
Drama is a 2022 Indian Tamil-language drama film directed by Aju Kizhumala and starring Kishore. It was released on 23 September 2022.
Cast
Kishore as Arjun
Charle as Murthy
Jai Bala as Uday
Kavya Bellu as Vaigaa
Nakulan Vincent as Rajapandian
Production
Prior to release, the film garnered attention for being the "longest one-shot film in India". In late 2020, the entire film was shot in about 8 hours with 80 technicians on the set. Prior to the shoot, the cast members rehearsed for up to 180 days. Soon after its shoot, the makers marketed it as "the first movie in the Indian film industry to be entirely completed in a single shot", but later backtracked after the theatrical release of R. Parthiban's Iravin Nizhal (2022), another single shot film.
The film's director, Aju Kizhumala, revealed that he had rejected advances from OTT service providers in late 2020 to ensure the film had a theatrical release.
Reception
The film was released on 23 September 2022 across Tamil Nadu. A critic from Maalai Malar gave the film a mixed review, noting that it "lacked excitement". Film critic Malini Mannath gave the film a positive review, writing "it’s appreciable that the director has managed to confine his story telling to less than two hours of viewing time. ‘Drama’ is at its best a promising piece of work from a new entrant to the Tamil screen".
References
External links
2022 films
2020s Tamil-language films
|
```html
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>Struct total_enricher</title>
<link rel="stylesheet" href="../../../../../../doc/src/boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.79.1">
<link rel="home" href="../../index.html" title="Chapter 1. Boost.Icl">
<link rel="up" href="../../header/boost/icl/map_hpp.html" title="Header <boost/icl/map.hpp>">
<link rel="prev" href="total_absorber.html" title="Struct total_absorber">
<link rel="next" href="map.html" title="Class template map">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr>
<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../../boost.png"></td>
<td align="center"><a href="../../../../../../index.html">Home</a></td>
<td align="center"><a href="../../../../../libraries.htm">Libraries</a></td>
<td align="center"><a href="path_to_url">People</a></td>
<td align="center"><a href="path_to_url">FAQ</a></td>
<td align="center"><a href="../../../../../../more/index.htm">More</a></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="total_absorber.html"><img src="../../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../header/boost/icl/map_hpp.html"><img src="../../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="map.html"><img src="../../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
<div class="refentry">
<a name="boost.icl.total_enricher"></a><div class="titlepage"></div>
<div class="refnamediv">
<h2><span class="refentrytitle">Struct total_enricher</span></h2>
<p>boost::icl::total_enricher</p>
</div>
<h2 xmlns:rev="path_to_url~gregod/boost/tools/doc/revision" class="refsynopsisdiv-title">Synopsis</h2>
<div xmlns:rev="path_to_url~gregod/boost/tools/doc/revision" class="refsynopsisdiv"><pre class="synopsis"><span class="comment">// In header: <<a class="link" href="../../header/boost/icl/map_hpp.html" title="Header <boost/icl/map.hpp>">boost/icl/map.hpp</a>>
</span>
<span class="keyword">struct</span> <a class="link" href="total_enricher.html" title="Struct total_enricher">total_enricher</a> <span class="special">{</span>
<span class="keyword">enum</span> <a name="boost.icl.total_enricher.@9"></a>@9 <span class="special">{</span> absorbs_identities = false <span class="special">}</span><span class="special">;</span>
<span class="keyword">enum</span> <a name="boost.icl.total_enricher.@10"></a>@10 <span class="special">{</span> is_total = true <span class="special">}</span><span class="special">;</span>
<span class="special">}</span><span class="special">;</span></pre></div>
</div>
<table xmlns:rev="path_to_url~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 1999-2006 Cortex Software GmbH<p>
        Distributed under the Boost Software License, Version 1.0. (See accompanying
        file LICENSE_1_0.txt or copy at <a href="path_to_url" target="_top">path_to_url</a>)
      </p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="total_absorber.html"><img src="../../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../header/boost/icl/map_hpp.html"><img src="../../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="map.html"><img src="../../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
</body>
</html>
```
|
Polistes fuscatus, whose common name is the dark or northern paper wasp, is widely found in eastern North America, from southern Canada through the southern United States. It often nests around human development. However, it greatly prefers areas in which wood is readily available for use as nest material, therefore they are also found near and in woodlands and savannas. P. fuscatus is a social wasp that is part of a complex society based around a single dominant foundress along with other cofoundresses and a dominance hierarchy.
Taxonomy and phylogeny
P. fuscatus is a part of the order Hymenoptera, the suborder Apocrita, the family of Vespidae, and the subfamily Polistinae, the second-largest subfamily within the Vespidae, of which all are social wasps. The Polistinae comprise four tribes, including Polistini, Epiponini, Mischocyttarini, and Ropalidiini. It is characterized by two major behaviors: colony founding and reproductive dominance. P. fuscatus is part of the Polistini tribe and in the genus Polistes. Polistes is one of the five independent-founding groups. Older taxonomic concepts considered P. fuscatus to have a much broader definition, treating several species, including P. apachus, P. aurifer, P. bellicosus, P. carolina, P. dorsalis, P. metricus, and P. rubiginosus, as varieties or subspecies of a single species. A study in which bootstrap analysis was conducted concluded that the narrower definition of P. fuscatus is part of the New World subgenus Fuscopolistes. In addition to P. fuscatus, Fuscopolistes includes separate species concepts for P. bellicosus, P. apachus, P. aurifer, P. carolina, P. dorsalis, and P. metricus, all of which are the closest relatives of P. fuscatus.
Description
The length of P. fuscatus often ranges between . The fore wing length ranges between ; in general, the fore wing of males is above , whereas females have a fore wing length above . Both males and females have rather slender bodies and have a waist that connects the thorax to the abdomen. The female has a venomous sting. The pain of its sting is commonly compared to being pricked with a large needle, such of that of a tattoo. Like coloration (below), length can also vary by season of emergence.
The physical characteristics of the P. fuscatus are highly dependent on the geographic location of its habitat. Throughout the United States, three color pattern trends represent different regions throughout the country. The male is identified by its darkened apical flagellomeres in addition to its darkened dorsal surface of the apical flagellomeres that is common to other species of wasps. Northern females on the other hand are easily identified by the blackening of their entire bodies which may or may not have markings of other colors. Many southern P. fuscatus individuals, however, have additional markings and may resemble wasps of other species.
The facial and abdominal markings of P. fuscatus are highly variable, including a variety of different patterns, such as small dots, long stripes, clypeus blotches, yellow abdominal dots, upper clypeus stripes, and combinations of both clypeus edge and tip colorations. Furthermore, some wasps have these facial and abdominal patterns in brown and black instead of yellow. These marking colors, however, are often influenced by the geographic location of the wasp.
Distribution and habitat
Polistes fuscatus's distribution along the eastern half of North America ranges from southern Canada to the United States. The most northern extent of its range is Chilcotin, British Columbia, and it reaches as far south as Texas and Florida. Although P. fuscatus prefers wooded areas for the readily available resources to build the nest, it also is often seen in areas which humans inhabit. Nests are produced using wood provided from their habitat, masticated with fluid produced by its mouth to create a pulp-like substance. However, a recently mutated form of the wasp has developed an attraction to colder lamps, such as fluorescent lamps, and they commonly use areas surrounding them as a habitat to lay their eggs.
Another eusocial wasp species, Polistes dominula, has been recognized as a threat to P. fuscatus. P. dominula has been displacing P. fuscatus at many of the formerly P. fuscatus-dominated areas. P. dominula was likely replacing P. fuscatus through indirect or exploitative competition, which was consistent with the finding that P. dominula was significantly more productive than P. fuscatus.
Lifecycle
The lifecycle lasts about one year. In early spring, the new foundress emerging from hibernation starts a new colony, building an umbrella-shaped nest made of a papery material and suspended from a single stalk. The foundress lays eggs into individual cells. The first generation is composed of infertile female workers. In the next generation, multiple foundresses are hatched with communal nests, but the other fertile females accept the dominance of a single female and raise offspring cooperatively. Later in summer, the next year's foundresses are produced, and they mate with males. The newly mated foundresses hibernate in winter, while old founding foundresses, workers (sterile females), and males die.
Colony cycle
Colony stages can be separated into three stages based on nest content: pre-emergence (nest initiation to first adult), emergence-enlargement (emergence of first adult to beginning brood decline), and post-enlargement (no further new cell addition). The total number of wasps in the colony can be well over 200.
Prior to nest initiation, females come together post hibernation in clusters before separating and starting their own colonies in early spring. During this time, the wasps are particularly aggressive when other wasps encroach on their territory; this aggression may be associated with the development of their ovaries. Colony founding can be either independent or swarm founding. Independent founding consists of the founding of small, simple nests that are constructed without a paper envelope by a single or few foundresses with one or several inseminated egg-layers and no workers. Swarm founding includes the construction of large swarms and are founded by multiple foundresses with many workers. Nests are normally initiated by a single foundress during early May; however, if another foundress joins, the nest may be founded by several foundresses. In the early beginning of the founding of the nest, females exhibit much aggression in order to assert dominance to determine the hierarchical ranking; dominant females will exhibit oophagy and become foundresses.
In the beginning of the pre-emergence stage, there are very few eggs in the nest. The mean of the pre-emergence stage is about 48 days long; it includes the egg, larval, and pupal stages and may also be affected by nutrition and temperature. The first eggs that are laid are all female workers that can care for future reproductive females. After the first eggs are laid, both male and female eggs are laid until mid-September; the laying of male eggs has been shown to be correlated with increased oviposition rate of the dominant females. After the hatching of both male and female eggs, only female adults emerge, though eggs laid after the end of July no longer emerge and reach adulthood. The foundress disappears at the end of July, which is when both the number of laid eggs and the rate at which the growth of the nest declines, though other reproductive females may continue to lay eggs. The brood itself begins to decline towards late August, thus leading to the conclusion that brood declination occurs when reproduction no longer occurs. After reaching the post-enlargement behavior, brood destruction occurs in which wasps begin exhibiting abortive behavior by either throwing larva out or feeding them to existing nest mates; if not aborted, these larvae eventually emerge as abnormal adults. After this destruction, the adult wasps abandon the nests. Prior to hibernation, the wasps cluster together in aggregations to mate and then begin hibernation until the next season.
Behavior
Polistes fuscatus is an eusocial organism that has a hierarchical social system usually centered around one foundress. Although this species is classified as eusocial, its social organization is not as evolved as other eusocial organisms. Foundress-initiated interactions can be placed into two broad categories: solicitations and non-solicitations. Solicitations include “receipt of water, nectar, pulp, or prey from returned foragers,” while non-solicitations include, “antennation, lunging/bumping, chasing, grappling, and biting”. Foundresses spend substantially less time off the nest compared to workers. Workers vary significantly in time spent off the nest, which correlates with foraging efforts. More dominant workers spend less time off the nest compared with less dominant workers. Other eusocial insects, such as soldier termites, have developed guard polymorphs that specialize in nest defense. Paper wasps on the other hand, have only workers and foundresses who defend the nest together. The foundress is the most aggressive defender of the nest since she has the most reproductive investment. In some cases, Polistes fuscatus has been shown to share nests with a closely related species, Polistes metricus.
Dominance
Polistes fuscatus has a linear dominance hierarchy that revolves around the fertility of each individual wasp; those that are more dominant within the nest generally have the larger or more developed ovaries. The hierarchy is first formed in the pre-emergence period between foundresses in which they fight aggressively to establish dominance until the hierarchy is established, and only later formed among the workers of the colony such that they are integrated into the hierarchy. When the foundress is removed, the second highest ranking female then takes the foundress place in being the primary egg layer.
The posture of wasps is very telling of which is dominant or subordinate. The dominant wasp generally sits higher than the subordinate, whereas the subordinates have a lower stance. For wasps of equal rank, it is not uncommon for them to continuously try to rise higher and begin to aggressively fight each other until they fall due to losing their foothold. This is often called the falling fight, though it is more common for one wasp to act as a subordinate.
Dominance ranking may also affect the location females may be at within the nest and how far away wasps are spaced. In P. fuscatus, those who are higher on the dominance ranking make continuous darts at other lower-ranking wasps such that other wasps will not closely sit near the higher ranking wasp.
Nest recognition
Members of a colony are able to recognize non-resident wasps by how they approach the nest and by their dominance behavior. Members of the colony approach the nest in a swift and purposeful fashion, whereas wasps that are not part of the colony hover by the nest and do not exhibit a specific direction. Oftentimes, they hover by the nest without landing. If spotted, members of the colony begin to act aggressively and alarm others of the possible intruder. Non-resident wasps are often chased out of the nest within the first five minutes of entering.
On occasion, however, resident wasps may mistake other resident wasps as non-resident wasps. This occurs when the mistaken wasp has just come back from foraging and is too full to obtain the velocity necessary to fly towards the nest as if it were a resident of the nest. Sometimes, to obtain the speed necessary, the wasp attempts to approach the nest several times. Dominance relations also allow for members to recognize nest mates as each wasp is of a particular rank; when a non-resident arrives and has no dominance rank, it is conceived as being unfamiliar. Wasps that are not part of the colony are only accepted without aggression when a dominance hierarchy or conflict has not occurred.
Individual recognition
Individual recognition is highly important in the formation of behavioral interactions between members of different castes within a population of P. fuscatus. The ability to recognize individuals is vital for the existence of a linear dominance hierarchy, in which is a determinant for many aspects of behavior of the paper wasp, including the amount of food and work an individual wasp would be able to have or need to accomplish, the amount of aggression one may receive, and the number of offspring a specific wasp may produce. The ability to recognize individuals also helps dictate how one wasp may treat another, whether it be to hold a dominant role or to act submissively within an interaction.
Polistes fuscatus has the capability to not only recognize where wasps of their population may fall in the hierarchy, but may also be able to recognize individual nest-mates through specific facial and abdominal markings. One study indicated that if the facial and abdominal markings of a wasp were to be changed through painting with different color paints, mainly black or yellow, when the wasp was reintroduced into the nest, it would receive much aggression until it was reaccepted back into the colony (i.e. when the nest-mates felt familiar with that wasp). The study was able to indicate that recognition was specific to the individual given that there was no relationship between specific markings and dominance rank, rather markings were purely used for individual recognition. Furthermore, previous research by Bura and Gamboa indicated that wasps use chemical cues to identify nest-mates and wasps that are foreign to the nest would be immediately chased away within the first five minutes. That the reintroduced wasps were not chased away is a strong indication that facial and abdominal patterns are not used for nest-mate recognition, but is only used for individual recognition.
The ability to recognize individuals may not only be useful for behavioral interaction, but may also be used to reduce aggressive interaction between individuals of different dominance ranks, as well as to help foundresses determine and regulate the amount of resources each individual within the nest receives.
Researchers found that when Polistes fuscatus paper wasps live isolated, their anterior optic tubercle, area responsible for processing of visual colors, grows more relative to mushroom body. Moreover, they lose ability to recognize other paper wasps' colored faces.
Colony defense
A greater part in the division of labor in colony defense is given to the foundress of the colony. Foundresses come into contact and interact with non-resident wasps and returning nest-mates more compared to other resident wasps and are the first to come into contact with the wasp intruders at a much higher rate. Although this finding could be attributed to the foundress's location preference of being at the face of the nest, it was found that even normalizing for the preferred location, foundresses still encountered non-nest mates at a higher rate than expected. In addition, foundress wasps are much less tolerant of non-resident and returning nest mates compared to non-foundress nest mates; however, both the foundress and the colony members become less tolerant of non-resident wasps later on during the colony cycle.
Foundresses may be less tolerant of intruders compared to other colony members due to the consequences of accepting non-nest mates into the colony. Often, female intruders that join the colony attempt to evict the previous foundress and usurp the position. Whereas this result is harmful for the foundress, other members of the nest are not negatively impacted; they would simply remain in the nest and serve the new foundress. Therefore, much of the defense of the colony is left up to the foundress.
Colony defense against vertebrate predators
Polistes fuscatus invests much in the nests. Nests provide locations that members of the colony can return to and act as a central location that can allow for more efficient work in reproduction and foraging behavior. Although nests can provide many benefits, it also has the disadvantage of concentrating all investment in one area; this concentration allows for vertebrate predators to have the ability to destroy an entire nest and therefore destroy all investment. Vertebrate predators of P. fuscatus include foxes, rodents, and birds. Judd's study on the defensive behavior of colonies of the paper wasp indicated that the behavior of wasps was highly dependent on reproductive investment. Prior to the emergence of many adult wasps, the wasps acted much more aggressively; however, when the nest no longer contained any brood and, therefore, not much investment, the wasps were much more likely to flee rather than act aggressively against the vertebrate predator.
Foraging behavior
Edwards indicated in his work that social wasps, “collect water, plant fibers, and carbohydrates, and hunt arthropod prey or scavenge animal proteins.” Water is used for the following processes: nest cooling, construction, and metabolism; plant fibers are used for construction, and carbohydrates and protein is used as food and energy. Water is a vital resource for wasps given its many capabilities, and many wasps will go to a variety of places to obtain it, such as puddles and ponds, or even drinking fountains and faucets. Wasps are able to obtain water by imbibing it and regurgitating it once they return to the nest and are able to use it for construction by mixing it with the masticated plant fibers. These plant fibers are collected from dead wood. By mixing the plant fibers with water, wasps are able to create pulp which is then used to help the construction of nests.
Polistes fuscatus is considered to be a generalist prey forager, but may also act as a specialist due to its habit of commonly returning to a specific location or to prey on the same species. They use the scavenged animal protein from both vertebrates and arthropods such as: caterpillars, flies, alate ants, termites, spiders, bees, and other wasps to help the development of their brood. Social wasps collect carbohydrates from nectar, sap, and fruits and may store them within the nest; some wasps may even steal or consume carbohydrates from other carbohydrate foraging or making arthropods. Although the foraging of social wasps is not as developed as some other arthropod species given its weakness in recruitment, the ability to communicate to nest mates of the location of a resource stronghold, it may impact the greater ecosystem.
Diet
Polistes fuscatus eats arthropod prey, animal proteins, carbohydrates and protein. They will eat caterpillars, flies, ants, termites, spiders, bees, and other wasps. Carbohydrates may include nectar, sap, and fruits.
Differential egg eating
Egg laying by subordinate females and the oophagy of these eggs by dominant female wasps will occur until two weeks after the first female eggs emerge. Prior to these two weeks, egg layers will continuously eat other female wasps’ eggs approximately eleven minutes post being laid; however, no egg layers would ever eat their own eggs, indicating that they could recognize their own individual eggs. It can be conjectured that subordinate egg layers could not lay eggs as quickly as dominant egg layers given the dominant egg layers’ greater supply of ova from the subordinate egg layers indicating that oophagy and oviposition occur close together. Previous studies have also shown that subordinate egg layers may no longer lay eggs after associating with the higher-ranking females after a certain period; this is probably due to the necessity of expending energy during foraging and inability to invest as much into their own eggs.
Sexual behavior
Mating
Prior to hibernation, males and females will undergo mating; they will aggregate together in sunlit areas that are relatively higher in location. Males will sit out waiting to pursue females in order to mate. Once a female is spotted, unlike the usual passive male within the nest, the sexually responsive male will attempt to mount the female and begin performing abdominal stroking. Females will react aggressively in order to struggle free. If the female were to elude the male's hold, the male wasp would return to its position to attempt to copulate again. While copulating, the male exhibits several movements, such as abdominal stroking, extrusion of genitals, grasping the female abdomen, rhythmic antennal vibrations, and grasping of the female antennae. Antennas and antennal movement are important for copulation. Some studies have indicated that females without antenna and females who do not depress their antennae cannot copulate. It has also been proposed that antennal movements help copulation through assisting the attachment of genitals. In addition, research by Post and Jeanne has shown that Polistes fuscatus females have no preference on mating with related or unrelated males.
Reproduction
The mating season for Polistes fuscatus is during the spring and summer, after the nest has been abandoned. Venom is released by females that contains a sex pheromone that induces copulatory behavior in males. The continual release of the venom causes males to try to copulate with females when they are unreceptive on the nest, thus interrupting the activities of the colony. After mating has occurred, the foundress will lay an initial generation of infertile female workers. Later on in the life of the nest, male and fertile female offspring are produced.
The eggs capable of becoming foundresses are laid during the summer. Laying these eggs during the summer ensures that the larvae are well-fed due to the great environmental conditions and abundance of food. These eggs hatch before fall and the resulting offspring hibernate during fall and winter. The new foundresses or co-foundresses emerge in the spring to begin new nests and lay eggs. After laying eggs that will later develop into new foundresses, the old foundresses die along with all accompanying workers and males. As opposed to other eusocial insects such as vespid wasps, Polistes fuscatus have not been found to preferentially mate with their siblings or have sibling recognition mechanisms to aid in kin selection during reproduction. This is surprising since there are many advantages of inbreeding for haplodiploid organisms.
Sex allocation
According to Fisher's theory of sex ratio selection, when competition for mates is population wide, parents will evolve to invest equally in both sexes. However, in eusocial hierarchies, there is often conflict between the workers and the foundress to promote their genes within the colony. The foundress favors a 1:1 sex ratio, but the workers favor female progeny because they share approximately 75% (r=0.75) of their genes with their sisters, provided that the foundress only mated once. In Polistes fuscatus, the sex ratio is usually 1:1 for several reasons. First, males generally leave the nest to scout for mates soon after they reach adulthood, promoting population competition for mates. Second, the number of workers within a colony is relatively small (generally less than 40) making it less likely for a worker to confront the foundress. Also, since colonies are annual and workers are reared by the foundress's subordinate foundresses, the foundress can manipulate how much food they receive as larvae. Polistes fuscatus foundresses likely mate with multiple males so that the relatedness of workers is less than if they all shared the same father's genes. Finally, in the second generation of the foundresses offspring, males are usually reared earlier than reproductive females. Thus when the workers have the opportunity to bias the sex ratio, there are few male larvae present. Additionally, the males have usually completed part of their development, giving them a higher reproductive value than new eggs. Thus the costs of destroying male larvae or replacing the male larvae with their own eggs is not worth the investment.
References
External links
Biolib
Polistes fuscatus on Cirrusimage
fuscatus
Insects described in 1793
Hymenoptera of North America
|
```ocaml
(* Unison file synchronizer: src/remote.mli *)
(* Lwt helper for exception-safe control flow. [unwindProtect body cleanup]
   presumably runs [cleanup exn] when [body ()] fails before re-raising —
   TODO confirm against the implementation in remote.ml. *)
module Thread : sig
val unwindProtect : (unit -> 'a Lwt.t) -> (exn -> unit Lwt.t) -> 'a Lwt.t
end
(* A pair of functions enabling conversion from type 'a to a 2.51-compatible
type and the other way around.
The conversion functions are needed because the 2.51-compatible types must
be frozen in time and never changed in future. Type 'a can and will change
in time as enhancements are added and old code is removed.
When a type is changed, breaking compatibility with 2.51, then respective
conversion functions must also be added. *)
type 'a convV0Fun
(* Build the converter pair from conversion functions for the argument
   type 'a only. *)
val makeConvV0FunArg :
('a -> 'compat)
-> ('compat -> 'a)
-> 'a convV0Fun * 'b convV0Fun
(* Build the converter pair from conversion functions for the result
   type 'b only. *)
val makeConvV0FunRet :
('b -> 'compat)
-> ('compat -> 'b)
-> 'a convV0Fun * 'b convV0Fun
(* Build the converter pair from conversion functions for both the
   argument type 'a and the result type 'b. *)
val makeConvV0Funs :
('a -> 'compata)
-> ('compata -> 'a)
-> ('b -> 'compatb)
-> ('compatb -> 'b)
-> 'a convV0Fun * 'b convV0Fun
(* Register a server function. The result is a function that takes a host
name as argument and either executes locally or else communicates with a
remote server, as appropriate. (Calling registerServerCmd also has the
side effect of registering the command under the given name, so that when
we are running as a server it can be looked up and executed when
requested by a remote client.) *)
(* It is not recommended to use this function in new code unless the cmd is
truly independent of any roots/replicas. Use [registerRootCmd] or one of
the other functions instead. *)
val registerHostCmd :
string (* command name *)
-> ?convV0: 'a convV0Fun * 'b convV0Fun
(* optional 2.51-compatibility conversion functions
for the argument and result types *)
-> 'a Umarshal.t -> 'b Umarshal.t
(* marshalers for the argument and result types *)
-> ('a -> 'b Lwt.t) (* local command *)
-> ( Common.root (* -> host (the root path is ignored) *)
-> 'a (* arguments *)
-> 'b Lwt.t) (* -> (suspended) result *)
(* A variant of registerHostCmd, for constructing a remote command to be
applied to a particular root (host + fspath).
A naming convention: when a `root command' is built from a
corresponding `local command', we name the two functions
<funcName>OnRoot and <funcName>Local *)
val registerRootCmd :
string (* command name *)
-> ?convV0: (Fspath.t * 'a) convV0Fun * 'b convV0Fun
(* 2.51-compatibility functions for args
and result *)
-> 'a Umarshal.t -> 'b Umarshal.t
-> ((Fspath.t * 'a) -> 'b Lwt.t) (* local command; receives the root's
fspath along with the arguments *)
-> ( Common.root (* -> root *)
-> 'a (* additional arguments *)
-> 'b Lwt.t) (* -> (suspended) result *)
(* Test whether a command exists on some root *)
val commandAvailable :
Common.root -> (* root *)
string -> (* command name *)
bool Lwt.t
(* Enter "server mode", reading and processing commands from a remote
client process until killed *)
val beAServer : unit -> unit
(* Socket-server mode: listen on the given addresses and port and serve
   incoming clients. NOTE(review): argument meaning inferred from the
   signature (address list, port string) - confirm in remote.ml. *)
val waitOnPort : string list -> string -> unit
(* Whether the server should be killed when the client terminates *)
val killServer : bool Prefs.t
(* Establish a connection to the remote server (if any) corresponding
to the root and return the canonical name of the root *)
val canonizeRoot :
string -> Clroot.clroot -> (string -> Terminal.termInteract) option ->
Common.root Lwt.t
(* Test if connection to the remote server (if any) corresponding
to the root is established. Always returns true for local roots *)
val isRootConnected : Common.root -> bool
(* Close the connection to server and run all cleanup and [at_conn_close]
handlers. Can also be called for a local root; in this case only the
cleanup and [at_conn_close] handlers are run (as there is no connection
to close). *)
val clientCloseRootConnection : Common.root -> unit
(* Statistics: running totals of bytes sent and received (presumably
   maintained by the transport layer for display purposes). *)
val emittedBytes : float ref
val receivedBytes : float ref
(* Establish a connection to the server.
First call openConnectionStart, then loop:
call openConnectionPrompt, if you get a prompt,
respond with openConnectionReply if desired.
After you get None from openConnectionPrompt,
call openConnectionEnd.
Call openConnectionCancel to abort the connection.
*)
type preconnection
val openConnectionStart : Clroot.clroot -> preconnection option
val openConnectionPrompt : preconnection -> string option
val openConnectionReply : preconnection -> string -> unit
val openConnectionEnd : preconnection -> unit
val openConnectionCancel : preconnection -> unit
(* return the canonical name of the root. The connection
to the root must have already been established by
the openConnection sequence. *)
val canonize : Clroot.clroot -> Common.root
(****)
(* Identifier attached to each message, presumably used to multiplex
   concurrent requests over a single connection. *)
type msgId = int
module MsgIdMap : Map.S with type key = msgId
(* Allocate a fresh message id. *)
val newMsgId : unit -> msgId
(* An established client-server connection (opaque). *)
type connection
(* Protocol version in effect for the given connection. *)
val connectionVersion : connection -> int
val connectionOfRoot : Common.root -> connection
(* Low-level variant of [registerHostCmd]: the registered function is
   invoked with the connection it runs on. *)
val registerServerCmd :
string
-> ?convV0: 'a convV0Fun * 'b convV0Fun
-> 'a Umarshal.t -> 'b Umarshal.t
-> (connection -> 'a -> 'b Lwt.t)
-> connection -> 'a -> 'b Lwt.t
(* Size in bytes of the wire encoding of an int, with the corresponding
   encoder and decoder. *)
val intSize : int
val encodeInt : int -> Bytearray.t * int * int
val decodeInt : Bytearray.t -> int -> int
val registerRootCmdWithConnection :
string (* command name *)
-> ?convV0: 'a convV0Fun * 'b convV0Fun
(* 2.51-compatibility functions for args
and result *)
-> 'a Umarshal.t -> 'b Umarshal.t
-> (connection -> 'a -> 'b Lwt.t) (* local command *)
-> Common.root (* root on which the command is executed *)
-> Common.root (* other root *)
-> 'a (* additional arguments *)
-> 'b Lwt.t (* result *)
val streamingActivated : bool Prefs.t
(* Register a streaming command: the first function pair marshals/unmarshals
   stream items, the third processes each received item.
   NOTE(review): semantics inferred from the signature - confirm against
   the implementation before relying on this description. *)
val registerStreamCmd :
string ->
(connection -> 'a ->
(Bytearray.t * int * int) list -> (Bytearray.t * int * int) list * int) *
(connection -> Bytearray.t -> int -> 'a) ->
(connection -> 'a -> unit) ->
connection -> (('a -> unit Lwt.t) -> 'b Lwt.t) -> 'b Lwt.t
(* Register a function to be run when the connection between client and server
is closed (willingly or unexpectedly). The function should not raise
exceptions. If it does then running some of the other registered functions
may be skipped (which may not be an issue as the exception is likely going
to quit the process).
Registered functions are only expected to be useful when the connection is
closed but the process keeps running (a socket server, for example). Do not
use it as a substitute for [at_exit].
These functions are additionally run when "closing" a local sync when there
is no actual connection.
Keep in mind that a function registered like this can be called immediately
when a lost connection is detected, before any exception indicating lost
connection is raised. *)
val at_conn_close : ?only_server:bool -> (unit -> unit) -> unit
(* Register resources to be cleaned up when the connection between client and
server closes (normally or exceptionally). This cleanup is additionally run
when "closing" a local sync when there is no actual connection.
Closing the resources is still the responsibility of the code opening the
resources but it is not always possible to run the resource cleanup code
(due to an Lwt thread being stopped, for example). In those cases the
registered resources are cleaned up when the connection is closed, as a
last resort.
The returned functions must be used to track the resources registered for
cleanup. *)
type ('a, 'b, 'c) resourceC =
{ register : 'a -> 'a; (* Register an opened resource for cleanup *)
release : 'a -> 'b; (* Unregister and close the resource normally *)
release_noerr : 'a -> 'c } (* Same as above; don't raise exceptions *)
(* Build a [resourceC] tracker from the two given close functions. *)
val resourceWithConnCleanup :
('a -> 'b) (* Function to close the resource normally *)
-> ('a -> 'c) (* Function to close the resource, don't raise exceptions *)
-> ('a, 'b, 'c) resourceC (* Functions to track resources for cleanup *)
(* Make an [Lwt_util.region] which is automatically purged and reset when
the connection between client and server closes. This cleanup is also
run when "closing" a local sync when there is no actual connection. *)
val lwtRegionWithConnCleanup : int -> Lwt_util.region ref
```
|
```c++
/*
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
*
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "platform/graphics/filters/FEConvolveMatrix.h"
#include "SkMatrixConvolutionImageFilter.h"
#include "platform/graphics/filters/SkiaImageFilterBuilder.h"
#include "platform/text/TextStream.h"
#include "wtf/OwnPtr.h"
namespace blink {
// Filter effect implementing SVG <feConvolveMatrix>. All parameters are
// stored verbatim; only the kernel size is validated (must be positive in
// both dimensions). NOTE(review): m_kernelUnitLength is stored but not read
// by createImageFilter() below - presumably consumed elsewhere; confirm.
FEConvolveMatrix::FEConvolveMatrix(Filter* filter, const IntSize& kernelSize,
    float divisor, float bias, const IntPoint& targetOffset, EdgeModeType edgeMode,
    const FloatPoint& kernelUnitLength, bool preserveAlpha, const Vector<float>& kernelMatrix)
    : FilterEffect(filter)
    , m_kernelSize(kernelSize)
    , m_divisor(divisor)
    , m_bias(bias)
    , m_targetOffset(targetOffset)
    , m_edgeMode(edgeMode)
    , m_kernelUnitLength(kernelUnitLength)
    , m_preserveAlpha(preserveAlpha)
    , m_kernelMatrix(kernelMatrix)
{
    ASSERT(m_kernelSize.width() > 0);
    ASSERT(m_kernelSize.height() > 0);
}
// Factory: heap-allocates an FEConvolveMatrix and wraps it in the
// garbage-collection-aware smart pointer used by Blink's oilpan transition.
PassRefPtrWillBeRawPtr<FEConvolveMatrix> FEConvolveMatrix::create(Filter* filter, const IntSize& kernelSize,
    float divisor, float bias, const IntPoint& targetOffset, EdgeModeType edgeMode,
    const FloatPoint& kernelUnitLength, bool preserveAlpha, const Vector<float>& kernelMatrix)
{
    return adoptRefWillBeNoop(new FEConvolveMatrix(filter, kernelSize, divisor, bias, targetOffset, edgeMode, kernelUnitLength,
        preserveAlpha, kernelMatrix));
}
// Maps a paint rectangle through the convolution's spatial extent.
// Forward mapping shifts by -targetOffset; backward mapping shifts by
// (targetOffset - kernelSize). Both then grow the rect by the kernel size,
// since each output pixel depends on a kernel-sized input neighborhood.
FloatRect FEConvolveMatrix::mapPaintRect(const FloatRect& rect, bool forward)
{
    FloatRect result = rect;
    result.moveBy(forward ? -m_targetOffset : m_targetOffset - m_kernelSize);
    result.expand(m_kernelSize);
    return result;
}
IntSize FEConvolveMatrix::kernelSize() const
{
return m_kernelSize;
}
void FEConvolveMatrix::setKernelSize(const IntSize& kernelSize)
{
ASSERT(kernelSize.width() > 0);
ASSERT(kernelSize.height() > 0);
m_kernelSize = kernelSize;
}
const Vector<float>& FEConvolveMatrix::kernel() const
{
return m_kernelMatrix;
}
void FEConvolveMatrix::setKernel(const Vector<float>& kernel)
{
m_kernelMatrix = kernel;
}
float FEConvolveMatrix::divisor() const
{
return m_divisor;
}
bool FEConvolveMatrix::setDivisor(float divisor)
{
ASSERT(divisor);
if (m_divisor == divisor)
return false;
m_divisor = divisor;
return true;
}
float FEConvolveMatrix::bias() const
{
return m_bias;
}
bool FEConvolveMatrix::setBias(float bias)
{
if (m_bias == bias)
return false;
m_bias = bias;
return true;
}
IntPoint FEConvolveMatrix::targetOffset() const
{
return m_targetOffset;
}
bool FEConvolveMatrix::setTargetOffset(const IntPoint& targetOffset)
{
if (m_targetOffset == targetOffset)
return false;
m_targetOffset = targetOffset;
return true;
}
EdgeModeType FEConvolveMatrix::edgeMode() const
{
return m_edgeMode;
}
bool FEConvolveMatrix::setEdgeMode(EdgeModeType edgeMode)
{
if (m_edgeMode == edgeMode)
return false;
m_edgeMode = edgeMode;
return true;
}
FloatPoint FEConvolveMatrix::kernelUnitLength() const
{
return m_kernelUnitLength;
}
bool FEConvolveMatrix::setKernelUnitLength(const FloatPoint& kernelUnitLength)
{
ASSERT(kernelUnitLength.x() > 0);
ASSERT(kernelUnitLength.y() > 0);
if (m_kernelUnitLength == kernelUnitLength)
return false;
m_kernelUnitLength = kernelUnitLength;
return true;
}
bool FEConvolveMatrix::preserveAlpha() const
{
return m_preserveAlpha;
}
bool FEConvolveMatrix::setPreserveAlpha(bool preserveAlpha)
{
if (m_preserveAlpha == preserveAlpha)
return false;
m_preserveAlpha = preserveAlpha;
return true;
}
// Translates the SVG edge-mode attribute into Skia's tile mode.
// EDGEMODE_DUPLICATE and any unrecognized value clamp to the edge pixel.
SkMatrixConvolutionImageFilter::TileMode toSkiaTileMode(EdgeModeType edgeMode)
{
    if (edgeMode == EDGEMODE_WRAP)
        return SkMatrixConvolutionImageFilter::kRepeat_TileMode;
    if (edgeMode == EDGEMODE_NONE)
        return SkMatrixConvolutionImageFilter::kClampToBlack_TileMode;
    return SkMatrixConvolutionImageFilter::kClamp_TileMode;
}
// Builds the Skia SkMatrixConvolutionImageFilter for this effect from the
// stored SVG attributes.
PassRefPtr<SkImageFilter> FEConvolveMatrix::createImageFilter(SkiaImageFilterBuilder* builder)
{
    RefPtr<SkImageFilter> input(builder->build(inputEffect(0), operatingColorSpace()));
    SkISize kernelSize(SkISize::Make(m_kernelSize.width(), m_kernelSize.height()));
    int numElements = kernelSize.width() * kernelSize.height();
    // Skia applies the reciprocal gain where SVG divides by the divisor.
    SkScalar gain = SkFloatToScalar(1.0f / m_divisor);
    // SVG bias is in [0,1] color units; Skia expects 8-bit units, hence *255.
    SkScalar bias = SkFloatToScalar(m_bias * 255);
    SkIPoint target = SkIPoint::Make(m_targetOffset.x(), m_targetOffset.y());
    SkMatrixConvolutionImageFilter::TileMode tileMode = toSkiaTileMode(m_edgeMode);
    bool convolveAlpha = !m_preserveAlpha;
    OwnPtr<SkScalar[]> kernel = adoptArrayPtr(new SkScalar[numElements]);
    // The kernel is copied in reverse order (numElements - 1 - i):
    // presumably Skia's kernel orientation is rotated 180 degrees relative
    // to the SVG kernelMatrix - confirm against the Skia documentation.
    for (int i = 0; i < numElements; ++i)
        kernel[i] = SkFloatToScalar(m_kernelMatrix[numElements - 1 - i]);
    SkImageFilter::CropRect cropRect = getCropRect(builder->cropOffset());
    return adoptRef(SkMatrixConvolutionImageFilter::Create(kernelSize, kernel.get(), gain, bias, target, tileMode, convolveAlpha, input.get(), &cropRect));
}
// Writes the symbolic name of an edge mode to the filter-dump stream.
static TextStream& operator<<(TextStream& ts, const EdgeModeType& type)
{
    const char* name = "";
    switch (type) {
    case EDGEMODE_UNKNOWN:
        name = "UNKNOWN";
        break;
    case EDGEMODE_DUPLICATE:
        name = "DUPLICATE";
        break;
    case EDGEMODE_WRAP:
        name = "WRAP";
        break;
    case EDGEMODE_NONE:
        name = "NONE";
        break;
    }
    ts << name;
    return ts;
}
// Dumps this effect (and, recursively, its input) in the text format used
// by layout/filter tests. Attribute order mirrors the SVG attributes.
TextStream& FEConvolveMatrix::externalRepresentation(TextStream& ts, int indent) const
{
    writeIndent(ts, indent);
    ts << "[feConvolveMatrix";
    FilterEffect::externalRepresentation(ts);
    ts << " order=\"" << m_kernelSize << "\" "
       << "kernelMatrix=\"" << m_kernelMatrix << "\" "
       << "divisor=\"" << m_divisor << "\" "
       << "bias=\"" << m_bias << "\" "
       << "target=\"" << m_targetOffset << "\" "
       << "edgeMode=\"" << m_edgeMode << "\" "
       << "kernelUnitLength=\"" << m_kernelUnitLength << "\" "
       << "preserveAlpha=\"" << m_preserveAlpha << "\"]\n";
    inputEffect(0)->externalRepresentation(ts, indent + 1);
    return ts;
}
} // namespace blink
```
|
```c++
#include <Core/MySQL/PacketsProtocolText.h>
#include <Core/MySQL/MySQLUtils.h>
#include <Columns/ColumnNullable.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <Common/assert_cast.h>
#include <Core/MySQL/IMySQLWritePacket.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesDecimal.h>
namespace DB
{
namespace MySQLProtocol
{
namespace ProtocolText
{
// Pre-serializes one result row into the MySQL text-protocol form, caching
// each cell's string in `serialized` and accumulating the total payload
// size, so that writePayloadImpl() can emit the row without re-serializing.
ResultSetRow::ResultSetRow(const Serializations & serializations, const DataTypes & data_types, const Columns & columns_, size_t row_num_)
    : columns(columns_), row_num(row_num_)
{
    // MySQL text protocol represents booleans as "1"/"0".
    FormatSettings format_settings = {.bool_true_representation = "1", .bool_false_representation = "0"};
    for (size_t i = 0; i < columns.size(); ++i)
    {
        DataTypePtr data_type = removeLowCardinalityAndNullable(data_types[i]);
        TypeIndex type_index = data_type->getTypeId();
        if (columns[i]->isNullAt(row_num))
        {
            // 0xfb is the single-byte NULL marker of the MySQL text protocol.
            payload_size += 1;
            serialized.emplace_back("\xfb");
        }
        // Arbitrary precision DateTime64 needs to be forced into precision 6, as it is the maximum that MySQL supports
        else if (type_index == TypeIndex::DateTime64)
        {
            WriteBufferFromOwnString ostr;
            ColumnPtr col = columns[i]->convertToFullIfNeeded();
            if (col->isNullable())
                col = assert_cast<const ColumnNullable &>(*col).getNestedColumnPtr();
            auto components = MySQLUtils::getNormalizedDateTime64Components(data_type, col, row_num);
            // "YYYY-MM-DD hh:mm:ss" followed by exactly 6 fractional digits.
            writeDateTimeText<'-', ':', ' '>(LocalDateTime(components.whole, DateLUT::instance(getDateTimeTimezone(*data_type))), ostr);
            ostr.write('.');
            writeDateTime64FractionalText<DateTime64>(components.fractional, 6, ostr);
            // Size must be computed before the string is moved out.
            payload_size += getLengthEncodedStringSize(ostr.str());
            serialized.push_back(std::move(ostr.str()));
        }
        else
        {
            // All other types use their ClickHouse text serialization.
            WriteBufferFromOwnString ostr;
            serializations[i]->serializeText(*columns[i], row_num, ostr, format_settings);
            payload_size += getLengthEncodedStringSize(ostr.str());
            serialized.push_back(std::move(ostr.str()));
        }
    }
}
// Total wire size of the row, accumulated during construction.
size_t ResultSetRow::getPayloadSize() const
{
    return payload_size;
}
// Emits the pre-serialized cells: the cached single-byte NULL marker for
// NULL cells, a length-encoded string for everything else.
void ResultSetRow::writePayloadImpl(WriteBuffer & buffer) const
{
    for (size_t col = 0; col < columns.size(); ++col)
    {
        if (columns[col]->isNullAt(row_num))
            buffer.write(serialized[col].data(), 1);
        else
            writeLengthEncodedString(serialized[col], buffer);
    }
}
// Parses a COM_FIELD_LIST request: a NUL-terminated table name followed by
// a field wildcard that extends to the end of the packet.
void ComFieldList::readPayloadImpl(ReadBuffer & payload)
{
    // Command byte has been already read from payload.
    readNullTerminated(table, payload);
    readStringUntilEOF(field_wildcard, payload);
}
// Default instance; used when the definition is populated later via
// readPayloadImpl().
ColumnDefinition::ColumnDefinition()
    : character_set(0x00), column_length(0), column_type(MYSQL_TYPE_DECIMAL), flags(0x00)
{
}

// Full constructor. `with_defaults_` marks a COM_FIELD_LIST response,
// which carries an extra default-values field (see writePayloadImpl).
ColumnDefinition::ColumnDefinition(
    String schema_, String table_, String org_table_, String name_, String org_name_, uint16_t character_set_, uint32_t column_length_,
    ColumnType column_type_, uint16_t flags_, uint8_t decimals_, bool with_defaults_)
    : schema(std::move(schema_)), table(std::move(table_)), org_table(std::move(org_table_)), name(std::move(name_)),
    org_name(std::move(org_name_)), character_set(character_set_), column_length(column_length_), column_type(column_type_),
    flags(flags_), decimals(decimals_), is_comm_field_list_response(with_defaults_)
{
}

// Convenience constructor for result-set metadata where only the column
// name matters; schema/table fields are left empty.
ColumnDefinition::ColumnDefinition(
    String name_, uint16_t character_set_, uint32_t column_length_, ColumnType column_type_, uint16_t flags_, uint8_t decimals_)
    : ColumnDefinition("", "", "", std::move(name_), "", character_set_, column_length_, column_type_, flags_, decimals_)
{
}
// Wire size of a ColumnDefinition41 packet. The fixed 12 bytes are:
// character_set (2) + column_length (4) + column_type (1) + flags (2) +
// decimals (1) + trailing filler (2), matching writePayloadImpl(). The
// final `is_comm_field_list_response` adds one byte for the 0xfb
// default-values marker emitted for COM_FIELD_LIST responses.
size_t ColumnDefinition::getPayloadSize() const
{
    return 12 +
        getLengthEncodedStringSize("def") +
        getLengthEncodedStringSize(schema) +
        getLengthEncodedStringSize(table) +
        getLengthEncodedStringSize(org_table) +
        getLengthEncodedStringSize(name) +
        getLengthEncodedStringSize(org_name) +
        getLengthEncodedNumberSize(next_length) +
        is_comm_field_list_response;
}
// Parses a ColumnDefinition41 packet; the mirror image of writePayloadImpl.
// Fixed-width fields are read in wire (little-endian) byte order directly
// into the members - NOTE(review): assumes a little-endian host, as does
// the writer below.
void ColumnDefinition::readPayloadImpl(ReadBuffer & payload)
{
    String def;
    readLengthEncodedString(def, payload);
    assert(def == "def");  // catalog is always the literal "def"
    readLengthEncodedString(schema, payload);
    readLengthEncodedString(table, payload);
    readLengthEncodedString(org_table, payload);
    readLengthEncodedString(name, payload);
    readLengthEncodedString(org_name, payload);
    next_length = readLengthEncodedNumber(payload);
    payload.readStrict(reinterpret_cast<char *>(&character_set), 2);
    payload.readStrict(reinterpret_cast<char *>(&column_length), 4);
    payload.readStrict(reinterpret_cast<char *>(&column_type), 1);
    payload.readStrict(reinterpret_cast<char *>(&flags), 2);
    payload.readStrict(reinterpret_cast<char *>(&decimals), 1);
    payload.ignore(2);  // trailing filler bytes
}
// Serializes a ColumnDefinition41 packet. Field order and widths must stay
// in sync with readPayloadImpl() and getPayloadSize().
void ColumnDefinition::writePayloadImpl(WriteBuffer & buffer) const
{
    writeLengthEncodedString(std::string("def"), buffer); /// always "def"
    writeLengthEncodedString(schema, buffer);
    writeLengthEncodedString(table, buffer);
    writeLengthEncodedString(org_table, buffer);
    writeLengthEncodedString(name, buffer);
    writeLengthEncodedString(org_name, buffer);
    writeLengthEncodedNumber(next_length, buffer);
    buffer.write(reinterpret_cast<const char *>(&character_set), 2);
    buffer.write(reinterpret_cast<const char *>(&column_length), 4);
    buffer.write(reinterpret_cast<const char *>(&column_type), 1);
    buffer.write(reinterpret_cast<const char *>(&flags), 2);
    buffer.write(reinterpret_cast<const char *>(&decimals), 1);
    writeChar(0x0, 2, buffer);  // trailing filler bytes
    if (is_comm_field_list_response)
    {
        /// We should write length encoded int with string size
        /// followed by string with some "default values" (possibly it's column defaults).
        /// But we just send NULL for simplicity.
        writeChar(0xfb, buffer);
    }
}
// Maps a ClickHouse data type to the MySQL column metadata (type code,
// charset, flags, decimals) sent in the result-set header. Types with no
// MySQL equivalent fall back to MYSQL_TYPE_STRING with utf8_general_ci.
ColumnDefinition getColumnDefinition(const String & column_name, const DataTypePtr & data_type)
{
    ColumnType column_type;
    CharacterSet charset = CharacterSet::binary;
    int flags = 0;
    uint8_t decimals = 0;
    DataTypePtr normalized_data_type = removeLowCardinalityAndNullable(data_type);
    TypeIndex type_index = normalized_data_type->getTypeId();
    switch (type_index)
    {
        case TypeIndex::UInt8:
            column_type = ColumnType::MYSQL_TYPE_TINY;
            flags = ColumnDefinitionFlags::BINARY_FLAG | ColumnDefinitionFlags::UNSIGNED_FLAG;
            break;
        case TypeIndex::UInt16:
            column_type = ColumnType::MYSQL_TYPE_SHORT;
            flags = ColumnDefinitionFlags::BINARY_FLAG | ColumnDefinitionFlags::UNSIGNED_FLAG;
            break;
        case TypeIndex::UInt32:
            column_type = ColumnType::MYSQL_TYPE_LONG;
            flags = ColumnDefinitionFlags::BINARY_FLAG | ColumnDefinitionFlags::UNSIGNED_FLAG;
            break;
        case TypeIndex::UInt64:
            column_type = ColumnType::MYSQL_TYPE_LONGLONG;
            flags = ColumnDefinitionFlags::BINARY_FLAG | ColumnDefinitionFlags::UNSIGNED_FLAG;
            break;
        case TypeIndex::Int8:
            column_type = ColumnType::MYSQL_TYPE_TINY;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::Int16:
            column_type = ColumnType::MYSQL_TYPE_SHORT;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::Int32:
            column_type = ColumnType::MYSQL_TYPE_LONG;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::Int64:
            column_type = ColumnType::MYSQL_TYPE_LONGLONG;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::Float32:
            column_type = ColumnType::MYSQL_TYPE_FLOAT;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            // 31 means "no fixed number of decimals" for floating-point columns.
            decimals = 31;
            break;
        case TypeIndex::Float64:
            column_type = ColumnType::MYSQL_TYPE_DOUBLE;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            decimals = 31;
            break;
        case TypeIndex::Date:
        case TypeIndex::Date32:
            column_type = ColumnType::MYSQL_TYPE_DATE;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::DateTime:
        case TypeIndex::DateTime64:
            column_type = ColumnType::MYSQL_TYPE_DATETIME;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::Decimal32:
        case TypeIndex::Decimal64:
            column_type = ColumnType::MYSQL_TYPE_DECIMAL;
            flags = ColumnDefinitionFlags::BINARY_FLAG;
            break;
        case TypeIndex::Decimal128: {
            // MySQL DECIMAL supports at most 65 digits of precision and a scale of 30;
            // Decimal256 always exceeds that range and is handled in the default case.
            // See the MySQL reference manual, "Precision Math - DECIMAL Data Type
            // Characteristics".
            const auto & type = assert_cast<const DataTypeDecimal128 &>(*normalized_data_type);
            if (type.getPrecision() > 65 || type.getScale() > 30)
            {
                column_type = ColumnType::MYSQL_TYPE_STRING;
                charset = CharacterSet::utf8_general_ci;
            }
            else
            {
                column_type = ColumnType::MYSQL_TYPE_DECIMAL;
                flags = ColumnDefinitionFlags::BINARY_FLAG;
            }
            break;
        }
        default:
            column_type = ColumnType::MYSQL_TYPE_STRING;
            charset = CharacterSet::utf8_general_ci;
            break;
    }
    return ColumnDefinition(column_name, charset, 0, column_type, flags, decimals);
}
}
}
}
```
|
De vergeløse () is a 1939 Norwegian drama film directed by Leif Sinding, based on a book by Gabriel Scott, that stars Georg Richter and Karin Meyer.
Plot
Albert (Georg Richter) is placed in an orphanage, where he is treated like a slave. He tries to escape, but is caught. Then a young girl, Gunda (Eva Lunde), appears.
Cast
Georg Richter as Albert
Karin Meyer as Albert's mom
Eva Lunde as Gunda
Tryggve Larssen as Flugum, gårdbruker
Kjell Willy Johansen as Anton
Irene Thomsen Lie as Dina
Per Kvist as Mathias
Asbjørn Toms as Lorang
Henry Nyrén as Petter
Harry Braude as Doffen
Aage Johansen as Kalle
P. Steenfeldt Foss as Emil
Ole Johansen as Jakob
Frank Martinsen as Nils
Hans Bille as Inspektøren
Joachim Holst-Jensen as Wollert
Harald Steen as Sjømannen
External links
De vergeløse at the Norwegian Film Institute
De vergeløse at MUBI
1939 films
1939 drama films
Films directed by Leif Sinding
Norwegian drama films
Norwegian black-and-white films
1930s Norwegian-language films
|
```xml
import * as React from 'react';
import { StyleSheet, View } from 'react-native';
import { Snackbar, Button, List, Text, Switch } from 'react-native-paper';
import { PreferencesContext, useExampleTheme } from '..';
import ScreenWrapper from '../ScreenWrapper';
// Demo messages: a message that fits on one line vs. one that wraps.
const SHORT_MESSAGE = 'Single-line snackbar';
const LONG_MESSAGE =
  'Snackbar with longer message which does not fit in one line';
/**
 * Example screen demonstrating the Paper Snackbar with toggles for the
 * action button, close icon (Material You only), message length, and
 * action-label length.
 *
 * Fix: state toggles previously spread the captured `options` object
 * (`setOptions({ ...options, ... })`), which can drop updates when several
 * toggles fire before a re-render. All updates now use the functional
 * updater form so they always operate on the latest state.
 */
const SnackbarExample = () => {
  const preferences = React.useContext(PreferencesContext);
  const theme = useExampleTheme();

  // All demo switches live in one state object.
  const [options, setOptions] = React.useState({
    showSnackbar: false,
    showAction: true,
    showCloseIcon: false,
    showLongerMessage: false,
    showLongerAction: false,
  });

  const {
    showSnackbar,
    showAction,
    showCloseIcon,
    showLongerMessage,
    showLongerAction,
  } = options;

  // Flip a single boolean flag based on the latest state, never on a
  // possibly-stale closure snapshot.
  const toggle = (key: keyof typeof options) =>
    setOptions((prev) => ({ ...prev, [key]: !prev[key] }));

  // Hide the snackbar (used by both onDismiss and the close icon).
  const dismiss = () =>
    setOptions((prev) => ({ ...prev, showSnackbar: false }));

  const action = {
    label: showLongerAction ? 'Toggle Theme' : 'Action',
    onPress: () => {
      preferences?.toggleTheme();
    },
  };

  return (
    <>
      <ScreenWrapper contentContainerStyle={styles.container}>
        <List.Section title="Snackbar options">
          <View style={styles.row}>
            <Text>Action button</Text>
            <Switch
              value={showAction}
              onValueChange={() => toggle('showAction')}
            />
          </View>
          {theme.isV3 && (
            <View style={styles.row}>
              <Text>Close icon button</Text>
              <Switch
                value={showCloseIcon}
                onValueChange={() => toggle('showCloseIcon')}
              />
            </View>
          )}
          <View style={styles.row}>
            <Text>Longer message</Text>
            <Switch
              value={showLongerMessage}
              onValueChange={() => toggle('showLongerMessage')}
            />
          </View>
          <View style={styles.row}>
            <Text>Longer action</Text>
            <Switch
              value={showLongerAction}
              onValueChange={() => toggle('showLongerAction')}
            />
          </View>
        </List.Section>
        <View style={styles.wrapper}>
          <Button
            mode="outlined"
            onPress={() => toggle('showSnackbar')}
          >
            {showSnackbar ? 'Hide' : 'Show'}
          </Button>
        </View>
      </ScreenWrapper>
      <Snackbar
        visible={showSnackbar}
        onDismiss={dismiss}
        action={showAction ? action : undefined}
        onIconPress={showCloseIcon ? dismiss : undefined}
        duration={Snackbar.DURATION_MEDIUM}
        style={showLongerAction && styles.longerAction}
      >
        {showLongerMessage ? LONG_MESSAGE : SHORT_MESSAGE}
      </Snackbar>
    </>
  );
};

SnackbarExample.title = 'Snackbar';
// Layout styles for the example screen.
const styles = StyleSheet.create({
  container: {
    flex: 1,
  },
  // Centers the Show/Hide button.
  wrapper: {
    justifyContent: 'center',
    alignItems: 'center',
  },
  // One labelled switch per row.
  row: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'space-between',
    paddingVertical: 8,
    paddingHorizontal: 16,
  },
  // Stacks the snackbar content when the long action label is enabled.
  longerAction: {
    flexDirection: 'column',
  },
});
export default SnackbarExample;
```
|
Charles William Reed (21 March 1912 – 28 July 1964), known as Chick or Charlie Reed, was an English professional footballer who scored 55 goals from 187 appearances in the Football League playing as a forward or left half for Sheffield United, Lincoln City, Southport, Chesterfield, Mansfield Town and Notts County.
Football career
Reed was born in Holbeach, Lincolnshire, and played local football in the Lincolnshire area before a successful trial led to his signing for Sheffield United. He made his debut in the 1931–32 season, and scored his first goal on 19 September 1931 as Sheffield United beat Birmingham 3–1 in the Football League First Division.
After two seasons with Sheffield United, during which he played only twice in the League, Reed moved to Lincoln City, newly promoted to the Second Division. He played in only a third of Lincoln's games in his first season, and around a half in the next, nevertheless finishing as the club's leading scorer for 1933–34, though with only seven goals as the club were relegated to the Third Division North. Reed appeared regularly in 1934–35, and scored 17 goals from only 25 games – only one goal behind Johnny Campbell's tally of 18, which had come from a full season's worth of matches – before leaving the club to join Third Division rivals Southport in February 1935.
A highlight of the year Reed spent at Southport came against Hartlepools United in April 1935 when, after "picking up the ball in his own half, he ran 70 yards and shrugged off four challenges before smacking it into the net to score a memorable goal".
Reed joined Chesterfield for a fee of £800, scored on debut in a 3–1 defeat of Tranmere Rovers, and helped the club to the championship of the Third Division North and consequent promotion. He played regularly in the Second Division until January 1937, when he played his last game for the club, the third-round FA Cup defeat to Arsenal, and returned to non-League football with Spalding United.
He spent the 1937–38 season with Mansfield Town and the next season with Notts County before dropping back into Lincolnshire local football with Pinchbeck and then with Crowland.
Reed died at his home in Spalding, Lincolnshire, in 1964 at the age of 52.
References
1912 births
1964 deaths
People from Holbeach
English men's footballers
Men's association football forwards
Spalding United F.C. players
Sheffield United F.C. players
Lincoln City F.C. players
Southport F.C. players
Chesterfield F.C. players
Mansfield Town F.C. players
Notts County F.C. players
English Football League players
|
```java
package edu.umd.cs.findbugs.detect;
import edu.umd.cs.findbugs.AbstractIntegrationTest;
import edu.umd.cs.findbugs.test.matcher.BugInstanceMatcher;
import edu.umd.cs.findbugs.test.matcher.BugInstanceMatcherBuilder;
import org.junit.jupiter.api.Test;
import static edu.umd.cs.findbugs.test.CountMatcher.containsExactly;
import static org.hamcrest.MatcherAssert.assertThat;
/**
 * Regression test for GitHub issue #1498: analysing the Issue1498 fixture
 * must report exactly three IM_MULTIPLYING_RESULT_OF_IREM bugs.
 */
class Issue1498Test extends AbstractIntegrationTest {
    @Test
    void testIssue() {
        // NOTE(review): "frc.debug" enables detector debug output; this looks
        // like a leftover from debugging - confirm whether it is still needed,
        // and note it is never cleared, so it leaks into subsequent tests.
        System.setProperty("frc.debug", "true");
        performAnalysis("ghIssues/Issue1498.class");
        BugInstanceMatcher bugTypeMatcher = new BugInstanceMatcherBuilder().bugType("IM_MULTIPLYING_RESULT_OF_IREM").build();
        assertThat(getBugCollection(), containsExactly(3, bugTypeMatcher));
    }
}
```
|
Sir William Lawrence, 1st Baronet (16 July 1783 – 5 July 1867) was an English surgeon who became President of the Royal College of Surgeons of London and Serjeant Surgeon to the Queen.
In his mid-thirties, he published two books of his lectures which contained pre-Darwinian ideas on man's nature and, effectively, on evolution. He was forced to withdraw the second (1819) book after fierce criticism; the Lord Chancellor ruled it blasphemous. Lawrence's transition to respectability occurred gradually, and his surgical career was highly successful. In 1822, Lawrence was elected a member of the American Philosophical Society in Philadelphia.
Lawrence had a long and successful career as a surgeon. He reached the top of his profession, and just before his death in 1867 the Queen rewarded him with a baronetcy (see Lawrence baronets).
Early life and education
Lawrence was born in Cirencester, Gloucestershire, the son of William Lawrence, the town's chief surgeon and physician, and Judith Wood. His father's side of the family were descended from the Fettiplace family; his great-great-grandfather (also William Lawrence) married Elizabeth Fettiplace, granddaughter of Sir Edmund Fettiplace. His younger brother Charles Lawrence was one of the founding members of the Royal Agricultural College at Cirencester.
He was educated at Elmore Court School in Gloucester. At 15, he was apprenticed to, and lived with, John Abernethy (FRS 1796) for five years.
Career
Surgical career
Said to be a brilliant scholar, Lawrence was the translator of several anatomical works written in Latin, and was fully conversant with the latest research on the continent. He had good looks and a charming manner, and was a fine lecturer. His quality as a surgeon was never questioned. Lawrence helped the radical campaigner Thomas Wakley found the Lancet journal, and was prominent at mass meetings for medical reform in 1826. Elected to the Council of the RCS in 1828, he became its president in 1846, and again in 1855. He delivered their Hunterian Oration in 1834.
During Lawrence's surgical career he held the posts of Professor of Anatomy and Surgery, Royal College of Surgeons (1815–1822); Surgeon to the hospitals of Bridewell and Bethlem, and to the London Infirmary for Diseases of the Eye; Demonstrator of Anatomy, then Assistant Surgeon, later Surgeon, St Bartholomew's Hospital (1824–1865). Later in his career, he was appointed Surgeon Extraordinary, later Serjeant Surgeon, to the Queen. His specialty was ophthalmology, although he practised in and lectured and wrote on all branches of surgery. Pugin and Queen Victoria were among his patients with eye problems.
Shelley and his second wife Mary Shelley consulted him on a variety of ailments from 1814. Mary's novel Frankenstein might have been inspired by the vitalist controversy between Lawrence and Abernethy, and "Lawrence could have guided the couple's reading in the physical sciences". Both Samuel Coleridge and John Keats were also influenced by the vitalist controversy.
Despite reaching the height of his profession, with the outstanding quality of his surgical work, and his excellent textbooks, Lawrence is mostly remembered today for an extraordinary period in his early career which brought him fame and notoriety, and led him to the brink of ruin.
Controversy and Chancery
At the age of 30, in 1813, Lawrence was elected a Fellow of the Royal Society. In 1815, he was appointed Professor of Anatomy and Surgery by the College of Surgeons. His lectures started in 1816, and the set was published the same year. The book was immediately attacked by Abernethy and others for materialism, and for undermining the moral welfare of the people. One of the issues between Lawrence and his critics concerned the origin of thoughts and consciousness. For Lawrence, as for ourselves, mental processes were a function of the brain. John Abernethy and others thought differently: they explained thoughts as the product of vital acts of an immaterial kind. Abernethy also published his lectures, which contained his support for John Hunter's vitalism, and his objections to Lawrence's materialism.
In subsequent years Lawrence vigorously contradicted his critics until, in 1819, he published a second book, known by its short title of the Natural history of man. The book caused a storm of disapproval from conservative and clerical quarters for its supposed atheism, and within the medical profession because he advocated a materialist rather than vitalist approach to human life. He was linked by his critics with such other 'revolutionaries' as Thomas Paine and Lord Byron. It was "the first great scientific issue that widely seized the public imagination in Britain, a premonition of the debate over Darwin's theory of evolution by natural selection, exactly forty years later".
Hostility from the established Church of England was guaranteed. "A vicious review in the Tory Quarterly Review execrated his materialist explanation of man and mind"; the Lord Chancellor, Lord Eldon, in the Court of Chancery (1822), ruled his lectures blasphemous, on the grounds that the book contradicted Holy Scripture (the Bible). This destroyed the book's copyright. Lawrence was also repudiated by his own teacher, John Abernethy, with whom he had already had a controversy about John Hunter's teachings. There were supporters, such as Richard Carlile and Thomas Forster, and "The Monthly Magazine", in which Lawrence was compared to Galileo. However, faced with persecution, perhaps prosecution, and certainly ruin through the loss of surgical patients, Lawrence withdrew the book and resigned from his teaching position. The time had not yet arrived when a science which dealt with man as a species could be conducted without interference from the religious authorities.
It is interesting that the Court of Chancery was acting, here, in its most ancient role, that of a court of conscience. This entailed the moral law applied to prevent peril to the soul of the wrongdoer through mortal sin. The remedy was given to the plaintiff (the Crown, in this case) to look after the wrongdoer's soul; the benefit to the plaintiff was only incidental. This is also the explanation for specific performance, which compels the sinner to put matters right. The whole conception is mediæval in origin.
It is difficult to find a present-day parallel. The withholding of copyright, though only an indirect financial penalty, was both an official act and a hostile signal. We do not seem to have a word for this kind of indirect pressure, though suppression of dissent comes closer than censorship. Perhaps the modern 'naming and shaming' comes closest. The importance of respectability, reputation and public standing were critical in this case, as so often in traditional societies.
Transition to respectability
After repudiating his book, Lawrence returned to respectability, but not without regrets. He wrote in 1830 to William Hone, who was acquitted of libel in 1817, explaining his expediency and commending Hone's "much greater courage in these matters".
His last major contribution to the debate was an article on "Life" in the 1819 Rees's Cyclopaedia although this volume had in fact appeared in 1812.
He continued to espouse radical ideas and, led by the famous radical campaigner Thomas Wakley, Lawrence was part of the small group which launched The Lancet, and wrote material for it. Lawrence wrote pungent editorials, and chaired the public meetings in 1826 at the Freemasons' Tavern. He was also co-owner of the Aldersgate Private Medical Academy, with Frederick Tyrrell.
The 1826 meetings
Meetings for members of the college were attended by about 1200 people. The meetings were called to protest against the way surgeons abused their privileges to set student fees and control appointments.
In his opening speech Lawrence criticised the by-laws of the College of Surgeons for preventing all but a few teachers in London, Dublin, Edinburgh, Glasgow and Aberdeen from issuing certificates of attendance at preparatory lectures. He pointed out that Aberdeen and Glasgow had no cadavers for dissection, without which anatomy could not be properly taught.
A proposed change in the regulations of the College of Surgeons would soon cut the ground from under the private summer schools, since diplomas taken in the summer were not to be recognised.
"It would appear from the new regulations that sound knowledge was the sort acquired in the winter, when the hospital lecturers delivered their courses, while unsound knowledge was imparted in the summer when only the private schools could provide the instruction". Lawrence in his opening speech, Freemason's Tavern, 1826.
Lawrence concluded by protesting against the exclusion of the great provincial teachers from giving recognised certificates.
Gradual change
However, gradually Lawrence conformed more to the style of the College of Surgeons, and was elected to their Council in 1828. This somewhat wounded Wakley, who complained to Lawrence, and made some remarks in the Lancet. But, true to form, Wakley soon saw Lawrence's rise in the college as providing him with an inside track into the working of the institution he was hoping to reform. For some years Lawrence hunted with the Lancet and ran with the college. From the inside, Lawrence was able to help forward several of the much-needed reforms espoused by Wakley. The College of Surgeons was at last reformed, to some extent at least, by a new charter in 1843.
This episode marks Lawrence's return to respectability; in fact, Lawrence succeeded Abernethy as the 'dictator' of Bart's.
His need for respectability and worldly success might have been influenced by his marriage in 1828, at the age of 45, to the 25-year-old socially ambitious Louisa Senior.
At any rate, from then on Lawrence's career went ever forward. He never looked back: he became President of the Royal College of Surgeons, and Serjeant-Surgeon to Queen Victoria. Before he died she made him a baronet. He had for many years declined such honours, and family tradition was that he finally accepted to help his son's courtship of an aristocratic young woman (which did not succeed). "Never again [did] he venture to express his views on the processes of evolution, on the past or the future of man." He did, however, warn the young T.H. Huxley – in vain, it must be said – not to broach the dangerous topic of the evolution of man.
In 1844 Carl Gustav Carus, the physiologist and painter, made "a visit to Mr Lawrence, author of a work on the "Physiology of Man" which had interested me much some years ago, but which had rendered the author obnoxious to the clergy... He appears to have allowed himself to be frightened by this, and is now merely a practising surgeon, who keeps his Sunday in the old English fashion, and has let physiology and psychology alone for the present. I found him a rather dry, but honest man". Looking back in 1860 on his controversies with Abernethy, Lawrence wrote of "events which though important at the time of occurrence have long ceased to occupy my thoughts".
In 1828, he was elected a foreign member of the Royal Swedish Academy of Sciences and in 1855 a Foreign Honorary Member of the American Academy of Arts and Sciences.
Darwin
The careful anonymity in which the Vestiges of the Natural History of Creation was published in 1844, and the very great caution shown by Darwin in publishing his own evolutionary ideas, can be seen in the context of the need to avoid a direct conflict with the religious establishment. In 1838 Darwin referred in his "C" transmutation notebook to a copy of Lawrence's "Lectures on physiology, zoology, and the natural history of man", and historians have speculated that he brooded about the implied consequences of publishing his own ideas.
In Lawrence's day the impact of laws on sedition and blasphemy were even more threatening than they were in Darwin's time. Darwin referred to Lawrence (1819) six times in his Descent of man (1871).
Lawrence's Natural history of man contained some remarkable anticipations of later thought, but was ruthlessly suppressed. To this day, many historical accounts of evolutionary ideas do not mention Lawrence's contribution. He is omitted, for example, from many of the Darwin biographies, from some evolution textbooks, essay collections, and even from accounts of pre-Darwinian science and religion.
Although the only idea of interest which Darwin found in Lawrence was that of sexual selection in man, the influence on Alfred Russel Wallace, was more positive. Wallace "found in Lawrence a possible mechanism of organic change, that of spontaneous variation leading to the formation of new species".
Context
Lawrence was one of three British medical men who wrote on evolution-related topics from 1813 to 1819. They would all have been familiar with Erasmus Darwin and Lamarck at least; and probably also Malthus. Two (Prichard and Lawrence) dedicated their works to Blumenbach, the founder of physical anthropology. "The men who took up the challenge of Lamarck were three English physicians, Wells, Lawrence and Prichard... All three men denied soft heredity (Lamarckism)" This account is not too accurate in biographical terms, as Lawrence was actually a surgeon, Wells was born in Carolina to a Scottish family, and Prichard was a Scot. However, it is correct in principle on the main issue. Each grasped aspects of Darwin's theory, yet none saw the whole picture, and none developed the ideas any further. The later publication of Robert Chambers' Vestiges and Matthew's Naval timber was more explicit; the existence of the whole group suggests there was something real (though intangible) about the intellectual atmosphere in Britain which is captured by the phrase 'evolution was in the air'.
The years 1815–1835 saw much political and social turmoil in Britain, not least in the medical profession. There were radical medical students and campaigners in both Edinburgh and London, the two main training centres for the profession at the time. Many of these were materialists who held views favouring evolution, but of a Lamarckian or Geoffroyan kind. It is the allegiance to hard inheritance or to natural selection which distinguishes Lawrence, Prichard and Wells, because those ideas have survived, and are part of the present-day account of evolution.
Lawrence on heredity
The existence of races is a token of change in the human species, and suggests there is some significance in geographical separation. Lawrence noted that racial characteristics were inherited, not caused by the direct effect of, for instance, climate. As an example, he considered the way skin colour was inherited by children of African origin when born in temperate climates: how their colour developed without exposure to the sun, and how this continued through generations. This was evidence against the direct effect of climate.
Lawrence's ideas on heredity were many years ahead of their time, as this extract shows: "The offspring inherit only [their parents'] connate peculiarities and not any of the acquired qualities". This is as clear a rejection of soft inheritance as one can find. However, Lawrence qualified it by including the origin of birth defects owing to influences on the mother (an old folk superstition). So Mayr places Wilhelm His, Sr. in 1874 as the first unqualified rejection of soft inheritance. However, the number of places in the text where Lawrence explicitly rejects the direct action of the environment on heredity justifies his recognition as an early opponent of Geoffroyism.
Darlington's interpretation
Here, as seen by Cyril Darlington, are some of the ideas presented by Lawrence in his book, much abbreviated and rephrased in more modern terms:
Mental as well as physical differences in man are inherited.
Races of man have arisen by mutations such as may be seen in litters of kittens.
Sexual selection has improved the beauty of advanced races and governing classes.
The separation of races preserves their characters.
'Selections and exclusions' are the means of change and adaptation.
Men can be improved by selection in breeding just as domesticated cattle can be. Conversely, they can be ruined by inbreeding, a consequence which can be observed in many royal families.
Zoological study, the treatment of man as an animal, is the only proper foundation for teaching and research in medicine, morals, or even in politics.
Darlington's account goes further than other commentators. He seems to credit Lawrence with a modern appreciation of selection (which he definitely did not have); subsequently, Darlington's account was criticised as an over-statement. Darlington does not claim Lawrence actually enunciated a theory of evolution, though passages in Lawrence's book do suggest that races were historically developed. On heredity and adaptation, and the rejection of Lamarckism (soft inheritance), Lawrence is quite advanced.
Content of the second book
The introductory sections
Lecture I: introductory to the lectures of 1817.Reply to the charges of Mr Abernethy; Modern history and progress of comparative anatomy.
This follows the first publication of Lawrence's ideas in 1816, and Abernethy's criticism of them in his lectures for 1817.
"Gentlemen! I cannot presume to address you again... without first publicly clearing myself from a charge publicly made... of propagating opinions detrimental to society... for the purpose of loosening those restraints, on which the welfare of mankind depends."*[footnote] Physiological lectures, exhibiting a general view of Mr Hunter's Physiology &c &c. by John Abernethy FRS. [references] "too numerous to be particularized." This book of lectures at the same College of Surgeons contained the charge of which Lawrence complained. In this very long footnote Lawrence says that the elementary anatomy in Abernethy's text is used "like water in a medical prescription... an innocent vehicle for the more active ingredients."
The early part of the 1819 book is marked by Lawrence's reaction to Abernethy's attack on the 'materialism' of the first book. After a long preamble, in which Lawrence extols the virtues of freedom of speech, he eventually gets to the point:
"It is alleged that there is a party of modern sceptics, co-operating in the diffusion of these noxious opinions with a no less terrible band of French physiologists, for the purpose of demoralising mankind! Such is the general tenor of the accusation..." p3
"Where, Gentlemen! shall we find proofs of this heavy charge? p4 I see the animal functions inseparable from the animal organs... examine the mind... Do we not see it actually built up before our eyes by the actions of the five external senses, and of the gradually developed internal faculties? p5 (see also p74-81 on the functions of the brain)I say, physiologically speaking... because the theological doctrine of the soul, and its separate existence, has nothing to do with this physiological question, but rests on a species of proof altogether different." p6
Lawrence is here arguing that medical questions should be answered by medical evidence, in other words, he is arguing for rational thought and empiricism instead of revelation or received religion. In particular, he insisted that mental activity was produced as a function of the brain, and has nothing to do with metaphysical concepts such as the 'soul'. Also, there is an implication, never quite stated, that Abernethy's motive might be venal; that jealousy (for example) might be revealed by "a consideration of the real motives" (phrase from his long initial footnote). It is absolutely clear that the conflict predates the publication of Lawrence's book.
Evidence from geology and palaeontology
The discussion drawn from stratigraphy is interesting:
"The inferior layers, or the first in order of time, contain the remains most widely different from the animals of the living creation; and as we advance to the surface there is a gradual approximation to our present species." p39
Refers to Cuvier, Brongniart and Lamarck in France, and Parkinson in Britain in connection with fossils:
"... the extinct races of animals... those authentic memorials of beings... whose living existence... has been supposed, with considerable probability, to be of older date than the formation of the human race." p39
Summary of ideas on human races
Chapter VII raises the issue of whether different races have similar diseases (p162 et seq) and ends with a list of reasons for placing man in one distinct species. The reasons are mostly anatomical with some behavioural, such as speech. They remain valid today.
Next there is a lengthy discussion of variation in man, and of the differences between races. Then he considers causation. Lectures of 1818, Chapter IX: On the causes of the varieties of the human species:
"Having examined the principal points in which the several tribes of the human species differ from each other... I proceed to inquire whether the diversities enumerated ... are to be considered as characteristic distinctions coeval with the origin of the species, or as a result of subsequent variation; and in the event of the latter... whether they are the effect of external... causes, or of native or congenital variety." p343 "Great influence has at all times been ascribed to climate... [but] we have abundance of proof that [differences of climate] are entirely inadequate to account for the differences between the different races of men. p343–4
He shows clearly in several places that differences between races (and between varieties of domesticated animals) are inherited, and not caused by the direct action of the environment; then follows this admission:
"We do not understand the exact nature of the process by which it [meaning the correspondence between climate and racial characteristics] is effected." p345
So, after insisting on empirical (non-religious) evidence, he has clearly rejected Lamarckism but has not thought of natural selection.
Ideas on mechanism
Although in places Lawrence disclaims all knowledge of how the differences between races arose, elsewhere there are passages which hint at a mechanism. In Chapter IX, for example, we find:
"These signal diversities which constitute differences of race in animals... can only be explained by two principles... namely, the occasional production of an offspring with different characters from those of the parents, as a native or congenital variety; [ie heritable] and the propagation of such varieties by generation." p348 [continues with examples of heritable variety in offspring in one litter of kittens, or sheep. This is Mendelian inheritance and segregation]
Passages like this are interpreted by Darlington in his first two points above; there is more on variety and its origin in Chapter IV, p67-8. It is clear that Lawrence's understanding of heredity was well ahead of his time, (ahead of Darwin, in fact) and that he only lacks the idea of selection to have a fully-fledged theory of evolution.
Introduction of the word biology
At least five people have been claimed as the first to use the word biology:
Michael Christoph Hanov (Philosophiae naturalis sive physicae dogmaticae: Geologia, biologia, phytologia generalis et dendrologia, 1767)
Karl Friedrich Burdach (in 1800)
Gottfried Reinhold Treviranus (Biologie oder Philosophie der lebenden Natur, 1802). Treviranus used it to apply to the study of human life and character.
Jean-Baptiste Lamarck (Hydrogéologie, 1802, p. 8)
Lawrence, in 1819. According to the OED, Lawrence was the first person to use the word in English.
Contradiction of the Bible
Direct contradiction of the Bible was something Lawrence might have avoided, but his honesty and forthright approach led him onto this dangerous ground:
"The representations of all the animals being brought before Adam in the first instance and subsequently of their being collected in the ark... are zoologically impossible." p169
"The entire or even partial inspiration of the... Old Testament has been, and is, doubted by many persons, including learned divines and distinguished oriental and biblical scholars. The account of the creation and of subsequent events, has the allegorical character common to eastern compositions..." p168-9 incl. footnotes.
"The astronomer does not portray the heavenly motions, or lay down the laws which govern them, according to the Jewish scriptures [Old Testament] nor does the geologist think it necessary to modify the results of experience according to the contents of the Mosaic writings. I conclude then, that the subject is open for discussion." p172
Passages such as these, fully in the tradition of British empiricism and the Age of Enlightenment, were no doubt pointed out to the Lord Chancellor. In his opinion, the subject was not open for discussion.
Ealing Park
In June 1838, Lawrence purchased the Ealing Park mansion along with the surrounding 100 acres known as "Little Ealing" (then in Middlesex) at a purchase price of £9,000 (). Ealing Park is described by Pevsner as "Low and long; nine bays with pediment over the centre and an Ionic one-storeyed colonnade all along." The property was grandly furnished, as may be seen from the catalogue of the sale of the contents after Louisa Lawrence's death. The estate boasted livestock, including poultry of all sorts, cows, sheep and pigs. There were thousands of bedding plants, "stove" plants, more than 600 plants in early forcing houses, nearly a hundred camellias, and more.
However, they mainly lived on Whitehall Place in City of Westminster. His son later sold Ealing Park.
Personal life and family
On 4 August 1823, Lawrence married Louisa Senior (1803–1855), the daughter of a Mayfair haberdasher, who built up social fame through horticulture. They had two sons and three daughters. Their elder son died in childhood but their second son, Sir Trevor Lawrence, 2nd Baronet, was himself a prominent horticulturist and was for many years President of the Royal Horticultural Society. One daughter died at age 18 months and the other two died unmarried.
William James (10 October 1829 – buried 5 November 1839)
John James Trevor (30 December 1831 – 22 December 1913)
Mary Louisa (28 August 1833 – buried 7 March 1835)
Louisa Elizabeth (22 February 1836 – 4 January 1920)
Mary Wilhelmina (1 November 1839 – 24 November 1920)
Louisa Lawrence died 14 August 1855. Lawrence suffered an attack of apoplexy whilst descending the stairs at the College of Surgeons and died on 5 July 1867 at his house, 18 Whitehall Place, London.
References
Bibliography
Lawrence, William FRS 1816. An introduction to the comparative anatomy and physiology, being the two introductory lectures delivered at the Royal College of Surgeons on the 21st and 25th of March 1816. J. Callow, London. 179pp. [Chapter 2 'On life' was the start of his troubles, and caused the first attacks of the grounds of materialism &c]
Lawrence, William FRS 1819. Lectures on physiology, zoology and the natural history of man. J. Callow, London. 579pp. Reprinted 1822. There were a number of unauthorized reprints of this work, pirated (in the sense that the author went unrecompensed) but seemingly unexpurgated. These editions also lacked the protection of copyright, and date from 1819 to 1848. Some of them were by quite respectable publishers. Desmond's view is that the Chancery decision was "a ringing endorsement to atheist ears. Six pauper presses pirated the offending book, keeping it in print for decades. As a result, although officially withdrawn, Lawrence's magnum opus could be found on every dissident's bookshelf." Desmond & Moore 1991. Darwin p253. The text of all editions is probably identical, though no-one has published a full bibliographical study.
1822 W. Benbow. 500pp. Darwin's copy was of this edition.
1822 Kaygill & Price (no plates). 2 vols, 288+212pp.
1823 J&C Smith (new plates). 532pp.
1838 J. Taylor. ('twelve new engravings'; seventh edition – stereotyped). 396pp.
1844 J. Taylor (old plates; 'ninth edition – stereotyped). 396pp.
1848 H.G. Bohn (ninth edition, as above).
The British Library also holds a number of pamphlets, mostly attacking Lawrence's ideas.
Lawrence, William FRS 1807. Treatise on hernia. Callow, London. Later editions from 1816 entitled Treatise on ruptures: an anatomical description of each species with an account of its symptoms, progress, and treatment. 5th and last ed 1858. "The standard text for many years" Morton, A medical bibliography #3587.
[Lawrence, William] 1819. 'Life', an anonymous article in Abraham Rees' Cyclopaedia, vol 22. Longman, London.
Lawrence, W. 1833. A treatise on the diseases of the eye. Churchill, London. This work is based on lectures delivered at the London Ophthalmic Infirmary; later edition 1845. "He did much to advance the surgery of the eye. This comprehensive work marks an epoch in ophthalmic surgery." Morton, A medical bibliography #5849.
Lawrence, William 1834. The Hunterian Oration, delivered at the Royal College of Surgeons on the 14th of February 1834. Churchill, London.
Lawrence, William 1863. Lectures on surgery. London.
External links
Biography in Plarr's Lives of the Fellows Online
1783 births
1867 deaths
People from Cirencester
Fellows of the American Academy of Arts and Sciences
Fellows of the Royal Society
English zoologists
English surgeons
Proto-evolutionary biologists
Fellows of the Royal College of Surgeons of England
Members of the Royal Swedish Academy of Sciences
501
William
19th-century English writers
|
The 2010 Women's Four Nations Cup was the second Hockey Four Nations Cup, an international women's field hockey tournament, consisting of a series of test matches. It was held in Germany, from June 25 to 27, 2010, and featured four of the top nations in women's field hockey.
Competition format
The tournament featured the national teams of Argentina, India, Ireland, and the hosts, Germany, competing in a round-robin format, with each team playing each other once. Three points were awarded for a win, one for a draw, and none for a loss.
Officials
The following umpires were appointed by the International Hockey Federation to officiate the tournament:
Stella Bartlema (NED)
Lynn Cowie-McAlister (AUS)
Michelle Meister (GER)
Carol Metchette (IRE)
Anupama Puchimanda (IND)
Results
All times are local (Central European Time).
Fixtures
Statistics
Goalscorers
References
External links
Tournament Website
2010
2010 in women's field hockey
field hockey
field hockey
|
Svarte is a locality situated in Ystad Municipality, Skåne County, Sweden with 902 inhabitants in 2010.
References
Populated places in Ystad Municipality
Populated places in Skåne County
|
```javascript
module.exports = {
getMeta: function(meta) {
function getAttr(attr) {
var root = meta.dc || meta.dcterms, key;
if (root && root[attr]) {
for(key in root) {
if (key == attr) {
return root[key];
}
}
}
for(key in meta) {
var bits = key.split('.');
if (bits.length > 1) {
var b0 = bits[0];
var b1 = bits.slice(1).join('.');
if ((b0 == "dcterms" || b0 == "dc") && b1 == attr) {
return meta[key];
}
}
}
}
return {
title: getAttr("title"),
description: getAttr("description"),
author: getAttr("creator"),
date: getAttr("date") || getAttr("date.issued") || getAttr("created") || getAttr("modified")
};
}
};
```
|
```vue
<template>
<div class="card">
<div class="card-header text-bg-primary">
<div class="d-flex align-items-center">
<div class="flex-fill">
<h5 class="card-title">
{{ $gettext('Microphone') }}
</h5>
</div>
<div class="flex-shrink-0 ps-3">
<volume-slider v-model.number="trackGain" />
</div>
</div>
</div>
<div class="card-body">
<div class="d-flex align-items-center">
<div class="d-flex-shrink-0">
<div class="control-group">
<div class="btn-group btn-group-sm">
<button
type="button"
class="btn btn-danger"
:class="{ active: isPlaying }"
@click="togglePlaying"
>
<icon :icon="IconMic" />
</button>
<button
type="button"
class="btn"
:class="{ 'btn-primary': trackPassThrough }"
@click="trackPassThrough = !trackPassThrough"
>
{{ $gettext('Cue') }}
</button>
</div>
</div>
</div>
<div class="flex-fill ps-3">
<div class="form-group microphone-entry mb-0">
<label
for="select_microphone_source"
class="mb-2"
>
{{ $gettext('Microphone Source') }}
</label>
<div class="controls">
<select
id="select_microphone_source"
v-model="device"
class="form-control"
>
<option
v-for="device_row in audioInputs"
:key="device_row.deviceId"
:value="device_row.deviceId"
>
{{ device_row.label }}
</option>
</select>
</div>
</div>
</div>
</div>
<div
v-if="isPlaying"
class="mt-3"
>
<div class="progress mb-2">
<div
class="progress-bar"
:style="{ width: volume+'%' }"
/>
</div>
</div>
</div>
</div>
</template>
<script setup lang="ts">
import Icon from '~/components/Common/Icon.vue';
import VolumeSlider from "~/components/Public/WebDJ/VolumeSlider.vue";
import {useDevicesList} from "@vueuse/core";
import {ref, watch} from "vue";
import {useWebDjTrack} from "~/components/Public/WebDJ/useWebDjTrack";
import {usePassthroughSync} from "~/components/Public/WebDJ/usePassthroughSync";
import {useWebDjSource} from "~/components/Public/WebDJ/useWebDjSource";
import {IconMic} from "~/components/Common/icons";
// Per-track mixer state and controls (gain, cue flag, level meter, lifecycle)
// shared with the rest of the Web DJ UI via the useWebDjTrack composable.
const {
    source,
    isPlaying,
    trackGain,
    trackPassThrough,
    volume,
    prepare,
    stop
} = useWebDjTrack();
const {
    createMicrophoneSource
} = useWebDjSource();
// Keep the cue/pass-through flag synchronized for the "microphone" channel.
usePassthroughSync(trackPassThrough, 'microphone');
// Enumerate available audio inputs; requestPermissions triggers the browser's
// microphone permission prompt so device labels become visible.
const {audioInputs} = useDevicesList({
    requestPermissions: true,
    constraints: {audio: true, video: false}
});
// Selected input device id; auto-filled with the first discovered device.
const device = ref(null);
watch(audioInputs, (inputs) => {
    if (device.value === null) {
        device.value = inputs[0]?.deviceId;
    }
});
// Destination audio node the mic source feeds; created lazily by play().
let destination = null;
// (Re)create the microphone source for the currently selected device,
// disconnecting any previously active source first.
const createSource = () => {
    if (source.value != null) {
        source.value.disconnect(destination);
    }
    createMicrophoneSource(device.value, (newSource) => {
        source.value = newSource;
        newSource.connect(destination);
    });
};
// Rebuild the audio graph when the user switches microphones mid-session;
// do nothing until playback has actually been started.
watch(device, () => {
    if (source.value === null || destination === null) {
        return;
    }
    createSource();
});
const play = () => {
    destination = prepare();
    createSource();
}
// Bound to the mic button in the template: toggles the channel on/off.
const togglePlaying = () => {
    if (isPlaying.value) {
        stop();
    } else {
        play();
    }
}
</script>
```
|
The 1811–1812 New Madrid earthquakes were a series of intense intraplate earthquakes beginning with an initial earthquake of moment magnitude 7.2–8.2 on December 16, 1811, followed by a moment magnitude 7.4 aftershock on the same day. Two additional earthquakes of similar magnitude followed in January and February 1812. They remain the most powerful earthquakes to hit the contiguous United States east of the Rocky Mountains in recorded history. The earthquakes, as well as the seismic zone of their occurrence, were named for the Mississippi River town of New Madrid, then part of the Louisiana Territory and now within the U.S. state of Missouri.
The epicenters of the earthquakes were located in an area that at the time was at the distant western edge of the American frontier, only sparsely settled by European settlers. Contemporary accounts have led seismologists to estimate that these stable continental region earthquakes were felt strongly throughout much of the central and eastern United States, across an area of roughly 130,000 km2 (50,000 sq mi), and moderately across nearly 3 million km2 (1 million sq mi). The 1906 San Francisco earthquake, by comparison, was felt moderately over roughly 16,000 km2 (6,200 sq mi). The New Madrid earthquakes were interpreted variously by American Indian tribes, but one consensus was universally accepted: the powerful earthquake had to have meant something. For many tribes in Tecumseh's pan-Indian alliance, it meant that Tecumseh and his brother the Prophet must be supported.
The three earthquakes and their major aftershocks
December 16, 1811, 8:15 UTC (2:15 am local time): M 7.2–8.2, epicenter in what is now northeast Arkansas. It caused only slight damage to man-made structures, mainly because of the sparse population in the epicentral area. The future location of Memphis, Tennessee, experienced level IX shaking on the Mercalli intensity scale. A seismic seiche propagated upriver, and Little Prairie (a village that was on the site of the former Fort San Fernando, near the site of present-day Caruthersville, Missouri) was heavily damaged by soil liquefaction. Modified Mercalli intensity VII or greater was observed over a wide area.
December 16, 1811 (aftershock), 14:15 UTC (8:15 am local time): M 7.4, epicenter in northeast Arkansas. This shock followed the first earthquake by six hours and was similar in intensity.
January 23, 1812, 15:00 UTC (9:00 am local time): M 7.0–8.0, epicenter in the Missouri Bootheel. The meizoseismal area was characterized by general ground warping, ejections, fissuring, severe landslides, and caving of stream banks. Johnston and Schweig attributed this earthquake to a rupture on the New Madrid North Fault. This may have placed strain on the Reelfoot Fault.
February 7, 1812, 9:45 UTC (3:45 am local time): M 7.4–8.6, epicenter near New Madrid, Missouri. The town of New Madrid was destroyed. In St. Louis, Missouri, many houses were severely damaged and their chimneys were toppled. This shock was definitively attributed to the Reelfoot Fault by Johnston and Schweig. Uplift along a segment of this reverse fault created temporary waterfalls on the Mississippi at Kentucky Bend, created waves that propagated upstream, and caused the formation of Reelfoot Lake by obstructing streams in what is now Lake County, Tennessee. The maximum Modified Mercalli intensity observed was XII.
The many more aftershocks include one magnitude 7 aftershock to the December 16, 1811, earthquake which occurred at 6:00 UTC (12:00 am local time) on December 17, 1811, and one magnitude 7 aftershock to the February 7, 1812, earthquake which occurred on the same day at 4:40 UTC (10:40 pm local time). Susan Hough, a seismologist of the United States Geological Survey (USGS), has estimated the earthquakes' magnitudes as around magnitude 7.
Eyewitness accounts
John Bradbury, a fellow of the Linnean Society, was on the Mississippi on the night of December 15, 1811, and describes the tremors in great detail in his Travels in the Interior of America in the Years 1809, 1810 and 1811, published in 1817:
Eliza Bryan in New Madrid, Territory of Missouri, wrote the following eyewitness account in March 1812:
John Reynolds (1788–1865), the fourth governor of Illinois, among other political posts, mentions the earthquake in his biography My Own Times: Embracing Also the History of My Life (1855):
The Shaker diarist Samuel Swan McClelland described the effects of the earthquake on the Shaker settlement at West Union (Busro), Indiana, where the earthquakes contributed to the temporary abandonment of the westernmost Shaker community.
Geologic setting
The underlying cause of the earthquakes is not well understood, but modern faulting seems to be related to an ancient geologic feature buried under the Mississippi River alluvial plain, known as the Reelfoot Rift. The New Madrid Seismic Zone (NMSZ) is made up of reactivated faults that formed when what is now North America began to split or rift apart during the breakup of the supercontinent Rodinia in the Neoproterozoic era (about 750 million years ago). Faults were created along the rift and igneous rocks formed from magma that was being pushed towards the surface. The resulting rift system failed, but has remained as an aulacogen (a scar or zone of weakness) deep underground.
In recent decades, minor earthquakes have continued. The epicenters of over 4,000 earthquakes can be identified from seismic measurements taken since 1974. They originate from the seismic activity of the Reelfoot Rift. The zone, colored red on the map, is called the New Madrid Seismic Zone. New forecasts estimate a 7 to 10 percent chance, in the next 50 years, of a repeat of a major earthquake like those that occurred in 1811–1812, which likely had magnitudes between 7.6 and 8.0. A 25 to 40% chance exists, in a 50-year time span, of a magnitude 6.0 or greater earthquake.
In a report filed in November 2008, the U.S. Federal Emergency Management Agency warned that a serious earthquake in the New Madrid Seismic Zone could result in "the highest economic losses due to a natural disaster in the United States", further predicting "widespread and catastrophic" damage across Alabama, Arkansas, Illinois, Indiana, Kentucky, Mississippi, Missouri, and particularly Tennessee, where a 7.7 magnitude quake or greater would cause damage to tens of thousands of structures affecting water distribution, transportation systems, and other vital infrastructure.
Aftermath
The quakes caused extensive changes to the region's topography. Subsidence, uplift, fissures, landslides and riverbank collapses were common. Trees were uprooted by the intense shaking; others were drowned when subsided land was flooded. Reelfoot Lake was formed in Tennessee by subsidence ranging from 1.5 meters, up to 6 meters in some places. Lake St. Francis, in eastern Arkansas, was expanded by subsidence, with sand and coal being ejected from fissures in the adjacent swamps as water levels rose by 8 to 9 meters. Waves from the Mississippi River caused boats to wash ashore; river banks rose, sand bars were destroyed, and some islands completely disappeared. Sand blows also occurred in Missouri, Tennessee, and Arkansas, destroying farmland.
Due to the nature of the underlying rock mass, which contains few fractures or faults, the seismic waves generated from the earthquakes were able to travel great distances without being interrupted. Persons as far away as Canada felt the ground shaking. Intense effects were widely felt in Illinois, Arkansas, Tennessee, Kentucky and Missouri.
The number of people who died as a result of the earthquake is unknown; the frontier setting meant that the region was sparsely populated and communications and records were poor. Predominantly wood construction meant that few people died from falling buildings, though the intense shaking caused many chimneys to fall, wood structures to crack, and damage from falling trees, particularly in the epicentral area during the first earthquake on December 16, 1811.
Rated at VII on the Mercalli Intensity Scale, the New Madrid earthquakes remain the strongest recorded earthquakes east of the Rocky Mountains. The Reelfoot Rift, a 500 million year old rift zone identified as the primary driver of the quakes, remains poorly understood; however, geologists estimate the risk of another earthquake as great as the New Madrid Earthquake within the next 50 years is 7 to 10 percent, while the risk of a smaller-magnitude earthquake occurring in this location in the next 50 years is about 25 to 40 percent. As a result of these findings, highways, buildings, skyscrapers, and bridges were all reevaluated.
The earthquakes strengthened the Shawnee prophet, Tenskwatawa after the defeat at the Battle of Tippecanoe and the destruction of Prophetstown, with local Native Americans seeing it as a vindication of his teachings.
Gallery
See also
List of earthquakes in the United States
References
Further reading
Jay Feldman. When the Mississippi Ran Backwards : Empire, Intrigue, Murder, and the New Madrid Earthquakes Free Press, 2005.
Conevery Bolton Valencius, The Lost History of the New Madrid Earthquakes The University of Chicago Press, 2013.
Conevery Bolton Valencius, "Accounts of the New Madrid earthquakes: personal narratives and seismology over the last two centuries," in Deborah R. Coen, ed., "Witness to Disaster: Earthquakes and Expertise in Comparative Perspective," *Science in Context*, 25, no. 1 (February 2012): 17–48.
External links
The Enigma of the New Madrid Earthquakes of 1811–1812. Johnson, A.C. and Schweig, E.S. (1996) Annual Review of Earth and Planetary Sciences, Volume 24, pp. 339–384. SAO/NASA Astrophysics Data System (ADS)
The Introduction to The Lost History of the New Madrid Earthquakes by Conevery Bolton Valencius.
The New Madrid Fault Zone (NMFZ) (links to maps, history, predictions, etc. from the Arkansas Center for Earthquake Education)
Steamboat Adventure: The New Madrid Earthquakes (dozens of contemporary accounts of the earthquake, provided by Hanover College)
USGS, Summary of 1811-1812 New Madrid Earthquake Sequence, at https://www.usgs.gov/natural-hazards/earthquake-hazards/science/summary-1811-1812-new-madrid-earthquakes-sequence?qt-science_center_objects=0#qt-science_center_objects
The "Hard Shock:" The New Madrid Earthquakes. The History Guy
1811 earthquakes
1812 earthquakes
Earthquakes in the United States
Natural disasters in Arkansas
Natural disasters in Illinois
Natural disasters in Kentucky
Natural disasters in Missouri
Natural disasters in Mississippi
Natural disasters in Tennessee
December 1811 events
January 1812 events
February 1812 events
1812 in Missouri Territory
|
The Voice of Bugle Ann is a 1936 American drama film directed by Richard Thorpe and starring Lionel Barrymore and Maureen O'Sullivan. It was based on a novel of the same name by MacKinlay Kantor.
Plot synopsis
The countrymen in the hills of Missouri take the hounds on night fox hunts. This goes on until Jacob Terry comes into the county and decides to raise sheep and install a woven wire fence. This upsets the neighbors, as they are concerned about the dogs entering his fences and terrorizing the sheep. Jacob vows to shoot any dogs or people that he finds on his land. Benjy Davis is in love with Camden Terry and that alone causes problems. But when the hound, Bugle Ann is missing one night, both sides are out with guns to settle the score.
Cast
Lionel Barrymore as Spring Davis
Maureen O'Sullivan as Camden Terry
Eric Linden as Benjy Davis
Dudley Digges as Jacob Terry
Spring Byington as Ma Davis
Charley Grapewin as Cal Royster
Henry Wadsworth as Bake Royster
William Newell as Mr. Tanner
James Macklin as Del Royster
Jonathan Hale as District Attorney
Frederick Burton as The warden
See also
Lionel Barrymore filmography
Production dates
25 November—30 December 1935
References
External links
1936 films
American drama films
Films based on American novels
Films directed by Richard Thorpe
Metro-Goldwyn-Mayer films
1936 drama films
American black-and-white films
Films based on works by MacKinlay Kantor
1930s American films
|
Bozcayurt, formerly known as Mandama (Greek: Μαντάμα), is a village in the Güzelyurt District, Aksaray Province, Turkey. Its population is 355 (2021).
References
Villages in Güzelyurt District, Aksaray
|
2007 World Cup may refer to:
Alpine skiing: 2007 Alpine Skiing World Cup
American football: 2007 IFAF World Cup in Japan
Baseball: 2007 Baseball World Cup in Taiwan
Biathlon: 2007 Biathlon World Cup
Bobsleigh: 2007 Bobsleigh World Cup
Cricket: 2007 Cricket World Cup hosted by the West Indies
Cricket: ICC World Twenty20 hosted by South Africa
Cross-country skiing: 2006-2007 Cross-Country Skiing World Cup
Cycling (track): 2007 UCI Track Cycling World Cup Classics
Cyclo-cross: 2006/07 UCI Cyclo-cross World Cup
Football (soccer): 2007 FIFA Club World Cup in Japan
Football (soccer): 2007 FIFA U-17 World Cup in South Korea
Football (soccer): 2007 FIFA U-20 World Cup in Canada
Football (soccer): 2007 FIFA Women's World Cup in China
Freestyle skiing: 2007 Freestyle Skiing World Cup
Golf: 2007 Omega Mission Hills World Cup
Luge: 2007 Luge World Cup
Nordic combined: 2007 Nordic Combined World Cup
Rugby Union: 2007 Rugby World Cup in France
Short track: 2007 Short Track Speed Skating World Cup
Skeleton: 2007 Skeleton World Cup
Ski jumping: 2007 Ski Jumping World Cup
Snowboarding: 2007 Snowboarding World Cup
Speed skating: 2007 Speed Skating World Cup
Speedway: 2007 Speedway World Cup
Volleyball: 2007 FIVB Women's World Cup
See also
2007 World Championships (disambiguation)
2007 World Junior Championships (disambiguation)
2007 Continental Championships (disambiguation)
|
```c
/* Test of <signal.h> substitute.
   This is a compile-time conformance test from gnulib: the file only
   needs to compile and link successfully; it does no meaningful work
   at run time.
   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License, either version 3
   of the License, or (at your option) any later version.
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   NOTE(review): the original license header was garbled in this copy;
   see the gnulib sources for the authoritative text.  */
/* Written by Eric Blake <ebb9@byu.net>, 2009.  */
#include <config.h>
#include <signal.h>
/* Check for required types: declaring one member of each type verifies
   that <signal.h> (or its gnulib substitute) defines them all.  */
struct
{
  size_t a;
  uid_t b;
  volatile sig_atomic_t c;
  sigset_t d;
  pid_t e;
#if 0
  /* Not guaranteed by gnulib. */
  pthread_t f;
  struct timespec g;
#endif
} s;
/* Check that NSIG is defined. */
int nsig = NSIG;
int
main (void)
{
  /* Using each signal name as a 'case' label verifies two things at
     compile time: each expands to an integer constant expression, and
     no two signals share a value (duplicate case labels would be a
     compile error).  */
  switch (0)
    {
      /* The following are guaranteed by C. */
    case 0:
    case SIGABRT:
    case SIGFPE:
    case SIGILL:
    case SIGINT:
    case SIGSEGV:
    case SIGTERM:
      /* The following is guaranteed by gnulib. */
#if GNULIB_SIGPIPE || defined SIGPIPE
    case SIGPIPE:
#endif
      /* Ensure no conflict with other standardized names. */
#ifdef SIGALRM
    case SIGALRM:
#endif
      /* On Haiku, SIGBUS is mistakenly equal to SIGSEGV. */
#if defined SIGBUS && SIGBUS != SIGSEGV
    case SIGBUS:
#endif
#ifdef SIGCHLD
    case SIGCHLD:
#endif
#ifdef SIGCONT
    case SIGCONT:
#endif
#ifdef SIGHUP
    case SIGHUP:
#endif
#ifdef SIGKILL
    case SIGKILL:
#endif
#ifdef SIGQUIT
    case SIGQUIT:
#endif
#ifdef SIGSTOP
    case SIGSTOP:
#endif
#ifdef SIGTSTP
    case SIGTSTP:
#endif
#ifdef SIGTTIN
    case SIGTTIN:
#endif
#ifdef SIGTTOU
    case SIGTTOU:
#endif
#ifdef SIGUSR1
    case SIGUSR1:
#endif
#ifdef SIGUSR2
    case SIGUSR2:
#endif
#ifdef SIGSYS
    case SIGSYS:
#endif
#ifdef SIGTRAP
    case SIGTRAP:
#endif
#ifdef SIGURG
    case SIGURG:
#endif
#ifdef SIGVTALRM
    case SIGVTALRM:
#endif
#ifdef SIGXCPU
    case SIGXCPU:
#endif
#ifdef SIGXFSZ
    case SIGXFSZ:
#endif
      /* SIGRTMIN and SIGRTMAX need not be compile-time constants. */
#if 0
# ifdef SIGRTMIN
    case SIGRTMIN:
# endif
# ifdef SIGRTMAX
    case SIGRTMAX:
# endif
#endif
      ;
    }
  /* Reference the arithmetic struct members so the declarations above are
     actually used (s.d is a sigset_t and cannot be added).  */
  return s.a + s.b + s.c + s.e;
}
```
|
```go
/*
path_to_url
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package awstasks
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"k8s.io/klog/v2"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/cloudup/terraformWriter"
)
// +kops:fitask
// InternetGateway models an AWS Internet Gateway and its attachment to a VPC.
type InternetGateway struct {
	// Name is the task name; for managed gateways it is also applied as the Name tag.
	Name      *string
	// Lifecycle controls how the task is applied (create/update/ignore).
	Lifecycle fi.Lifecycle
	// ID is the AWS internet gateway id (igw-...), populated once discovered or created.
	ID  *string
	// VPC is the VPC this gateway is (or should be) attached to.
	VPC *VPC
	// Shared is set if this is a shared InternetGateway
	Shared *bool
	// Tags is a map of aws tags that are added to the InternetGateway
	Tags map[string]string
}
// Compile-time assertion that InternetGateway implements fi.CompareWithID.
var _ fi.CompareWithID = &InternetGateway{}

// CompareWithID returns the cloud resource ID used to match this task
// against existing AWS resources.
func (e *InternetGateway) CompareWithID() *string {
	return e.ID
}
// findInternetGateway returns the single InternetGateway matching the request.
// It returns (nil, nil) when no gateway matches, and an error when the API
// call fails or the request matches more than one gateway.
func findInternetGateway(ctx context.Context, cloud awsup.AWSCloud, request *ec2.DescribeInternetGatewaysInput) (*ec2types.InternetGateway, error) {
	response, err := cloud.EC2().DescribeInternetGateways(ctx, request)
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying AWS SDK error
		// with errors.Is / errors.As.
		return nil, fmt.Errorf("error listing InternetGateways: %w", err)
	}
	if response == nil || len(response.InternetGateways) == 0 {
		return nil, nil
	}
	if len(response.InternetGateways) != 1 {
		return nil, fmt.Errorf("found multiple InternetGateways matching tags")
	}
	igw := response.InternetGateways[0]
	return &igw, nil
}
// Find looks up the existing InternetGateway (if any) and returns its actual
// state so the delta engine can diff it against the expected state e.
// Shared gateways are located via their VPC attachment; managed ones by ID
// or by the cluster's name-tag filters.
func (e *InternetGateway) Find(c *fi.CloudupContext) (*InternetGateway, error) {
	ctx := c.Context()
	cloud := awsup.GetCloud(c)
	request := &ec2.DescribeInternetGatewaysInput{}
	shared := fi.ValueOf(e.Shared)
	if shared {
		// A shared IGW is not tagged by us, so it can only be found through
		// the VPC it is attached to.
		if fi.ValueOf(e.VPC.ID) == "" {
			return nil, fmt.Errorf("VPC ID is required when InternetGateway is shared")
		}
		request.Filters = []ec2types.Filter{awsup.NewEC2Filter("attachment.vpc-id", *e.VPC.ID)}
	} else {
		if e.ID != nil {
			request.InternetGatewayIds = []string{fi.ValueOf(e.ID)}
		} else {
			request.Filters = cloud.BuildFilters(e.Name)
		}
	}
	igw, err := findInternetGateway(ctx, cloud, request)
	if err != nil {
		return nil, err
	}
	if igw == nil {
		return nil, nil
	}
	actual := &InternetGateway{
		ID:   igw.InternetGatewayId,
		Name: findNameTag(igw.Tags),
		Tags: intersectTags(igw.Tags, e.Tags),
	}
	klog.V(2).Infof("found matching InternetGateway %q", *actual.ID)
	// An IGW has at most one VPC attachment; if several are reported the
	// last one wins.
	for _, attachment := range igw.Attachments {
		actual.VPC = &VPC{ID: attachment.VpcId}
	}
	// Prevent spurious comparison failures
	actual.Shared = e.Shared
	actual.Lifecycle = e.Lifecycle
	if shared {
		actual.Name = e.Name
	}
	// Remember the discovered ID on the expected task for later phases.
	if e.ID == nil {
		e.ID = actual.ID
	}
	// We don't set the tags for a shared IGW
	if fi.ValueOf(e.Shared) {
		actual.Tags = e.Tags
	}
	return actual, nil
}
// Run executes the task through the standard delta-run flow
// (Find -> CheckChanges -> Render*).
func (e *InternetGateway) Run(c *fi.CloudupContext) error {
	return fi.CloudupDefaultDeltaRunMethod(e, c)
}
// CheckChanges validates the requested delta before it is rendered.
// Moving an existing gateway to a different VPC is currently rejected.
func (s *InternetGateway) CheckChanges(a, e, changes *InternetGateway) error {
	// Nothing to validate when the gateway does not exist yet.
	if a == nil {
		return nil
	}
	// TODO: I think we can change it; we just detach & attach
	if changes.VPC != nil {
		return fi.CannotChangeField("VPC")
	}
	return nil
}
// RenderAWS applies the computed delta against AWS: it creates the gateway
// when missing, attaches it to the VPC, and reconciles tags. Shared gateways
// are only verified, never mutated.
func (_ *InternetGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *InternetGateway) error {
	ctx := context.TODO()
	shared := fi.ValueOf(e.Shared)
	if shared {
		// Verify the InternetGateway was found and matches our required settings
		if a == nil {
			return fmt.Errorf("InternetGateway for shared VPC was not found")
		}
		return nil
	}
	if a == nil {
		klog.V(2).Infof("Creating InternetGateway")
		request := &ec2.CreateInternetGatewayInput{
			TagSpecifications: awsup.EC2TagSpecification(ec2types.ResourceTypeInternetGateway, e.Tags),
		}
		response, err := t.Cloud.EC2().CreateInternetGateway(ctx, request)
		if err != nil {
			return fmt.Errorf("error creating InternetGateway: %v", err)
		}
		// Record the new ID so the attachment below (and later phases) can use it.
		e.ID = response.InternetGateway.InternetGatewayId
	}
	// Attach on first creation, or when the VPC association changed.
	if a == nil || (changes != nil && changes.VPC != nil) {
		klog.V(2).Infof("Creating InternetGatewayAttachment")
		attachRequest := &ec2.AttachInternetGatewayInput{
			VpcId:             e.VPC.ID,
			InternetGatewayId: e.ID,
		}
		_, err := t.Cloud.EC2().AttachInternetGateway(ctx, attachRequest)
		if err != nil {
			return fmt.Errorf("error attaching InternetGateway to VPC: %v", err)
		}
	}
	return t.AddAWSTags(*e.ID, e.Tags)
}
// terraformInternetGateway is the serialized form of an aws_internet_gateway
// terraform resource.
type terraformInternetGateway struct {
	VPCID *terraformWriter.Literal `cty:"vpc_id"`
	Tags  map[string]string        `cty:"tags"`
}
// RenderTerraform writes the aws_internet_gateway resource for managed
// gateways. Shared gateways are not rendered; instead their concrete ID is
// discovered (when unknown) so TerraformLink can reference them.
func (_ *InternetGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *InternetGateway) error {
	ctx := context.TODO()
	shared := fi.ValueOf(e.Shared)
	if shared {
		// Not terraform owned / managed
		// But ... attempt to discover the ID so TerraformLink works
		if e.ID == nil {
			request := &ec2.DescribeInternetGatewaysInput{}
			vpcID := fi.ValueOf(e.VPC.ID)
			if vpcID == "" {
				return fmt.Errorf("VPC ID is required when InternetGateway is shared")
			}
			request.Filters = []ec2types.Filter{awsup.NewEC2Filter("attachment.vpc-id", vpcID)}
			igw, err := findInternetGateway(ctx, t.Cloud.(awsup.AWSCloud), request)
			if err != nil {
				return err
			}
			if igw == nil {
				// Non-fatal: TerraformLink will fail later if the ID stays unset.
				klog.Warningf("Cannot find internet gateway for VPC %q", vpcID)
			} else {
				e.ID = igw.InternetGatewayId
			}
		}
		return nil
	}
	tf := &terraformInternetGateway{
		VPCID: e.VPC.TerraformLink(),
		Tags:  e.Tags,
	}
	return t.RenderResource("aws_internet_gateway", *e.Name, tf)
}
// TerraformLink returns the terraform expression referencing this gateway:
// the managed resource's id attribute, or the literal ID for shared gateways.
func (e *InternetGateway) TerraformLink() *terraformWriter.Literal {
	if !fi.ValueOf(e.Shared) {
		// Terraform-managed gateway: reference the resource's id attribute.
		return terraformWriter.LiteralProperty("aws_internet_gateway", *e.Name, "id")
	}
	// Shared gateways are not managed by terraform; emit the concrete ID.
	if e.ID == nil {
		klog.Fatalf("ID must be set, if InternetGateway is shared: %s", e)
	}
	klog.V(4).Infof("reusing existing InternetGateway with id %q", *e.ID)
	return terraformWriter.LiteralFromStringValue(*e.ID)
}
```
|
```xml
import { Component } from '@angular/core';
import { Code } from '@domain/code';
// Documentation demo component: renders a PrimeNG Inplace that reveals an
// image when activated, plus the matching code samples via <app-code>.
@Component({
    selector: 'image-doc',
    template: `
        <app-docsectiontext>
            <p>Any content such as an image can be placed inside an Inplace.</p>
        </app-docsectiontext>
        <div class="card">
            <p-inplace>
                <ng-template pTemplate="display">
                    <div class="inline-flex align-items-center">
                        <span class="pi pi-image" style="vertical-align: middle"></span>
                        <span class="ml-2">View Picture</span>
                    </div>
                </ng-template>
                <ng-template pTemplate="content">
                    <img src="path_to_url" alt="Nature" />
                </ng-template>
            </p-inplace>
        </div>
        <app-code [code]="code" selector="inplace-image-demo"></app-code>
    `
})
export class ImageDoc {
    // Code snippets displayed in the docs; the strings must mirror the live
    // template above, so keep them in sync when editing either one.
    code: Code = {
        basic: `<p-inplace>
    <ng-template pTemplate="display">
        <div class="inline-flex align-items-center">
            <span class="pi pi-image" style="vertical-align: middle"></span>
            <span class="ml-2">View Picture</span>
        </div>
    </ng-template>
    <ng-template pTemplate="content">
        <img
            src="path_to_url"
            alt="Nature" />
    </ng-template>
</p-inplace>`,
        html: `<div class="card">
    <p-inplace>
        <ng-template pTemplate="display">
            <div class="inline-flex align-items-center">
                <span class="pi pi-image" style="vertical-align: middle"></span>
                <span class="ml-2">View Picture</span>
            </div>
        </ng-template>
        <ng-template pTemplate="content">
            <img
                src="path_to_url"
                alt="Nature" />
        </ng-template>
    </p-inplace>
</div>`,
        typescript: `import { Component } from '@angular/core';
import { InplaceModule } from 'primeng/inplace';

@Component({
    selector: 'inplace-image-demo',
    templateUrl: './inplace-image-demo.html',
    standalone: true,
    imports: [InplaceModule]
})
export class InplaceImageDemo {}`
    };
}
```
|
Bette Swenson Orsini (December 2, 1925 – March 26, 2011) was an American journalist for the St. Petersburg Times. In 1980, she won the Pulitzer Prize for National Reporting with Charles Stafford for an investigation of the Church of Scientology.
Biography
Orsini was born on December 2, 1925, in St. Petersburg, Florida. After attending both St. Petersburg High School and St. Petersburg Junior College, she attended the University of Florida, and graduated with a Bachelor of Arts in psychology. Afterwards, Orsini worked for the St. Petersburg Times, the Arkansas Democrat-Gazette, and the Richmond News Leader. Orsini spent forty-one years working at the St. Petersburg Times, beginning in 1946. That year, she placed second in a national contest for the "best-looking newspaperwoman". Orsini was also a body double for Lizabeth Scott in Dead Reckoning (1947). She was also a marathon waterskier. By 1963, she was the education reporter for the paper. The following year, she was involved in a marathon ski run from St. Petersburg to New York City for the 1964 New York World's Fair. Over 28 days, a ten-person group skied the distance to New York. For her reporting, Orsini received the American Political Science Association Public Affairs Reporting award (1967) and a National Headliners Award (1970). In 1974, after an exposé of a scandal involving Floyd T. Christian, she received a Scripps Howard Foundation Award.
Orsini began investigating the Church of Scientology and its expansion into Clearwater, Florida, in the middle of the 1970s. Over three years of investigating, she and Stafford published fourteen stories that criticized the church, investigating its belief system and corrupt practices. By 1976, a church memo was issued that deemed Stafford and Orsini 'enemies' of the church, and stated that their ranks should be infiltrated. In an obituary published in the Tampa Bay Times, she was called the "prime source of stories about Scientology's financial and social structure". The church repeatedly attempted to get Orsini fired, and otherwise attempted to stop her reporting. For this series of articles, she won the Pulitzer Prize for National Reporting in 1980. She died on March 26, 2011.
References
1925 births
2011 deaths
Pulitzer Prize for National Reporting winners
University of Florida alumni
St. Petersburg College alumni
Tampa Bay Times
|
Jonas Aspelin (8 September 1884 – 3 September 1964) was a Norwegian businessperson.
He was born in Kristiania as a son of Gustaf Aspelin (1857–1917) and Elisa Holmboe (1865–1926). He was thus a maternal great-grandson of Leonhard Christian Borchgrevink Holmboe and nephew of Carl Fredrik Holmboe. His sister Karen married Ragnar Sommar Bruzelius, mayor of Gustaf's city of origin Kristianstad.
Jonas Aspelin attended commerce school, and spent about five years abroad in Germany, England and the United States. His father had founded an eponymous company for wholesale of metals in 1881. Gustaf Aspelin died in 1917, and Jonas took over, and later took his brother Knut on board as co-owner. He was also a deputy chair of the employers' association Jerngrossistenes Forening and supervisory council member of Storebrand.
He was appointed Swedish consul to Norway in 1924, and promoted to consul-general in 1929. He served in this capacity for about thirty years. He also chaired the friendship society Swedish Society, which had been co-founded and chaired by his father.
Early in his career he was decorated as a Commander of the Order of Vasa and a Knight of the Order of the Polar Star. In 1952 he became a Knight, First Class of the Order of St. Olav. He died in September 1964 and was buried in Ris.
References
1884 births
1964 deaths
Businesspeople from Oslo
Norwegian people of Swedish descent
20th-century Norwegian businesspeople
Norwegian expatriates in Germany
Norwegian expatriates in the United Kingdom
Norwegian expatriates in the United States
Commanders of the Order of Vasa
Knights of the Order of the Polar Star
Holmboe family
|
The 2021 Wheelchair Rugby League World Cup will feature eight international teams, each with a squad of up to twelve players.
Group A
Australia
Peter Arbuckle (Queensland)
Cory Cannane (New South Wales)
Craig Cannane (New South Wales)
Richard Engles (New South Wales)
Brad Grove (New South Wales)
Shaun Harre (Queensland)
James Hill (Queensland)
Diab Karim (New South Wales)
Liam Luff (New South Wales)
Bayley McKenna (Queensland)
Zac Schumacher (Queensland)
Adam Tannock (Queensland).
England
Sebastien Bechara (Catalans Dragons)
Jack Brown (Halifax Panthers)
Wayne Boardman (Halifax Panthers)
Nathan Collins (Leeds Rhinos)
Joe Coyd (London Roosters)
Rob Hawkins (Halifax Panthers)
Tom Halliwell (Leeds Rhinos)
Lewis King (London Roosters)
Adam Rigby (Wigan Warriors)
Declan Roberts (Wigan Warriors)
James Simpson (Leeds Rhinos)
Ireland
Toby Burton-Carter (Warrington Wolves)
Tom Martin (Halifax Panthers)
Rick Rodgers (Argonauts Demi-gods and Skeleton Army)
Stephen Campbell
Paddy Forbes
Kenneth Maloney (Gravesend Dynamite)
Scott Robertson
Peter Johnston (Argonauts Demi-gods and Skeleton Army)
Phil Roberts (Wigan Warriors)
James McCarthy
Oran Spain
Nash Jennings
Spain
David Berty (St Toulousain)
Jorge Gelade-Panzo (Dragons Handi)
Theo Gonzalez (Handisport Roannais)
Joel lacombe (Dragons Handi)
Yannick Martin (Montauban)
Fabien Moisdon (Dragons Handi)
Raphaël Monedero (Dragons Handi)
David Raymond (Biganos)
Wilfrid Seron (St Toulousain)
Group B
France
Mostefa Abassi (Saint-Jory)
Lionel Alazard (Montauban)
Jérémy Bourson (Dragons Handi)
Gilles Clausells (Dragons Handi)
Nicolas Clausells (Dragons Handi)
Dany Denuwelaere (Montauban)
Thomas Duhalde (Euskadi)
Florian Guttadoro (SO Avignon)
Guillaume Mautz (SO Avignon)
Julien Penella (Euskadi)
Arno Vargas (Dragons Handi)
Yann Verdi (SO Avignon).
Jonathan Hivernat (Dragons Handi, standby)
Adrien Zittel (Arbent, standby).
Scotland
Dave Anderson (West Wales Raiders)
Gregor Anderson (Dundee Dragons)
David Birtles (Dundee Dragons)
Connor Blackmore (Dundee Dragons)
Dan Grant (Gravesend)
Paul Hartley (Glasgow RL)
Peter Lauder (unattached)
Michael Mellon (Dundee Dragons)
Graeme Stewart (Glasgow RL)
Cadyn Thompson (Dundee Dragons)
John Willans (Dundee Dragons)
Callum Young (Warrington Wolves)
United States
Jeffrey Townsend (captain)
Jesse Lind (vice-captain)
MacKenzie Johnson
Michah Stewart
William Johnstone
Lavern Anderson
Freddie Smith
Gabi Cha
Andy Kingsley
Matthew Wooloff (Wigan Warriors)
Jabrier Lee
Jensen Blaine
Wales
Stephen Halsey (North Wales Crusaders)
Scott Trigg-Turner (North Wales Crusaders)
Gary Preece (Hereford Harriers)
Mason Baker (North Wales Crusaders)
Jodie Boyd-Ward (Leeds Rhinos)
Andrew Higgins (Hereford Harriers)
Stuart Williams (North Wales Crusaders)
Lucie Roberts (North Wales Crusaders)
Martin Lane (Hereford Harriers)
Mark Williams (Wigan Warriors)
Harry Jones (North Wales Crusaders)
Alan Caron (Hereford Harriers).
References
2021 Rugby League World Cup
Rugby League World Cup squads
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.