in_source_id
stringlengths
13
58
issue
stringlengths
3
241k
before_files
listlengths
0
3
after_files
listlengths
0
3
pr_diff
stringlengths
109
107M
django-oscar__django-oscar-1235
Misprint in commit f56226a oscar/apps/dashboard/catalogue/forms.py have a misprint after commit f56226aa2f0e18538a1095a558c76312166bb11a in line 382: ``` python class StockAlertSearchForm(forms.Form): tatus = forms.CharField(label=_('Status')) ``` tatus -> status.
[ { "content": "from django import forms\nfrom django.core.exceptions import ValidationError, MultipleObjectsReturned\nfrom django.forms.models import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\nfrom treebeard.forms import MoveNodeForm, movenodeform_factory\n\nfrom oscar.core.u...
[ { "content": "from django import forms\nfrom django.core.exceptions import ValidationError, MultipleObjectsReturned\nfrom django.forms.models import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\nfrom treebeard.forms import MoveNodeForm, movenodeform_factory\n\nfrom oscar.core.u...
diff --git a/oscar/apps/dashboard/catalogue/forms.py b/oscar/apps/dashboard/catalogue/forms.py index 4a5a26489ec..2683e216c14 100644 --- a/oscar/apps/dashboard/catalogue/forms.py +++ b/oscar/apps/dashboard/catalogue/forms.py @@ -379,7 +379,7 @@ def clean(self): class StockAlertSearchForm(forms.Form): - tatus = forms.CharField(label=_('Status')) + status = forms.CharField(label=_('Status')) class ProductCategoryForm(forms.ModelForm):
scikit-hep__awkward-2213
Example in merge_union_of_records is a no-op ### Which documentation? Python docstrings ### What needs to be documented? In the example given, the result type is the same as the input type: <details> <summary> Quote from the docstring </summary> ```python Simplifies unions of records, e.g. >>> array = ak.Array([{"a": 1}, {"b": 2}]) into records of options, i.e. >>> ak.merge_union_of_records(array) <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'> ``` </details> ```python >>> import awkward as ak >>> ak.Array([{"a": 1}, {"b": 2}]).type.show() 2 * { a: ?int64, b: ?int64 } >>> ak.merge_union_of_records(ak.Array([{"a": 1}, {"b": 2}])).type.show() 2 * { a: ?int64, b: ?int64 } ```
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport awkward as ak\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef merge_union_of_records(array, axis=-1, *, hi...
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport awkward as ak\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef merge_union_of_records(array, axis=-1, *, hi...
diff --git a/src/awkward/operations/ak_merge_union_of_records.py b/src/awkward/operations/ak_merge_union_of_records.py index c389dccb83..3694909496 100644 --- a/src/awkward/operations/ak_merge_union_of_records.py +++ b/src/awkward/operations/ak_merge_union_of_records.py @@ -23,7 +23,7 @@ def merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None): Simplifies unions of records, e.g. - >>> array = ak.Array([{"a": 1}, {"b": 2}]) + >>> array = ak.concatenate(([{"a": 1}], [{"b": 2}])) into records of options, i.e.
zestedesavoir__zds-site-800
Erreur dans /membres/?q=... via AJAX Suite à l'ajout de la réponse sous forme JSON de `/membres/` ( #677 ), le GET de cette page avec le header `X-Requestes-With: XMLHttpRequest` plante, dû au fait que la lib JSON n'est pas importé dans le fichier `zds/membres/view.py` ``` bash [07/Jun/2014 14:30:49] "GET /membres/?q=a HTTP/1.1" 500 15042 Internal Server Error: /membres/ Traceback (most recent call last): File "~/.local/lib/python2.7/site-packages/django/core/handlers/base.py", line 112, in get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "zds/member/decorator.py", line 21, in _can_read_now return func(request, *args, **kwargs) File "zds/member/views.py", line 55, in index data = json.dumps(results) NameError: global name 'json' is not defined ```
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nimport os\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_requi...
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nimport os\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_requi...
diff --git a/zds/member/views.py b/zds/member/views.py index 532ac726ae..b1f1502b79 100644 --- a/zds/member/views.py +++ b/zds/member/views.py @@ -55,6 +55,8 @@ def index(request): results.append(member_json) data = json.dumps(results) + mimetype = "application/json" + return HttpResponse(data, mimetype) else:
pytorch__TensorRT-74
Create some sort of serialization / deserialization functionality With INT8 about to land, would be a pain to have to calibrate from scratch every time. There should be some mechanism to save and load modules with the TRT engine included.
[ { "content": "import os\nimport sys\n\nif sys.version_info < (3,):\n raise Exception(\"Python 2 has reached end-of-life and is not supported by TRTorch\")\n\nimport ctypes\nimport torch\n\nfrom trtorch._version import __version__\nfrom trtorch._compiler import *\nfrom trtorch._types import *\nfrom trtorch im...
[ { "content": "import os\nimport sys\n\nif sys.version_info < (3,):\n raise Exception(\"Python 2 has reached end-of-life and is not supported by TRTorch\")\n\nimport ctypes\nimport torch\n\nfrom trtorch._version import __version__\nfrom trtorch._compiler import *\nfrom trtorch._types import *\nfrom trtorch im...
diff --git a/BUILD b/BUILD index 48f2ae5338..02829531b2 100644 --- a/BUILD +++ b/BUILD @@ -8,6 +8,8 @@ pkg_tar( "//core/conversion:include", "//core/conversion/conversionctx:include", "//core/conversion/converters:include", + "//core/conversion/var:include", + "//core/conversion/tensorcontainer:include", "//core/conversion/evaluators:include", "//core/execution:include", "//core/lowering:include", @@ -35,6 +37,15 @@ pkg_tar( ) +pkg_tar( + name = "bin", + package_dir = "bin/", + srcs = [ + "//cpp/trtorchc:trtorchc", + ], + mode = "0755", +) + pkg_tar( @@ -46,6 +57,7 @@ pkg_tar( ], deps = [ ":lib", + ":bin", ":include", ":include_core", ], diff --git a/README.md b/README.md index befe86e8fe..60cfe55e94 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,8 @@ compile_settings.op_precision = torch::kFloat; auto trt_mod = trtorch::CompileGraph(ts_mod, compile_settings); // Run like normal auto results = trt_mod.forward({in_tensor}); +// Save module for later +trt_mod.save("trt_torchscript_module.ts"); ... 
``` @@ -46,6 +48,7 @@ trt_ts_module = trtorch.compile(torch_script_module, compile_settings) input_data = input_data.half() result = trt_ts_module(input_data) +torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") ``` > Notes on running in lower precisions: diff --git a/core/compiler.cpp b/core/compiler.cpp index 2f94ba8ead..be0dc895d8 100644 --- a/core/compiler.cpp +++ b/core/compiler.cpp @@ -6,7 +6,9 @@ #include "NvInfer.h" #include "ATen/core/function_schema.h" +#include "ATen/core/jit_type.h" +#include "torch/custom_class.h" #include "torch/csrc/jit/frontend/function_schema_parser.h" #include "torch/csrc/jit/ir/ir.h" #include "torch/csrc/jit/passes/pass_manager.h" @@ -40,32 +42,70 @@ c10::FunctionSchema GenerateGraphSchema(torch::jit::script::Module mod, std::str void AddEngineToGraph(torch::jit::script::Module mod, std::shared_ptr<torch::jit::Graph>& g, std::string& serialized_engine) { - execution::EngineID uid = execution::RegisterEngineFromSerializedEngine(serialized_engine); - auto num_io = execution::GetEngineIO(uid); - - auto self = g->addInput("self.1"); + auto engine = execution::TRTEngine(mod._ivalue()->name(), serialized_engine); + // Get required metadata about the engine out + auto num_io = engine.num_io; + auto name = engine.name; + + // Add the engine as an attribute of the module, this will let the engine be serialized and deserialized + auto engine_ptr = c10::make_intrusive<execution::TRTEngine>(engine); + mod.register_attribute( + name, + c10::getCustomClassType<c10::intrusive_ptr<execution::TRTEngine>>(), + c10::IValue(std::move(engine_ptr)), + false + ); + + // Add the module as an input into the graph + auto self = g->addInput("self_1"); self->setType(mod.type()); - auto id_val = g->insertConstant(uid); + // Start by retriveing the engine from the module attribute list + auto engine_node = g->createGetAttr(self, name); + g->block()->appendNode(engine_node); + // Add inputs to the graph corresponding to the number of input tensors 
expected by the engine + // Also store those inputs in a vector so that they can be coalesced into a single list at runtime std::vector<torch::jit::Value*> engine_inputs; - engine_inputs.push_back(id_val); - for (uint64_t i = 0; i < num_io.first; i++) { - auto in_val = g->addInput(""); + auto in_val = g->addInput(std::string("input_") + std::to_string(i)); in_val->setType(c10::TensorType::get()); engine_inputs.push_back(in_val); } - auto engine_node = g->create(c10::Symbol::fromQualString("trt::execute_engine"), torch::jit::ArrayRef<torch::jit::Value*>(engine_inputs), num_io.second); - g->block()->appendNode(engine_node); - - if (engine_node->outputs().size() > 1) { - auto return_tuple_node = g->createTuple(engine_node->outputs()); + // Create a node that will merge all of the input tensors into a single list argument to the trt::execute_engine op + // Creates: prim::ListConstruct(<input tensors>) + auto input_list_node = g->createList(c10::TensorType::get(), torch::jit::ArrayRef<torch::jit::Value*>(engine_inputs)); + g->block()->appendNode(input_list_node); + + // Make a list of inputs to the actual trt::execute_engine op + // Note: Ordering of list and then engine is because we can pop off the engine first which contains all the metadata + // needed for execution + std::vector<torch::jit::Value*> execute_node_inputs; + execute_node_inputs.push_back(input_list_node->outputs()[0]); + execute_node_inputs.push_back(engine_node->outputs()[0]); + + // Create the actual execution node trt::execute_engine using the assembled inputs + auto execute_node = g->create(c10::Symbol::fromQualString("trt::execute_engine"), torch::jit::ArrayRef<torch::jit::Value*>(execute_node_inputs), 1); + g->block()->appendNode(execute_node); + execute_node->outputs()[0]->setType(c10::ListType::ofTensors()); + + // Create a node to unpack the list into seperate tensors, in the case of there being only one tensor, the tensor will be returned, + // otherwise they are returned as a tuple of 
tensors. + // Creates: prim::ListUnpack(<engine output>) + auto unpack_node = g->createListUnpack(execute_node->outputs()[0], num_io.second); + g->block()->appendNode(unpack_node); + + // If there are multiple output tensors from TensorRT we wrap them in a tuple to return + if (unpack_node->outputs().size() > 1) { + // Creates prim::TupleConstruct(<output tensors>) using outputs of the unpack node + auto return_tuple_node = g->createTuple(unpack_node->outputs()); g->block()->appendNode(return_tuple_node); + // Set the output as the produced tuple g->registerOutput(return_tuple_node->outputs()[0]); } else { - g->registerOutput(engine_node->outputs()[0]); + // Set the output as the sole output tensor + g->registerOutput(unpack_node->outputs()[0]); } LOG_DEBUG(*g << "(AddEngineToGraph)\n"); diff --git a/core/conversion/InterfaceTypes.cpp b/core/conversion/InterfaceTypes.cpp index ac90085583..3ec3d93178 100644 --- a/core/conversion/InterfaceTypes.cpp +++ b/core/conversion/InterfaceTypes.cpp @@ -34,7 +34,7 @@ InputRange::InputRange(std::vector<int64_t> d) { min = util::toDims(d); max = util::toDims(d); input_shape = util::toDims(d); - + input_is_dynamic = false; } @@ -67,6 +67,7 @@ InputRange::InputRange(std::vector<int64_t> min_shape, std::vector<int64_t> opt_ dim.insert(max_shape[i]); if (dim.size() != 1) { dyn_shape.push_back(-1); + input_is_dynamic = true; } else { dyn_shape.push_back(opt_shape[i]); } diff --git a/core/conversion/conversion.cpp b/core/conversion/conversion.cpp index 911e58e039..fc4e75ca88 100644 --- a/core/conversion/conversion.cpp +++ b/core/conversion/conversion.cpp @@ -155,6 +155,10 @@ void AddInputs(ConversionCtx* ctx, profile->setDimensions(trt_in->getName(), nvinfer1::OptProfileSelector::kOPT, dims.opt); profile->setDimensions(trt_in->getName(), nvinfer1::OptProfileSelector::kMAX, dims.max); + if (dims.input_is_dynamic) { + ctx->input_is_dynamic = true; + } + ctx->value_tensor_map[in] = trt_in; } diff --git a/core/conversion/conversion.h 
b/core/conversion/conversion.h index 529d04f6b6..1c7a790025 100644 --- a/core/conversion/conversion.h +++ b/core/conversion/conversion.h @@ -15,6 +15,7 @@ struct InputRange { nvinfer1::Dims max; nvinfer1::Dims opt; nvinfer1::Dims input_shape; + bool input_is_dynamic = false; // Should we restrict to unsigned? InputRange(std::vector<int64_t> d); InputRange(std::vector<int64_t> min_shape, diff --git a/core/conversion/conversionctx/ConversionCtx.h b/core/conversion/conversionctx/ConversionCtx.h index 76653037a9..abd49cf22e 100644 --- a/core/conversion/conversionctx/ConversionCtx.h +++ b/core/conversion/conversionctx/ConversionCtx.h @@ -42,6 +42,7 @@ struct ConversionCtx { ~ConversionCtx(); + bool input_is_dynamic = false; nvinfer1::IBuilder* builder; nvinfer1::INetworkDefinition* net; nvinfer1::IBuilderConfig* cfg; diff --git a/core/conversion/converters/impl/batch_norm.cpp b/core/conversion/converters/impl/batch_norm.cpp index bd923310a0..a7b6045737 100644 --- a/core/conversion/converters/impl/batch_norm.cpp +++ b/core/conversion/converters/impl/batch_norm.cpp @@ -19,12 +19,24 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() auto orig_shape = input->getDimensions(); auto shape = util::toVec(orig_shape); auto options = torch::TensorOptions().dtype(torch::kFloat32); - auto gamma = args[1].unwrapToTensor(at::full({shape}, 1, {options})); - auto beta = args[2].unwrapToTensor(at::full({shape}, 1, {options})); - auto mean = args[3].unwrapToTensor(at::full({shape}, 0, {options})); - auto var = args[4].unwrapToTensor(at::full({shape}, 0, {options})); + + torch::Tensor gamma, beta, mean, var; + + if (ctx->input_is_dynamic) { + gamma = args[1].unwrapToTensor(); + beta = args[2].unwrapToTensor(); + mean = args[3].unwrapToTensor(); + var = args[4].unwrapToTensor(); + } else { + gamma = args[1].unwrapToTensor(at::full({shape}, 1, {options})); + beta = args[2].unwrapToTensor(at::full({shape}, 1, {options})); + mean = 
args[3].unwrapToTensor(at::full({shape}, 0, {options})); + var = args[4].unwrapToTensor(at::full({shape}, 0, {options})); + } + auto eps = args[7].unwrapToDouble(1e-5f); + LOG_DEBUG("momentum disregarded"); LOG_DEBUG("training disregarded"); LOG_DEBUG("cudnn disregarded"); diff --git a/core/conversion/converters/impl/concat.cpp b/core/conversion/converters/impl/concat.cpp index da3853291c..2063d8921f 100644 --- a/core/conversion/converters/impl/concat.cpp +++ b/core/conversion/converters/impl/concat.cpp @@ -8,7 +8,7 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto cat_registrations = RegisterNodeConversionPatterns() +auto cat_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/constant.cpp b/core/conversion/converters/impl/constant.cpp index 432eb6bf85..1c23cb6a8b 100644 --- a/core/conversion/converters/impl/constant.cpp +++ b/core/conversion/converters/impl/constant.cpp @@ -7,7 +7,7 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto constant_registrations = RegisterNodeConversionPatterns() +auto constant_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "trt::const(Tensor self) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/conv_deconv.cpp b/core/conversion/converters/impl/conv_deconv.cpp index 37cf3ff3ad..3388a26741 100644 --- a/core/conversion/converters/impl/conv_deconv.cpp +++ b/core/conversion/converters/impl/conv_deconv.cpp @@ -9,7 +9,7 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto conv_registrations = RegisterNodeConversionPatterns() +auto conv_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ R"SIG(aten::_convolution(Tensor input, 
Tensor weight, Tensor? bias, int[] stride, int[] padding, diff --git a/core/conversion/converters/impl/element_wise.cpp b/core/conversion/converters/impl/element_wise.cpp index 375e7a2d8f..4cb2e03a19 100644 --- a/core/conversion/converters/impl/element_wise.cpp +++ b/core/conversion/converters/impl/element_wise.cpp @@ -68,7 +68,7 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera } -auto element_wise_registrations = RegisterNodeConversionPatterns() +auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::add.Tensor(Tensor self, Tensor other, Scalar alpha=1) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/linear.cpp b/core/conversion/converters/impl/linear.cpp index f4c49ec020..e22664afe0 100644 --- a/core/conversion/converters/impl/linear.cpp +++ b/core/conversion/converters/impl/linear.cpp @@ -8,7 +8,7 @@ namespace converters { namespace impl { namespace { -auto linear_registrations = RegisterNodeConversionPatterns() +auto linear_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::linear(Tensor input, Tensor weight, Tensor? 
bias = None) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/matrix_multiply.cpp b/core/conversion/converters/impl/matrix_multiply.cpp index c6d2d99f1e..cbebdc13b2 100644 --- a/core/conversion/converters/impl/matrix_multiply.cpp +++ b/core/conversion/converters/impl/matrix_multiply.cpp @@ -8,7 +8,7 @@ namespace converters { namespace impl { namespace { -auto mm_registrations = RegisterNodeConversionPatterns() +auto mm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::matmul(Tensor self, Tensor other) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/pooling.cpp b/core/conversion/converters/impl/pooling.cpp index 04472ce5fc..e18c78c1ed 100644 --- a/core/conversion/converters/impl/pooling.cpp +++ b/core/conversion/converters/impl/pooling.cpp @@ -8,7 +8,7 @@ namespace converters { namespace impl { namespace { -auto pooling_registrations = RegisterNodeConversionPatterns() +auto pooling_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/reduce.cpp b/core/conversion/converters/impl/reduce.cpp index 0127f83285..16e0d9dd83 100644 --- a/core/conversion/converters/impl/reduce.cpp +++ b/core/conversion/converters/impl/reduce.cpp @@ -11,7 +11,7 @@ namespace { -auto reduce_registrations = RegisterNodeConversionPatterns() +auto reduce_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::mean(Tensor self, *, ScalarType? 
dtype=None) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/shape.cpp b/core/conversion/converters/impl/shape.cpp index d5b3577a34..613ce43fe9 100644 --- a/core/conversion/converters/impl/shape.cpp +++ b/core/conversion/converters/impl/shape.cpp @@ -9,7 +9,7 @@ namespace converters { namespace impl { namespace { -static auto shape_registrations = RegisterNodeConversionPatterns() +static auto shape_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ // To use in static input size cases (explicit batch) "aten::size.int(Tensor self, int dim) -> (Tensor)", diff --git a/core/conversion/converters/impl/shuffle.cpp b/core/conversion/converters/impl/shuffle.cpp index ceda35a5d9..951635a8fc 100644 --- a/core/conversion/converters/impl/shuffle.cpp +++ b/core/conversion/converters/impl/shuffle.cpp @@ -9,7 +9,7 @@ namespace converters { namespace impl { namespace { -static auto shuffle_registrations = RegisterNodeConversionPatterns() +static auto shuffle_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::flatten.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { @@ -50,12 +50,10 @@ static auto shuffle_registrations = RegisterNodeConversionPatterns() [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { auto in = args[0].ITensor(); auto in_shape = util::toVec(in->getDimensions()); - auto ex_tensor = torch::rand(in_shape); - auto new_shape = ex_tensor.view(args[1].unwrapToIntList().vec()).sizes(); auto shuffle = ctx->net->addShuffle(*in); TRTORCH_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n); - shuffle->setReshapeDimensions(util::toDims(new_shape)); + shuffle->setReshapeDimensions(util::toDims(args[1].unwrapToIntList().vec())); shuffle->setName(util::node_info(n).c_str()); auto out_tensor = 
ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0)); diff --git a/core/conversion/converters/impl/softmax.cpp b/core/conversion/converters/impl/softmax.cpp index 35f6f04ef1..6a81b974a2 100644 --- a/core/conversion/converters/impl/softmax.cpp +++ b/core/conversion/converters/impl/softmax.cpp @@ -7,7 +7,7 @@ namespace converters { namespace impl { namespace { -static auto softmax_registrations = RegisterNodeConversionPatterns() +static auto softmax_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::softmax.int(Tensor self, int dim, int? dtype=None) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/tensorcontainer/TensorContainer.cpp b/core/conversion/tensorcontainer/TensorContainer.cpp index 536d578eae..6fad66335d 100644 --- a/core/conversion/tensorcontainer/TensorContainer.cpp +++ b/core/conversion/tensorcontainer/TensorContainer.cpp @@ -6,7 +6,7 @@ namespace conversion { namespace { static auto tensor_container = - torch::class_<TensorContainer>("_eval_ivalue_types", "TensorContainer") + torch::class_<TensorContainer>("_trtorch_eval_ivalue_types", "TensorContainer") .def(torch::init<>()); } // namespace } // conversion diff --git a/core/conversion/var/BUILD b/core/conversion/var/BUILD index e1c92efb12..247f939e48 100644 --- a/core/conversion/var/BUILD +++ b/core/conversion/var/BUILD @@ -30,7 +30,7 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar") pkg_tar( name = "include", - package_dir = "core/conversion/arg/", + package_dir = "core/conversion/var/", srcs = [ "Var.h", "Var_inl.h" diff --git a/core/execution/BUILD b/core/execution/BUILD index 009092d3e6..1741249624 100644 --- a/core/execution/BUILD +++ b/core/execution/BUILD @@ -14,7 +14,6 @@ cc_library( ], srcs = [ "TRTEngine.cpp", - "TRTEngineManager.cpp", "register_trt_op.cpp", ], deps = [ diff --git a/core/execution/TRTEngine.cpp b/core/execution/TRTEngine.cpp index 3370ea6f5b..3d4dbc8033 100644 --- 
a/core/execution/TRTEngine.cpp +++ b/core/execution/TRTEngine.cpp @@ -10,12 +10,32 @@ namespace trtorch { namespace core { namespace execution { -TRTEngine::TRTEngine(nvinfer1::ILogger& logger, std::string& serialized_engine) { +std::string slugify(std::string s) { + std::replace(s.begin(), s.end(), '.', '_'); + return s; +} + +TRTEngine::TRTEngine(std::string serialized_engine) + : logger(std::string("[] - "), + util::logging::get_logger().get_reportable_severity(), + util::logging::get_logger().get_is_colored_output_on()) { + std::string _name = "deserialized_trt"; + new (this) TRTEngine(_name, serialized_engine); +} + +TRTEngine::TRTEngine(std::string mod_name, std::string serialized_engine) + : logger(std::string("[") + mod_name + std::string("_engine] - "), + util::logging::get_logger().get_reportable_severity(), + util::logging::get_logger().get_is_colored_output_on()) { + rt = nvinfer1::createInferRuntime(logger); + name = slugify(mod_name) + "_engine"; + cuda_engine = rt->deserializeCudaEngine(serialized_engine.c_str(), serialized_engine.size()); // Easy way to get a unique name for each engine, maybe there is a more descriptive way (using something associated with the graph maybe) id = reinterpret_cast<EngineID>(cuda_engine); + exec_ctx = cuda_engine->createExecutionContext(); uint64_t inputs = 0; @@ -40,7 +60,28 @@ TRTEngine& TRTEngine::operator=(const TRTEngine& other) { return (*this); } +// TODO: Implement a call method +// c10::List<at::Tensor> TRTEngine::Run(c10::List<at::Tensor> inputs) { +// auto input_vec = inputs.vec(); +// auto output_vec = RunCudaEngine(exec_ctx, num_io, input_vec); +// +// return c10::List<at::Tensor>(output_vec); +// } + +static auto TRTORCH_UNUSED TRTEngineTSRegistrtion = torch::class_<TRTEngine>("tensorrt", "Engine") + .def(torch::init<std::string>()) + // TODO: .def("__call__", &TRTEngine::Run) + // TODO: .def("run", &TRTEngine::Run) + .def_pickle( + [](const c10::intrusive_ptr<TRTEngine>& self) -> std::string { + auto 
serialized_engine = self->cuda_engine->serialize(); + return std::string((const char*)serialized_engine->data(), serialized_engine->size()); + }, + [](std::string seralized_engine) -> c10::intrusive_ptr<TRTEngine> { + return c10::make_intrusive<TRTEngine>(std::move(seralized_engine)); + } + ); + } // namespace execution } // namespace core } // namespace trtorch - diff --git a/core/execution/TRTEngineManager.cpp b/core/execution/TRTEngineManager.cpp deleted file mode 100644 index 27a6aeff28..0000000000 --- a/core/execution/TRTEngineManager.cpp +++ /dev/null @@ -1,82 +0,0 @@ -#include "core/util/prelude.h" -#include "core/execution/execution.h" - -namespace trtorch { -namespace core { -namespace execution { -namespace { -class TRTEngineManager { -public: - TRTEngineManager() - : logger_("[TRTorch Execution Manager] - ", - util::logging::get_logger().get_reportable_severity(), - util::logging::get_logger().get_is_colored_output_on()) { - } - - TRTEngine* get_engine(EngineID uid) { - auto iter = engine_registry_.find(uid); - - TRTORCH_ASSERT(iter != engine_registry_.end(), "Unabled to find requested engine (ID: " << uid << ") in TensorRT Execution Manager"); - - return &(iter->second); - } - - // TODO: Should we have standing engines ready to run or should we be creating execution contexts JIT? 
- EngineID register_engine(std::string& serialized_engine) { - auto engine = TRTEngine(logger_, serialized_engine); - EngineID uid = engine.id; - engine_registry_[uid] = std::move(engine); - LOG_DEBUG(logger_, "Registering new engine (ID: " << std::hex << uid << ") in TensorRT Execution Manager"); - return uid; - } - - void deregister_engine(EngineID uid) { - auto iter = engine_registry_.find(uid); - TRTORCH_ASSERT(iter != engine_registry_.end(), "Unabled to find requested engine (ID: " << uid << ") in TensorRT Execution Manager"); - - auto engine = iter->second; - // Doing this here since for some reason the destructor causes segfaults - engine.exec_ctx->destroy(); - engine.cuda_engine->destroy(); - engine_registry_.erase(uid); - } - -private: - util::logging::TRTorchLogger logger_; - std::unordered_map<EngineID, TRTEngine> engine_registry_; -}; - -TRTEngineManager& get_engine_manager() { - static TRTEngineManager engine_man; - return engine_man; -} -} // namespace - -uint64_t RegisterEngineFromSerializedEngine(std::string& serialized_engine) { - return get_engine_manager().register_engine(serialized_engine); -} - -nvinfer1::ICudaEngine* GetCudaEngine(EngineID id) { - // Assuming exception will be thrown inside the manager if there is no corresponding engine - return get_engine_manager().get_engine(id)->cuda_engine; -} - -nvinfer1::IExecutionContext* GetExecCtx(EngineID id) { - // Assuming exception will be thrown inside the manager if there is no corresponding engine - return get_engine_manager().get_engine(id)->exec_ctx; -} - -std::pair<uint64_t, uint64_t> GetEngineIO(EngineID id) { - // Assuming exception will be thrown inside the manager if there is no corresponding engine - return get_engine_manager().get_engine(id)->num_io; -} - -void DeregisterEngine(EngineID id) { - get_engine_manager().deregister_engine(id); -} - -} // namespace execution -} // namespace core -} // namespace trtorch - - diff --git a/core/execution/execution.h b/core/execution/execution.h 
index 8c50dd4207..9b0ca41cb4 100644 --- a/core/execution/execution.h +++ b/core/execution/execution.h @@ -2,6 +2,9 @@ #include <utility> #include "NvInfer.h" #include "ATen/core/function_schema.h" +#include "torch/custom_class.h" +#include "core/util/prelude.h" + namespace trtorch { namespace core { @@ -9,25 +12,25 @@ namespace execution { using EngineID = int64_t; -struct TRTEngine { +struct TRTEngine : torch::CustomClassHolder { // Each engine needs it's own runtime object nvinfer1::IRuntime* rt; nvinfer1::ICudaEngine* cuda_engine; nvinfer1::IExecutionContext* exec_ctx; std::pair<uint64_t, uint64_t> num_io; EngineID id; + std::string name; + util::logging::TRTorchLogger logger; TRTEngine() = default; - TRTEngine(nvinfer1::ILogger& logger, std::string& serialized_engine); + TRTEngine(std::string serialized_engine); + TRTEngine(std::string mod_name, std::string serialized_engine); TRTEngine& operator=(const TRTEngine& other); + // TODO: Implement a call method + //c10::List<at::Tensor> Run(c10::List<at::Tensor> inputs); }; -void RegisterEngineOp(TRTEngine& engine); -uint64_t RegisterEngineFromSerializedEngine(std::string& serialized_engine); -nvinfer1::ICudaEngine* GetCudaEngine(EngineID id); -nvinfer1::IExecutionContext* GetExecCtx(EngineID id); -std::pair<uint64_t, uint64_t> GetEngineIO(EngineID id); -void DeregisterEngine(EngineID id); +std::vector<at::Tensor> RunCudaEngine(nvinfer1::IExecutionContext* ctx, std::pair<uint64_t, uint64_t> io, std::vector<at::Tensor>& inputs); } // namespace execution } // namespace core diff --git a/core/execution/register_trt_op.cpp b/core/execution/register_trt_op.cpp index d9f57452dc..b7c10912be 100644 --- a/core/execution/register_trt_op.cpp +++ b/core/execution/register_trt_op.cpp @@ -9,7 +9,6 @@ namespace trtorch { namespace core { namespace execution { -namespace { std::vector<at::Tensor> RunCudaEngine(nvinfer1::IExecutionContext* ctx, std::pair<uint64_t, uint64_t> io, std::vector<at::Tensor>& inputs) { std::vector<void*> 
gpu_handles; @@ -47,6 +46,7 @@ std::vector<at::Tensor> RunCudaEngine(nvinfer1::IExecutionContext* ctx, std::pai return outputs; } +namespace { c10::AliasAnalysisKind aliasAnalysisFromSchema() { return c10::AliasAnalysisKind::FROM_SCHEMA; } @@ -54,27 +54,19 @@ c10::AliasAnalysisKind aliasAnalysisFromSchema() { // Switched to a global operator because op implementations need to be non-capturing lambdas in PYT 1.5.0+ torch::jit::RegisterOperators jit_registry({ torch::jit::Operator( - "trt::execute_engine(int id, ...) -> ...", + "trt::execute_engine(Tensor[] inputs, __torch__.torch.classes.tensorrt.Engine engine) -> Tensor[]", [](torch::jit::Stack& stack) -> int { - size_t num_inputs = torch::jit::pop(stack).toInt(); // Verify calling convention (right to left or left to right) - std::vector<at::Tensor> inputs; - for (uint64_t i = 0; i < num_inputs - 1; i++) { - at::Tensor in; - torch::jit::pop(stack, in); - inputs.insert(inputs.begin(), std::move(in)); - } + auto engine = torch::jit::pop(stack).toCustomClass<TRTEngine>(); + LOG_DEBUG("Attempting to run engine (ID: " << std::hex << engine->name << ")"); + + auto inputs = torch::jit::pop(stack).toTensorVector(); - int64_t id = torch::jit::pop(stack).toInt(); - LOG_DEBUG("Attempting to run engine (ID: " << std::hex << id << ")"); - auto io = GetEngineIO(id); - auto num_out = io.second; + auto io = engine->num_io; - auto ctx = GetExecCtx(id); + auto ctx = engine->exec_ctx; auto outputs = RunCudaEngine(ctx, io, inputs); - for (uint64_t o = 0; o < num_out; o++) { - torch::jit::push(stack, std::move(outputs[o])); - } + torch::jit::push(stack, std::move(outputs)); return 0; }, aliasAnalysisFromSchema()) diff --git a/cpp/api/include/trtorch/ptq.h b/cpp/api/include/trtorch/ptq.h index afae26a85c..ce59395b4c 100644 --- a/cpp/api/include/trtorch/ptq.h +++ b/cpp/api/include/trtorch/ptq.h @@ -104,18 +104,17 @@ class Int8Calibrator : Algorithm { std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; 
logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file(cache_file_path_, std::ios::binary); - cache_file >> std::noskipws; - if (cache_file.good()) { - std::copy(std::istream_iterator<char>(cache_file), - std::istream_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? cache_.data() : nullptr; } return nullptr; } @@ -220,23 +219,17 @@ class Int8CacheCalibrator : Algorithm { std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file; - cache_file.open(cache_file_path_, std::ios::in | std::ios::binary); - cache_file.unsetf(std::ios::skipws); - cache_file.seekg(0, std::ios::beg); - cache_.reserve(cache_file.tellg()); - cache_file.seekg(0, std::ios::beg); - if (cache_file.good()) { - std::cout << "Trying to read cache" << std::endl; - std::copy(std::istreambuf_iterator<char>(cache_file), - std::istreambuf_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? 
cache_.data() : nullptr; } diff --git a/cpp/api/include/trtorch/trtorch.h b/cpp/api/include/trtorch/trtorch.h index 9b3f98e355..8f26e0bd8f 100644 --- a/cpp/api/include/trtorch/trtorch.h +++ b/cpp/api/include/trtorch/trtorch.h @@ -142,6 +142,14 @@ struct TRTORCH_API ExtraInfo { * @return false */ constexpr bool operator==(DataType other) const { return value == other.value; } + /** + * @brief Comparision operator for DataType + * + * @param other + * @return true + * @return false + */ + constexpr bool operator==(DataType::Value other) const { return value == other; } /** * @brief Comparision operator for DataType * @@ -150,6 +158,14 @@ struct TRTORCH_API ExtraInfo { * @return false */ constexpr bool operator!=(DataType other) const { return value != other.value; } + /** + * @brief Comparision operator for DataType + * + * @param other + * @return true + * @return false + */ + constexpr bool operator!=(DataType::Value other) const { return value != other; } private: Value value; }; diff --git a/cpp/trtorchc/BUILD b/cpp/trtorchc/BUILD new file mode 100644 index 0000000000..7fa89836f5 --- /dev/null +++ b/cpp/trtorchc/BUILD @@ -0,0 +1,14 @@ +package(default_visibility = ["//visibility:public"]) + +cc_binary( + name = "trtorchc", + srcs = [ + "main.cpp" + ], + deps = [ + "@libtorch//:libtorch", + "@libtorch//:caffe2", + "//third_party/args", + "//cpp/api:trtorch" + ], +) diff --git a/cpp/trtorchc/README.md b/cpp/trtorchc/README.md new file mode 100644 index 0000000000..25a59efb27 --- /dev/null +++ b/cpp/trtorchc/README.md @@ -0,0 +1,87 @@ +# trtorhc + +trtorchc is a compiler CLI application using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use). 
The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + +All that is required to run the program after compilation is for C++ linking against libtrtorch.so +or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with `torch.jit.load()` and run like you would run any other module. + + +``` +trtorchc [input_file_path] [output_file_path] + [input_shapes...] {OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + + OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to 
select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be >= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options +``` + +e.g. 
+``` +trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 +``` \ No newline at end of file diff --git a/cpp/trtorchc/main.cpp b/cpp/trtorchc/main.cpp new file mode 100644 index 0000000000..5dab59a4ea --- /dev/null +++ b/cpp/trtorchc/main.cpp @@ -0,0 +1,366 @@ +#include <iostream> +#include <sstream> +#include <stdlib.h> +#include <unistd.h> + +#ifdef linux +#include <linux/limits.h> +#else +#define PATH_MAX 260 +#endif + +#include "NvInfer.h" +#include "third_party/args/args.hpp" +#include "torch/torch.h" +#include "torch/script.h" +#include "trtorch/trtorch.h" + +bool checkRtol(const at::Tensor& diff, const std::vector<at::Tensor> inputs, float threshold) { + double maxValue = 0.0; + for (auto& tensor : inputs) { + maxValue = fmax(tensor.abs().max().item<float>(), maxValue); + } + trtorch::logging::log(trtorch::logging::Level::kDEBUG, std::string("Max Difference: ") + std::to_string(diff.abs().max().item<float>())); + trtorch::logging::log(trtorch::logging::Level::kDEBUG, std::string("Acceptable Threshold: ") + std::to_string(threshold)); + return diff.abs().max().item<float>() <= threshold * maxValue; +} + +bool almostEqual(const at::Tensor& a, const at::Tensor& b, float threshold) { + return checkRtol(a - b, {a, b}, threshold); +} + +std::vector<int64_t> parseSingleDim(std::string shape_str) { + std::vector<int64_t> shape; + std::stringstream ss; + for (auto c : shape_str) { + if (c == '(' || c == ' ') { + continue; + } else if (c == ',') { + int64_t dim; + ss >> dim; + shape.push_back(dim); + ss.clear(); + } else if (c == ')') { + int64_t dim; + ss >> dim; + shape.push_back(dim); + ss.clear(); + return shape; + } else { + ss << c; + } + } + + trtorch::logging::log(trtorch::logging::Level::kERROR, "Shapes need dimensions delimited by comma in parentheses, \"(N,..,C,H,W)\"\n e.g \"(3,3,200,200)\""); + exit(1); + return {}; +} + +trtorch::ExtraInfo::InputRange parseDynamicDim(std::string shape_str) { + 
shape_str = shape_str.substr(1, shape_str.size() - 2); + std::vector<std::vector<int64_t>> shape; + std::stringstream ss; + + std::string delimiter = ";"; + + size_t pos = 0; + while ((pos = shape_str.find(delimiter)) != std::string::npos) { + auto token = shape_str.substr(0, pos); + auto range = parseSingleDim(token); + shape_str.erase(0, pos + delimiter.length()); + shape.push_back(range); + } + + auto range = parseSingleDim(shape_str); + shape.push_back(range); + + if (shape.size() != 3) { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Dynamic shapes need three sets of dimensions delimited by semi-colons, \"[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]\"\n e.g \"[(3,3,100,100);(3,3,200,200);(3,3,300,300)]\""); + exit(1); + } + + return trtorch::ExtraInfo::InputRange(shape[0], shape[1], shape[2]); +} + +std::string get_cwd() { + char buff[FILENAME_MAX]; //create string buffer to hold path + if (getcwd(buff, FILENAME_MAX)) { + std::string current_working_dir(buff); + return current_working_dir; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Unable to get current directory"); + exit(1); + } +} + +std::string real_path(std::string path) { + auto abs_path = path; + char real_path_c[PATH_MAX]; + char* res = realpath(abs_path.c_str(), real_path_c); + if (res) { + return std::string(real_path_c); + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, std::string("Unable to find file ") + abs_path); + exit(1); + } +} + +std::string resolve_path(std::string path) { + auto rpath = path; + if (!(rpath.rfind("/", 0) == 0)) { + rpath = get_cwd() + '/' + rpath; + } + return rpath; +} + +int main(int argc, char** argv) { + trtorch::logging::set_is_colored_output_on(true); + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kWARNING); + trtorch::logging::set_logging_prefix(""); + + + args::ArgumentParser parser("TRTorch is a compiler for TorchScript, it will compile and 
optimize TorchScript programs to run on NVIDIA GPUs using TensorRT", ""); + args::HelpFlag help(parser, "help", "Display this help menu", {'h', "help"}); + + args::Group group(parser, "Verbiosity of the compiler", args::Group::Validators::AtMostOne); + args::Flag verbose(group, "verbose", "Dumps debugging information about the compilation process onto the console", {'v', "verbose"}); + args::Flag warning(group, "warning", "Disables warnings generated during compilation onto the console (warnings are on by default)", {'w', "warnings"}); + args::Flag info(group, "info", "Dumps info messages generated during compilation onto the console", {"i", "info"}); + + args::Flag build_debuggable_engine(parser, "build-debuggable-engine", "Creates a debuggable engine", {"build-debuggable-engine"}); + args::Flag use_strict_types(parser, "use-strict-types", "Restrict operating type to only use set default operation precision (op_precision)", {"use-strict-types"}); + args::Flag allow_gpu_fallback(parser, "allow-gpu-fallback", "(Only used when targeting DLA (device-type)) Lets engine run layers on GPU if they are not supported on DLA", {"allow-gpu-fallback"}); + + args::ValueFlag<std::string> op_precision(parser, "precision", "Default operating precision for the engine (Int8 requires a calibration-cache argument) [ float | float32 | f32 | half | float16 | f16 | int8 | i8 ] (default: float)", {'p', "default-op-precision"}); + args::ValueFlag<std::string> device_type(parser, "type", "The type of device the engine should be built for [ gpu | dla ] (default: gpu)", {'d', "device-type"}); + args::ValueFlag<std::string> engine_capability(parser, "capability", "The type of device the engine should be built for [ default | safe_gpu | safe_dla ]", {"engine-capability"}); + + args::ValueFlag<std::string> calibration_cache_file(parser, "file_path", "Path to calibration cache file to use for post training quantization", {"calibration-cache-file"}); + args::ValueFlag<int> 
num_min_timing_iters(parser, "num_iters", "Number of minimization timing iterations used to select kernels", {"num-min-timing-iter"}); + args::ValueFlag<int> num_avg_timing_iters(parser, "num_iters", "Number of averaging timing iterations used to select kernels", {"num-avg-timing-iters"}); + args::ValueFlag<int> workspace_size(parser, "workspace_size", "Maximum size of workspace given to TensorRT", {"workspace-size"}); + args::ValueFlag<int> max_batch_size(parser, "max_batch_size", "Maximum batch size (must be >= 1 to be set, 0 means not set)", {"max-batch-size"}); + args::ValueFlag<double> threshold(parser, "threshold", "Maximum acceptable numerical deviation from standard torchscript output (default 2e-5)", {'t', "threshold"}); + + + args::Flag save_engine(parser, "save_engine", "Instead of compiling a full a TorchScript program, save the created engine to the path specified as the output path", {"save-engine"}); + args::Positional<std::string> input_path(parser, "input_file_path", "Path to input TorchScript file"); + args::Positional<std::string> output_path(parser, "output_file_path", "Path for compiled TorchScript (or TensorRT engine) file"); + args::PositionalList<std::string> input_shapes(parser, "input_shapes", "Sizes for inputs to engine, can either be a single size or a range defined by Min, Optimal, Max sizes, e.g. 
\"(N,..,C,H,W)\" \"[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]\""); + + + try + { + parser.ParseCLI(argc, argv); + } + catch (args::Help) + { + std::cout << parser; + return 0; + } + catch (args::ParseError e) + { + std::cerr << e.what() << std::endl; + std::cerr << parser; + return 1; + } + catch (args::ValidationError e) + { + std::cerr << e.what() << std::endl; + std::cerr << parser; + return 1; + } + + if (verbose) { + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kDEBUG); + } else if (info) { + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kINFO); + } else if (warning) { + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kERROR); + } + + + std::vector<trtorch::ExtraInfo::InputRange> ranges; + for (const auto shapes : args::get(input_shapes)) { + if (shapes.rfind("(", 0) == 0) { + ranges.push_back(trtorch::ExtraInfo::InputRange(parseSingleDim(shapes))); + } else if (shapes.rfind("[", 0) == 0) { + ranges.push_back(parseDynamicDim(shapes)); + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Dimensions should be specified in one of these types \"(N,..,C,H,W)\" \"[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]\"\n e.g \"(3,3,300,300)\" \"[(3,3,100,100);(3,3,200,200);(3,3,300,300)]\""); + std::cerr << parser; + exit(1); + } + } + + auto compile_settings = trtorch::ExtraInfo(ranges); + + if (build_debuggable_engine) { + compile_settings.debug = true; + } + + if (use_strict_types) { + compile_settings.strict_types = true; + } + + if (allow_gpu_fallback) { + compile_settings.allow_gpu_fallback = true; + } + + std::string calibration_cache_file_path = ""; + if (calibration_cache_file) { + calibration_cache_file_path = resolve_path(args::get(calibration_cache_file)); + } + + auto calibrator = trtorch::ptq::make_int8_cache_calibrator(calibration_cache_file_path); + + if (op_precision) { + auto precision = 
args::get(op_precision); + std::transform(precision.begin(), precision.end(), precision.begin(), [](unsigned char c){ return std::tolower(c); }); + if (precision == "float" || precision == "float32" || precision == "f32") { + compile_settings.op_precision = torch::kF32; + } else if (precision == "half" || precision == "float16" || precision == "f16") { + compile_settings.op_precision = torch::kF16; + } else if (precision == "int8" || precision == "i8") { + compile_settings.op_precision = torch::kI8; + if (calibration_cache_file) { + compile_settings.ptq_calibrator = calibrator; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "If targeting INT8 default operating precision with trtorchc, a calibration cache file must be provided"); + std::cerr << parser; + return 1; + } + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Invalid default operating precision, options are [ float | float32 | f32 | half | float16 | f16 | int8 | i8 ]"); + std::cerr << parser; + return 1; + } + } + + if (device_type) { + auto device = args::get(device_type); + std::transform(device.begin(), device.end(), device.begin(), [](unsigned char c){ return std::tolower(c); }); + if (device == "gpu") { + compile_settings.device = trtorch::ExtraInfo::DeviceType::kGPU; + } else if (device == "dla") { + compile_settings.device = trtorch::ExtraInfo::DeviceType::kDLA; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Invalid device type, options are [ gpu | dla ]"); + std::cerr << parser; + return 1; + } + } + + if (engine_capability) { + auto capability = args::get(engine_capability); + std::transform(capability.begin(), capability.end(), capability.begin(), [](unsigned char c){ return std::tolower(c); }); + if (capability == "default") { + compile_settings.capability = trtorch::ExtraInfo::EngineCapability::kDEFAULT; + } else if (capability == "safe_gpu") { + compile_settings.capability = trtorch::ExtraInfo::EngineCapability::kSAFE_GPU; + } else if 
(capability == "safe_dla") { + compile_settings.capability = trtorch::ExtraInfo::EngineCapability::kSAFE_DLA; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Invalid engine capability, options are [ default | safe_gpu | safe_dla ]"); + std::cerr << parser; + return 1; + } + } + + if (num_min_timing_iters) { + compile_settings.num_min_timing_iters = args::get(num_min_timing_iters); + } + + if (num_avg_timing_iters) { + compile_settings.num_avg_timing_iters = args::get(num_avg_timing_iters); + } + + if (workspace_size) { + compile_settings.workspace_size = args::get(workspace_size); + } + + if (max_batch_size) { + compile_settings.max_batch_size = args::get(max_batch_size); + } + + auto real_input_path = resolve_path(args::get(input_path)); + auto real_output_path = resolve_path(args::get(output_path)); + + torch::jit::Module mod; + try { + // Deserialize the ScriptModule from a file using torch::jit::load(). + mod = torch::jit::load(real_input_path); + } + catch (const c10::Error& e) { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Error loading the model (path may be incorrect)"); + std::cerr << parser; + return 1; + } + + if (!trtorch::CheckMethodOperatorSupport(mod, "forward")) { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Module is not currently supported by TRTorch"); + return 1; + } + + if (save_engine) { + auto engine = trtorch::ConvertGraphToTRTEngine(mod, "forward", compile_settings); + std::ofstream out(real_output_path); + out << engine; + out.close(); + } else { + auto trt_mod = trtorch::CompileGraph(mod, compile_settings); + + if (compile_settings.op_precision == trtorch::ExtraInfo::DataType::kFloat) { + double threshold_val = 2e-5; + if (threshold) { + threshold_val = args::get(threshold); + } + + std::vector<torch::jit::IValue> jit_inputs_ivalues; + std::vector<torch::jit::IValue> trt_inputs_ivalues; + + for (auto i : ranges) { + auto in = at::randn(i.opt, {at::kCUDA}); + 
jit_inputs_ivalues.push_back(in.clone()); + trt_inputs_ivalues.push_back(in.clone()); + } + + torch::jit::IValue jit_results_ivalues = mod.forward(jit_inputs_ivalues); + std::vector<at::Tensor> jit_results; + if (jit_results_ivalues.isTensor()) { + jit_results.push_back(jit_results_ivalues.toTensor()); + } else { + auto results = jit_results_ivalues.toTuple()->elements(); + for (auto r : results) { + jit_results.push_back(r.toTensor()); + } + } + + + torch::jit::IValue trt_results_ivalues = trt_mod.forward(trt_inputs_ivalues); + std::vector<at::Tensor> trt_results; + if (trt_results_ivalues.isTensor()) { + trt_results.push_back(trt_results_ivalues.toTensor()); + } else { + auto results = trt_results_ivalues.toTuple()->elements(); + for (auto r : results) { + trt_results.push_back(r.toTensor()); + } + } + + for (size_t i = 0; i < trt_results.size(); i++) { + if (!almostEqual(jit_results[i], trt_results[i].reshape_as(jit_results[i]), threshold_val)) { + std::ostringstream threshold_ss; + threshold_ss << threshold_val; + trtorch::logging::log(trtorch::logging::Level::kWARNING, std::string("Maximum numerical deviation for output exceeds set threshold (") + threshold_ss.str() + std::string(")")); + } + } + } else { + trtorch::logging::log(trtorch::logging::Level::kWARNING, "Due to change in operating data type, numerical precision is not checked"); + } + + trt_mod.save(real_output_path); + } + + return 0; +} \ No newline at end of file diff --git a/cpp/trtorchexec/main.cpp b/cpp/trtorchexec/main.cpp index 2085928b6f..8b3e114e62 100644 --- a/cpp/trtorchexec/main.cpp +++ b/cpp/trtorchexec/main.cpp @@ -38,6 +38,7 @@ int main(int argc, const char* argv[]) { } mod.to(at::kCUDA); + mod.eval(); std::vector<std::vector<int64_t>> dims; for (int i = 2; i < argc; i++) { @@ -92,7 +93,7 @@ int main(int argc, const char* argv[]) { std::cout << "Running TRT module" << std::endl; torch::jit::IValue trt_results_ivalues = trt_mod.forward(trt_inputs_ivalues); std::vector<at::Tensor> 
trt_results; - if (trt_results_ivalues.isTensor()) { + if (trt_results_ivalues.isTensor()) { trt_results.push_back(trt_results_ivalues.toTensor()); } else { auto results = trt_results_ivalues.toTuple()->elements(); @@ -106,5 +107,8 @@ int main(int argc, const char* argv[]) { } std::cout << "Converted Engine saved to /tmp/engine_converted_from_jit.trt" << std::endl; + + trt_mod.save("/tmp/ts_trt.ts"); + std::cout << "Compiled TorchScript program saved to /tmp/ts_trt.ts" << std::endl; std::cout << "ok\n"; } diff --git a/docs/._index.html b/docs/._index.html new file mode 100644 index 0000000000..e9528f4621 Binary files /dev/null and b/docs/._index.html differ diff --git a/docs/_cpp_api/class_view_hierarchy.html b/docs/_cpp_api/class_view_hierarchy.html index fbdc65da2f..f85121680e 100644 --- a/docs/_cpp_api/class_view_hierarchy.html +++ b/docs/_cpp_api/class_view_hierarchy.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html b/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html index 3bb12ab4c5..c87d34fbc9 100644 --- a/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html +++ b/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -1029,6 +1034,102 @@ <h2 id="class-documentation"> </dl> </dd> </dl> + <dl class="cpp function"> + <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + </span> + <span id="_CPPv2NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + </span> + 
<span id="trtorch::ExtraInfo::DataType::eq-operator__DataType::ValueCCE"> + </span> + <span class="target" id="classtrtorch_1_1ExtraInfo_1_1DataType_1a61d6f3e6a3929edec1d3659330b8297d"> + </span> + <em class="property"> + constexpr + </em> + bool + <code class="sig-name descname"> + operator== + </code> + <span class="sig-paren"> + ( + </span> + <a class="reference internal" href="#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> + DataType + </a> + :: + <a class="reference internal" href="#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE" title="trtorch::ExtraInfo::DataType::Value"> + Value + </a> + <em> + other + </em> + <span class="sig-paren"> + ) + </span> + <em class="property"> + const + </em> + <a class="headerlink" href="#_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE" title="Permalink to this definition"> + ¶ + </a> + <br/> + </dt> + <dd> + <p> + Comparision operator for + <a class="reference internal" href="structtrtorch_1_1ExtraInfo.html#classtrtorch_1_1ExtraInfo_1_1DataType"> + <span class="std std-ref"> + DataType + </span> + </a> + . 
+ </p> + <p> + </p> + <dl class="simple"> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + true + </p> + </dd> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + false + </p> + </dd> + <dt> + <strong> + Parameters + </strong> + </dt> + <dd> + <ul class="breatheparameterlist simple"> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + other + </span> + </code> + : + </p> + </li> + </ul> + </dd> + </dl> + </dd> + </dl> <dl class="cpp function"> <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"> <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeneE8DataType"> @@ -1121,6 +1222,102 @@ <h2 id="class-documentation"> </dl> </dd> </dl> + <dl class="cpp function"> + <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + </span> + <span id="_CPPv2NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + </span> + <span id="trtorch::ExtraInfo::DataType::neq-operator__DataType::ValueCCE"> + </span> + <span class="target" id="classtrtorch_1_1ExtraInfo_1_1DataType_1afac08806f8da094821031a3bf0ee5fa7"> + </span> + <em class="property"> + constexpr + </em> + bool + <code class="sig-name descname"> + operator!= + </code> + <span class="sig-paren"> + ( + </span> + <a class="reference internal" href="#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> + DataType + </a> + :: + <a class="reference internal" href="#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE" title="trtorch::ExtraInfo::DataType::Value"> + Value + </a> + <em> + other + </em> + <span class="sig-paren"> + ) + </span> + <em class="property"> + const + </em> + <a class="headerlink" href="#_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE" title="Permalink to this definition"> + ¶ + </a> + <br/> + </dt> + <dd> + <p> + Comparision operator for + <a class="reference internal" href="structtrtorch_1_1ExtraInfo.html#classtrtorch_1_1ExtraInfo_1_1DataType"> + <span 
class="std std-ref"> + DataType + </span> + </a> + . + </p> + <p> + </p> + <dl class="simple"> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + true + </p> + </dd> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + false + </p> + </dd> + <dt> + <strong> + Parameters + </strong> + </dt> + <dd> + <ul class="breatheparameterlist simple"> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + other + </span> + </code> + : + </p> + </li> + </ul> + </dd> + </dl> + </dd> + </dl> </div> </dd> </dl> diff --git a/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html b/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html index 2448917b3b..9307df8de1 100644 --- a/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html +++ b/docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html index f597b95aab..034a7ad417 100644 --- a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html +++ b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html index fa3ae5e34f..b116598b13 100644 --- a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html +++ b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li 
class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html b/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html index b60ed74d7c..baab8f4257 100644 --- a/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html +++ b/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html b/docs/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html index 26e7ea5e09..4343e11651 100644 --- a/docs/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html +++ b/docs/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html b/docs/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html index 86ad8f94e7..3d633d42c2 100644 --- a/docs/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html +++ b/docs/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git 
a/docs/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html b/docs/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html index 17cd191e37..e66b846828 100644 --- a/docs/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html +++ b/docs/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html b/docs/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html index 92e0b92577..70bab59633 100644 --- a/docs/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html +++ b/docs/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html b/docs/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html index f7af8b6d00..cface47f06 100644 --- a/docs/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html +++ b/docs/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html b/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html index adffa0adbb..c235e0d9f1 100644 --- 
a/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html +++ b/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html @@ -300,6 +300,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html b/docs/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html index a37e731f3d..71be6933d1 100644 --- a/docs/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html +++ b/docs/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/dir_cpp.html b/docs/_cpp_api/dir_cpp.html index c9e4c2ea38..aa3f7ff7a7 100644 --- a/docs/_cpp_api/dir_cpp.html +++ b/docs/_cpp_api/dir_cpp.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/dir_cpp_api.html b/docs/_cpp_api/dir_cpp_api.html index 7fbc4e9dc3..347bd9aec6 100644 --- a/docs/_cpp_api/dir_cpp_api.html +++ b/docs/_cpp_api/dir_cpp_api.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/dir_cpp_api_include.html b/docs/_cpp_api/dir_cpp_api_include.html index 5e429a64c4..d0dfe41b75 100644 --- 
a/docs/_cpp_api/dir_cpp_api_include.html +++ b/docs/_cpp_api/dir_cpp_api_include.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/dir_cpp_api_include_trtorch.html b/docs/_cpp_api/dir_cpp_api_include_trtorch.html index 21764a1793..f62d859e63 100644 --- a/docs/_cpp_api/dir_cpp_api_include_trtorch.html +++ b/docs/_cpp_api/dir_cpp_api_include_trtorch.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html b/docs/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html index 542e81728f..129e059169 100644 --- a/docs/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html +++ b/docs/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html b/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html index 31065539ec..04ade4a022 100644 --- a/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html +++ b/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -666,6 +671,15 @@ <h2 id="included-by"> </a> 
</h2> <ul class="simple"> + <li> + <p> + <a class="reference internal" href="file_cpp_api_include_trtorch_ptq.h.html#file-cpp-api-include-trtorch-ptq-h"> + <span class="std std-ref"> + File ptq.h + </span> + </a> + </p> + </li> <li> <p> <a class="reference internal" href="file_cpp_api_include_trtorch_trtorch.h.html#file-cpp-api-include-trtorch-trtorch-h"> diff --git a/docs/_cpp_api/file_cpp_api_include_trtorch_macros.h.html b/docs/_cpp_api/file_cpp_api_include_trtorch_macros.h.html index 15a55ee514..08496d4df3 100644 --- a/docs/_cpp_api/file_cpp_api_include_trtorch_macros.h.html +++ b/docs/_cpp_api/file_cpp_api_include_trtorch_macros.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html b/docs/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html index 4fa2f78fe5..2523a931e6 100644 --- a/docs/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html +++ b/docs/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -657,6 +662,22 @@ <h2 id="includes"> </code> </p> </li> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + trtorch/logging.h + </span> + </code> + ( + <a class="reference internal" href="file_cpp_api_include_trtorch_logging.h.html#file-cpp-api-include-trtorch-logging-h"> + <span class="std std-ref"> + File logging.h + </span> + </a> + ) + </p> + </li> <li> <p> <code class="docutils literal notranslate"> diff --git a/docs/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html b/docs/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html index 
195f6874ff..aebe61a64f 100644 --- a/docs/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html +++ b/docs/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/file_view_hierarchy.html b/docs/_cpp_api/file_view_hierarchy.html index 7032b84fb8..c78cea8757 100644 --- a/docs/_cpp_api/file_view_hierarchy.html +++ b/docs/_cpp_api/file_view_hierarchy.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html b/docs/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html index 0f8843cbdd..05e7dc9113 100644 --- a/docs/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html +++ b/docs/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html b/docs/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html index 114f9718ad..64a730c483 100644 --- a/docs/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html +++ b/docs/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li 
class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html b/docs/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html index d380324c31..24130faded 100644 --- a/docs/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html +++ b/docs/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html b/docs/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html index 88d8902db6..1691a73dcf 100644 --- a/docs/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html +++ b/docs/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html b/docs/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html index 985f30ec38..f2768d2b4c 100644 --- a/docs/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html +++ b/docs/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html 
b/docs/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html index c5e37b48fc..f939b71ee0 100644 --- a/docs/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html +++ b/docs/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html b/docs/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html index 67a76c7ef1..0f0febae9b 100644 --- a/docs/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html +++ b/docs/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html b/docs/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html index 604ba98513..a732856cd5 100644 --- a/docs/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html +++ b/docs/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.html b/docs/_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.html index 5cd9ff7479..7a4cd7291b 100644 --- 
a/docs/_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.html +++ b/docs/_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html b/docs/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html index f415c4dbbb..4a3777ac7e 100644 --- a/docs/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html +++ b/docs/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html b/docs/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html index 98ee58c07b..b64edced5a 100644 --- a/docs/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html +++ b/docs/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html b/docs/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html index 11de089949..445ad9224a 100644 --- a/docs/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html +++ b/docs/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html @@ -301,6 +301,11 @@ </li> </ul> 
</li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html b/docs/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html index a5b0c66cc7..a75b7ee703 100644 --- a/docs/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html +++ b/docs/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html b/docs/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html index e8d434f4f4..59793d3c32 100644 --- a/docs/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html +++ b/docs/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/namespace_trtorch.html b/docs/_cpp_api/namespace_trtorch.html index bae0f7be6c..2a12108e0e 100644 --- a/docs/_cpp_api/namespace_trtorch.html +++ b/docs/_cpp_api/namespace_trtorch.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/namespace_trtorch__logging.html b/docs/_cpp_api/namespace_trtorch__logging.html index 
4a625f5b9e..4582223721 100644 --- a/docs/_cpp_api/namespace_trtorch__logging.html +++ b/docs/_cpp_api/namespace_trtorch__logging.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/namespace_trtorch__ptq.html b/docs/_cpp_api/namespace_trtorch__ptq.html index a31b22746d..05e742a6c4 100644 --- a/docs/_cpp_api/namespace_trtorch__ptq.html +++ b/docs/_cpp_api/namespace_trtorch__ptq.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html index 92d75d8ea7..51289e027e 100644 --- a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html index 7873a19d40..1a259290fd 100644 --- a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span 
class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html index 9eeffec98d..a6f6a84e77 100644 --- a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -512,6 +517,8 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="cp">#include</span> <span class="cpf">&lt;iostream&gt;</span><span class="cp"></span> <span class="cp">#include</span> <span class="cpf">&lt;sstream&gt;</span><span class="cp"></span> +<span class="cp">#include</span> <span class="cpf">"trtorch/logging.h"</span><span class="cp"></span> + <span class="cp">#ifndef DOXYGEN_SHOULD_SKIP_THIS</span> <span class="k">namespace</span> <span class="n">nvinfer1</span> <span class="p">{</span> <span class="k">class</span> <span class="nc">IInt8Calibrator</span><span class="p">;</span> @@ -519,9 +526,12 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="p">}</span> <span class="k">namespace</span> <span class="n">torch</span> <span class="p">{</span> -<span class="k">namespace</span> <span class="n">data</span> <span class="p">{</span> -<span class="k">template</span><span class="o">&lt;</span><span class="k">typename</span> <span class="n">Example</span><span class="o">&gt;</span> -<span class="k">class</span> <span class="nc">Iterator</span><span class="p">;</span> +<span class="k">class</span> <span class="nc">Tensor</span><span class="p">;</span> +<span class="p">}</span> + +<span class="k">namespace</span> <span 
class="n">trtorch</span> <span class="p">{</span> +<span class="k">namespace</span> <span class="n">ptq</span> <span class="p">{</span> +<span class="kt">bool</span> <span class="n">get_batch_impl</span><span class="p">(</span><span class="kt">void</span><span class="o">*</span> <span class="n">bindings</span><span class="p">[],</span> <span class="k">const</span> <span class="kt">char</span><span class="o">*</span> <span class="n">names</span><span class="p">[],</span> <span class="kt">int</span> <span class="n">nbBindings</span><span class="p">,</span> <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&amp;</span> <span class="n">data</span><span class="p">);</span> <span class="p">}</span> <span class="p">}</span> <span class="cp">#endif </span><span class="c1">//DOXYGEN_SHOULD_SKIP_THIS</span> @@ -535,7 +545,12 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="k">using</span> <span class="n">Batch</span> <span class="o">=</span> <span class="k">typename</span> <span class="n">DataLoader</span><span class="o">::</span><span class="n">super</span><span class="o">::</span><span class="n">BatchType</span><span class="p">;</span> <span class="k">public</span><span class="o">:</span> <span class="n">Int8Calibrator</span><span class="p">(</span><span class="n">DataLoaderUniquePtr</span> <span class="n">dataloader</span><span class="p">,</span> <span class="k">const</span> <span class="n">std</span><span class="o">::</span><span class="n">string</span><span class="o">&amp;</span> <span class="n">cache_file_path</span><span class="p">,</span> <span class="kt">bool</span> <span class="n">use_cache</span><span class="p">)</span> - <span class="o">:</span> <span class="n">dataloader_</span><span class="p">(</span><span class="n">dataloader</span><span class="p">.</span><span class="n">get</span><span class="p">()),</span> <span class="n">it_</span><span class="p">(</span><span 
class="n">dataloader_</span><span class="o">-&gt;</span><span class="n">end</span><span class="p">()),</span> <span class="n">cache_file_path_</span><span class="p">(</span><span class="n">cache_file_path</span><span class="p">),</span> <span class="n">use_cache_</span><span class="p">(</span><span class="n">use_cache</span><span class="p">)</span> <span class="p">{}</span> + <span class="o">:</span> <span class="n">dataloader_</span><span class="p">(</span><span class="n">dataloader</span><span class="p">.</span><span class="n">get</span><span class="p">()),</span> <span class="n">cache_file_path_</span><span class="p">(</span><span class="n">cache_file_path</span><span class="p">),</span> <span class="n">use_cache_</span><span class="p">(</span><span class="n">use_cache</span><span class="p">)</span> <span class="p">{</span> + <span class="k">for</span> <span class="p">(</span><span class="k">auto</span> <span class="nl">batch</span> <span class="p">:</span> <span class="o">*</span><span class="n">dataloader_</span><span class="p">)</span> <span class="p">{</span> + <span class="n">batched_data_</span><span class="p">.</span><span class="n">push_back</span><span class="p">(</span><span class="n">batch</span><span class="p">.</span><span class="n">data</span><span class="p">);</span> + <span class="p">}</span> + <span class="n">it_</span> <span class="o">=</span> <span class="n">batched_data_</span><span class="p">.</span><span class="n">begin</span><span class="p">();</span> + <span class="p">}</span> <span class="kt">int</span> <span class="n">getBatchSize</span><span class="p">()</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span> <span class="c1">// HACK: TRTorch only uses explict batch sizing, INT8 Calibrator does not</span> @@ -546,26 +561,15 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="p">}</span> <span class="kt">bool</span> <span class="n">getBatch</span><span 
class="p">(</span><span class="kt">void</span><span class="o">*</span> <span class="n">bindings</span><span class="p">[],</span> <span class="k">const</span> <span class="kt">char</span><span class="o">*</span> <span class="n">names</span><span class="p">[],</span> <span class="kt">int</span> <span class="n">nbBindings</span><span class="p">)</span> <span class="k">override</span> <span class="p">{</span> - <span class="c1">// HACK: doesnt seem like the first try in the initializer list works</span> - <span class="k">if</span> <span class="p">(</span><span class="o">!</span> <span class="n">it_created_</span><span class="p">)</span> <span class="p">{</span> - <span class="n">it_</span> <span class="o">=</span> <span class="n">dataloader_</span><span class="o">-&gt;</span><span class="n">begin</span><span class="p">();</span> - <span class="n">it_created_</span> <span class="o">=</span> <span class="nb">true</span><span class="p">;</span> - <span class="p">}</span> - - <span class="k">if</span> <span class="p">(</span><span class="n">it_</span> <span class="o">==</span> <span class="n">dataloader_</span><span class="o">-&gt;</span><span class="n">end</span><span class="p">())</span> <span class="p">{</span> + <span class="k">if</span> <span class="p">(</span><span class="n">it_</span> <span class="o">!=</span> <span class="n">batched_data_</span><span class="p">.</span><span class="n">end</span><span class="p">())</span> <span class="p">{</span> + <span class="k">auto</span> <span class="n">status</span> <span class="o">=</span> <span class="n">get_batch_impl</span><span class="p">(</span><span class="n">bindings</span><span class="p">,</span> <span class="n">names</span><span class="p">,</span> <span class="n">nbBindings</span><span class="p">,</span> <span class="o">*</span><span class="n">it_</span><span class="p">);</span> + <span class="n">it_</span> <span class="o">=</span> <span class="o">++</span><span class="n">it_</span><span class="p">;</span> + <span 
class="k">return</span> <span class="n">status</span><span class="p">;</span> + <span class="p">}</span> <span class="k">else</span> <span class="p">{</span> + <span class="c1">// Reset iterator if incase calibrator is going to be used again</span> + <span class="n">it_</span> <span class="o">=</span> <span class="n">batched_data_</span><span class="p">.</span><span class="n">begin</span><span class="p">();</span> <span class="k">return</span> <span class="nb">false</span><span class="p">;</span> <span class="p">}</span> - - <span class="k">auto</span> <span class="n">batch</span> <span class="o">=</span> <span class="o">*</span><span class="n">it_</span><span class="p">;</span> - - <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">nbBindings</span><span class="p">;</span> <span class="n">i</span><span class="o">++</span><span class="p">)</span> <span class="p">{</span> - <span class="k">auto</span> <span class="n">data</span> <span class="o">=</span> <span class="n">batch</span><span class="p">.</span><span class="n">data</span><span class="p">;</span> - <span class="n">data</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="n">to</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">kCUDA</span><span class="p">).</span><span class="n">contiguous</span><span class="p">();</span> - <span class="n">bindings</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="n">data_ptr</span><span class="p">();</span> - <span class="p">}</span> - - <span class="n">it_</span> <span class="o">=</span> <span class="o">++</span><span class="n">it_</span><span class="p">;</span> - <span 
class="k">return</span> <span class="nb">true</span><span class="p">;</span> <span class="p">}</span> <span class="k">const</span> <span class="kt">void</span><span class="o">*</span> <span class="n">readCalibrationCache</span><span class="p">(</span><span class="kt">size_t</span><span class="o">&amp;</span> <span class="n">length</span><span class="p">)</span> <span class="k">override</span> <span class="p">{</span> @@ -573,18 +577,17 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="n">std</span><span class="o">::</span><span class="n">stringstream</span> <span class="n">ss</span><span class="p">;</span> <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Reading Calibration Cache from "</span> <span class="o">&lt;&lt;</span> <span class="n">cache_file_path_</span><span class="p">;</span> <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kINFO</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">cache_</span><span class="p">.</span><span class="n">clear</span><span class="p">();</span> - <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">cache_file</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> - <span class="n">cache_file</span> <span class="o">&gt;&gt;</span> <span class="n">std</span><span class="o">::</span><span class="n">noskipws</span><span class="p">;</span> - <span class="k">if</span> <span class="p">(</span><span class="n">cache_file</span><span 
class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> - <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">cache_file</span><span class="p">),</span> - <span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> - <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> - <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Cache read"</span><span class="p">;</span> - <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">input</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> + <span class="n">input</span> <span class="o">&gt;&gt;</span> <span class="n">std</span><span class="o">::</span><span class="n">noskipws</span><span class="p">;</span> + <span class="k">if</span> <span class="p">(</span><span class="n">input</span><span class="p">.</span><span class="n">good</span><span 
class="p">())</span> <span class="p">{</span> + <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">input</span><span class="p">),</span> <span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> + <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> + <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="s">"Cache read"</span><span class="p">);</span> <span class="p">}</span> - <span class="n">cache_size_</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> - <span class="k">return</span> <span class="n">cache_size_</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> + <span class="n">length</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> + <span class="k">return</span> <span class="n">length</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span 
class="p">;</span> <span class="p">}</span> <span class="k">return</span> <span class="k">nullptr</span><span class="p">;</span> <span class="p">}</span> @@ -603,12 +606,13 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="k">private</span><span class="o">:</span> <span class="n">DataLoader</span><span class="o">*</span> <span class="n">dataloader_</span><span class="p">;</span> - <span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">Iterator</span><span class="o">&lt;</span><span class="n">Batch</span><span class="o">&gt;</span> <span class="n">it_</span><span class="p">;</span> <span class="k">const</span> <span class="n">std</span><span class="o">::</span><span class="n">string</span><span class="o">&amp;</span> <span class="n">cache_file_path_</span><span class="p">;</span> <span class="kt">size_t</span> <span class="n">cache_size_</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="kt">bool</span> <span class="n">use_cache_</span><span class="p">;</span> <span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span> <span class="n">cache_</span><span class="p">;</span> - <span class="kt">bool</span> <span class="n">it_created_</span> <span class="o">=</span> <span class="nb">false</span><span class="p">;</span> + <span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&gt;</span> <span class="n">batched_data_</span><span class="p">;</span> + <span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&gt;::</span><span 
class="n">iterator</span> <span class="n">it_</span><span class="p">;</span> + <span class="p">};</span> <span class="k">template</span><span class="o">&lt;</span><span class="k">typename</span> <span class="n">Algorithm</span><span class="o">&gt;</span> @@ -632,23 +636,17 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="n">std</span><span class="o">::</span><span class="n">stringstream</span> <span class="n">ss</span><span class="p">;</span> <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Reading Calibration Cache from "</span> <span class="o">&lt;&lt;</span> <span class="n">cache_file_path_</span><span class="p">;</span> <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kINFO</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">cache_</span><span class="p">.</span><span class="n">clear</span><span class="p">();</span> - <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">cache_file</span><span class="p">;</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">open</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">in</span> <span class="o">|</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">unsetf</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span 
class="n">ios</span><span class="o">::</span><span class="n">skipws</span><span class="p">);</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">seekg</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">beg</span><span class="p">);</span> - <span class="n">cache_</span><span class="p">.</span><span class="n">reserve</span><span class="p">(</span><span class="n">cache_file</span><span class="p">.</span><span class="n">tellg</span><span class="p">());</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">seekg</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">beg</span><span class="p">);</span> - <span class="k">if</span> <span class="p">(</span><span class="n">cache_file</span><span class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> - <span class="n">std</span><span class="o">::</span><span class="n">cout</span> <span class="o">&lt;&lt;</span> <span class="s">"Trying to read cache"</span> <span class="o">&lt;&lt;</span> <span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span> - <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istreambuf_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">cache_file</span><span class="p">),</span> - <span class="n">std</span><span class="o">::</span><span class="n">istreambuf_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> 
- <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> - <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Cache read"</span><span class="p">;</span> - <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">input</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> + <span class="n">input</span> <span class="o">&gt;&gt;</span> <span class="n">std</span><span class="o">::</span><span class="n">noskipws</span><span class="p">;</span> + <span class="k">if</span> <span class="p">(</span><span class="n">input</span><span class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> + <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">input</span><span class="p">),</span> <span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> + <span class="n">std</span><span class="o">::</span><span 
class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> + <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="s">"Cache read"</span><span class="p">);</span> <span class="p">}</span> - <span class="n">cache_size_</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> - <span class="k">return</span> <span class="n">cache_size_</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> + <span class="n">length</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> + <span class="k">return</span> <span class="n">length</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> <span class="p">}</span> diff --git a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html index 1184e3258d..c85c92f209 100644 --- a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -568,7 +573,9 @@ 
<h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-trtorch-h--page-roo <span class="k">operator</span> <span class="nf">Value</span><span class="p">()</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span><span class="p">;</span> <span class="p">}</span> <span class="k">explicit</span> <span class="k">operator</span> <span class="nf">bool</span><span class="p">()</span> <span class="o">=</span> <span class="k">delete</span><span class="p">;</span> <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">==</span><span class="p">(</span><span class="n">DataType</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">==</span> <span class="n">other</span><span class="p">.</span><span class="n">value</span><span class="p">;</span> <span class="p">}</span> + <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">==</span><span class="p">(</span><span class="n">DataType</span><span class="o">::</span><span class="n">Value</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">==</span> <span class="n">other</span><span class="p">;</span> <span class="p">}</span> <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">!=</span><span class="p">(</span><span class="n">DataType</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">!=</span> <span class="n">other</span><span class="p">.</span><span class="n">value</span><span class="p">;</span> <span 
class="p">}</span> + <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">!=</span><span class="p">(</span><span class="n">DataType</span><span class="o">::</span><span class="n">Value</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">!=</span> <span class="n">other</span><span class="p">;</span> <span class="p">}</span> <span class="k">private</span><span class="o">:</span> <span class="n">Value</span> <span class="n">value</span><span class="p">;</span> <span class="p">};</span> diff --git a/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html b/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html index e5719a16e4..e98a7cedbd 100644 --- a/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html +++ b/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -955,7 +960,10 @@ <h2 id="struct-documentation"> <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> DataType </a> - ::kFloat + :: + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5Value6kFloatE" title="trtorch::ExtraInfo::DataType::kFloat"> + kFloat + </a> <a class="headerlink" href="#_CPPv4N7trtorch9ExtraInfo12op_precisionE" title="Permalink to this definition"> ¶ </a> @@ -1091,7 +1099,10 @@ <h2 id="struct-documentation"> <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DeviceType.html#_CPPv4N7trtorch9ExtraInfo10DeviceTypeE" title="trtorch::ExtraInfo::DeviceType"> DeviceType </a> - ::kGPU + :: + <a class="reference internal" 
href="classtrtorch_1_1ExtraInfo_1_1DeviceType.html#_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kGPUE" title="trtorch::ExtraInfo::DeviceType::kGPU"> + kGPU + </a> <a class="headerlink" href="#_CPPv4N7trtorch9ExtraInfo6deviceE" title="Permalink to this definition"> ¶ </a> @@ -1707,6 +1718,99 @@ <h2 id="struct-documentation"> </dl> </dd> </dl> + <dl class="cpp function"> + <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + </span> + <span id="_CPPv2NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + </span> + <span id="trtorch::ExtraInfo::DataType::eq-operator__DataType::ValueCCE"> + </span> + <span class="target" id="classtrtorch_1_1ExtraInfo_1_1DataType_1a61d6f3e6a3929edec1d3659330b8297d"> + </span> + <em class="property"> + constexpr + </em> + bool + <code class="sig-name descname"> + operator== + </code> + <span class="sig-paren"> + ( + </span> + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> + DataType + </a> + :: + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE" title="trtorch::ExtraInfo::DataType::Value"> + Value + </a> + <em> + other + </em> + <span class="sig-paren"> + ) + </span> + <em class="property"> + const + </em> + <br/> + </dt> + <dd> + <p> + Comparision operator for + <a class="reference internal" href="#classtrtorch_1_1ExtraInfo_1_1DataType"> + <span class="std std-ref"> + DataType + </span> + </a> + . 
+ </p> + <p> + </p> + <dl class="simple"> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + true + </p> + </dd> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + false + </p> + </dd> + <dt> + <strong> + Parameters + </strong> + </dt> + <dd> + <ul class="breatheparameterlist simple"> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + other + </span> + </code> + : + </p> + </li> + </ul> + </dd> + </dl> + </dd> + </dl> <dl class="cpp function"> <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"> <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeneE8DataType"> @@ -1796,6 +1900,99 @@ <h2 id="struct-documentation"> </dl> </dd> </dl> + <dl class="cpp function"> + <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + </span> + <span id="_CPPv2NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + </span> + <span id="trtorch::ExtraInfo::DataType::neq-operator__DataType::ValueCCE"> + </span> + <span class="target" id="classtrtorch_1_1ExtraInfo_1_1DataType_1afac08806f8da094821031a3bf0ee5fa7"> + </span> + <em class="property"> + constexpr + </em> + bool + <code class="sig-name descname"> + operator!= + </code> + <span class="sig-paren"> + ( + </span> + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> + DataType + </a> + :: + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE" title="trtorch::ExtraInfo::DataType::Value"> + Value + </a> + <em> + other + </em> + <span class="sig-paren"> + ) + </span> + <em class="property"> + const + </em> + <br/> + </dt> + <dd> + <p> + Comparision operator for + <a class="reference internal" href="#classtrtorch_1_1ExtraInfo_1_1DataType"> + <span class="std std-ref"> + DataType + </span> + </a> + . 
+ </p> + <p> + </p> + <dl class="simple"> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + true + </p> + </dd> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + false + </p> + </dd> + <dt> + <strong> + Parameters + </strong> + </dt> + <dd> + <ul class="breatheparameterlist simple"> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + other + </span> + </code> + : + </p> + </li> + </ul> + </dd> + </dl> + </dd> + </dl> </div> </dd> </dl> diff --git a/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html b/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html index 8b58f44947..569b00db96 100644 --- a/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html +++ b/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/trtorch_cpp.html b/docs/_cpp_api/trtorch_cpp.html index f23f54243e..44e285ede8 100644 --- a/docs/_cpp_api/trtorch_cpp.html +++ b/docs/_cpp_api/trtorch_cpp.html @@ -296,6 +296,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/unabridged_api.html b/docs/_cpp_api/unabridged_api.html index f862c086d1..96a93493a0 100644 --- a/docs/_cpp_api/unabridged_api.html +++ b/docs/_cpp_api/unabridged_api.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/unabridged_orphan.html 
b/docs/_cpp_api/unabridged_orphan.html index cc96ebf58c..554cd70624 100644 --- a/docs/_cpp_api/unabridged_orphan.html +++ b/docs/_cpp_api/unabridged_orphan.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt index bec2619937..1ca71e13ce 100644 --- a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt +++ b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt @@ -39,6 +39,8 @@ Included By ----------- +- :ref:`file_cpp_api_include_trtorch_ptq.h` + - :ref:`file_cpp_api_include_trtorch_trtorch.h` diff --git a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt index ff8e4dacc1..a5f33139f7 100644 --- a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt +++ b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt @@ -37,6 +37,8 @@ Includes - ``string`` +- ``trtorch/logging.h`` (:ref:`file_cpp_api_include_trtorch_logging.h`) + - ``vector`` diff --git a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt index 93cefc6c66..6d02e502f4 100644 --- a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt +++ b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt @@ -18,6 +18,8 @@ Program Listing for File ptq.h #include <iostream> #include <sstream> + #include "trtorch/logging.h" + #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace nvinfer1 { class IInt8Calibrator; @@ -25,9 +27,12 @@ Program Listing for File ptq.h } namespace torch { 
- namespace data { - template<typename Example> - class Iterator; + class Tensor; + } + + namespace trtorch { + namespace ptq { + bool get_batch_impl(void* bindings[], const char* names[], int nbBindings, torch::Tensor& data); } } #endif //DOXYGEN_SHOULD_SKIP_THIS @@ -41,7 +46,12 @@ Program Listing for File ptq.h using Batch = typename DataLoader::super::BatchType; public: Int8Calibrator(DataLoaderUniquePtr dataloader, const std::string& cache_file_path, bool use_cache) - : dataloader_(dataloader.get()), it_(dataloader_->end()), cache_file_path_(cache_file_path), use_cache_(use_cache) {} + : dataloader_(dataloader.get()), cache_file_path_(cache_file_path), use_cache_(use_cache) { + for (auto batch : *dataloader_) { + batched_data_.push_back(batch.data); + } + it_ = batched_data_.begin(); + } int getBatchSize() const override { // HACK: TRTorch only uses explict batch sizing, INT8 Calibrator does not @@ -52,26 +62,15 @@ Program Listing for File ptq.h } bool getBatch(void* bindings[], const char* names[], int nbBindings) override { - // HACK: doesnt seem like the first try in the initializer list works - if (! 
it_created_) { - it_ = dataloader_->begin(); - it_created_ = true; - } - - if (it_ == dataloader_->end()) { + if (it_ != batched_data_.end()) { + auto status = get_batch_impl(bindings, names, nbBindings, *it_); + it_ = ++it_; + return status; + } else { + // Reset iterator if incase calibrator is going to be used again + it_ = batched_data_.begin(); return false; } - - auto batch = *it_; - - for (int i = 0; i < nbBindings; i++) { - auto data = batch.data; - data = data.to(at::kCUDA).contiguous(); - bindings[i] = data.data_ptr(); - } - - it_ = ++it_; - return true; } const void* readCalibrationCache(size_t& length) override { @@ -79,18 +78,17 @@ Program Listing for File ptq.h std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file(cache_file_path_, std::ios::binary); - cache_file >> std::noskipws; - if (cache_file.good()) { - std::copy(std::istream_iterator<char>(cache_file), - std::istream_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? 
cache_.data() : nullptr; } return nullptr; } @@ -109,12 +107,13 @@ Program Listing for File ptq.h private: DataLoader* dataloader_; - torch::data::Iterator<Batch> it_; const std::string& cache_file_path_; size_t cache_size_ = 0; bool use_cache_; std::vector<char> cache_; - bool it_created_ = false; + std::vector<torch::Tensor> batched_data_; + std::vector<torch::Tensor>::iterator it_; + }; template<typename Algorithm> @@ -138,23 +137,17 @@ Program Listing for File ptq.h std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file; - cache_file.open(cache_file_path_, std::ios::in | std::ios::binary); - cache_file.unsetf(std::ios::skipws); - cache_file.seekg(0, std::ios::beg); - cache_.reserve(cache_file.tellg()); - cache_file.seekg(0, std::ios::beg); - if (cache_file.good()) { - std::cout << "Trying to read cache" << std::endl; - std::copy(std::istreambuf_iterator<char>(cache_file), - std::istreambuf_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? 
cache_.data() : nullptr; } diff --git a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt index 14d26d87b9..fa356d80b0 100644 --- a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt +++ b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt @@ -74,7 +74,9 @@ Program Listing for File trtorch.h operator Value() const { return value; } explicit operator bool() = delete; constexpr bool operator==(DataType other) const { return value == other.value; } + constexpr bool operator==(DataType::Value other) const { return value == other; } constexpr bool operator!=(DataType other) const { return value != other.value; } + constexpr bool operator!=(DataType::Value other) const { return value != other; } private: Value value; }; diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt index 5255135f58..45a1610b49 100644 --- a/docs/_sources/index.rst.txt +++ b/docs/_sources/index.rst.txt @@ -23,15 +23,18 @@ Getting Started * :ref:`installation` * :ref:`getting_started` * :ref:`ptq` +* :ref:`trtorchc` + .. toctree:: :caption: Getting Started - :maxdepth: 2 + :maxdepth: 1 :hidden: tutorials/installation tutorials/getting_started tutorials/ptq + tutorials/trtorchc Contributor Documentation -------------------------------- diff --git a/docs/_sources/tutorials/getting_started.rst.txt b/docs/_sources/tutorials/getting_started.rst.txt index 0d133a7eab..45c08b8637 100644 --- a/docs/_sources/tutorials/getting_started.rst.txt +++ b/docs/_sources/tutorials/getting_started.rst.txt @@ -130,7 +130,8 @@ To compile your TorchScript module with TRTorch, all you need to do is provide t to TRTorch and you will be returned an optimized TorchScript module to run or add into another PyTorch module. 
The only required setting is the input size or input range which is defined as a list of either list types like ``lists``, ``tuples`` or PyTorch ``size`` objects or dictionaries of minimum, optimial and maximum sizes. You can also specify settings such as -operating precision for the engine or target device. +operating precision for the engine or target device. After compilation you can save the module just like any other module +to load in a deployment application. In order to load a TensorRT/TorchScript module, make sure you first import ``trtorch``. .. code-block:: python @@ -152,6 +153,17 @@ operating precision for the engine or target device. input_data = input_data.half() result = trt_ts_module(input_data) + torch.jit.save(trt_ts_module, "trt_ts_module.ts") + +.. code-block:: python + + # Deployment application + import torch + import trtorch + + trt_ts_module = torch.jit.load("trt_ts_module.ts") + input_data = input_data.half() + result = trt_ts_module(input_data) .. _ts_in_cc: @@ -251,7 +263,35 @@ We can also set settings like operating precision to run in FP16. auto trt_mod = trtorch::CompileGraph(mod, info); auto out = trt_mod.forward({in}); -And now we are running the module in FP16 precision. +And now we are running the module in FP16 precision. You can then save the module to load later. + +.. code-block:: c++ + + trt_mod.save("<PATH TO SAVED TRT/TS MOD>") + +TRTorch compiled TorchScript modules are loaded in the same way as normal TorchScript module. Make sure your deployment application is linked against ``libtrtorch.so`` + +.. code-block:: c++ + + #include "torch/script.h" + #include "trtorch/trtorch.h" + + int main(int argc, const char* argv[]) { + torch::jit::Module module; + try { + // Deserialize the ScriptModule from a file using torch::jit::load(). 
+ module = torch::jit::load("<PATH TO SAVED TRT/TS MOD>"); + } + catch (const c10::Error& e) { + std::cerr << "error loading the model\n"; + return -1; + } + + torch::Tensor in = torch::randn({1, 1, 32, 32}, torch::kCUDA); + auto out = mod.forward(in); + + std::cout << "ok\n"; + } If you want to save the engine produced by TRTorch to use in a TensorRT application you can use the ``ConvertGraphToTRTEngine`` API. diff --git a/docs/_sources/tutorials/trtorchc.rst.txt b/docs/_sources/tutorials/trtorchc.rst.txt new file mode 100644 index 0000000000..5561ee86ed --- /dev/null +++ b/docs/_sources/tutorials/trtorchc.rst.txt @@ -0,0 +1,91 @@ +.. _trtorchc: + +trtorchc +================================= + +``trtorchc`` is a CLI application for using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use the PTQ feature). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + +All that is required to run the program after compilation is for C++ linking against ``libtrtorch.so`` +or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with ``torch.jit.load()`` and run like you would run any other module. + +.. code-block:: txt + + trtorchc [input_file_path] [output_file_path] + [input_shapes...] 
{OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + + OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be >= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine 
to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options + + +e.g. + +.. code-block:: txt + + trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 diff --git a/docs/contributors/conversion.html b/docs/contributors/conversion.html index 078214b750..3249c81d4f 100644 --- a/docs/contributors/conversion.html +++ b/docs/contributors/conversion.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/contributors/execution.html b/docs/contributors/execution.html index 9b77950c3b..b80c618615 100644 --- a/docs/contributors/execution.html +++ b/docs/contributors/execution.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/contributors/lowering.html b/docs/contributors/lowering.html index 9d3728b25d..ee974b61a2 100644 --- a/docs/contributors/lowering.html +++ b/docs/contributors/lowering.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff 
--git a/docs/contributors/phases.html b/docs/contributors/phases.html index f5e355f36b..1583f5200b 100644 --- a/docs/contributors/phases.html +++ b/docs/contributors/phases.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/contributors/system_overview.html b/docs/contributors/system_overview.html index 53e8c4bbdd..05f7f760fc 100644 --- a/docs/contributors/system_overview.html +++ b/docs/contributors/system_overview.html @@ -57,7 +57,7 @@ <link href="../genindex.html" rel="index" title="Index"/> <link href="../search.html" rel="search" title="Search"/> <link href="lowering.html" rel="next" title="Lowering Phase"/> - <link href="../tutorials/ptq.html" rel="prev" title="Post Training Quantization (PTQ)"/> + <link href="../tutorials/trtorchc.html" rel="prev" title="trtorchc"/> </head> <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr"> <svg class="md-svg"> @@ -296,6 +296,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -695,7 +700,7 @@ <h3 id="execution"> <footer class="md-footer"> <div class="md-footer-nav"> <nav class="md-footer-nav__inner md-grid"> - <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="../tutorials/ptq.html" rel="prev" title="Post Training Quantization (PTQ)"> + <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="../tutorials/trtorchc.html" rel="prev" title="trtorchc"> <div class="md-flex__cell md-flex__cell--shrink"> <i class="md-icon md-icon--arrow-back md-footer-nav__button"> </i> @@ -705,7 +710,7 @@ <h3 id="execution"> <span class="md-footer-nav__direction"> Previous </span> - Post 
Training Quantization (PTQ) + trtorchc </span> </div> </a> diff --git a/docs/contributors/useful_links.html b/docs/contributors/useful_links.html index 9277c3b0d7..8352444c8a 100644 --- a/docs/contributors/useful_links.html +++ b/docs/contributors/useful_links.html @@ -296,6 +296,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/contributors/writing_converters.html b/docs/contributors/writing_converters.html index c3f4fdce29..3bfa302d38 100644 --- a/docs/contributors/writing_converters.html +++ b/docs/contributors/writing_converters.html @@ -296,6 +296,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/genindex.html b/docs/genindex.html index a5c96be619..088b5a35ce 100644 --- a/docs/genindex.html +++ b/docs/genindex.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -1000,18 +1005,34 @@ <h2 id="T"> trtorch::ExtraInfo::DataType::operator!= (C++ function) </a> , - <a href="_cpp_api/structtrtorch_1_1ExtraInfo.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"> + <a href="_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> [1] </a> + , + <a href="_cpp_api/structtrtorch_1_1ExtraInfo.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"> + [2] + </a> + , + <a href="_cpp_api/structtrtorch_1_1ExtraInfo.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + [3] + </a> </li> <li> <a 
href="_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeeqE8DataType"> trtorch::ExtraInfo::DataType::operator== (C++ function) </a> , - <a href="_cpp_api/structtrtorch_1_1ExtraInfo.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeeqE8DataType"> + <a href="_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> [1] </a> + , + <a href="_cpp_api/structtrtorch_1_1ExtraInfo.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeeqE8DataType"> + [2] + </a> + , + <a href="_cpp_api/structtrtorch_1_1ExtraInfo.html#_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + [3] + </a> </li> <li> <a href="_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE"> diff --git a/docs/index.html b/docs/index.html index 6eaea7c19f..2cb263f129 100644 --- a/docs/index.html +++ b/docs/index.html @@ -295,6 +295,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -593,6 +598,15 @@ <h2 id="getting-started"> </a> </p> </li> + <li> + <p> + <a class="reference internal" href="tutorials/trtorchc.html#trtorchc"> + <span class="std std-ref"> + trtorchc + </span> + </a> + </p> + </li> </ul> <div class="toctree-wrapper compound"> </div> diff --git a/docs/objects.inv b/docs/objects.inv index 2b21b404b1..895a3e0b2e 100644 Binary files a/docs/objects.inv and b/docs/objects.inv differ diff --git a/docs/py-modindex.html b/docs/py-modindex.html index 0a746c3fd4..f67e5422e0 100644 --- a/docs/py-modindex.html +++ b/docs/py-modindex.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/py_api/logging.html 
b/docs/py_api/logging.html index f264fabe08..a19e7f27a4 100644 --- a/docs/py_api/logging.html +++ b/docs/py_api/logging.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/py_api/trtorch.html b/docs/py_api/trtorch.html index 2ebeabd12d..1d1bb164cb 100644 --- a/docs/py_api/trtorch.html +++ b/docs/py_api/trtorch.html @@ -296,6 +296,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/search.html b/docs/search.html index 072ef595d6..a058f1d279 100644 --- a/docs/search.html +++ b/docs/search.html @@ -298,6 +298,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/searchindex.js b/docs/searchindex.js index 85e7236e9b..34d173a0d1 100644 --- a/docs/searchindex.js +++ b/docs/searchindex.js @@ -1 +1 @@ 
-Search.setIndex({docnames:["_cpp_api/class_view_hierarchy","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType","_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator","_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator","_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502","_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f","_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055","_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba","_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f","_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e","_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da","_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84","_cpp_api/dir_cpp","_cpp_api/dir_cpp_api","_cpp_api/dir_cpp_api_include","_cpp_api/dir_cpp_api_include_trtorch","_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4","_cpp_api/file_cpp_api_include_trtorch_logging.h","_cpp_api/file_cpp_api_include_trtorch_macros.h","_cpp_api/file_cpp_api_include_trtorch_ptq.h","_cpp_api/file_cpp_api_include_trtorch_trtorch.h","_cpp_api/file_view_hierarchy","_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb","_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb","_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949","_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f","_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad","_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b","_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a","_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447","_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247","_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804","_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31","_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10","_cpp_api/function_trtorch_8h_1ab01696cfe
08b6a5293c55935a9713c25","_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5","_cpp_api/namespace_trtorch","_cpp_api/namespace_trtorch__logging","_cpp_api/namespace_trtorch__ptq","_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h","_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h","_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h","_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h","_cpp_api/structtrtorch_1_1ExtraInfo","_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange","_cpp_api/trtorch_cpp","_cpp_api/unabridged_api","_cpp_api/unabridged_orphan","contributors/conversion","contributors/execution","contributors/lowering","contributors/phases","contributors/system_overview","contributors/useful_links","contributors/writing_converters","index","py_api/logging","py_api/trtorch","tutorials/getting_started","tutorials/installation","tutorials/ptq"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,sphinx:56},filenames:["_cpp_api/class_view_hierarchy.rst","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.rst","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.rst","_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.rst","_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.rst","_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.rst","_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.rst","_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.rst","_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.rst","_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.rst","_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.rst","_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.rst","_cpp_api/define_macros_8h_1ae1c56ab8a40af
292a9a4964651524d84.rst","_cpp_api/dir_cpp.rst","_cpp_api/dir_cpp_api.rst","_cpp_api/dir_cpp_api_include.rst","_cpp_api/dir_cpp_api_include_trtorch.rst","_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.rst","_cpp_api/file_cpp_api_include_trtorch_logging.h.rst","_cpp_api/file_cpp_api_include_trtorch_macros.h.rst","_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst","_cpp_api/file_cpp_api_include_trtorch_trtorch.h.rst","_cpp_api/file_view_hierarchy.rst","_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.rst","_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.rst","_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.rst","_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.rst","_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.rst","_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.rst","_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.rst","_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.rst","_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.rst","_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.rst","_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.rst","_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.rst","_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.rst","_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.rst","_cpp_api/namespace_trtorch.rst","_cpp_api/namespace_trtorch__logging.rst","_cpp_api/namespace_trtorch__ptq.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst","_cpp_api/structtrtorch_1_1ExtraInfo.rst","_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.rst","_cpp_api/trtorch_cpp.rst","_cpp_api/unabridged_api.rst","_cpp_api/un
abridged_orphan.rst","contributors/conversion.rst","contributors/execution.rst","contributors/lowering.rst","contributors/phases.rst","contributors/system_overview.rst","contributors/useful_links.rst","contributors/writing_converters.rst","index.rst","py_api/logging.rst","py_api/trtorch.rst","tutorials/getting_started.rst","tutorials/installation.rst","tutorials/ptq.rst"],objects:{"":{"logging::trtorch::Level":[17,1,1,"_CPPv4N7logging7trtorch5LevelE"],"logging::trtorch::Level::kDEBUG":[17,2,1,"_CPPv4N7logging7trtorch5Level6kDEBUGE"],"logging::trtorch::Level::kERROR":[17,2,1,"_CPPv4N7logging7trtorch5Level6kERRORE"],"logging::trtorch::Level::kGRAPH":[17,2,1,"_CPPv4N7logging7trtorch5Level6kGRAPHE"],"logging::trtorch::Level::kINFO":[17,2,1,"_CPPv4N7logging7trtorch5Level5kINFOE"],"logging::trtorch::Level::kINTERNAL_ERROR":[17,2,1,"_CPPv4N7logging7trtorch5Level15kINTERNAL_ERRORE"],"logging::trtorch::Level::kWARNING":[17,2,1,"_CPPv4N7logging7trtorch5Level8kWARNINGE"],"logging::trtorch::get_is_colored_output_on":[25,3,1,"_CPPv4N7logging7trtorch24get_is_colored_output_onEv"],"logging::trtorch::get_logging_prefix":[29,3,1,"_CPPv4N7logging7trtorch18get_logging_prefixEv"],"logging::trtorch::get_reportable_log_level":[23,3,1,"_CPPv4N7logging7trtorch24get_reportable_log_levelEv"],"logging::trtorch::log":[27,3,1,"_CPPv4N7logging7trtorch3logE5LevelNSt6stringE"],"logging::trtorch::log::lvl":[27,4,1,"_CPPv4N7logging7trtorch3logE5LevelNSt6stringE"],"logging::trtorch::log::msg":[27,4,1,"_CPPv4N7logging7trtorch3logE5LevelNSt6stringE"],"logging::trtorch::set_is_colored_output_on":[26,3,1,"_CPPv4N7logging7trtorch24set_is_colored_output_onEb"],"logging::trtorch::set_is_colored_output_on::colored_output_on":[26,4,1,"_CPPv4N7logging7trtorch24set_is_colored_output_onEb"],"logging::trtorch::set_logging_prefix":[24,3,1,"_CPPv4N7logging7trtorch18set_logging_prefixENSt6stringE"],"logging::trtorch::set_logging_prefix::prefix":[24,4,1,"_CPPv4N7logging7trtorch18set_logging_prefixENSt6stringE"],"logg
ing::trtorch::set_reportable_log_level":[28,3,1,"_CPPv4N7logging7trtorch24set_reportable_log_levelE5Level"],"logging::trtorch::set_reportable_log_level::lvl":[28,4,1,"_CPPv4N7logging7trtorch24set_reportable_log_levelE5Level"],"ptq::trtorch::make_int8_cache_calibrator":[33,3,1,"_CPPv4I0EN3ptq7trtorch26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE"],"ptq::trtorch::make_int8_cache_calibrator::Algorithm":[33,5,1,"_CPPv4I0EN3ptq7trtorch26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE"],"ptq::trtorch::make_int8_cache_calibrator::cache_file_path":[33,4,1,"_CPPv4I0EN3ptq7trtorch26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE"],"ptq::trtorch::make_int8_calibrator":[31,3,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::Algorithm":[31,5,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::DataLoader":[31,5,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::cache_file_path":[31,4,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::dataloader":[31,4,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::use_cache":[31,4,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"trtorch::CheckMethodOperatorSupport":[35,3,1,"_CPPv4N7trtorch26CheckMethodOperatorSupportERKN5torch3jit6ModuleENSt6stringE"],"trtorch::CheckMethodOperatorSupport::method_name":[35,4,1,"_CPPv4N7trtorch26CheckMethodOperatorSupportERKN5torch3jit6ModuleENSt6string
E"],"trtorch::CheckMethodOperatorSupport::module":[35,4,1,"_CPPv4N7trtorch26CheckMethodOperatorSupportERKN5torch3jit6ModuleENSt6stringE"],"trtorch::CompileGraph":[36,3,1,"_CPPv4N7trtorch12CompileGraphERKN5torch3jit6ModuleE9ExtraInfo"],"trtorch::CompileGraph::info":[36,4,1,"_CPPv4N7trtorch12CompileGraphERKN5torch3jit6ModuleE9ExtraInfo"],"trtorch::CompileGraph::module":[36,4,1,"_CPPv4N7trtorch12CompileGraphERKN5torch3jit6ModuleE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine":[32,3,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine::info":[32,4,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine::method_name":[32,4,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine::module":[32,4,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ExtraInfo":[44,6,1,"_CPPv4N7trtorch9ExtraInfoE"],"trtorch::ExtraInfo::DataType":[44,6,1,"_CPPv4N7trtorch9ExtraInfo8DataTypeE"],"trtorch::ExtraInfo::DataType::DataType":[44,3,1,"_CPPv4N7trtorch9ExtraInfo8DataType8DataTypeEv"],"trtorch::ExtraInfo::DataType::DataType::t":[44,4,1,"_CPPv4N7trtorch9ExtraInfo8DataType8DataTypeEN3c1010ScalarTypeE"],"trtorch::ExtraInfo::DataType::Value":[44,1,1,"_CPPv4N7trtorch9ExtraInfo8DataType5ValueE"],"trtorch::ExtraInfo::DataType::Value::kChar":[44,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value5kCharE"],"trtorch::ExtraInfo::DataType::Value::kFloat":[44,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value6kFloatE"],"trtorch::ExtraInfo::DataType::Value::kHalf":[44,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value5kHalfE"],"trtorch::ExtraInfo::DataType::operator Value":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypecv5ValueEv"],"trtorch::ExtraInfo::DataType::operator 
bool":[44,3,1,"_CPPv4N7trtorch9ExtraInfo8DataTypecvbEv"],"trtorch::ExtraInfo::DataType::operator!=":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"],"trtorch::ExtraInfo::DataType::operator!=::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"],"trtorch::ExtraInfo::DataType::operator==":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeeqE8DataType"],"trtorch::ExtraInfo::DataType::operator==::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeeqE8DataType"],"trtorch::ExtraInfo::DeviceType":[44,6,1,"_CPPv4N7trtorch9ExtraInfo10DeviceTypeE"],"trtorch::ExtraInfo::DeviceType::DeviceType":[44,3,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType10DeviceTypeEv"],"trtorch::ExtraInfo::DeviceType::DeviceType::t":[44,4,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType10DeviceTypeEN3c1010DeviceTypeE"],"trtorch::ExtraInfo::DeviceType::Value":[44,1,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5ValueE"],"trtorch::ExtraInfo::DeviceType::Value::kDLA":[44,2,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kDLAE"],"trtorch::ExtraInfo::DeviceType::Value::kGPU":[44,2,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kGPUE"],"trtorch::ExtraInfo::DeviceType::operator Value":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypecv5ValueEv"],"trtorch::ExtraInfo::DeviceType::operator 
bool":[44,3,1,"_CPPv4N7trtorch9ExtraInfo10DeviceTypecvbEv"],"trtorch::ExtraInfo::DeviceType::operator!=":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeneE10DeviceType"],"trtorch::ExtraInfo::DeviceType::operator!=::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeneE10DeviceType"],"trtorch::ExtraInfo::DeviceType::operator==":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeeqE10DeviceType"],"trtorch::ExtraInfo::DeviceType::operator==::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeeqE10DeviceType"],"trtorch::ExtraInfo::EngineCapability":[44,1,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapabilityE"],"trtorch::ExtraInfo::EngineCapability::kDEFAULT":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability8kDEFAULTE"],"trtorch::ExtraInfo::EngineCapability::kSAFE_DLA":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_DLAE"],"trtorch::ExtraInfo::EngineCapability::kSAFE_GPU":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_GPUE"],"trtorch::ExtraInfo::ExtraInfo":[44,3,1,"_CPPv4N7trtorch9ExtraInfo9ExtraInfoENSt6vectorINSt6vectorI7int64_tEEEE"],"trtorch::ExtraInfo::ExtraInfo::fixed_sizes":[44,4,1,"_CPPv4N7trtorch9ExtraInfo9ExtraInfoENSt6vectorINSt6vectorI7int64_tEEEE"],"trtorch::ExtraInfo::ExtraInfo::input_ranges":[44,4,1,"_CPPv4N7trtorch9ExtraInfo9ExtraInfoENSt6vectorI10InputRangeEE"],"trtorch::ExtraInfo::InputRange":[45,6,1,"_CPPv4N7trtorch9ExtraInfo10InputRangeE"],"trtorch::ExtraInfo::InputRange::InputRange":[45,3,1,"_CPPv4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::InputRange::max":[45,4,1,"_CPPv4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::InputRange::min":[45,4,1,"_CPPv4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::InputRange::opt":[45,4,1,"_CPPv4N7trtorch9ExtraIn
fo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::max":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3maxE"],"trtorch::ExtraInfo::InputRange::min":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3minE"],"trtorch::ExtraInfo::InputRange::opt":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3optE"],"trtorch::ExtraInfo::allow_gpu_fallback":[44,7,1,"_CPPv4N7trtorch9ExtraInfo18allow_gpu_fallbackE"],"trtorch::ExtraInfo::capability":[44,7,1,"_CPPv4N7trtorch9ExtraInfo10capabilityE"],"trtorch::ExtraInfo::debug":[44,7,1,"_CPPv4N7trtorch9ExtraInfo5debugE"],"trtorch::ExtraInfo::device":[44,7,1,"_CPPv4N7trtorch9ExtraInfo6deviceE"],"trtorch::ExtraInfo::input_ranges":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12input_rangesE"],"trtorch::ExtraInfo::max_batch_size":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14max_batch_sizeE"],"trtorch::ExtraInfo::num_avg_timing_iters":[44,7,1,"_CPPv4N7trtorch9ExtraInfo20num_avg_timing_itersE"],"trtorch::ExtraInfo::num_min_timing_iters":[44,7,1,"_CPPv4N7trtorch9ExtraInfo20num_min_timing_itersE"],"trtorch::ExtraInfo::op_precision":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12op_precisionE"],"trtorch::ExtraInfo::ptq_calibrator":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14ptq_calibratorE"],"trtorch::ExtraInfo::refit":[44,7,1,"_CPPv4N7trtorch9ExtraInfo5refitE"],"trtorch::ExtraInfo::strict_types":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12strict_typesE"],"trtorch::ExtraInfo::workspace_size":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14workspace_sizeE"],"trtorch::dump_build_info":[34,3,1,"_CPPv4N7trtorch15dump_build_infoEv"],"trtorch::get_build_info":[30,3,1,"_CPPv4N7trtorch14get_build_infoEv"],"trtorch::ptq::Int8CacheCalibrator":[3,6,1,"_CPPv4I0EN7trtorch3ptq19Int8CacheCalibratorE"],"trtorch::ptq::Int8CacheCalibrator::Algorithm":[3,5,1,"_CPPv4I0EN7trtorch3ptq19Int8CacheCalibratorE"],"trtorch::ptq::Int8CacheCalibrator::Int8CacheCalibrator":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE"],"trtorch::p
tq::Int8CacheCalibrator::Int8CacheCalibrator::cache_file_path":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE"],"trtorch::ptq::Int8CacheCalibrator::getBatch":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::bindings":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::names":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::nbBindings":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatchSize":[3,3,1,"_CPPv4NK7trtorch3ptq19Int8CacheCalibrator12getBatchSizeEv"],"trtorch::ptq::Int8CacheCalibrator::operator nvinfer1::IInt8Calibrator*":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibratorcvPN8nvinfer115IInt8CalibratorEEv"],"trtorch::ptq::Int8CacheCalibrator::readCalibrationCache":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8CacheCalibrator::readCalibrationCache::length":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache::cache":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache::length":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator":[4,6,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::Algorithm":[4,5,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::DataLoaderUniquePtr":[4,5,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::Int8Calibrator":[4,3,1,"_CPPv4N7trt
orch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::cache_file_path":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::dataloader":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::use_cache":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::getBatch":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::bindings":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::names":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::nbBindings":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatchSize":[4,3,1,"_CPPv4NK7trtorch3ptq14Int8Calibrator12getBatchSizeEv"],"trtorch::ptq::Int8Calibrator::operator 
nvinfer1::IInt8Calibrator*":[4,3,1,"_CPPv4N7trtorch3ptq14Int8CalibratorcvPN8nvinfer115IInt8CalibratorEEv"],"trtorch::ptq::Int8Calibrator::readCalibrationCache":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8Calibrator::readCalibrationCache::length":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache::cache":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache::length":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],STR:[5,0,1,"c.STR"],TRTORCH_API:[6,0,1,"c.TRTORCH_API"],TRTORCH_HIDDEN:[7,0,1,"c.TRTORCH_HIDDEN"],TRTORCH_MAJOR_VERSION:[8,0,1,"c.TRTORCH_MAJOR_VERSION"],TRTORCH_MINOR_VERSION:[12,0,1,"c.TRTORCH_MINOR_VERSION"],TRTORCH_PATCH_VERSION:[9,0,1,"c.TRTORCH_PATCH_VERSION"],TRTORCH_VERSION:[10,0,1,"c.TRTORCH_VERSION"],XSTR:[11,0,1,"c.XSTR"],trtorch:[58,8,0,"-"]},"trtorch.logging":{Level:[57,9,1,""],get_is_colored_output_on:[57,10,1,""],get_logging_prefix:[57,10,1,""],get_reportable_log_level:[57,10,1,""],log:[57,10,1,""],set_is_colored_output_on:[57,10,1,""],set_logging_prefix:[57,10,1,""],set_reportable_log_level:[57,10,1,""]},"trtorch.logging.Level":{Debug:[57,11,1,""],Error:[57,11,1,""],Info:[57,11,1,""],InternalError:[57,11,1,""],Warning:[57,11,1,""]},trtorch:{DeviceType:[58,9,1,""],EngineCapability:[58,9,1,""],check_method_op_support:[58,10,1,""],compile:[58,10,1,""],convert_method_to_trt_engine:[58,10,1,""],dtype:[58,9,1,""],dump_build_info:[58,10,1,""],get_build_info:[58,10,1,""],logging:[57,8,0,"-"]}},objnames:{"0":["c","macro","C macro"],"1":["cpp","enum","C++ enum"],"10":["py","function","Python function"],"11":["py","attribute","Python attribute"],"2":["cpp","enumerator","C++ 
enumerator"],"3":["cpp","function","C++ function"],"4":["cpp","functionParam","functionParam"],"5":["cpp","templateParam","templateParam"],"6":["cpp","class","C++ class"],"7":["cpp","member","C++ member"],"8":["py","module","Python module"],"9":["py","class","Python class"]},objtypes:{"0":"c:macro","1":"cpp:enum","10":"py:function","11":"py:attribute","2":"cpp:enumerator","3":"cpp:function","4":"cpp:functionParam","5":"cpp:templateParam","6":"cpp:class","7":"cpp:member","8":"py:module","9":"py:class"},terms:{"abstract":[50,55],"byte":58,"case":[1,2,44,49,55,61],"catch":59,"char":[3,4,42,59],"class":[31,33,42,43,44,48,55,57,58,59,61],"const":[1,2,3,4,31,32,33,35,36,42,43,44,51,55,59,61],"default":[1,2,3,4,17,31,33,41,43,44,58,61],"enum":[1,2,40,43,44,48,57,61],"final":49,"float":[58,59],"function":[1,2,3,4,44,45,48,50,51,55,59],"import":59,"int":[3,4,42,50,59],"long":49,"new":[1,2,3,4,36,44,45,50,53,55,57,59],"public":[1,2,3,4,42,43,44,45,61],"return":[1,2,3,4,23,25,30,31,32,33,35,36,40,41,42,43,44,50,51,52,53,55,57,58,59,61],"static":[44,45,49,55,58,59],"super":[42,59],"throw":[51,59],"true":[1,2,4,42,43,44,51,55,58,59,61],"try":[42,53,59],"void":[3,4,24,26,27,28,34,40,42,43],"while":61,And:59,Are:40,But:59,For:[49,59],Its:55,Not:3,One:[58,59],PRs:59,Thats:59,The:[2,44,49,50,51,52,53,55,57,60,61],Then:[60,61],There:[4,49,55,59,61],These:[49,50],Use:[44,55,58,61],Useful:56,Using:4,Will:35,With:[59,61],___torch_mangle_10:[50,59],___torch_mangle_5:59,___torch_mangle_9:59,__attribute__:41,__gnuc__:41,__init__:59,__torch__:[50,59],__visibility__:41,_all_:51,_convolut:59,aarch64:[53,60],abl:[49,51,55,56,61],about:[49,50,55,58],abov:[28,59],accept:[44,45,50,55],access:[51,55,56,59],accord:55,accuraci:61,across:51,act:50,acthardtanh:55,activ:[59,61],activationtyp:55,actual:[50,51,55,57,59],add:[27,49,51,55,57,59],add_:[51,59],addactiv:55,added:[28,49],addenginetograph:[50,59],addit:[51,59],addlay:59,addshuffl:59,advanc:61,after:[49,56,59],again:55,ahead:59,aim:51,algorithm:
[3,4,31,33,42,43,61],all:[17,43,50,51,58,59,61],alloc:55,allow:[44,45,49,51,58],allow_gpu_fallback:[43,44,58],alreadi:[49,51,59],also:[33,49,55,56,59,60,61],alwai:[3,4,26],analogu:55,ani:[49,55,58,59,60],annot:[55,59],anoth:59,aot:[56,59],api:[13,15,16,40,41,42,43,53,55,58,59,61],apidirectori:[22,46],appli:61,applic:[2,33,44,51,59],aquir:59,architectur:56,archiv:60,aren:59,arg:[49,59],argc:59,argument:[50,51,55,59],argv:59,around:[50,55,59],arrai:[3,4,49],arrayref:[43,44,45],arxiv:61,assembl:[49,59],assign:[3,4,50],associ:[49,55,59],associatevalueandivalu:55,associatevalueandtensor:[55,59],aten:[51,54,55,59],attribut:51,auto:[42,55,59,61],avail:55,averag:[44,58],back:[50,51,53,59],back_insert:42,background:59,base:[34,46,47,50,57,59,60,61],batch:[3,4,42,44,55,58,61],batch_norm:55,batch_siz:[42,61],batchnorm:51,batchtyp:42,bazel:[53,60],bdist_wheel:60,becaus:[55,59],becom:55,been:[49,55,59],befor:[51,53,55,56,59,60],beg:42,begin:[42,60],beginn:59,behavior:58,being:59,below:[55,59],benefit:[55,59],best:[44,45],better:[59,61],between:[55,61],bia:[51,59],bin:60,binari:[42,61],bind:[3,4,42],bit:[55,58,59],blob:54,block0:51,block1:51,block:49,bool:[1,2,3,4,25,26,31,35,40,42,43,44,51,55,57,58,59,61],both:59,briefli:59,bsd:43,buffer:[3,4],bug:60,build:[30,31,33,44,49,52,53,55,58,59,61],build_fil:60,builderconfig:43,built:60,c10:[1,2,43,44,45,59,61],c_api:54,c_str:[55,59],cach:[3,4,31,33,42,61],cache_:42,cache_fil:42,cache_file_path:[3,4,31,33,42,43],cache_file_path_:42,cache_size_:42,calcul:[49,59],calibr:[3,4,31,33,42,44,61],calibration_cache_fil:[31,33,61],calibration_dataload:[31,61],calibration_dataset:61,call:[31,33,36,44,50,51,55,58,59],callmethod:59,can:[1,2,4,31,32,33,44,45,49,50,51,52,53,55,58,59,60,61],cannot:[51,59],capabl:[43,44,58],cast:[3,4],caus:[55,60],cdll:59,cerr:59,chain:55,chanc:55,chang:[33,53,61],check:[1,2,35,44,51,55,58,59,60],check_method_op_support:58,checkmethodoperatorsupport:[21,37,43,46,47,59],choos:59,cifar10:61,cifar:61,classif:59,clear:42,cl
ose:59,closer:51,code:[53,56,59],collect:59,color:[25,26,57],colored_output_on:[26,40,57],com:[54,59,60,61],comment:60,common:[49,51],commun:59,comparis:[1,44],comparison:[2,44],compat:[1,2,44],compil:[32,35,36,44,50,51,55,58,61],compile_set:59,compilegraph:[21,37,43,46,47,59,61],complet:59,complex:59,compliat:61,compon:[52,53,59],compos:59,composit:59,comput:61,config:60,configur:[32,36,56,58,59,61],connect:51,consid:59,consolid:59,constant:[49,50,51,59],constexpr:[1,2,43,44],construct:[1,2,3,4,44,45,49,52,53,55,59],constructor:[1,44,59],consum:[4,49,59],contain:[31,35,49,51,55,58,59,60,61],content:61,context:[49,50,52,53],contigu:42,contributor:59,control:59,conv1:59,conv2:59,conv2d:59,conv:[55,59],convers:[50,51,58,59],conversionctx:[55,59],convert:[3,4,32,35,36,51,52,53,56,58],convert_method_to_trt_engin:58,convertgraphtotrtengin:[21,37,43,46,47,59],convien:44,convienc:[3,4],convolut:61,coordin:53,copi:[42,55],copyright:43,core:[51,53,59],corespond:50,corpor:43,correct:60,correspond:55,coupl:[49,53],cout:[42,59],cp35:60,cp35m:60,cp36:60,cp36m:60,cp37:60,cp37m:60,cp38:60,cpp:[14,15,16,40,41,42,43,48,51,53,59,61],cpp_frontend:61,cppdirectori:[22,46],cppdoc:59,creat:[31,33,49,55],csrc:[51,54],cstddef:61,ctx:[55,59],ctype:59,cuda:[44,58,59,60],cudafloattyp:59,current:[23,55],data:[1,3,4,31,33,42,44,49,52,53,55,61],data_dir:61,data_ptr:42,dataflow:[55,59],dataload:[4,31,33,42,43,44,61],dataloader_:42,dataloaderopt:61,dataloaderuniqueptr:[4,42],dataset:[33,61],datatyp:[2,21,37,43,44,46,47,58],datatypeclass:[0,46],dbg:60,dead_code_elimin:51,deal:55,debug:[17,26,43,44,55,57,58],debugg:58,dedic:51,deep:[55,56,61],deeplearn:54,def:59,defin:[1,2,3,4,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,41,44,45,48,59,61],definit:[48,55],delet:[1,2,43,44,51],demo:61,depend:[30,33,49,53,59],deploi:[56,59,61],deploy:[59,61],describ:[44,55,58,59],deseri:[58,59],destroi:55,destructor:55,detail:59,determin:51,develop:[56,59,60],devic:[2,43,44,58,59],devicetyp:[0,21,37,43,44,46,47,58],dic
t:58,dictionari:[58,59],differ:[33,56,59],dimens:51,directli:[55,61],directori:[18,19,20,21,22,43,46,60,61],disabl:57,disclos:60,distdir:60,distribut:[58,59,61],dla:[2,44,58],doc:[53,54,60],docsrc:53,document:[40,41,42,43,46,47,53,59,61],doe:[41,42,51,55,61],doesn:59,doesnt:42,doing:[49,51,59,61],domain:61,don:[55,61],done:[49,53],dont:40,down:60,download:60,doxygen_should_skip_thi:[42,43],driver:60,dtype:58,due:[3,4],dump:[34,60],dump_build_info:[21,37,43,46,47,58],dure:[55,61],dynam:[44,45,58],each:[3,4,44,49,50,51,55,59],easi:49,easier:[52,53,55,59,61],easili:[3,4],edu:61,effect:[51,59,61],effici:55,either:[44,45,55,58,59,60],element:50,element_typ:42,els:[41,58],emit:49,empti:59,emum:[17,44],enabl:[3,4,25,57,58],encount:60,end:[42,55,59],end_dim:59,endif:[41,42,43],endl:42,enforc:59,engin:[1,2,32,36,44,45,49,52,53,56,58,59,61],engine_converted_from_jit:59,enginecap:[43,44,58],ensur:[33,51],enter:49,entri:[44,55],entropi:[31,33,61],enumer:[1,2,17,44],equival:[36,52,53,55,58,59],equivil:32,error:[17,49,51,53,57,59,60],etc:58,eval:59,evalu:[50,52,53],evaluated_value_map:[49,55],even:59,everi:59,everyth:17,exampl:[42,50,55,59,61],exception_elimin:51,execpt:51,execut:[51,58,59,61],execute_engin:[50,59],exhaust:59,exist:[4,32,35,36,58,60,61],expect:[51,55,59],explic:42,explicit:[3,4,43,51,56,61],explicitli:61,explict:42,explictli:[1,44],extend:55,extent:[56,59],extra:44,extra_info:[58,61],extrainfo:[0,3,4,21,32,36,37,43,46,47,58,59,61],extrainfostruct:[0,46],factori:[4,31,33,61],fail:59,fallback:55,fals:[1,2,3,4,42,43,44,58,59],fashion:59,fc1:59,fc2:59,fc3:59,feat:59,featur:61,fed:[3,4,59],feed:[31,33,59],feel:56,field:[3,4,61],file:[1,2,3,4,5,6,7,8,9,10,11,12,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,44,45,53,58,59,60,61],find:[4,59],first:[42,49,51,59,61],fix:44,fixed_s:[43,44],flatten:59,flatten_convert:59,float16:58,float32:58,flow:[55,59],fly:59,follow:[59,61],form:49,forward:[31,33,36,50,55,58,59,61],found:[43,59,60,61],fp16:[1,44,56,58,59],fp32:[1,44,56,61],
freed:55,freeze_modul:51,from:[1,2,3,4,31,33,42,44,45,49,50,51,52,53,55,59,61],full:[55,59,61],fulli:[35,51,58,59,61],fuse_flatten_linear:51,fuse_linear:51,fusion:55,gaurd:41,gcc:53,gear:61,gener:[3,4,33,50,51,53,55,59,61],get:[1,2,3,4,23,30,42,44,45,55,57,60,61],get_build_info:[21,37,43,46,47,58],get_is_colored_output_on:[18,38,40,46,47,57],get_logging_prefix:[18,38,40,46,47,57],get_reportable_log_level:[18,38,40,46,47,57],getattr:[51,59],getbatch:[3,4,42],getbatchs:[3,4,42],getdimens:[55,59],getoutput:[55,59],github:[54,59,60,61],given:[44,51,58,59],global:[27,59],gnu:60,goal:55,going:59,good:[42,55],got:59,gpu:[2,32,36,44,58,59],graph:[17,32,35,36,43,49,52,53,55,56,58,59],great:59,gtc:56,guard:51,guard_elimin:51,hack:42,half:[58,59],handl:51,happen:59,hardtanh:55,has:[49,51,53,55,59,61],hash:60,have:[33,42,49,55,56,59,60,61],haven:59,header:59,help:[26,49,55],helper:55,here:[42,49,50,59,60,61],hermet:60,hfile:[22,46],hidden:41,high:51,higher:[51,59],hinton:61,hold:[44,45,49,55,61],hood:53,how:[3,4,59],howev:33,html:[54,59,60,61],http:[54,59,60,61],http_archiv:60,idea:51,ifndef:[42,43],ifstream:42,iint8calibr:[3,4,31,33,42,43,44,61],iint8entropycalibrator2:[3,4,31,33,42,43,61],iint8minmaxcalibr:[31,33,61],ilay:55,imag:61,images_:61,implement:[3,4,50,59,61],implic:51,in_shap:59,in_tensor:59,includ:[14,16,17,30,34,40,41,42,43,48,58,59,60,61],includedirectori:[22,46],index:[54,56,61],inetworkdefinit:49,infer:[51,59,61],info:[17,32,36,43,44,55,57,59],inform:[28,30,34,49,56,58,59,61],infrastructur:61,ingest:53,inherit:[46,47,61],initi:42,inlin:[43,51,59],input0:59,input1:59,input2:59,input:[3,4,33,44,45,49,50,51,52,53,55,58,59,61],input_data:59,input_rang:[43,44],input_s:59,input_shap:[58,59,61],inputrang:[21,37,43,44,46,47,59],inputrangeclass:[0,46],inspect:[55,59],instal:[56,59],instanc:[51,59],instanti:[52,53,55,59],instatin:[1,2,44],instead:[49,51,59],instruct:59,insur:60,int16_t:43,int64_t:[43,44,45,61],int8:[1,42,44,56,58,61],int8_t:43,int8cachecalibr:[20,33,39,4
2,43,46,47],int8cachecalibratortempl:[0,46],int8calibr:[3,20,31,39,42,43,46,47],int8calibratorstruct:[0,46],integ:58,integr:56,interfac:[1,2,44,50,53,55,61],intermedi:[17,59],intern:[2,17,44,55,59],internal_error:57,internalerror:57,interpret:50,intro_to_torchscript_tutori:59,invok:59,ios:42,iostream:[20,42,59],is_train:61,iscustomclass:55,issu:[3,4,59,60],istensor:55,istream_iter:42,istreambuf_iter:42,it_:42,it_created_:42,itensor:[49,55,59],iter:[42,44,49,58],its:[33,49,50,55],itself:[1,2,44],ivalu:[49,50,55,59],jetson:58,jit:[32,35,36,43,49,50,51,52,53,54,55,58,59],just:[42,43,51,56,59],kchar:[1,43,44],kclip:55,kcpu:[2,44],kcuda:[2,42,44,59],kdebug:[17,40,42],kdefault:[43,44],kdla:[2,43,44],kei:[58,59],kernel:[44,55,58],kerror:[17,40],kfloat:[1,43,44],kgpu:[2,43,44],kgraph:[17,40,51],khalf:[1,43,44,59],ki8:61,kind:[49,58],kinfo:[17,40,42],kinternal_error:[17,40],know:[40,55],kriz:61,krizhevski:61,ksafe_dla:[43,44],ksafe_gpu:[43,44],ktest:61,ktrain:61,kwarn:[17,40],label:61,laid:59,lambda:[55,59],languag:59,larg:[52,53,59,61],larger:61,last:51,later:33,layer:[44,49,51,55,58,59,61],ld_library_path:60,ldd:60,learn:[56,61],leav:51,lenet:59,lenet_trt:[50,59],lenetclassifi:59,lenetfeatextractor:59,length:[3,4,42],let:[44,51,55],level:[18,23,27,28,38,40,42,46,47,51,53,57,59],levelnamespac:[0,46],leverag:61,lib:[51,59],librari:[30,43,50,52,53,55,59,60],libtorch:[4,34,55,59,60,61],libtrtorch:60,licens:43,like:[42,49,55,59,61],limit:[51,61],linear:59,link:[49,56],linux:[53,60],linux_x86_64:60,list:[18,19,20,21,35,48,49,55,58,59,60],listconstruct:49,live:55,load:[50,59,61],local:[51,59],locat:61,log:[16,17,19,21,22,37,42,43,46,47,48,51,55,58],log_debug:55,logger:57,loggingenum:[0,46],loglevel:57,look:[49,50,51,52,53,59,61],loop:51,loss:61,lot:55,lower:17,lower_graph:51,lower_tupl:51,loweralltupl:51,lowersimpletupl:51,lvl:[27,28,40],machin:[50,61],macro:[5,6,7,8,9,10,11,12,16,18,21,22,40,43,46,48],made:[51,52,53],mai:[49,53,59,61],main:[50,52,53,55,59],maintain:[50,55],major
:53,make:[49,59,60,61],make_data_load:[4,61],make_int8_cache_calibr:[21,39,43,46,47,61],make_int8_calibr:[21,33,39,43,46,47,61],manag:[49,50,52,53,55,59],map:[2,44,49,51,52,53,55,59,61],master:[54,60,61],match:[44,51,60],matmul:[51,59],matrix:54,matur:53,max:[43,44,45,55,58,59],max_batch_s:[43,44,58],max_pool2d:59,max_val:55,maximum:[44,45,58,59],mean:[44,55,56,58],mechan:55,meet:58,member:[44,45,58],memori:[20,21,42,43,51,55,59],messag:[17,27,28,57],metadata:[50,55],method:[32,35,36,51,55,58,59,60],method_nam:[32,35,43,58,59],min:[43,44,45,55,58,59],min_val:55,minim:[44,58,61],minimum:[44,45,57,59],minmax:[31,33,61],miss:59,mod:[59,61],mode:61,mode_:61,model:[59,61],modul:[32,35,36,43,50,52,53,55,56,58,61],modular:59,more:[49,56,59,61],most:53,move:[31,43,59,61],msg:[27,40,57],much:[55,61],multipl:61,must:[44,55,58,59,60],name:[3,4,32,35,42,55,58,59,60],namespac:[0,40,42,43,48,56,61],nativ:[53,54,59],native_funct:54,nbbind:[3,4,42],necessari:40,need:[1,2,28,33,41,44,49,51,55,59,60,61],nest:[46,47],net:[55,59],network:[31,33,55,59,61],new_lay:55,new_local_repositori:60,new_siz:61,next:[3,4,49,50,61],nice:60,ninja:60,nlp:[31,33,61],node:[51,55,59],node_info:[55,59],noexcept:61,none:55,norm:55,normal:[1,2,44,59,61],noskipw:42,note:[2,44,55],now:[53,55,59],nullptr:[42,43,44],num_avg_timing_it:[43,44,58],num_min_timing_it:[43,44,58],number:[3,4,44,51,55,58,59],nvidia:[32,36,43,54,58,59,60,61],nvinfer1:[3,4,31,33,42,43,44,55,61],object:[1,2,3,4,44,45,55,59],obvious:59,off:50,ofstream:[42,59],older:53,onc:[40,41,42,43,49,50,59,61],one:[51,55,57,59],ones:[40,59,60],onli:[2,3,4,17,33,42,44,53,55,57,58,59,61],onnx:51,onto:50,op_precis:[43,44,58,59,61],open:42,oper:[1,2,3,4,35,42,43,44,49,50,51,52,53,55,56,58,61],ops:[51,59],opset:[52,53],opt:[43,44,45,58,59,60],optim:[44,45,56,59],optimi:59,optimin:[44,45],option:[42,58,60,61],order:[44,55,59],org:[54,59,60,61],other:[1,2,43,44,49,56,58,59],our:[53,59],out:[35,42,49,51,52,53,55,57,58,59,60],out_shap:59,out_tensor:[55,59],out
put:[25,26,44,49,50,51,55,57,59],outself:59,over:[52,53],overrid:[3,4,31,33,42,61],overview:[54,56],own:[55,59],packag:[51,59],page:56,pair:[55,61],paramet:[1,2,3,4,26,27,28,31,32,33,35,36,44,45,49,51,55,57,58,59],parent:[14,15,16,18,19,20,21],pars:59,part:53,pass:[49,50,52,53,55,59,61],path:[4,13,14,15,16,31,33,59,60,61],pathwai:59,pattern:[55,59],perform:[31,33],performac:[44,45,61],phase:[17,55,59],pick:59,pip3:60,piplein:59,place:[51,60,61],plan:53,pleas:60,point:[58,59],pointer:[3,4,61],pop:50,post:[31,33,44,56],power:59,pragma:[40,41,42,43,61],pre_cxx11_abi:60,precis:[44,56,58,59,61],precompil:60,prefix:[24,26,40,57],preprint:61,preprocess:61,preserv:[59,61],prespect:59,pretti:59,previous:33,prim:[49,50,51,59],primarili:[53,59],print:[17,35,42,57,58],priorit:60,privat:[3,4,42,43,61],process:59,produc:[44,45,49,50,55,59],profil:[44,45],program:[18,19,20,21,33,48,52,53,56,59],propog:51,provid:[3,4,44,55,59,60,61],ptq:[3,4,16,21,22,37,43,46,47,48,56],ptq_calibr:[3,4,43,44,61],ptqtemplat:[0,46],pull:60,pure:35,purpos:60,push:50,python3:[51,59,60],python:53,python_api:54,pytorch:[50,52,53,55,58,59,60,61],quantiz:[31,33,56],quantizatiom:44,question:59,quickli:[59,61],quit:[55,59],rais:51,raiseexcept:51,rand:59,randn:59,rang:[44,45,58,59],rather:51,read:[3,4,31,33,42,61],readcalibrationcach:[3,4,42],realiz:50,realli:55,reason:[1,44,59],recalibr:33,recognit:61,recomend:[31,33],recommend:[31,33,59,60],record:[49,59],recurs:49,reduc:[51,52,53,61],refer:[50,52,53],referenc:60,refit:[43,44,58],reflect:43,regard:60,regist:[50,55],registernodeconversionpattern:[55,59],registri:[49,59],reinterpret_cast:42,relationship:[46,47],releas:60,relu:59,remain:[51,61],remove_contigu:51,remove_dropout:51,replac:51,report:[23,42],reportable_log_level:57,repositori:53,repres:[44,45,55,57],represent:[51,55,59],request:59,requir:[33,49,57,58,59,61],reserv:[42,43],resolv:[49,51,52,53],resourc:[49,61],respons:[33,50],restrict:[44,58],result:[49,51,52,53,58,59],reus:[51,61],right:[43,53,55],r
oot:[43,61],run:[2,32,44,49,50,52,53,55,56,58,59,60,61],runtim:[50,56,59],safe:[55,58],safe_dla:58,safe_gpu:58,safeti:[44,58],same:[50,59],sampl:61,save:[33,42,58,59],saw:59,scalar:55,scalartyp:[1,43,44],scale:61,schema:[55,59],scope:51,scratch:33,script:[35,51,58,59],script_model:59,scriptmodul:[58,59],sdk:54,seamlessli:56,search:56,section:61,see:[35,50,51,58,59],seekg:42,seem:42,select:[31,32,33,44,58,61],self:[50,51,55,59],sens:59,serial:[32,50,52,53,58,59],set:[3,4,17,26,28,32,33,36,44,45,49,51,52,53,56,57,58,59,61],set_is_colored_output_on:[18,38,40,46,47,57],set_logging_prefix:[18,38,40,46,47,57],set_reportable_log_level:[18,38,40,46,47,57],setalpha:55,setbeta:55,setnam:[55,59],setreshapedimens:59,setup:[60,61],sever:[17,27,57],sha256:60,shape:[44,45,55,58],ship:59,should:[1,3,4,33,43,44,49,55,56,57,58,61],shown:59,shuffl:59,side:[51,59],signifi:[44,45],significantli:51,similar:[55,59],simonyan:61,simpil:61,simpl:59,simplifi:49,sinc:[51,59,61],singl:[44,45,51,59,61],singular:55,site:[51,59],size:[3,4,42,44,45,51,58,59,61],size_t:[3,4,42,61],skipw:42,softmax:51,sole:61,some:[49,50,51,52,53,55,59,61],someth:[41,51],sort:55,sourc:[43,53,58],space:61,specif:[36,51,52,53,58],specifi:[3,4,55,56,57,58,59],specifii:58,src:54,sstream:[20,42],stabl:54,stack:[50,61],stage:49,stand:50,standard:56,start:[49,60],start_dim:59,state:[49,55,59],statement:51,static_cast:42,std:[3,4,24,27,29,30,31,32,33,35,40,42,43,44,45,59,61],stdout:[34,57,58],steamlin:61,step:[56,61],still:[42,61],stitch:59,stop:59,storag:61,store:[4,49,55,59],str:[19,41,42,46,47,57,58],straight:55,strict_typ:[43,44,58],strictli:58,string:[3,4,18,20,21,24,27,29,30,31,32,33,35,40,42,43,55,58,59,61],stringstream:42,strip_prefix:60,struct:[1,2,21,37,43,61],structur:[33,44,53,55,59],style:43,sub:59,subdirectori:48,subgraph:[49,51,55,59],subject:53,submodul:59,subset:61,suit:56,support:[1,2,26,35,44,45,54,58,59],sure:60,system:[49,55,56,60],take:[32,35,36,49,50,52,53,55,58,59,61],talk:56,tar:[60,61],tarbal:[59,61
],target:[2,44,53,56,58,59,61],targets_:61,task:[31,33,61],techinqu:59,techniqu:61,tell:[55,59],tellg:42,templat:[20,21,39,42,43,46,47,59],tensor:[44,45,49,50,55,59,61],tensorcontain:55,tensorlist:55,tensorrt:[1,2,3,4,31,32,33,34,36,43,44,45,49,51,52,53,55,56,58,59,61],term:61,termin:[26,59],test:53,text:57,than:[51,56],thats:[49,61],thei:[44,49,51,55,60],them:[50,59],theori:49,therebi:50,therefor:[33,59],thi:[1,2,31,33,40,41,42,43,44,45,49,50,51,52,53,55,59,60,61],think:55,third_parti:[53,60],those:49,though:[53,55,59],three:[44,45,52,53],thrid_parti:60,through:[49,50,56,59],time:[44,49,51,52,53,55,58,59,61],tini:61,tmp:59,tocustomclass:55,todim:59,togeth:[49,55,59],too:60,tool:[55,59],top:53,torch:[1,2,4,31,32,33,35,36,42,43,44,50,51,54,55,58,59,61],torch_scirpt_modul:59,torch_script_modul:59,torchscript:[32,35,36,52,53,58],toronto:61,tovec:59,toward:61,trace:[58,59],traced_model:59,track:[55,61],tradit:61,traget:36,train:[31,33,44,56,59],trainabl:51,transform:[59,61],translat:59,travers:[52,53],tree:[43,61],trigger:59,trim:61,trt:[1,2,3,4,44,49,50,51,55,59],trt_mod:[59,61],trt_ts_modul:59,trtorch:[0,1,2,3,4,15,17,22,40,41,42,44,45,47,48,49,50,51,52,53,60,61],trtorch_api:[19,23,24,25,26,27,28,29,30,31,32,33,34,35,36,40,41,43,46,47],trtorch_check:55,trtorch_hidden:[19,41,46,47],trtorch_major_vers:[19,41,46,47],trtorch_minor_vers:[19,41,46,47],trtorch_patch_vers:[19,41,46,47],trtorch_unus:55,trtorch_vers:[19,41,46,47],trtorchfil:[22,46],trtorchnamespac:[0,46],tupl:[58,59],tupleconstruct:51,tupleunpack:51,tutori:[59,61],two:[55,59,60,61],type:[1,2,31,45,46,47,49,50,55,57,58,59,61],typenam:[3,4,31,33,42,43],typic:[49,55],uint64_t:[43,44],unabl:[55,59],uncom:60,under:[43,53],underli:[1,2,44,55],union:[55,59],uniqu:4,unique_ptr:[4,31],unlik:56,unpack_addmm:51,unpack_log_softmax:51,unqiue_ptr:4,unsetf:42,unstabl:53,unsupport:[35,58],unsur:55,untest:53,until:[49,53,55],unwrap:55,unwraptodoubl:55,unwraptoint:59,upstream:59,url:60,use:[1,2,3,4,31,33,44,49,50,53,55,57,58,59,
60,61],use_cach:[3,4,31,42,43],use_cache_:42,use_subset:61,used:[1,2,3,4,44,45,49,50,51,55,57,58,59,61],useful:55,user:[40,52,53,59,60,61],uses:[31,33,42,55,61],using:[1,2,32,36,42,44,55,56,58,59,61],using_int:59,usr:60,util:[55,59],valid:[2,44,55],valu:[1,2,17,43,44,49,50,55,59],value_tensor_map:[49,55],vector:[20,21,42,43,44,45,59,61],veri:61,version:[30,34,53,60],vgg16:61,via:[56,58],virtual:61,wai:59,want:[40,44,59],warn:[17,42,55,57],websit:60,weight:[49,59],welcom:59,well:[59,61],were:59,what:[4,51,59],whatev:50,when:[26,42,44,49,50,51,52,53,55,57,58,59,60,61],where:[49,51,55,59,61],whether:[4,61],which:[2,32,33,36,44,49,50,51,52,53,55,58,59,61],whl:60,whose:51,within:[50,52,53],without:[55,59,61],work:[42,51,53,55,61],worker:61,workspac:[44,58,60,61],workspace_s:[43,44,58,61],would:[55,59,60],wrap:[52,53,59],wrapper:55,write:[3,4,31,33,42,49,56,59,61],writecalibrationcach:[3,4,42],www:[59,60,61],x86_64:[53,60],xstr:[19,41,46,47],yaml:54,you:[1,2,31,33,44,49,50,51,53,55,56,58,59,60,61],your:[55,56,59,60],yourself:59,zisserman:61},titles:["Class Hierarchy","Class ExtraInfo::DataType","Class ExtraInfo::DeviceType","Template Class Int8CacheCalibrator","Template Class Int8Calibrator","Define STR","Define TRTORCH_API","Define TRTORCH_HIDDEN","Define TRTORCH_MAJOR_VERSION","Define TRTORCH_PATCH_VERSION","Define TRTORCH_VERSION","Define XSTR","Define TRTORCH_MINOR_VERSION","Directory cpp","Directory api","Directory include","Directory trtorch","Enum Level","File logging.h","File macros.h","File ptq.h","File trtorch.h","File Hierarchy","Function trtorch::logging::get_reportable_log_level","Function trtorch::logging::set_logging_prefix","Function trtorch::logging::get_is_colored_output_on","Function trtorch::logging::set_is_colored_output_on","Function trtorch::logging::log","Function trtorch::logging::set_reportable_log_level","Function trtorch::logging::get_logging_prefix","Function trtorch::get_build_info","Template Function 
trtorch::ptq::make_int8_calibrator","Function trtorch::ConvertGraphToTRTEngine","Template Function trtorch::ptq::make_int8_cache_calibrator","Function trtorch::dump_build_info","Function trtorch::CheckMethodOperatorSupport","Function trtorch::CompileGraph","Namespace trtorch","Namespace trtorch::logging","Namespace trtorch::ptq","Program Listing for File logging.h","Program Listing for File macros.h","Program Listing for File ptq.h","Program Listing for File trtorch.h","Struct ExtraInfo","Struct ExtraInfo::InputRange","TRTorch C++ API","Full API","Full API","Conversion Phase","Execution Phase","Lowering Phase","Compiler Phases","System Overview","Useful Links for TRTorch Development","Writing Converters","TRTorch","trtorch.logging","trtorch","Getting Started","Installation","Post Training Quantization (PTQ)"],titleterms:{"class":[0,1,2,3,4,20,21,37,39,46,47],"enum":[17,18,38,46,47,58],"function":[18,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,46,47,54,58],The:59,Used:51,Useful:54,addmm:51,advic:55,ahead:56,api:[14,18,19,20,21,46,47,48,54,56],applic:61,arg:55,avail:54,background:[50,55],base:[3,4],binari:60,build:60,checkmethodoperatorsupport:35,citat:61,code:51,compil:[52,53,56,59,60],compilegraph:36,construct:50,content:[18,19,20,21,37,38,39],context:55,contigu:51,contract:55,contributor:56,convers:[49,52,53,55],convert:[49,55,59],convertgraphtotrtengin:32,cpp:[13,18,19,20,21],creat:[59,61],cudnn:60,custom:59,datatyp:1,dead:51,debug:60,defin:[5,6,7,8,9,10,11,12,19,46,47],definit:[18,19,20,21],depend:60,develop:54,devicetyp:2,dimens:54,directori:[13,14,15,16,48],distribut:60,documen:56,document:[1,2,3,4,5,6,7,8,9,10,11,12,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,44,45,54,56],dropout:51,dump_build_info:34,easier:54,elimin:51,engin:50,evalu:49,execept:51,execut:[50,52,53],executor:50,expect:54,extrainfo:[1,2,44,45],file:[16,18,19,20,21,22,40,41,42,43,46,48],flatten:51,freez:51,from:60,full:[46,47,48],fuse:51,gaurd:51,get:[56,59],get_build_info:30,get_is_
colored_output_on:25,get_logging_prefix:29,get_reportable_log_level:23,gpu:56,graph:[50,51],guarante:55,hierarchi:[0,22,46],hood:59,how:61,includ:[15,18,19,20,21],indic:56,inherit:[3,4],inputrang:45,instal:60,int8cachecalibr:3,int8calibr:4,jit:56,layer:54,level:17,linear:51,link:54,list:[40,41,42,43],local:60,log:[18,23,24,25,26,27,28,29,38,40,57],logsoftmax:51,lower:[51,52,53],macro:[19,41],make_int8_cache_calibr:33,make_int8_calibr:31,modul:[51,59],namespac:[18,20,21,37,38,39,46,47],native_op:54,nest:[1,2,44,45],node:49,nvidia:56,oper:59,other:55,overview:53,own:61,packag:60,pass:51,pattern:51,phase:[49,50,51,52,53],post:61,program:[40,41,42,43],ptq:[20,31,33,39,42,61],python:[54,56,59,60],pytorch:[54,56],quantiz:61,read:54,redund:51,regist:59,relationship:[1,2,3,4,44,45],remov:51,respons:55,result:50,set_is_colored_output_on:26,set_logging_prefix:24,set_reportable_log_level:28,sometim:54,sourc:60,start:[56,59],str:5,struct:[44,45,46,47],subdirectori:[13,14,15],submodul:58,system:53,tarbal:60,templat:[3,4,31,33],tensorrt:[50,54,60],time:56,torchscript:[56,59],train:61,trtorch:[16,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,43,46,54,56,57,58,59],trtorch_api:6,trtorch_hidden:7,trtorch_major_vers:8,trtorch_minor_vers:12,trtorch_patch_vers:9,trtorch_vers:10,tupl:51,type:[3,4,44],under:59,unpack:51,unsupport:59,using:60,weight:55,what:55,work:59,write:55,xstr:11,your:61}}) \ No newline at end of file 
+Search.setIndex({docnames:["_cpp_api/class_view_hierarchy","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType","_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator","_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator","_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502","_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f","_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055","_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba","_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f","_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e","_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da","_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84","_cpp_api/dir_cpp","_cpp_api/dir_cpp_api","_cpp_api/dir_cpp_api_include","_cpp_api/dir_cpp_api_include_trtorch","_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4","_cpp_api/file_cpp_api_include_trtorch_logging.h","_cpp_api/file_cpp_api_include_trtorch_macros.h","_cpp_api/file_cpp_api_include_trtorch_ptq.h","_cpp_api/file_cpp_api_include_trtorch_trtorch.h","_cpp_api/file_view_hierarchy","_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb","_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb","_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949","_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f","_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad","_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b","_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a","_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447","_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247","_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804","_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31","_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10","_cpp_api/function_trtorch_8h_1ab01696cfe
08b6a5293c55935a9713c25","_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5","_cpp_api/namespace_trtorch","_cpp_api/namespace_trtorch__logging","_cpp_api/namespace_trtorch__ptq","_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h","_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h","_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h","_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h","_cpp_api/structtrtorch_1_1ExtraInfo","_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange","_cpp_api/trtorch_cpp","_cpp_api/unabridged_api","_cpp_api/unabridged_orphan","contributors/conversion","contributors/execution","contributors/lowering","contributors/phases","contributors/system_overview","contributors/useful_links","contributors/writing_converters","index","py_api/logging","py_api/trtorch","tutorials/getting_started","tutorials/installation","tutorials/ptq","tutorials/trtorchc"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,sphinx:56},filenames:["_cpp_api/class_view_hierarchy.rst","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.rst","_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.rst","_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.rst","_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.rst","_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.rst","_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.rst","_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.rst","_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.rst","_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.rst","_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.rst","_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.rst","_cpp_api/define_macr
os_8h_1ae1c56ab8a40af292a9a4964651524d84.rst","_cpp_api/dir_cpp.rst","_cpp_api/dir_cpp_api.rst","_cpp_api/dir_cpp_api_include.rst","_cpp_api/dir_cpp_api_include_trtorch.rst","_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.rst","_cpp_api/file_cpp_api_include_trtorch_logging.h.rst","_cpp_api/file_cpp_api_include_trtorch_macros.h.rst","_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst","_cpp_api/file_cpp_api_include_trtorch_trtorch.h.rst","_cpp_api/file_view_hierarchy.rst","_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.rst","_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.rst","_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.rst","_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.rst","_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.rst","_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.rst","_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.rst","_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.rst","_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.rst","_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.rst","_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.rst","_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.rst","_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.rst","_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.rst","_cpp_api/namespace_trtorch.rst","_cpp_api/namespace_trtorch__logging.rst","_cpp_api/namespace_trtorch__ptq.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst","_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst","_cpp_api/structtrtorch_1_1ExtraInfo.rst","_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.rst","_cpp_api/trtorch_cpp.rst","_cpp_api/unabridged_
api.rst","_cpp_api/unabridged_orphan.rst","contributors/conversion.rst","contributors/execution.rst","contributors/lowering.rst","contributors/phases.rst","contributors/system_overview.rst","contributors/useful_links.rst","contributors/writing_converters.rst","index.rst","py_api/logging.rst","py_api/trtorch.rst","tutorials/getting_started.rst","tutorials/installation.rst","tutorials/ptq.rst","tutorials/trtorchc.rst"],objects:{"":{"logging::trtorch::Level":[17,1,1,"_CPPv4N7logging7trtorch5LevelE"],"logging::trtorch::Level::kDEBUG":[17,2,1,"_CPPv4N7logging7trtorch5Level6kDEBUGE"],"logging::trtorch::Level::kERROR":[17,2,1,"_CPPv4N7logging7trtorch5Level6kERRORE"],"logging::trtorch::Level::kGRAPH":[17,2,1,"_CPPv4N7logging7trtorch5Level6kGRAPHE"],"logging::trtorch::Level::kINFO":[17,2,1,"_CPPv4N7logging7trtorch5Level5kINFOE"],"logging::trtorch::Level::kINTERNAL_ERROR":[17,2,1,"_CPPv4N7logging7trtorch5Level15kINTERNAL_ERRORE"],"logging::trtorch::Level::kWARNING":[17,2,1,"_CPPv4N7logging7trtorch5Level8kWARNINGE"],"logging::trtorch::get_is_colored_output_on":[25,3,1,"_CPPv4N7logging7trtorch24get_is_colored_output_onEv"],"logging::trtorch::get_logging_prefix":[29,3,1,"_CPPv4N7logging7trtorch18get_logging_prefixEv"],"logging::trtorch::get_reportable_log_level":[23,3,1,"_CPPv4N7logging7trtorch24get_reportable_log_levelEv"],"logging::trtorch::kDEBUG":[17,2,1,"_CPPv4N7logging7trtorch5Level6kDEBUGE"],"logging::trtorch::kERROR":[17,2,1,"_CPPv4N7logging7trtorch5Level6kERRORE"],"logging::trtorch::kGRAPH":[17,2,1,"_CPPv4N7logging7trtorch5Level6kGRAPHE"],"logging::trtorch::kINFO":[17,2,1,"_CPPv4N7logging7trtorch5Level5kINFOE"],"logging::trtorch::kINTERNAL_ERROR":[17,2,1,"_CPPv4N7logging7trtorch5Level15kINTERNAL_ERRORE"],"logging::trtorch::kWARNING":[17,2,1,"_CPPv4N7logging7trtorch5Level8kWARNINGE"],"logging::trtorch::log":[27,3,1,"_CPPv4N7logging7trtorch3logE5LevelNSt6stringE"],"logging::trtorch::log::lvl":[27,4,1,"_CPPv4N7logging7trtorch3logE5LevelNSt6stringE"],"logging::trtorch::log:
:msg":[27,4,1,"_CPPv4N7logging7trtorch3logE5LevelNSt6stringE"],"logging::trtorch::set_is_colored_output_on":[26,3,1,"_CPPv4N7logging7trtorch24set_is_colored_output_onEb"],"logging::trtorch::set_is_colored_output_on::colored_output_on":[26,4,1,"_CPPv4N7logging7trtorch24set_is_colored_output_onEb"],"logging::trtorch::set_logging_prefix":[24,3,1,"_CPPv4N7logging7trtorch18set_logging_prefixENSt6stringE"],"logging::trtorch::set_logging_prefix::prefix":[24,4,1,"_CPPv4N7logging7trtorch18set_logging_prefixENSt6stringE"],"logging::trtorch::set_reportable_log_level":[28,3,1,"_CPPv4N7logging7trtorch24set_reportable_log_levelE5Level"],"logging::trtorch::set_reportable_log_level::lvl":[28,4,1,"_CPPv4N7logging7trtorch24set_reportable_log_levelE5Level"],"ptq::trtorch::make_int8_cache_calibrator":[33,3,1,"_CPPv4I0EN3ptq7trtorch26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE"],"ptq::trtorch::make_int8_cache_calibrator::Algorithm":[33,5,1,"_CPPv4I0EN3ptq7trtorch26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE"],"ptq::trtorch::make_int8_cache_calibrator::cache_file_path":[33,4,1,"_CPPv4I0EN3ptq7trtorch26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE"],"ptq::trtorch::make_int8_calibrator":[31,3,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::Algorithm":[31,5,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::DataLoader":[31,5,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::cache_file_path":[31,4,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::dataloader":[31,4,1,"_CPPv4I00EN3ptq7trtorch20
make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"ptq::trtorch::make_int8_calibrator::use_cache":[31,4,1,"_CPPv4I00EN3ptq7trtorch20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb"],"trtorch::CheckMethodOperatorSupport":[35,3,1,"_CPPv4N7trtorch26CheckMethodOperatorSupportERKN5torch3jit6ModuleENSt6stringE"],"trtorch::CheckMethodOperatorSupport::method_name":[35,4,1,"_CPPv4N7trtorch26CheckMethodOperatorSupportERKN5torch3jit6ModuleENSt6stringE"],"trtorch::CheckMethodOperatorSupport::module":[35,4,1,"_CPPv4N7trtorch26CheckMethodOperatorSupportERKN5torch3jit6ModuleENSt6stringE"],"trtorch::CompileGraph":[36,3,1,"_CPPv4N7trtorch12CompileGraphERKN5torch3jit6ModuleE9ExtraInfo"],"trtorch::CompileGraph::info":[36,4,1,"_CPPv4N7trtorch12CompileGraphERKN5torch3jit6ModuleE9ExtraInfo"],"trtorch::CompileGraph::module":[36,4,1,"_CPPv4N7trtorch12CompileGraphERKN5torch3jit6ModuleE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine":[32,3,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine::info":[32,4,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine::method_name":[32,4,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ConvertGraphToTRTEngine::module":[32,4,1,"_CPPv4N7trtorch23ConvertGraphToTRTEngineERKN5torch3jit6ModuleENSt6stringE9ExtraInfo"],"trtorch::ExtraInfo":[44,6,1,"_CPPv4N7trtorch9ExtraInfoE"],"trtorch::ExtraInfo::DataType":[44,6,1,"_CPPv4N7trtorch9ExtraInfo8DataTypeE"],"trtorch::ExtraInfo::DataType::DataType":[44,3,1,"_CPPv4N7trtorch9ExtraInfo8DataType8DataTypeEv"],"trtorch::ExtraInfo::DataType::DataType::t":[44,4,1,"_CPPv4N7trtorch9ExtraInfo8DataType8DataTypeEN3c1010ScalarTypeE"],"trtorch::ExtraInfo::DataType::Value":[44,1,1,"_CPPv4N7trtorch9ExtraInfo8DataType5ValueE"],"trtorch::ExtraInfo::DataType::Value::kChar
":[44,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value5kCharE"],"trtorch::ExtraInfo::DataType::Value::kFloat":[44,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value6kFloatE"],"trtorch::ExtraInfo::DataType::Value::kHalf":[44,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value5kHalfE"],"trtorch::ExtraInfo::DataType::kChar":[1,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value5kCharE"],"trtorch::ExtraInfo::DataType::kFloat":[1,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value6kFloatE"],"trtorch::ExtraInfo::DataType::kHalf":[1,2,1,"_CPPv4N7trtorch9ExtraInfo8DataType5Value5kHalfE"],"trtorch::ExtraInfo::DataType::operator Value":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypecv5ValueEv"],"trtorch::ExtraInfo::DataType::operator bool":[44,3,1,"_CPPv4N7trtorch9ExtraInfo8DataTypecvbEv"],"trtorch::ExtraInfo::DataType::operator!=":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"],"trtorch::ExtraInfo::DataType::operator!=::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"],"trtorch::ExtraInfo::DataType::operator==":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"],"trtorch::ExtraInfo::DataType::operator==::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"],"trtorch::ExtraInfo::DeviceType":[44,6,1,"_CPPv4N7trtorch9ExtraInfo10DeviceTypeE"],"trtorch::ExtraInfo::DeviceType::DeviceType":[44,3,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType10DeviceTypeEv"],"trtorch::ExtraInfo::DeviceType::DeviceType::t":[44,4,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType10DeviceTypeEN3c1010DeviceTypeE"],"trtorch::ExtraInfo::DeviceType::Value":[44,1,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5ValueE"],"trtorch::ExtraInfo::DeviceType::Value::kDLA":[44,2,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kDLAE"],"trtorch::ExtraInfo::DeviceType::Value::kGPU":[44,2,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kGPUE"],"trtorch::ExtraInfo::DeviceType::kDLA":[2,2,1,"_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kDLAE"],"trtorch::ExtraInfo::DeviceType::kGPU":[2,2,1,"_CPPv4N7t
rtorch9ExtraInfo10DeviceType5Value4kGPUE"],"trtorch::ExtraInfo::DeviceType::operator Value":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypecv5ValueEv"],"trtorch::ExtraInfo::DeviceType::operator bool":[44,3,1,"_CPPv4N7trtorch9ExtraInfo10DeviceTypecvbEv"],"trtorch::ExtraInfo::DeviceType::operator!=":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeneE10DeviceType"],"trtorch::ExtraInfo::DeviceType::operator!=::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeneE10DeviceType"],"trtorch::ExtraInfo::DeviceType::operator==":[44,3,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeeqE10DeviceType"],"trtorch::ExtraInfo::DeviceType::operator==::other":[44,4,1,"_CPPv4NK7trtorch9ExtraInfo10DeviceTypeeqE10DeviceType"],"trtorch::ExtraInfo::EngineCapability":[44,1,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapabilityE"],"trtorch::ExtraInfo::EngineCapability::kDEFAULT":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability8kDEFAULTE"],"trtorch::ExtraInfo::EngineCapability::kSAFE_DLA":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_DLAE"],"trtorch::ExtraInfo::EngineCapability::kSAFE_GPU":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_GPUE"],"trtorch::ExtraInfo::ExtraInfo":[44,3,1,"_CPPv4N7trtorch9ExtraInfo9ExtraInfoENSt6vectorINSt6vectorI7int64_tEEEE"],"trtorch::ExtraInfo::ExtraInfo::fixed_sizes":[44,4,1,"_CPPv4N7trtorch9ExtraInfo9ExtraInfoENSt6vectorINSt6vectorI7int64_tEEEE"],"trtorch::ExtraInfo::ExtraInfo::input_ranges":[44,4,1,"_CPPv4N7trtorch9ExtraInfo9ExtraInfoENSt6vectorI10InputRangeEE"],"trtorch::ExtraInfo::InputRange":[45,6,1,"_CPPv4N7trtorch9ExtraInfo10InputRangeE"],"trtorch::ExtraInfo::InputRange::InputRange":[45,3,1,"_CPPv4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::InputRange::max":[45,4,1,"_CPPv4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::InputRange::min":[45,4,1,"_CPP
v4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::InputRange::opt":[45,4,1,"_CPPv4N7trtorch9ExtraInfo10InputRange10InputRangeENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::max":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3maxE"],"trtorch::ExtraInfo::InputRange::min":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3minE"],"trtorch::ExtraInfo::InputRange::opt":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3optE"],"trtorch::ExtraInfo::allow_gpu_fallback":[44,7,1,"_CPPv4N7trtorch9ExtraInfo18allow_gpu_fallbackE"],"trtorch::ExtraInfo::capability":[44,7,1,"_CPPv4N7trtorch9ExtraInfo10capabilityE"],"trtorch::ExtraInfo::debug":[44,7,1,"_CPPv4N7trtorch9ExtraInfo5debugE"],"trtorch::ExtraInfo::device":[44,7,1,"_CPPv4N7trtorch9ExtraInfo6deviceE"],"trtorch::ExtraInfo::input_ranges":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12input_rangesE"],"trtorch::ExtraInfo::kDEFAULT":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability8kDEFAULTE"],"trtorch::ExtraInfo::kSAFE_DLA":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_DLAE"],"trtorch::ExtraInfo::kSAFE_GPU":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_GPUE"],"trtorch::ExtraInfo::max_batch_size":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14max_batch_sizeE"],"trtorch::ExtraInfo::num_avg_timing_iters":[44,7,1,"_CPPv4N7trtorch9ExtraInfo20num_avg_timing_itersE"],"trtorch::ExtraInfo::num_min_timing_iters":[44,7,1,"_CPPv4N7trtorch9ExtraInfo20num_min_timing_itersE"],"trtorch::ExtraInfo::op_precision":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12op_precisionE"],"trtorch::ExtraInfo::ptq_calibrator":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14ptq_calibratorE"],"trtorch::ExtraInfo::refit":[44,7,1,"_CPPv4N7trtorch9ExtraInfo5refitE"],"trtorch::ExtraInfo::strict_types":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12strict_typesE"],"trtorch::ExtraInfo::workspace_size":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14workspace_sizeE"],"trto
rch::dump_build_info":[34,3,1,"_CPPv4N7trtorch15dump_build_infoEv"],"trtorch::get_build_info":[30,3,1,"_CPPv4N7trtorch14get_build_infoEv"],"trtorch::ptq::Int8CacheCalibrator":[3,6,1,"_CPPv4I0EN7trtorch3ptq19Int8CacheCalibratorE"],"trtorch::ptq::Int8CacheCalibrator::Algorithm":[3,5,1,"_CPPv4I0EN7trtorch3ptq19Int8CacheCalibratorE"],"trtorch::ptq::Int8CacheCalibrator::Int8CacheCalibrator":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE"],"trtorch::ptq::Int8CacheCalibrator::Int8CacheCalibrator::cache_file_path":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE"],"trtorch::ptq::Int8CacheCalibrator::getBatch":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::bindings":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::names":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::nbBindings":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatchSize":[3,3,1,"_CPPv4NK7trtorch3ptq19Int8CacheCalibrator12getBatchSizeEv"],"trtorch::ptq::Int8CacheCalibrator::operator 
nvinfer1::IInt8Calibrator*":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibratorcvPN8nvinfer115IInt8CalibratorEEv"],"trtorch::ptq::Int8CacheCalibrator::readCalibrationCache":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8CacheCalibrator::readCalibrationCache::length":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache::cache":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache::length":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator":[4,6,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::Algorithm":[4,5,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::DataLoaderUniquePtr":[4,5,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::Int8Calibrator":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::cache_file_path":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::dataloader":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::use_cache":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::getBatch":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::bindings":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::
getBatch::names":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::nbBindings":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatchSize":[4,3,1,"_CPPv4NK7trtorch3ptq14Int8Calibrator12getBatchSizeEv"],"trtorch::ptq::Int8Calibrator::operator nvinfer1::IInt8Calibrator*":[4,3,1,"_CPPv4N7trtorch3ptq14Int8CalibratorcvPN8nvinfer115IInt8CalibratorEEv"],"trtorch::ptq::Int8Calibrator::readCalibrationCache":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8Calibrator::readCalibrationCache::length":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache::cache":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache::length":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],STR:[5,0,1,"c.STR"],TRTORCH_API:[6,0,1,"c.TRTORCH_API"],TRTORCH_HIDDEN:[7,0,1,"c.TRTORCH_HIDDEN"],TRTORCH_MAJOR_VERSION:[8,0,1,"c.TRTORCH_MAJOR_VERSION"],TRTORCH_MINOR_VERSION:[12,0,1,"c.TRTORCH_MINOR_VERSION"],TRTORCH_PATCH_VERSION:[9,0,1,"c.TRTORCH_PATCH_VERSION"],TRTORCH_VERSION:[10,0,1,"c.TRTORCH_VERSION"],XSTR:[11,0,1,"c.XSTR"],trtorch:[58,8,0,"-"]},"trtorch.logging":{Level:[57,9,1,""],get_is_colored_output_on:[57,10,1,""],get_logging_prefix:[57,10,1,""],get_reportable_log_level:[57,10,1,""],log:[57,10,1,""],set_is_colored_output_on:[57,10,1,""],set_logging_prefix:[57,10,1,""],set_reportable_log_level:[57,10,1,""]},"trtorch.logging.Level":{Debug:[57,11,1,""],Error:[57,11,1,""],Info:[57,11,1,""],InternalError:[57,11,1,""],Warning:[57,11,1,""]},trtorch:{DeviceType:[58,9,1,""],EngineCapability:[58,9,1,""],check_method_op_support:[58,10,1,""],co
mpile:[58,10,1,""],convert_method_to_trt_engine:[58,10,1,""],dtype:[58,9,1,""],dump_build_info:[58,10,1,""],get_build_info:[58,10,1,""],logging:[57,8,0,"-"]}},objnames:{"0":["c","macro","C macro"],"1":["cpp","enum","C++ enum"],"10":["py","function","Python function"],"11":["py","attribute","Python attribute"],"2":["cpp","enumerator","C++ enumerator"],"3":["cpp","function","C++ function"],"4":["cpp","functionParam","functionParam"],"5":["cpp","templateParam","templateParam"],"6":["cpp","class","C++ class"],"7":["cpp","member","C++ member"],"8":["py","module","Python module"],"9":["py","class","Python class"]},objtypes:{"0":"c:macro","1":"cpp:enum","10":"py:function","11":"py:attribute","2":"cpp:enumerator","3":"cpp:function","4":"cpp:functionParam","5":"cpp:templateParam","6":"cpp:class","7":"cpp:member","8":"py:module","9":"py:class"},terms:{"abstract":[50,55],"byte":58,"case":[1,2,44,49,55,61],"catch":59,"char":[3,4,42,59],"class":[31,33,42,43,44,48,55,57,58,59,61],"const":[1,2,3,4,31,32,33,35,36,42,43,44,51,55,59,61],"default":[1,2,3,4,17,31,33,41,43,44,58,61,62],"enum":[1,2,40,43,44,48,57,61],"final":49,"float":[58,59,62],"function":[1,2,3,4,44,45,48,50,51,55,59],"import":[59,62],"int":[3,4,42,50,59],"long":49,"new":[1,2,3,4,36,44,45,50,53,55,57,59],"public":[1,2,3,4,42,43,44,45,61],"return":[1,2,3,4,23,25,30,31,32,33,35,36,40,41,42,43,44,50,51,52,53,55,57,58,59,61],"static":[44,45,49,55,58,59],"super":[42,59],"throw":[51,59],"true":[1,2,4,43,44,51,55,58,59,61],"try":[53,59],"void":[3,4,24,26,27,28,34,40,42,43],"while":61,And:59,Are:40,But:59,For:[49,59],Its:55,Not:3,One:[58,59],PRs:59,Thats:59,The:[2,44,49,50,51,52,53,55,57,60,61,62],Then:[60,61],There:[4,49,55,59,61],These:[49,50],Use:[44,55,58,61],Useful:56,Using:4,Will:35,With:[59,61],___torch_mangle_10:[50,59],___torch_mangle_5:59,___torch_mangle_9:59,__attribute__:41,__gnuc__:41,__init__:59,__torch__:[50,59],__visibility__:41,_all_:51,_convolut:59,aarch64:[53,60],abl:[49,51,55,56,61],about:[49,50,55,58,62],
abov:[28,59],accept:[44,45,50,55,62],access:[51,55,56,59],accord:55,accuraci:61,across:51,act:50,acthardtanh:55,activ:[59,61],activationtyp:55,actual:[50,51,55,57,59],add:[27,49,51,55,57,59],add_:[51,59],addactiv:55,added:[28,49],addenginetograph:[50,59],addit:[51,59],addlay:59,addshuffl:59,advanc:61,after:[49,56,59,62],again:[42,55],against:[59,62],ahead:59,aim:51,algorithm:[3,4,31,33,42,43,61],all:[17,43,50,51,58,59,61,62],alloc:55,allow:[44,45,49,51,58,62],allow_gpu_fallback:[43,44,58],alreadi:[49,51,59,62],also:[33,49,55,56,59,60,61],alwai:[3,4,26],analogu:55,ani:[49,55,58,59,60,62],annot:[55,59],anoth:59,aot:[56,59],api:[13,15,16,40,41,42,43,53,55,58,59,61],apidirectori:[22,46],appli:61,applic:[2,33,44,51,59,62],aquir:59,architectur:56,archiv:60,aren:59,arg:[49,59],argc:59,argument:[50,51,55,59,62],argv:59,around:[50,55,59],arrai:[3,4,49],arrayref:[43,44,45],arxiv:61,aspect:62,assembl:[49,59],assign:[3,4,50],associ:[49,55,59],associatevalueandivalu:55,associatevalueandtensor:[55,59],aten:[51,54,55,59],attribut:51,auto:[42,55,59,61],avail:55,averag:[44,58,62],avg:62,back:[50,51,53,59],back_insert:42,background:59,base:[34,46,47,50,57,59,60,61],basic:62,batch:[3,4,42,44,55,58,61,62],batch_norm:55,batch_siz:[42,61],batched_data_:42,batchnorm:51,batchtyp:42,bazel:[53,60],bdist_wheel:60,becaus:[55,59],becom:55,been:[49,55,59],befor:[51,53,55,56,59,60],begin:[42,60],beginn:59,behavior:58,being:59,below:[55,59],benefit:[55,59],best:[44,45],better:[59,61],between:[55,61],bia:[51,59],bin:60,binari:[42,61],bind:[3,4,42],bit:[55,58,59],blob:54,block0:51,block1:51,block:49,bool:[1,2,3,4,25,26,31,35,40,42,43,44,51,55,57,58,59,61],both:59,briefli:59,bsd:43,buffer:[3,4],bug:60,build:[30,31,33,44,49,52,53,55,58,59,61,62],build_fil:60,builderconfig:43,built:[60,62],c10:[1,2,43,44,45,59,61],c_api:54,c_str:[55,59],cach:[3,4,31,33,42,61,62],cache_:42,cache_fil:42,cache_file_path:[3,4,31,33,42,43],cache_file_path_:42,cache_size_:42,calcul:[49,59],calibr:[3,4,31,33,42,44,61,62],cali
bration_cache_fil:[31,33,61],calibration_dataload:[31,61],calibration_dataset:61,call:[31,33,36,44,50,51,55,58,59],callmethod:59,can:[1,2,4,31,32,33,44,45,49,50,51,52,53,55,58,59,60,61,62],cannot:[51,59],capabl:[43,44,58,62],cast:[3,4],caus:[55,60],cdll:59,cerr:59,chain:55,chanc:55,chang:[33,53,61],check:[1,2,35,44,51,55,58,59,60,62],check_method_op_support:58,checkmethodoperatorsupport:[21,37,43,46,47,59],choos:59,cifar10:61,cifar:61,classif:59,clear:42,cli:62,close:59,closer:51,code:[53,56,59],collect:59,color:[25,26,57],colored_output_on:[26,40,57],com:[54,59,60,61],command:62,comment:60,common:[49,51],commun:59,comparis:[1,44],comparison:[2,44],compat:[1,2,44],compil:[32,35,36,44,50,51,55,58,61,62],compile_set:59,compilegraph:[21,37,43,46,47,59,61],complet:59,complex:59,compliat:61,compon:[52,53,59],compos:59,composit:59,comput:61,config:60,configur:[32,36,56,58,59,61],connect:51,consid:59,consol:62,consolid:59,constant:[49,50,51,59],constexpr:[1,2,43,44],construct:[1,2,3,4,44,45,49,52,53,55,59],constructor:[1,44,59],consum:[4,49,59],contain:[31,35,49,51,55,58,59,60,61],content:61,context:[49,50,52,53],contributor:59,control:59,conv1:59,conv2:59,conv2d:59,conv:[55,59],convers:[50,51,58,59],conversionctx:[55,59],convert:[3,4,32,35,36,51,52,53,56,58],convert_method_to_trt_engin:58,convertgraphtotrtengin:[21,37,43,46,47,59],convien:44,convienc:[3,4],convolut:61,coordin:53,copi:[42,55],copyright:43,core:[51,53,59],corespond:50,corpor:43,correct:60,correspond:55,coupl:[49,53],cout:59,cp35:60,cp35m:60,cp36:60,cp36m:60,cp37:60,cp37m:60,cp38:60,cpp:[14,15,16,40,41,42,43,48,51,53,59,61],cpp_frontend:61,cppdirectori:[22,46],cppdoc:59,creat:[31,33,49,55,62],csrc:[51,54],cstddef:61,ctx:[55,59],ctype:59,cuda:[44,58,59,60],cudafloattyp:59,current:[23,55],data:[1,3,4,31,33,42,44,49,52,53,55,61],data_dir:61,dataflow:[55,59],dataload:[4,31,33,42,43,44,61],dataloader_:42,dataloaderopt:61,dataloaderuniqueptr:[4,42],dataset:[33,61],datatyp:[2,21,37,43,44,46,47,58],datatypeclass:[0,
46],dbg:60,dead_code_elimin:51,deal:55,debug:[17,26,43,44,55,57,58,62],debugg:[58,62],dedic:51,deep:[55,56,61],deeplearn:54,def:59,defin:[1,2,3,4,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,41,44,45,48,59,61,62],definit:[48,55],delet:[1,2,43,44,51],demo:61,depend:[30,33,49,53,59],deploi:[56,59,61],deploy:[59,61,62],describ:[44,55,58,59],deseri:[58,59],destroi:55,destructor:55,detail:59,determin:51,develop:[56,59,60],deviat:62,devic:[2,43,44,58,59,62],devicetyp:[0,21,37,43,44,46,47,58],dict:58,dictionari:[58,59],differ:[33,56,59],dimens:51,directli:[55,61],directori:[18,19,20,21,22,43,46,60,61],disabl:[57,62],disclos:60,displai:62,distdir:60,distribut:[58,59,61],dla:[2,44,58,62],doc:[53,54,60],docsrc:53,document:[40,41,42,43,46,47,53,59,61],doe:[41,42,51,55,61],doesn:59,doing:[49,51,59,61],domain:61,don:[55,61],done:[49,53],dont:40,down:60,download:60,doxygen_should_skip_thi:[42,43],driver:60,dtype:58,due:[3,4],dump:[34,60,62],dump_build_info:[21,37,43,46,47,58],dure:[55,61,62],dynam:[44,45,58],each:[3,4,44,49,50,51,55,59],easi:[49,62],easier:[52,53,55,59,61],easili:[3,4],edu:61,effect:[51,59,61],effici:55,either:[44,45,55,58,59,60,62],element:50,element_typ:42,els:[41,42,58],embed:62,emit:49,empti:59,emum:[17,44],enabl:[3,4,25,57,58],encount:60,end:[42,55,59],end_dim:59,endif:[41,42,43],enforc:59,engin:[1,2,32,36,44,45,49,52,53,56,58,59,61,62],engine_converted_from_jit:59,enginecap:[43,44,58],ensur:[33,51],enter:49,entri:[44,55],entropi:[31,33,61],enumer:[1,2,17,44],equival:[36,52,53,55,58,59],equivil:32,error:[17,49,51,53,57,59,60],etc:58,eval:59,evalu:[50,52,53],evaluated_value_map:[49,55],even:59,everi:59,everyth:17,exampl:[50,55,59,61],exception_elimin:51,execpt:51,execut:[51,58,59,61],execute_engin:[50,59],exhaust:59,exist:[4,32,35,36,58,60,61],expect:[51,55,59],explic:42,explicit:[3,4,43,51,56,61],explicitli:61,explict:42,explictli:[1,44],extend:55,extent:[56,59],extra:44,extra_info:[58,61],extrainfo:[0,3,4,21,32,36,37,43,46,47,58,59,61],extrainfostruct:[0,46
],f16:62,f32:62,factori:[4,31,33,61],fail:59,fallback:[55,62],fals:[1,2,3,4,42,43,44,58,59],fashion:59,fc1:59,fc2:59,fc3:59,feat:59,featur:[61,62],fed:[3,4,59],feed:[31,33,59],feel:56,field:[3,4,61],file:[1,2,3,4,5,6,7,8,9,10,11,12,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,44,45,53,58,59,60,61,62],file_path:62,find:[4,59],first:[49,51,59,61],fix:44,fixed_s:[43,44],flag:62,flatten:59,flatten_convert:59,float16:[58,62],float32:[58,62],flow:[55,59],fly:59,follow:[59,61,62],forc:62,form:49,format:62,forward:[31,33,36,50,55,58,59,61],found:[43,59,60,61],fp16:[1,44,56,58,59],fp32:[1,44,56,61],freed:55,freeze_modul:51,from:[1,2,3,4,31,33,42,44,45,49,50,51,52,53,55,59,61,62],full:[55,59,61,62],fulli:[35,51,58,59,61],fuse_flatten_linear:51,fuse_linear:51,fusion:55,gaurd:41,gcc:53,gear:61,gener:[3,4,33,50,51,53,55,59,61,62],get:[1,2,3,4,23,30,42,44,45,55,57,60,61],get_batch_impl:42,get_build_info:[21,37,43,46,47,58],get_is_colored_output_on:[18,38,40,46,47,57],get_logging_prefix:[18,38,40,46,47,57],get_reportable_log_level:[18,38,40,46,47,57],getattr:[51,59],getbatch:[3,4,42],getbatchs:[3,4,42],getdimens:[55,59],getoutput:[55,59],github:[54,59,60,61],given:[44,51,58,59,62],global:[27,59],gnu:60,goal:55,going:[42,59],good:[42,55],got:59,gpu:[2,32,36,44,58,59,62],graph:[17,32,35,36,43,49,52,53,55,56,58,59],great:59,gtc:56,guard:51,guard_elimin:51,hack:42,half:[58,59,62],handl:51,happen:59,hardtanh:55,has:[49,51,53,55,59,61],hash:60,have:[33,42,49,55,56,59,60,61,62],haven:59,header:59,help:[26,49,55,62],helper:55,here:[42,49,50,59,60,61],hermet:60,hfile:[22,46],hidden:41,high:51,higher:[51,59],hinton:61,hold:[44,45,49,55,61],hood:53,how:[3,4,59],howev:33,html:[54,59,60,61],http:[54,59,60,61],http_archiv:60,idea:51,ident:62,ifndef:[42,43],ifstream:42,iint8calibr:[3,4,31,33,42,43,44,61],iint8entropycalibrator2:[3,4,31,33,42,43,61],iint8minmaxcalibr:[31,33,61],ilay:55,imag:61,images_:61,implement:[3,4,50,59,61],implic:51,in_shap:59,in_tensor:59,incas:42,includ:[14,16,17,30,34,40
,41,42,43,48,58,59,60,61,62],includedirectori:[22,46],index:[54,56,61],inetworkdefinit:49,infer:[51,59,61],info:[17,32,36,43,44,55,57,59,62],inform:[28,30,34,49,56,58,59,61,62],infrastructur:61,ingest:53,inherit:[46,47,61],inlin:[43,51,59],input0:59,input1:59,input2:59,input:[3,4,33,42,44,45,49,50,51,52,53,55,58,59,61,62],input_data:59,input_file_path:62,input_rang:[43,44],input_s:59,input_shap:[58,59,61,62],inputrang:[21,37,43,44,46,47,59],inputrangeclass:[0,46],inspect:[55,59],instal:[56,59],instanc:[51,59],instanti:[52,53,55,59],instatin:[1,2,44],instead:[49,51,59,62],instruct:59,insur:60,int16_t:43,int64_t:[43,44,45,61],int8:[1,42,44,56,58,61,62],int8_t:43,int8cachecalibr:[20,33,39,42,43,46,47],int8cachecalibratortempl:[0,46],int8calibr:[3,20,31,39,42,43,46,47],int8calibratorstruct:[0,46],integ:58,integr:56,interfac:[1,2,44,50,53,55,61],intermedi:[17,59],intern:[2,17,44,55,59],internal_error:57,internalerror:57,interpret:50,intro_to_torchscript_tutori:59,invok:59,ios:42,iostream:[20,42,59],is_train:61,iscustomclass:55,issu:[3,4,59,60],istensor:55,istream_iter:42,it_:42,itensor:[49,55,59],iter:[42,44,49,58,62],its:[33,49,50,55],itself:[1,2,44,62],ivalu:[49,50,55,59],jetson:58,jit:[32,35,36,43,49,50,51,52,53,54,55,58,59,62],just:[42,43,51,56,59],kchar:[1,43,44],kclip:55,kcpu:[2,44],kcuda:[2,44,59],kdebug:[17,40,42],kdefault:[43,44],kdla:[2,43,44],kei:[58,59],kernel:[44,55,58,62],kerror:[17,40],kfloat:[1,43,44],kgpu:[2,43,44],kgraph:[17,40,51],khalf:[1,43,44,59],ki8:61,kind:[49,58],kinfo:[17,40,42],kinternal_error:[17,40],know:[40,55],kriz:61,krizhevski:61,ksafe_dla:[43,44],ksafe_gpu:[43,44],ktest:61,ktrain:61,kwarn:[17,40],label:61,laid:59,lambda:[55,59],languag:59,larg:[52,53,59,61],larger:61,last:51,later:[33,59],layer:[44,49,51,55,58,59,61,62],ld_library_path:60,ldd:60,learn:[56,61],leav:51,lenet:59,lenet_trt:[50,59],lenetclassifi:59,lenetfeatextractor:59,length:[3,4,42],let:[44,51,55,62],level:[18,23,27,28,38,40,42,46,47,51,53,57,59],levelnamespac:[0,46],lever
ag:61,lib:[51,59],librari:[30,43,50,52,53,55,59,60],libtorch:[4,34,55,59,60,61],libtrtorch:[59,60,62],licens:43,like:[49,55,59,61,62],limit:[51,61],line:62,linear:59,link:[49,56,59,62],linux:[53,60],linux_x86_64:60,list:[18,19,20,21,35,48,49,55,58,59,60],listconstruct:49,live:55,load:[50,59,61,62],local:[51,59],locat:61,log:[16,17,19,20,21,22,37,42,43,46,47,48,51,55,58],log_debug:55,logger:57,loggingenum:[0,46],loglevel:57,look:[49,50,51,52,53,59,61],loop:51,loss:61,lot:55,lower:17,lower_graph:51,lower_tupl:51,loweralltupl:51,lowersimpletupl:51,lvl:[27,28,40],machin:[50,61],macro:[5,6,7,8,9,10,11,12,16,18,21,22,40,43,46,48],made:[51,52,53],mai:[49,53,59,61],main:[50,52,53,55,59],maintain:[50,55],major:53,make:[49,59,60,61],make_data_load:[4,61],make_int8_cache_calibr:[21,39,43,46,47,61],make_int8_calibr:[21,33,39,43,46,47,61],manag:[49,50,52,53,55,59],map:[2,44,49,51,52,53,55,59,61],master:[54,60,61],match:[44,51,60],matmul:[51,59],matrix:54,matur:53,max:[43,44,45,55,58,59,62],max_batch_s:[43,44,58,62],max_c:62,max_h:62,max_n:62,max_pool2d:59,max_val:55,max_w:62,maximum:[44,45,58,59,62],mean:[44,55,56,58,62],mechan:55,meet:58,member:[44,45,58],memori:[20,21,42,43,51,55,59],menu:62,messag:[17,27,28,57,62],metadata:[50,55],method:[32,35,36,51,55,58,59,60],method_nam:[32,35,43,58,59],min:[43,44,45,55,58,59,62],min_c:62,min_h:62,min_n:62,min_val:55,min_w:62,minim:[44,58,61,62],minimum:[44,45,57,59],minmax:[31,33,61],miss:59,mod:[59,61],mode:61,mode_:61,model:[59,61],modul:[32,35,36,43,50,52,53,55,56,58,61,62],modular:59,more:[49,56,59,61],most:53,move:[31,43,59,61],msg:[27,40,57],much:[55,61],multipl:61,must:[44,55,58,59,60,62],name:[3,4,32,35,42,55,58,59,60],namespac:[0,40,42,43,48,56,61],nativ:[53,54,59],native_funct:54,nbbind:[3,4,42],necessari:40,need:[1,2,28,33,41,44,49,51,55,59,60,61],nest:[46,47],net:[55,59],network:[31,33,55,59,61],new_lay:55,new_local_repositori:60,new_siz:61,next:[3,4,49,50,61],nice:60,ninja:60,nlp:[31,33,61],node:[51,55,59],node_info:[55,59],
noexcept:61,none:55,norm:55,normal:[1,2,44,59,61],noskipw:42,note:[2,44,55],now:[53,55,59],nullptr:[42,43,44],num:62,num_avg_timing_it:[43,44,58],num_it:62,num_min_timing_it:[43,44,58],number:[3,4,44,51,55,58,59,62],numer:62,nvidia:[32,36,43,54,58,59,60,61,62],nvinfer1:[3,4,31,33,42,43,44,55,61],object:[1,2,3,4,44,45,55,59],obvious:59,off:50,ofstream:[42,59],older:53,onc:[40,41,42,43,49,50,59,61],one:[51,55,57,59],ones:[40,59,60],onli:[2,3,4,17,33,42,44,53,55,57,58,59,61,62],onnx:51,onto:[50,62],op_precis:[43,44,58,59,61,62],oper:[1,2,3,4,35,42,43,44,49,50,51,52,53,55,56,58,61,62],ops:[51,59],opset:[52,53],opt:[43,44,45,58,59,60],opt_c:62,opt_h:62,opt_n:62,opt_w:62,optim:[44,45,56,59,62],optimi:59,optimin:[44,45],option:[42,58,60,61,62],order:[44,55,59],org:[54,59,60,61],other:[1,2,43,44,49,56,58,59,62],our:[53,59],out:[35,42,49,51,52,53,55,57,58,59,60],out_shap:59,out_tensor:[55,59],output:[25,26,44,49,50,51,55,57,59,62],output_file_path:62,outself:59,over:[52,53],overrid:[3,4,31,33,42,61],overview:[54,56],own:[55,59],packag:[51,59,62],page:56,pair:[55,61],paramet:[1,2,3,4,26,27,28,31,32,33,35,36,44,45,49,51,55,57,58,59],parent:[14,15,16,18,19,20,21],pars:59,part:[53,62],pass:[49,50,52,53,55,59,61],path:[4,13,14,15,16,31,33,59,60,61,62],pathwai:59,pattern:[55,59],perform:[31,33],performac:[44,45,61],phase:[17,55,59],pick:59,pip3:60,pipelin:62,piplein:59,place:[51,60,61],plan:[53,62],pleas:60,point:[58,59],pointer:[3,4,61],pop:50,posit:62,post:[31,33,44,56,62],power:59,pragma:[40,41,42,43,61],pre_cxx11_abi:60,precis:[44,56,58,59,61,62],precompil:60,prefix:[24,26,40,57],preprint:61,preprocess:61,preserv:[59,61],prespect:59,pretti:59,previous:33,prim:[49,50,51,59],primarili:[53,59],print:[17,35,42,57,58],priorit:60,privat:[3,4,42,43,61],process:[59,62],produc:[44,45,49,50,55,59],profil:[44,45],program:[18,19,20,21,33,48,52,53,56,59,62],propog:51,provid:[3,4,44,55,59,60,61],ptq:[3,4,16,18,21,22,37,43,46,47,48,56,62],ptq_calibr:[3,4,43,44,61],ptqtemplat:[0,46],pull:60,p
ure:35,purpos:60,push:50,push_back:42,python3:[51,59,60],python:[53,62],python_api:54,pytorch:[50,52,53,55,58,59,60,61],quantiz:[31,33,56,62],quantizatiom:44,question:59,quickli:[59,61,62],quit:[55,59],rais:51,raiseexcept:51,rand:59,randn:59,rang:[44,45,58,59,62],rather:51,read:[3,4,31,33,42,61],readcalibrationcach:[3,4,42],realiz:50,realli:55,reason:[1,44,59],recalibr:33,recognit:61,recomend:[31,33],recommend:[31,33,59,60],record:[49,59],recurs:49,reduc:[51,52,53,61],refer:[50,52,53],referenc:60,refit:[43,44,58],reflect:43,regard:60,regist:[50,55],registernodeconversionpattern:[55,59],registri:[49,59],reinterpret_cast:42,relationship:[46,47],releas:60,relu:59,remain:[51,61],remove_contigu:51,remove_dropout:51,replac:51,report:[23,42],reportable_log_level:57,repositori:53,repres:[44,45,55,57],represent:[51,55,59],request:59,requir:[33,49,57,58,59,61,62],reserv:43,reset:42,resolv:[49,51,52,53],resourc:[49,61],respons:[33,50],restrict:[44,58,62],result:[49,51,52,53,58,59],reus:[51,61],right:[43,53,55],root:[43,61],run:[2,32,44,49,50,52,53,55,56,58,59,60,61,62],runtim:[50,56,59],safe:[55,58],safe_dla:[58,62],safe_gpu:[58,62],safeti:[44,58],same:[50,59],sampl:61,save:[33,42,58,59,62],saw:59,scalar:55,scalartyp:[1,43,44],scale:61,schema:[55,59],scope:51,scratch:33,script:[35,51,58,59],script_model:59,scriptmodul:[58,59],sdk:54,seamlessli:56,search:56,section:61,see:[35,50,51,58,59],select:[31,32,33,44,58,61,62],self:[50,51,55,59],sens:59,serial:[32,50,52,53,58,59],serv:62,set:[3,4,17,26,28,32,33,36,44,45,49,51,52,53,56,57,58,59,61,62],set_is_colored_output_on:[18,38,40,46,47,57],set_logging_prefix:[18,38,40,46,47,57],set_reportable_log_level:[18,38,40,46,47,57],setalpha:55,setbeta:55,setnam:[55,59],setreshapedimens:59,setup:[60,61],sever:[17,27,57],sha256:60,shape:[44,45,55,58],ship:59,should:[1,3,4,33,43,44,49,55,56,57,58,61,62],shown:59,shuffl:59,side:[51,59],signifi:[44,45],significantli:51,similar:[55,59],simonyan:61,simpil:61,simpl:59,simplifi:49,sinc:[51,59,61],sin
gl:[44,45,51,59,61,62],singular:55,site:[51,59],size:[3,4,42,44,45,51,58,59,61,62],size_t:[3,4,42,61],softmax:51,sole:61,some:[49,50,51,52,53,55,59,61],someth:[41,51],sort:55,sourc:[43,53,58],space:61,specif:[36,51,52,53,58],specifi:[3,4,55,56,57,58,59,62],specifii:58,src:54,ssd_trace:62,ssd_trt:62,sstream:[20,42],stabl:54,stack:[50,61],stage:49,stand:50,standard:[56,62],start:[49,60],start_dim:59,state:[49,55,59],statement:51,static_cast:42,statu:42,std:[3,4,24,27,29,30,31,32,33,35,40,42,43,44,45,59,61],stdout:[34,57,58],steamlin:61,step:[56,61],still:[42,61],stitch:59,stop:59,storag:61,store:[4,49,55,59],str:[19,41,42,46,47,57,58],straight:55,strict:62,strict_typ:[43,44,58],strictli:58,string:[3,4,18,20,21,24,27,29,30,31,32,33,35,40,42,43,55,58,59,61],stringstream:42,strip_prefix:60,struct:[1,2,21,37,43,61],structur:[33,44,53,55,59],style:43,sub:59,subdirectori:48,subgraph:[49,51,55,59],subject:53,submodul:59,subset:61,suit:56,support:[1,2,26,35,44,45,54,58,59,62],sure:[59,60],system:[49,55,56,60],take:[32,35,36,49,50,52,53,55,58,59,61],talk:56,tar:[60,61],tarbal:[59,61],target:[2,44,53,56,58,59,61,62],targets_:61,task:[31,33,61],techinqu:59,techniqu:61,tell:[55,59],templat:[20,21,39,42,43,46,47,59],tensor:[42,44,45,49,50,55,59,61],tensorcontain:55,tensorlist:55,tensorrt:[1,2,3,4,31,32,33,34,36,43,44,45,49,51,52,53,55,56,58,59,61,62],term:61,termin:[26,59,62],test:[53,62],text:57,than:[51,56],thats:[49,61],thei:[44,49,51,55,60,62],them:[50,59],theori:49,therebi:50,therefor:[33,59],thi:[1,2,31,33,40,41,42,43,44,45,49,50,51,52,53,55,59,60,61,62],think:55,third_parti:[53,60],those:49,though:[53,55,59,62],three:[44,45,52,53],threshold:62,thrid_parti:60,through:[49,50,56,59],time:[44,49,51,52,53,55,58,59,61,62],tini:61,tmp:59,tocustomclass:55,todim:59,togeth:[49,55,59],too:60,tool:[55,59],top:53,torch:[1,2,4,31,32,33,35,36,42,43,44,50,51,54,55,58,59,61,62],torch_scirpt_modul:59,torch_script_modul:59,torchscript:[32,35,36,52,53,58,62],toronto:61,tovec:59,toward:61,trace
:[58,59],traced_model:59,track:[55,61],tradit:61,traget:36,train:[31,33,44,56,59,62],trainabl:51,transform:[59,61],translat:59,travers:[52,53],treat:62,tree:[43,61],trigger:59,trim:61,trt:[1,2,3,4,44,49,50,51,55,59],trt_mod:[59,61],trt_ts_modul:59,trtorch:[0,1,2,3,4,15,17,22,40,41,42,44,45,47,48,49,50,51,52,53,60,61,62],trtorch_api:[19,23,24,25,26,27,28,29,30,31,32,33,34,35,36,40,41,43,46,47],trtorch_check:55,trtorch_hidden:[19,41,46,47],trtorch_major_vers:[19,41,46,47],trtorch_minor_vers:[19,41,46,47],trtorch_patch_vers:[19,41,46,47],trtorch_unus:55,trtorch_vers:[19,41,46,47],trtorchc:56,trtorchfil:[22,46],trtorchnamespac:[0,46],tupl:[58,59],tupleconstruct:51,tupleunpack:51,tutori:[59,61],two:[55,59,60,61,62],type:[1,2,31,45,46,47,49,50,55,57,58,59,61,62],typenam:[3,4,31,33,42,43],typic:[49,55],uint64_t:[43,44],unabl:[55,59],uncom:60,under:[43,53],underli:[1,2,44,55],union:[55,59],uniqu:4,unique_ptr:[4,31],unlik:56,unpack_addmm:51,unpack_log_softmax:51,unqiue_ptr:4,unstabl:53,unsupport:[35,58],unsur:55,untest:53,until:[49,53,55],unwrap:55,unwraptodoubl:55,unwraptoint:59,upstream:59,url:60,use:[1,2,3,4,31,33,44,49,50,53,55,57,58,59,60,61,62],use_cach:[3,4,31,42,43],use_cache_:42,use_subset:61,used:[1,2,3,4,42,44,45,49,50,51,55,57,58,59,61,62],useful:55,user:[40,52,53,59,60,61],uses:[31,33,42,55,61],using:[1,2,32,36,42,44,55,56,58,59,61,62],using_int:59,usr:60,util:[55,59],valid:[2,44,55],valu:[1,2,17,43,44,49,50,55,59],value_tensor_map:[49,55],vector:[20,21,42,43,44,45,59,61],verbios:62,verbos:62,veri:61,version:[30,34,53,60],vgg16:61,via:[56,58],virtual:61,wai:[59,62],want:[40,44,59],warn:[17,42,55,57,62],websit:60,weight:[49,59],welcom:59,well:[59,61],were:59,what:[4,51,59],whatev:50,when:[26,42,44,49,50,51,52,53,55,57,58,59,60,61,62],where:[49,51,55,59,61],whether:[4,61],which:[2,32,33,36,44,49,50,51,52,53,55,58,59,61],whl:60,whose:51,within:[50,52,53],without:[55,59,61],work:[42,51,53,55,61],worker:61,workspac:[44,58,60,61,62],workspace_s:[43,44,58,61,62],would:
[55,59,60,62],wrap:[52,53,59],wrapper:55,write:[3,4,31,33,42,49,56,59,61],writecalibrationcach:[3,4,42],www:[59,60,61],x86_64:[53,60],xstr:[19,41,46,47],yaml:54,you:[1,2,31,33,44,49,50,51,53,55,56,58,59,60,61,62],your:[55,56,59,60],yourself:59,zisserman:61},titles:["Class Hierarchy","Class ExtraInfo::DataType","Class ExtraInfo::DeviceType","Template Class Int8CacheCalibrator","Template Class Int8Calibrator","Define STR","Define TRTORCH_API","Define TRTORCH_HIDDEN","Define TRTORCH_MAJOR_VERSION","Define TRTORCH_PATCH_VERSION","Define TRTORCH_VERSION","Define XSTR","Define TRTORCH_MINOR_VERSION","Directory cpp","Directory api","Directory include","Directory trtorch","Enum Level","File logging.h","File macros.h","File ptq.h","File trtorch.h","File Hierarchy","Function trtorch::logging::get_reportable_log_level","Function trtorch::logging::set_logging_prefix","Function trtorch::logging::get_is_colored_output_on","Function trtorch::logging::set_is_colored_output_on","Function trtorch::logging::log","Function trtorch::logging::set_reportable_log_level","Function trtorch::logging::get_logging_prefix","Function trtorch::get_build_info","Template Function trtorch::ptq::make_int8_calibrator","Function trtorch::ConvertGraphToTRTEngine","Template Function trtorch::ptq::make_int8_cache_calibrator","Function trtorch::dump_build_info","Function trtorch::CheckMethodOperatorSupport","Function trtorch::CompileGraph","Namespace trtorch","Namespace trtorch::logging","Namespace trtorch::ptq","Program Listing for File logging.h","Program Listing for File macros.h","Program Listing for File ptq.h","Program Listing for File trtorch.h","Struct ExtraInfo","Struct ExtraInfo::InputRange","TRTorch C++ API","Full API","Full API","Conversion Phase","Execution Phase","Lowering Phase","Compiler Phases","System Overview","Useful Links for TRTorch Development","Writing Converters","TRTorch","trtorch.logging","trtorch","Getting Started","Installation","Post Training Quantization 
(PTQ)","trtorchc"],titleterms:{"class":[0,1,2,3,4,20,21,37,39,46,47],"enum":[17,18,38,46,47,58],"function":[18,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,46,47,54,58],The:59,Used:51,Useful:54,addmm:51,advic:55,ahead:56,api:[14,18,19,20,21,46,47,48,54,56],applic:61,arg:55,avail:54,background:[50,55],base:[3,4],binari:60,build:60,checkmethodoperatorsupport:35,citat:61,code:51,compil:[52,53,56,59,60],compilegraph:36,construct:50,content:[18,19,20,21,37,38,39],context:55,contigu:51,contract:55,contributor:56,convers:[49,52,53,55],convert:[49,55,59],convertgraphtotrtengin:32,cpp:[13,18,19,20,21],creat:[59,61],cudnn:60,custom:59,datatyp:1,dead:51,debug:60,defin:[5,6,7,8,9,10,11,12,19,46,47],definit:[18,19,20,21],depend:60,develop:54,devicetyp:2,dimens:54,directori:[13,14,15,16,48],distribut:60,documen:56,document:[1,2,3,4,5,6,7,8,9,10,11,12,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,44,45,54,56],dropout:51,dump_build_info:34,easier:54,elimin:51,engin:50,evalu:49,execept:51,execut:[50,52,53],executor:50,expect:54,extrainfo:[1,2,44,45],file:[16,18,19,20,21,22,40,41,42,43,46,48],flatten:51,freez:51,from:60,full:[46,47,48],fuse:51,gaurd:51,get:[56,59],get_build_info:30,get_is_colored_output_on:25,get_logging_prefix:29,get_reportable_log_level:23,gpu:56,graph:[50,51],guarante:55,hierarchi:[0,22,46],hood:59,how:61,includ:[15,18,19,20,21],indic:56,inherit:[3,4],inputrang:45,instal:60,int8cachecalibr:3,int8calibr:4,jit:56,layer:54,level:17,linear:51,link:54,list:[40,41,42,43],local:60,log:[18,23,24,25,26,27,28,29,38,40,57],logsoftmax:51,lower:[51,52,53],macro:[19,41],make_int8_cache_calibr:33,make_int8_calibr:31,modul:[51,59],namespac:[18,20,21,37,38,39,46,47],native_op:54,nest:[1,2,44,45],node:49,nvidia:56,oper:59,other:55,overview:53,own:61,packag:60,pass:51,pattern:51,phase:[49,50,51,52,53],post:61,program:[40,41,42,43],ptq:[20,31,33,39,42,61],python:[54,56,59,60],pytorch:[54,56],quantiz:61,read:54,redund:51,regist:59,relationship:[1,2,3,4,44,45],remov:51,respons:
55,result:50,set_is_colored_output_on:26,set_logging_prefix:24,set_reportable_log_level:28,sometim:54,sourc:60,start:[56,59],str:5,struct:[44,45,46,47],subdirectori:[13,14,15],submodul:58,system:53,tarbal:60,templat:[3,4,31,33],tensorrt:[50,54,60],time:56,torchscript:[56,59],train:61,trtorch:[16,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,43,46,54,56,57,58,59],trtorch_api:6,trtorch_hidden:7,trtorch_major_vers:8,trtorch_minor_vers:12,trtorch_patch_vers:9,trtorch_vers:10,trtorchc:62,tupl:51,type:[3,4,44],under:59,unpack:51,unsupport:59,using:60,weight:55,what:55,work:59,write:55,xstr:11,your:61}}) \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml index d09258827a..4670406c27 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -1 +1 @@ -<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/class_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html</loc></url><url><loc>https://nvidia.g
ithub.io/TRTorch/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1adf5435f
0dbb09c0d931a1b851847236b.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__logging.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__ptq.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html</loc></url><url><loc>h
ttps://nvidia.github.io/TRTorch/_cpp_api/trtorch_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_orphan.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/writing_converters.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/index.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/genindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py-modindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/search.html</loc></url></urlset> \ No newline at end of file +<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/class_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_mac
ros_8h_1abe87b341f562fd1cf40b7672e4d759da.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html</l
oc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a4422781719d7befedb364cacd91c6247.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__logging.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__ptq.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/trtorch_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_api.html</loc></url><url
><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_orphan.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/conversion.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/execution.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/lowering.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/phases.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/system_overview.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/useful_links.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/writing_converters.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/index.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py_api/logging.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py_api/trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/getting_started.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/installation.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/ptq.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/trtorchc.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/genindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py-modindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/search.html</loc></url></urlset> \ No newline at end of file diff --git a/docs/tutorials/getting_started.html b/docs/tutorials/getting_started.html index c4836fe36f..9b52d08888 100644 --- a/docs/tutorials/getting_started.html +++ b/docs/tutorials/getting_started.html @@ -365,6 +365,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -878,7 +883,14 @@ <h2 id="compiling-with-trtorch-in-python"> </span> </code> objects or dictionaries of minimum, 
optimial and maximum sizes. You can also specify settings such as -operating precision for the engine or target device. +operating precision for the engine or target device. After compilation you can save the module just like any other module +to load in a deployment application. In order to load a TensorRT/TorchScript module, make sure you first import + <code class="docutils literal notranslate"> + <span class="pre"> + trtorch + </span> + </code> + . </p> <div class="highlight-python notranslate"> <div class="highlight"> @@ -898,6 +910,19 @@ <h2 id="compiling-with-trtorch-in-python"> <span class="n">trt_ts_module</span> <span class="o">=</span> <span class="n">trtorch</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="n">torch_script_module</span><span class="p">,</span> <span class="n">compile_settings</span><span class="p">)</span> +<span class="n">input_data</span> <span class="o">=</span> <span class="n">input_data</span><span class="o">.</span><span class="n">half</span><span class="p">()</span> +<span class="n">result</span> <span class="o">=</span> <span class="n">trt_ts_module</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span> +<span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">trt_ts_module</span><span class="p">,</span> <span class="s2">"trt_ts_module.ts"</span><span class="p">)</span> +</pre> + </div> + </div> + <div class="highlight-python notranslate"> + <div class="highlight"> + <pre><span></span><span class="c1"># Deployment application</span> +<span class="kn">import</span> <span class="nn">torch</span> +<span class="kn">import</span> <span class="nn">trtorch</span> + +<span class="n">trt_ts_module</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span 
class="n">load</span><span class="p">(</span><span class="s2">"trt_ts_module.ts"</span><span class="p">)</span> <span class="n">input_data</span> <span class="o">=</span> <span class="n">input_data</span><span class="o">.</span><span class="n">half</span><span class="p">()</span> <span class="n">result</span> <span class="o">=</span> <span class="n">trt_ts_module</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span> </pre> @@ -1044,8 +1069,46 @@ <h2 id="compiling-with-trtorch-in-c"> </div> </div> <p> - And now we are running the module in FP16 precision. + And now we are running the module in FP16 precision. You can then save the module to load later. + </p> + <div class="highlight-c++ notranslate"> + <div class="highlight"> + <pre><span></span><span class="n">trt_mod</span><span class="p">.</span><span class="n">save</span><span class="p">(</span><span class="s">"&lt;PATH TO SAVED TRT/TS MOD&gt;"</span><span class="p">)</span> +</pre> + </div> + </div> + <p> + TRTorch compiled TorchScript modules are loaded in the same way as normal TorchScript module. 
Make sure your deployment application is linked against + <code class="docutils literal notranslate"> + <span class="pre"> + libtrtorch.so + </span> + </code> </p> + <div class="highlight-c++ notranslate"> + <div class="highlight"> + <pre><span></span><span class="cp">#include</span> <span class="cpf">"torch/script.h"</span><span class="cp"></span> +<span class="cp">#include</span> <span class="cpf">"trtorch/trtorch.h"</span><span class="cp"></span> + +<span class="kt">int</span> <span class="nf">main</span><span class="p">(</span><span class="kt">int</span> <span class="n">argc</span><span class="p">,</span> <span class="k">const</span> <span class="kt">char</span><span class="o">*</span> <span class="n">argv</span><span class="p">[])</span> <span class="p">{</span> + <span class="n">torch</span><span class="o">::</span><span class="n">jit</span><span class="o">::</span><span class="n">Module</span> <span class="k">module</span><span class="p">;</span> + <span class="k">try</span> <span class="p">{</span> + <span class="c1">// Deserialize the ScriptModule from a file using torch::jit::load().</span> + <span class="k">module</span> <span class="o">=</span> <span class="n">torch</span><span class="o">::</span><span class="n">jit</span><span class="o">::</span><span class="n">load</span><span class="p">(</span><span class="s">"&lt;PATH TO SAVED TRT/TS MOD&gt;"</span><span class="p">);</span> + <span class="p">}</span> + <span class="k">catch</span> <span class="p">(</span><span class="k">const</span> <span class="n">c10</span><span class="o">::</span><span class="n">Error</span><span class="o">&amp;</span> <span class="n">e</span><span class="p">)</span> <span class="p">{</span> + <span class="n">std</span><span class="o">::</span><span class="n">cerr</span> <span class="o">&lt;&lt;</span> <span class="s">"error loading the model</span><span class="se">\n</span><span class="s">"</span><span class="p">;</span> + <span class="k">return</span> <span 
class="o">-</span><span class="mi">1</span><span class="p">;</span> + <span class="p">}</span> + + <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span> <span class="n">in</span> <span class="o">=</span> <span class="n">torch</span><span class="o">::</span><span class="n">randn</span><span class="p">({</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">},</span> <span class="n">torch</span><span class="o">::</span><span class="n">kCUDA</span><span class="p">);</span> + <span class="k">auto</span> <span class="n">out</span> <span class="o">=</span> <span class="n">mod</span><span class="p">.</span><span class="n">forward</span><span class="p">(</span><span class="n">in</span><span class="p">);</span> + + <span class="n">std</span><span class="o">::</span><span class="n">cout</span> <span class="o">&lt;&lt;</span> <span class="s">"ok</span><span class="se">\n</span><span class="s">"</span><span class="p">;</span> +<span class="p">}</span> +</pre> + </div> + </div> <p> If you want to save the engine produced by TRTorch to use in a TensorRT application you can use the <code class="docutils literal notranslate"> diff --git a/docs/tutorials/installation.html b/docs/tutorials/installation.html index 8f22ca897c..f976eeedea 100644 --- a/docs/tutorials/installation.html +++ b/docs/tutorials/installation.html @@ -382,6 +382,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/tutorials/ptq.html b/docs/tutorials/ptq.html index 91ece9207f..5089f8c9df 100644 --- a/docs/tutorials/ptq.html +++ b/docs/tutorials/ptq.html @@ -56,7 +56,7 @@ </script> <link href="../genindex.html" rel="index" title="Index"/> <link 
href="../search.html" rel="search" title="Search"/> - <link href="../contributors/system_overview.html" rel="next" title="System Overview"/> + <link href="trtorchc.html" rel="next" title="trtorchc"/> <link href="getting_started.html" rel="prev" title="Getting Started"/> </head> <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr"> @@ -335,6 +335,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -866,13 +871,13 @@ <h3 id="citations"> </span> </div> </a> - <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="../contributors/system_overview.html" rel="next" title="System Overview"> + <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="trtorchc.html" rel="next" title="trtorchc"> <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"> <span class="md-flex__ellipsis"> <span class="md-footer-nav__direction"> Next </span> - System Overview + trtorchc </span> </div> <div class="md-flex__cell md-flex__cell--shrink"> diff --git a/docs/tutorials/trtorchc.html b/docs/tutorials/trtorchc.html new file mode 100644 index 0000000000..148d344c22 --- /dev/null +++ b/docs/tutorials/trtorchc.html @@ -0,0 +1,683 @@ +<!DOCTYPE html> +<html> + <head> + <meta charset="utf-8"/> + <meta content="width=device-width,initial-scale=1" name="viewport"/> + <meta content="ie=edge" http-equiv="x-ua-compatible"/> + <meta content="Copy to clipboard" name="lang:clipboard.copy"/> + <meta content="Copied to clipboard" name="lang:clipboard.copied"/> + <meta content="en" name="lang:search.language"/> + <meta content="True" name="lang:search.pipeline.stopwords"/> + <meta content="True" name="lang:search.pipeline.trimmer"/> + <meta content="No matching documents" name="lang:search.result.none"/> + <meta content="1 matching document" 
name="lang:search.result.one"/> + <meta content="# matching documents" name="lang:search.result.other"/> + <meta content="[\s\-]+" name="lang:search.tokenizer"/> + <link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect"/> + <link href="https://fonts.googleapis.com/css?family=Roboto+Mono:400,500,700|Roboto:300,400,400i,700&amp;display=fallback" rel="stylesheet"/> + <style> + body, + input { + font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif + } + + code, + kbd, + pre { + font-family: "Roboto Mono", "Courier New", Courier, monospace + } + </style> + <link href="../_static/stylesheets/application.css" rel="stylesheet"/> + <link href="../_static/stylesheets/application-palette.css" rel="stylesheet"/> + <link href="../_static/stylesheets/application-fixes.css" rel="stylesheet"/> + <link href="../_static/fonts/material-icons.css" rel="stylesheet"/> + <meta content="84bd00" name="theme-color"/> + <script src="../_static/javascripts/modernizr.js"> + </script> + <title> + trtorchc — TRTorch 0.0.2 documentation + </title> + <link href="../_static/material.css" rel="stylesheet" type="text/css"/> + <link href="../_static/pygments.css" rel="stylesheet" type="text/css"/> + <link href="../_static/collapsible-lists/css/tree_view.css" rel="stylesheet" type="text/css"/> + <script data-url_root="../" id="documentation_options" src="../_static/documentation_options.js"> + </script> + <script src="../_static/jquery.js"> + </script> + <script src="../_static/underscore.js"> + </script> + <script src="../_static/doctools.js"> + </script> + <script src="../_static/language_data.js"> + </script> + <script src="../_static/collapsible-lists/js/CollapsibleLists.compressed.js"> + </script> + <script src="../_static/collapsible-lists/js/apply-collapsible-lists.js"> + </script> + <link href="../genindex.html" rel="index" title="Index"/> + <link href="../search.html" rel="search" title="Search"/> + <link href="../contributors/system_overview.html" rel="next" 
title="System Overview"/> + <link href="ptq.html" rel="prev" title="Post Training Quantization (PTQ)"/> + </head> + <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr"> + <svg class="md-svg"> + <defs data-children-count="0"> + <svg height="448" id="__github" viewbox="0 0 416 448" width="416" xmlns="http://www.w3.org/2000/svg"> + <path d="M160 304q0 10-3.125 20.5t-10.75 19T128 352t-18.125-8.5-10.75-19T96 304t3.125-20.5 10.75-19T128 256t18.125 8.5 10.75 19T160 304zm160 0q0 10-3.125 20.5t-10.75 19T288 352t-18.125-8.5-10.75-19T256 304t3.125-20.5 10.75-19T288 256t18.125 8.5 10.75 19T320 304zm40 0q0-30-17.25-51T296 232q-10.25 0-48.75 5.25Q229.5 240 208 240t-39.25-2.75Q130.75 232 120 232q-29.5 0-46.75 21T56 304q0 22 8 38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0 37.25-1.75t35-7.375 30.5-15 20.25-25.75T360 304zm56-44q0 51.75-15.25 82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5T212 416q-19.5 0-35.5-.75t-36.875-3.125-38.125-7.5-34.25-12.875T37 371.5t-21.5-28.75Q0 312 0 260q0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25 30.875Q171.5 96 212 96q37 0 70 8 26.25-20.5 46.75-30.25T376 64q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34 99.5z" fill="currentColor"> + </path> + </svg> + </defs> + </svg> + <input class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/> + <input class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/> + <label class="md-overlay" data-md-component="overlay" for="__drawer"> + </label> + <a class="md-skip" href="#tutorials/trtorchc" tabindex="1"> + Skip to content + </a> + <header class="md-header" data-md-component="header"> + <nav class="md-header-nav md-grid"> + <div class="md-flex navheader"> + <div class="md-flex__cell md-flex__cell--shrink"> + <a class="md-header-nav__button md-logo" href="../index.html" title="TRTorch 0.0.2 documentation"> + <i class="md-icon"> +  + </i> + </a> + </div> + <div class="md-flex__cell 
md-flex__cell--shrink"> + <label class="md-icon md-icon--menu md-header-nav__button" for="__drawer"> + </label> + </div> + <div class="md-flex__cell md-flex__cell--stretch"> + <div class="md-flex__ellipsis md-header-nav__title" data-md-component="title"> + <span class="md-header-nav__topic"> + TRTorch + </span> + <span class="md-header-nav__topic"> + trtorchc + </span> + </div> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <label class="md-icon md-icon--search md-header-nav__button" for="__search"> + </label> + <div class="md-search" data-md-component="search" role="dialog"> + <label class="md-search__overlay" for="__search"> + </label> + <div class="md-search__inner" role="search"> + <form action="../search.html" class="md-search__form" method="GET" name="search"> + <input autocapitalize="off" autocomplete="off" class="md-search__input" data-md-component="query" data-md-state="active" name="q" placeholder="Search" spellcheck="false" type="text"/> + <label class="md-icon md-search__icon" for="__search"> + </label> + <button class="md-icon md-search__icon" data-md-component="reset" tabindex="-1" type="reset"> +  + </button> + </form> + <div class="md-search__output"> + <div class="md-search__scrollwrap" data-md-scrollfix=""> + <div class="md-search-result" data-md-component="result"> + <div class="md-search-result__meta"> + Type to start searching + </div> + <ol class="md-search-result__list"> + </ol> + </div> + </div> + </div> + </div> + </div> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <div class="md-header-nav__source"> + <a class="md-source" data-md-source="github" href="https://github.com/nvidia/TRTorch/" title="Go to repository"> + <div class="md-source__icon"> + <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> + <use height="24" width="24" xlink:href="#__github"> + </use> + </svg> + </div> + <div class="md-source__repository"> + TRTorch + </div> 
+ </a> + </div> + </div> + <div class="md-flex__cell md-flex__cell--shrink dropdown"> + <button class="dropdownbutton"> + Versions + </button> + <div class="dropdown-content md-hero"> + <a href="https://nvidia.github.io/TRTorch/" title="master"> + master + </a> + <a href="https://nvidia.github.io/TRTorch/v0.0.2/" title="v0.0.2"> + v0.0.2 + </a> + <a href="https://nvidia.github.io/TRTorch/v0.0.1/" title="v0.0.1"> + v0.0.1 + </a> + </div> + </div> + </div> + </nav> + </header> + <div class="md-container"> + <nav class="md-tabs" data-md-component="tabs"> + <div class="md-tabs__inner md-grid"> + <ul class="md-tabs__list"> + <li class="md-tabs__item"> + <a class="md-tabs__link" href="../index.html"> + TRTorch 0.0.2 documentation + </a> + </li> + </ul> + </div> + </nav> + <main class="md-main"> + <div class="md-main__inner md-grid" data-md-component="container"> + <div class="md-sidebar md-sidebar--primary" data-md-component="navigation"> + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + <nav class="md-nav md-nav--primary" data-md-level="0"> + <label class="md-nav__title md-nav__title--site" for="__drawer"> + <a class="md-nav__button md-logo" href="../index.html" title="TRTorch 0.0.2 documentation"> + <i class="md-icon"> +  + </i> + </a> + <a href="../index.html" title="TRTorch 0.0.2 documentation"> + TRTorch + </a> + </label> + <div class="md-nav__source"> + <a class="md-source" data-md-source="github" href="https://github.com/nvidia/TRTorch/" title="Go to repository"> + <div class="md-source__icon"> + <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> + <use height="24" width="24" xlink:href="#__github"> + </use> + </svg> + </div> + <div class="md-source__repository"> + TRTorch + </div> + </a> + </div> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + Getting Started + </span> + </span> + </li> 
+ <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html"> + Installation + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#dependencies"> + Dependencies + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#dependencies-for-compilation"> + Dependencies for Compilation + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#building-using-cudnn-tensorrt-tarball-distributions"> + <strong> + Building using cuDNN &amp; TensorRT tarball distributions + </strong> + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#building-using-locally-installed-cudnn-tensorrt"> + <strong> + Building using locally installed cuDNN &amp; TensorRT + </strong> + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html"> + Getting Started + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#creating-a-torchscript-module"> + Creating a TorchScript Module + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#working-with-torchscript-in-python"> + Working with TorchScript in Python + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#compiling-with-trtorch-in-python"> + Compiling with TRTorch in Python + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#working-with-torchscript-in-c"> + Working with TorchScript in C++ + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#compiling-with-trtorch-in-c"> + Compiling with TRTorch in C++ + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#under-the-hood"> + Under The Hood + </a> + </li> + <li class="md-nav__item"> + <a 
class="md-nav__link" href="getting_started.html#working-with-unsupported-operators"> + Working with Unsupported Operators + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="ptq.html"> + Post Training Quantization (PTQ) + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="ptq.html#how-to-create-your-own-ptq-application"> + How to create your own PTQ application + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <input class="md-toggle md-nav__toggle" data-md-toggle="toc" id="__toc" type="checkbox"/> + <label class="md-nav__link md-nav__link--active" for="__toc"> + trtorchc + </label> + <a class="md-nav__link md-nav__link--active" href="#"> + trtorchc + </a> + <nav class="md-nav md-nav--secondary"> + <ul class="md-nav__list" data-md-scrollfix=""> + <li class="md-nav__item"> + <a class="md-nav__extra_link" href="../_sources/tutorials/trtorchc.rst.txt"> + Show Source + </a> + </li> + </ul> + </nav> + </li> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + Contributor Documentation + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/system_overview.html"> + System Overview + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/system_overview.html#compiler-phases"> + Compiler Phases + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html"> + Writing Converters + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#background"> + Background + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#converters"> + Converters + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" 
href="../contributors/writing_converters.html#converter-contract"> + Converter Contract + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#conversion-context"> + Conversion Context + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#args"> + Args + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#weights"> + Weights + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#other-advice"> + Other advice + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html"> + Useful Links for TRTorch Development + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#tensorrt-available-layers-and-expected-dimensions"> + TensorRT Available Layers and Expected Dimensions: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#tensorrt-c-documentation"> + TensorRT C++ Documentation: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#tensorrt-python-documentation-sometimes-easier-to-read"> + TensorRT Python Documentation (Sometimes easier to read): + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#pytorch-functional-api"> + PyTorch Functional API: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#pytorch-native-ops"> + PyTorch native_ops: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#pytorch-ir-documentation"> + PyTorch IR Documentation: + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <span 
class="md-nav__link caption"> + <span class="caption-text"> + Python API Documenation + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html"> + trtorch + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html#functions"> + Functions + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html#enums"> + Enums + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html#submodules"> + Submodules + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/logging.html"> + trtorch.logging + </a> + </li> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + C++ API Documenation + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html"> + TRTorch C++ API + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html#class-hierarchy"> + Class Hierarchy + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html#file-hierarchy"> + File Hierarchy + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html#full-api"> + Full API + </a> + </li> + </ul> + </li> + </ul> + </nav> + </div> + </div> + </div> + <div class="md-sidebar md-sidebar--secondary" data-md-component="toc"> + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + <nav class="md-nav md-nav--secondary"> + <ul class="md-nav__list" data-md-scrollfix=""> + <li class="md-nav__item"> + <a class="md-nav__extra_link" href="../_sources/tutorials/trtorchc.rst.txt"> + Show Source + </a> + </li> + <li class="md-nav__item" id="searchbox"> + </li> + </ul> + </nav> + </div> + </div> + </div> + <div class="md-content"> + <article 
class="md-content__inner md-typeset" role="main"> + <span id="id1"> + </span> + <h1 id="tutorials-trtorchc--page-root"> + trtorchc + <a class="headerlink" href="#tutorials-trtorchc--page-root" title="Permalink to this headline"> + ¶ + </a> + </h1> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + trtorchc + </span> + </code> + is a CLI application for using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use the PTQ feature). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + </p> + <p> + All that is required to run the program after compilation is for C++ linking against + <code class="docutils literal notranslate"> + <span class="pre"> + libtrtorch.so + </span> + </code> + or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with + <code class="docutils literal notranslate"> + <span class="pre"> + torch.jit.load() + </span> + </code> + and run like you would run any other module. + </p> + <div class="highlight-txt notranslate"> + <div class="highlight"> + <pre><span></span>trtorchc [input_file_path] [output_file_path] + [input_shapes...] 
{OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + +OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be &gt;= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine 
to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options +</pre> + </div> + </div> + <p> + e.g. + </p> + <div class="highlight-txt notranslate"> + <div class="highlight"> + <pre><span></span>trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 +</pre> + </div> + </div> + </article> + </div> + </div> + </main> + </div> + <footer class="md-footer"> + <div class="md-footer-nav"> + <nav class="md-footer-nav__inner md-grid"> + <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="ptq.html" rel="prev" title="Post Training Quantization (PTQ)"> + <div class="md-flex__cell md-flex__cell--shrink"> + <i class="md-icon md-icon--arrow-back md-footer-nav__button"> + </i> + </div> + <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"> + <span class="md-flex__ellipsis"> + <span class="md-footer-nav__direction"> + Previous + </span> + Post Training Quantization (PTQ) + </span> + </div> + </a> + <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="../contributors/system_overview.html" rel="next" title="System Overview"> + <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"> + <span class="md-flex__ellipsis"> + <span class="md-footer-nav__direction"> + Next + </span> + System Overview + </span> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <i class="md-icon md-icon--arrow-forward md-footer-nav__button"> + </i> + </div> + </a> + </nav> + </div> + <div class="md-footer-meta 
md-typeset"> + <div class="md-footer-meta__inner md-grid"> + <div class="md-footer-copyright"> + <div class="md-footer-copyright__highlight"> + © Copyright 2020, NVIDIA Corporation. + </div> + Created using + <a href="http://www.sphinx-doc.org/"> + Sphinx + </a> + 3.0.3. + and + <a href="https://github.com/bashtage/sphinx-material/"> + Material for + Sphinx + </a> + </div> + </div> + </div> + </footer> + <script src="../_static/javascripts/application.js"> + </script> + <script> + app.initialize({version: "1.0.4", url: {base: ".."}}) + </script> + </body> +</html> \ No newline at end of file diff --git a/docsrc/index.rst b/docsrc/index.rst index 5255135f58..45a1610b49 100644 --- a/docsrc/index.rst +++ b/docsrc/index.rst @@ -23,15 +23,18 @@ Getting Started * :ref:`installation` * :ref:`getting_started` * :ref:`ptq` +* :ref:`trtorchc` + .. toctree:: :caption: Getting Started - :maxdepth: 2 + :maxdepth: 1 :hidden: tutorials/installation tutorials/getting_started tutorials/ptq + tutorials/trtorchc Contributor Documentation -------------------------------- diff --git a/docsrc/tutorials/getting_started.rst b/docsrc/tutorials/getting_started.rst index 0d133a7eab..45c08b8637 100644 --- a/docsrc/tutorials/getting_started.rst +++ b/docsrc/tutorials/getting_started.rst @@ -130,7 +130,8 @@ To compile your TorchScript module with TRTorch, all you need to do is provide t to TRTorch and you will be returned an optimized TorchScript module to run or add into another PyTorch module. The only required setting is the input size or input range which is defined as a list of either list types like ``lists``, ``tuples`` or PyTorch ``size`` objects or dictionaries of minimum, optimial and maximum sizes. You can also specify settings such as -operating precision for the engine or target device. +operating precision for the engine or target device. After compilation you can save the module just like any other module +to load in a deployment application. 
In order to load a TensorRT/TorchScript module, make sure you first import ``trtorch``. .. code-block:: python @@ -152,6 +153,17 @@ operating precision for the engine or target device. input_data = input_data.half() result = trt_ts_module(input_data) + torch.jit.save(trt_ts_module, "trt_ts_module.ts") + +.. code-block:: python + + # Deployment application + import torch + import trtorch + + trt_ts_module = torch.jit.load("trt_ts_module.ts") + input_data = input_data.half() + result = trt_ts_module(input_data) .. _ts_in_cc: @@ -251,7 +263,35 @@ We can also set settings like operating precision to run in FP16. auto trt_mod = trtorch::CompileGraph(mod, info); auto out = trt_mod.forward({in}); -And now we are running the module in FP16 precision. +And now we are running the module in FP16 precision. You can then save the module to load later. + +.. code-block:: c++ + + trt_mod.save("<PATH TO SAVED TRT/TS MOD>") + +TRTorch compiled TorchScript modules are loaded in the same way as normal TorchScript module. Make sure your deployment application is linked against ``libtrtorch.so`` + +.. code-block:: c++ + + #include "torch/script.h" + #include "trtorch/trtorch.h" + + int main(int argc, const char* argv[]) { + torch::jit::Module module; + try { + // Deserialize the ScriptModule from a file using torch::jit::load(). + module = torch::jit::load("<PATH TO SAVED TRT/TS MOD>"); + } + catch (const c10::Error& e) { + std::cerr << "error loading the model\n"; + return -1; + } + + torch::Tensor in = torch::randn({1, 1, 32, 32}, torch::kCUDA); + auto out = mod.forward(in); + + std::cout << "ok\n"; + } If you want to save the engine produced by TRTorch to use in a TensorRT application you can use the ``ConvertGraphToTRTEngine`` API. diff --git a/docsrc/tutorials/trtorchc.rst b/docsrc/tutorials/trtorchc.rst new file mode 100644 index 0000000000..5561ee86ed --- /dev/null +++ b/docsrc/tutorials/trtorchc.rst @@ -0,0 +1,91 @@ +.. 
_trtorchc: + +trtorchc +================================= + +``trtorchc`` is a CLI application for using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use the PTQ feature). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + +All that is required to run the program after compilation is for C++ linking against ``libtrtorch.so`` +or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with ``torch.jit.load()`` and run like you would run any other module. + +.. code-block:: txt + + trtorchc [input_file_path] [output_file_path] + [input_shapes...] {OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + + OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | 
f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be >= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options + + +e.g. + +.. 
code-block:: txt + + trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 diff --git a/py/trtorch/__init__.py b/py/trtorch/__init__.py index e72d8482a5..db46947cb6 100644 --- a/py/trtorch/__init__.py +++ b/py/trtorch/__init__.py @@ -11,3 +11,7 @@ from trtorch._compiler import * from trtorch._types import * from trtorch import logging + +def _register_with_torch(): + trtorch_dir = os.path.dirname(__file__) + torch.ops.load_library(trtorch_dir + '/lib/trtorch.so') \ No newline at end of file diff --git a/tests/modules/BUILD b/tests/modules/BUILD index cd282a9756..ac837e1460 100644 --- a/tests/modules/BUILD +++ b/tests/modules/BUILD @@ -1,3 +1,5 @@ +package(default_visibility = ["//visibility:public"]) + config_setting( name = "use_pre_cxx11_abi", values = { @@ -15,7 +17,19 @@ test_suite( tests = [ ":test_modules_as_engines", ":test_compiled_modules", - ":test_multiple_registered_engines" + ":test_multiple_registered_engines", + ":test_serialization" + ] +) + +cc_test( + name = "test_serialization", + srcs = ["test_serialization.cpp"], + deps = [ + ":module_test", + ], + data = [ + ":jit_models" ] ) diff --git a/tests/modules/hub.py b/tests/modules/hub.py index 873dfff9ac..a35dd41b05 100644 --- a/tests/modules/hub.py +++ b/tests/modules/hub.py @@ -2,64 +2,68 @@ import torchvision.models as models models = { - "alexnet": { - "model": models.alexnet(pretrained=True), - "path": "both" - }, - "vgg16": { - "model": models.vgg16(pretrained=True), - "path": "both" - }, - "squeezenet": { - "model": models.squeezenet1_0(pretrained=True), - "path": "both" - }, - "densenet": { - "model": models.densenet161(pretrained=True), - "path": "both" - }, - "inception_v3": { - "model": models.inception_v3(pretrained=True), - "path": "both" - }, - #"googlenet": models.googlenet(pretrained=True), - "shufflenet": { - "model": models.shufflenet_v2_x1_0(pretrained=True), - "path": "both" - }, - "mobilenet_v2": { - "model": 
models.mobilenet_v2(pretrained=True), - "path": "both" - }, - "resnext50_32x4d": { - "model": models.resnext50_32x4d(pretrained=True), - "path": "both" - }, - "wideresnet50_2": { - "model": models.wide_resnet50_2(pretrained=True), - "path": "both" - }, - "mnasnet": { - "model": models.mnasnet1_0(pretrained=True), - "path": "both" - }, - "resnet18": { - "model": torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True), - "path": "both" - }, - "resnet50": { - "model":torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=True), - "path": "both" - }, - "fcn_resnet101": { - "model": torch.hub.load('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=True), - "path": "script" - }, - "ssd": { - "model": torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math="fp32"), - "path": "trace" - } + "alexnet": { + "model": models.alexnet(pretrained=True), + "path": "both" + }, + "vgg16": { + "model": models.vgg16(pretrained=True), + "path": "both" + }, + "squeezenet": { + "model": models.squeezenet1_0(pretrained=True), + "path": "both" + }, + "densenet": { + "model": models.densenet161(pretrained=True), + "path": "both" + }, + "inception_v3": { + "model": models.inception_v3(pretrained=True), + "path": "both" + }, + #"googlenet": models.googlenet(pretrained=True), + "shufflenet": { + "model": models.shufflenet_v2_x1_0(pretrained=True), + "path": "both" + }, + "mobilenet_v2": { + "model": models.mobilenet_v2(pretrained=True), + "path": "both" + }, + "resnext50_32x4d": { + "model": models.resnext50_32x4d(pretrained=True), + "path": "both" + }, + "wideresnet50_2": { + "model": models.wide_resnet50_2(pretrained=True), + "path": "both" + }, + "mnasnet": { + "model": models.mnasnet1_0(pretrained=True), + "path": "both" + }, + "resnet18": { + "model": torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True), + "path": "both" + }, + "resnet50": { + "model":torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=True), + 
"path": "both" + }, + "fcn_resnet101": { + "model": torch.hub.load('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=True), + "path": "script" + }, + "ssd": { + "model": torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math="fp32"), + "path": "trace" + }, + "faster_rcnn": { + "model": models.detection.fasterrcnn_resnet50_fpn(pretrained=True), + "path": "script" } +} for n, m in models.items(): print("Downloading {}".format(n)) diff --git a/tests/modules/test_serialization.cpp b/tests/modules/test_serialization.cpp new file mode 100644 index 0000000000..0e9c2d59f4 --- /dev/null +++ b/tests/modules/test_serialization.cpp @@ -0,0 +1,34 @@ +#include "module_test.h" + +TEST_P(ModuleTests, SerializedModuleIsStillCorrect) { + std::vector<torch::jit::IValue> post_serialized_inputs_ivalues; + std::vector<torch::jit::IValue> pre_serialized_inputs_ivalues; + for (auto in_shape : input_shapes) { + auto in = at::randint(5, in_shape, {at::kCUDA}); + post_serialized_inputs_ivalues.push_back(in.clone()); + pre_serialized_inputs_ivalues.push_back(in.clone()); + } + + auto pre_serialized_mod = trtorch::CompileGraph(mod, input_shapes); + torch::jit::IValue pre_serialized_results_ivalues = trtorch::tests::util::RunModuleForward(pre_serialized_mod, pre_serialized_inputs_ivalues); + std::vector<at::Tensor> pre_serialized_results; + pre_serialized_results.push_back(pre_serialized_results_ivalues.toTensor()); + + pre_serialized_mod.save("test_serialization_mod.ts"); + auto post_serialized_mod = torch::jit::load("test_serialization_mod.ts"); + + torch::jit::IValue post_serialized_results_ivalues = trtorch::tests::util::RunModuleForward(post_serialized_mod, post_serialized_inputs_ivalues); + std::vector<at::Tensor> post_serialized_results; + post_serialized_results.push_back(post_serialized_results_ivalues.toTensor()); + + for (size_t i = 0; i < pre_serialized_results.size(); i++) { + ASSERT_TRUE(trtorch::tests::util::almostEqual(post_serialized_results[i], 
pre_serialized_results[i].reshape_as(post_serialized_results[i]), 2e-5)); + } +} + + +INSTANTIATE_TEST_SUITE_P(CompiledModuleForwardIsCloseSuite, + ModuleTests, + testing::Values( + PathAndInSize({"tests/modules/resnet18_traced.jit.pt", + {{1,3,224,224}}}))); diff --git a/third_party/args/BUILD b/third_party/args/BUILD new file mode 100644 index 0000000000..5d7a14bb2c --- /dev/null +++ b/third_party/args/BUILD @@ -0,0 +1,6 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "args", + hdrs = ["args.hpp"], +) diff --git a/third_party/args/LICENSE b/third_party/args/LICENSE new file mode 100644 index 0000000000..5c792a5edf --- /dev/null +++ b/third_party/args/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2016-2017 Taylor C. Richberger <taywee@gmx.com> and Pavel Belikov + + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/third_party/args/args.hpp b/third_party/args/args.hpp new file mode 100644 index 0000000000..1a595268c0 --- /dev/null +++ b/third_party/args/args.hpp @@ -0,0 +1,4305 @@ +/* A simple header-only C++ argument parser library. + * + * https://github.com/Taywee/args + * + * Copyright (c) 2016-2019 Taylor C. Richberger <taywee@gmx.com> and Pavel + * Belikov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +/** \file args.hxx + * \brief this single-header lets you use all of the args functionality + * + * The important stuff is done inside the args namespace + */ + +#ifndef ARGS_HXX +#define ARGS_HXX + +#include <algorithm> +#include <iterator> +#include <exception> +#include <functional> +#include <sstream> +#include <string> +#include <tuple> +#include <vector> +#include <unordered_map> +#include <unordered_set> +#include <type_traits> +#include <cstddef> + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +#define noexcept +#endif + +#ifdef ARGS_TESTNAMESPACE +namespace argstest +{ +#else + +/** \namespace args + * \brief contains all the functionality of the args library + */ +namespace args +{ +#endif + /** Getter to grab the value from the argument type. + * + * If the Get() function of the type returns a reference, so does this, and + * the value will be modifiable. + */ + template <typename Option> + auto get(Option &option_) -> decltype(option_.Get()) + { + return option_.Get(); + } + + /** (INTERNAL) Count UTF-8 glyphs + * + * This is not reliable, and will fail for combinatory glyphs, but it's + * good enough here for now. + * + * \param string The string to count glyphs from + * \return The UTF-8 glyphs in the string + */ + inline std::string::size_type Glyphs(const std::string &string_) + { + std::string::size_type length = 0; + for (const char c: string_) + { + if ((c & 0xc0) != 0x80) + { + ++length; + } + } + return length; + } + + /** (INTERNAL) Wrap a vector of words into a vector of lines + * + * Empty words are skipped. Word "\n" forces wrapping. 
+ * + * \param begin The begin iterator + * \param end The end iterator + * \param width The width of the body + * \param firstlinewidth the width of the first line, defaults to the width of the body + * \param firstlineindent the indent of the first line, defaults to 0 + * \return the vector of lines + */ + template <typename It> + inline std::vector<std::string> Wrap(It begin, + It end, + const std::string::size_type width, + std::string::size_type firstlinewidth = 0, + std::string::size_type firstlineindent = 0) + { + std::vector<std::string> output; + std::string line(firstlineindent, ' '); + bool empty = true; + + if (firstlinewidth == 0) + { + firstlinewidth = width; + } + + auto currentwidth = firstlinewidth; + + for (auto it = begin; it != end; ++it) + { + if (it->empty()) + { + continue; + } + + if (*it == "\n") + { + if (!empty) + { + output.push_back(line); + line.clear(); + empty = true; + currentwidth = width; + } + + continue; + } + + auto itemsize = Glyphs(*it); + if ((line.length() + 1 + itemsize) > currentwidth) + { + if (!empty) + { + output.push_back(line); + line.clear(); + empty = true; + currentwidth = width; + } + } + + if (itemsize > 0) + { + if (!empty) + { + line += ' '; + } + + line += *it; + empty = false; + } + } + + if (!empty) + { + output.push_back(line); + } + + return output; + } + + namespace detail + { + template <typename T> + std::string Join(const T& array, const std::string &delimiter) + { + std::string res; + for (auto &element : array) + { + if (!res.empty()) + { + res += delimiter; + } + + res += element; + } + + return res; + } + } + + /** (INTERNAL) Wrap a string into a vector of lines + * + * This is quick and hacky, but works well enough. 
You can specify a + * different width for the first line + * + * \param width The width of the body + * \param firstlinewid the width of the first line, defaults to the width of the body + * \return the vector of lines + */ + inline std::vector<std::string> Wrap(const std::string &in, const std::string::size_type width, std::string::size_type firstlinewidth = 0) + { + // Preserve existing line breaks + const auto newlineloc = in.find('\n'); + if (newlineloc != in.npos) + { + auto first = Wrap(std::string(in, 0, newlineloc), width); + auto second = Wrap(std::string(in, newlineloc + 1), width); + first.insert( + std::end(first), + std::make_move_iterator(std::begin(second)), + std::make_move_iterator(std::end(second))); + return first; + } + + std::istringstream stream(in); + std::string::size_type indent = 0; + + for (char c : in) + { + if (!isspace(c)) + { + break; + } + ++indent; + } + + return Wrap(std::istream_iterator<std::string>(stream), std::istream_iterator<std::string>(), + width, firstlinewidth, indent); + } + +#ifdef ARGS_NOEXCEPT + /// Error class, for when ARGS_NOEXCEPT is defined + enum class Error + { + None, + Usage, + Parse, + Validation, + Required, + Map, + Extra, + Help, + Subparser, + Completion, + }; +#else + /** Base error class + */ + class Error : public std::runtime_error + { + public: + Error(const std::string &problem) : std::runtime_error(problem) {} + virtual ~Error() {} + }; + + /** Errors that occur during usage + */ + class UsageError : public Error + { + public: + UsageError(const std::string &problem) : Error(problem) {} + virtual ~UsageError() {} + }; + + /** Errors that occur during regular parsing + */ + class ParseError : public Error + { + public: + ParseError(const std::string &problem) : Error(problem) {} + virtual ~ParseError() {} + }; + + /** Errors that are detected from group validation after parsing finishes + */ + class ValidationError : public Error + { + public: + ValidationError(const std::string &problem) : 
Error(problem) {} + virtual ~ValidationError() {} + }; + + /** Errors that when a required flag is omitted + */ + class RequiredError : public ValidationError + { + public: + RequiredError(const std::string &problem) : ValidationError(problem) {} + virtual ~RequiredError() {} + }; + + /** Errors in map lookups + */ + class MapError : public ParseError + { + public: + MapError(const std::string &problem) : ParseError(problem) {} + virtual ~MapError() {} + }; + + /** Error that occurs when a singular flag is specified multiple times + */ + class ExtraError : public ParseError + { + public: + ExtraError(const std::string &problem) : ParseError(problem) {} + virtual ~ExtraError() {} + }; + + /** An exception that indicates that the user has requested help + */ + class Help : public Error + { + public: + Help(const std::string &flag) : Error(flag) {} + virtual ~Help() {} + }; + + /** (INTERNAL) An exception that emulates coroutine-like control flow for subparsers. + */ + class SubparserError : public Error + { + public: + SubparserError() : Error("") {} + virtual ~SubparserError() {} + }; + + /** An exception that contains autocompletion reply + */ + class Completion : public Error + { + public: + Completion(const std::string &flag) : Error(flag) {} + virtual ~Completion() {} + }; +#endif + + /** A simple unified option type for unified initializer lists for the Matcher class. 
+ */ + struct EitherFlag + { + const bool isShort; + const char shortFlag; + const std::string longFlag; + EitherFlag(const std::string &flag) : isShort(false), shortFlag(), longFlag(flag) {} + EitherFlag(const char *flag) : isShort(false), shortFlag(), longFlag(flag) {} + EitherFlag(const char flag) : isShort(true), shortFlag(flag), longFlag() {} + + /** Get just the long flags from an initializer list of EitherFlags + */ + static std::unordered_set<std::string> GetLong(std::initializer_list<EitherFlag> flags) + { + std::unordered_set<std::string> longFlags; + for (const EitherFlag &flag: flags) + { + if (!flag.isShort) + { + longFlags.insert(flag.longFlag); + } + } + return longFlags; + } + + /** Get just the short flags from an initializer list of EitherFlags + */ + static std::unordered_set<char> GetShort(std::initializer_list<EitherFlag> flags) + { + std::unordered_set<char> shortFlags; + for (const EitherFlag &flag: flags) + { + if (flag.isShort) + { + shortFlags.insert(flag.shortFlag); + } + } + return shortFlags; + } + + std::string str() const + { + return isShort ? std::string(1, shortFlag) : longFlag; + } + + std::string str(const std::string &shortPrefix, const std::string &longPrefix) const + { + return isShort ? shortPrefix + std::string(1, shortFlag) : longPrefix + longFlag; + } + }; + + + + /** A class of "matchers", specifying short and flags that can possibly be + * matched. + * + * This is supposed to be constructed and then passed in, not used directly + * from user code. 
+ */ + class Matcher + { + private: + const std::unordered_set<char> shortFlags; + const std::unordered_set<std::string> longFlags; + + public: + /** Specify short and long flags separately as iterators + * + * ex: `args::Matcher(shortFlags.begin(), shortFlags.end(), longFlags.begin(), longFlags.end())` + */ + template <typename ShortIt, typename LongIt> + Matcher(ShortIt shortFlagsStart, ShortIt shortFlagsEnd, LongIt longFlagsStart, LongIt longFlagsEnd) : + shortFlags(shortFlagsStart, shortFlagsEnd), + longFlags(longFlagsStart, longFlagsEnd) + { + if (shortFlags.empty() && longFlags.empty()) + { +#ifndef ARGS_NOEXCEPT + throw UsageError("empty Matcher"); +#endif + } + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + Error GetError() const noexcept + { + return shortFlags.empty() && longFlags.empty() ? Error::Usage : Error::None; + } +#endif + + /** Specify short and long flags separately as iterables + * + * ex: `args::Matcher(shortFlags, longFlags)` + */ + template <typename Short, typename Long> + Matcher(Short &&shortIn, Long &&longIn) : + Matcher(std::begin(shortIn), std::end(shortIn), std::begin(longIn), std::end(longIn)) + {} + + /** Specify a mixed single initializer-list of both short and long flags + * + * This is the fancy one. It takes a single initializer list of + * any number of any mixed kinds of flags. 
Chars are + * automatically interpreted as short flags, and strings are + * automatically interpreted as long flags: + * + * args::Matcher{'a'} + * args::Matcher{"foo"} + * args::Matcher{'h', "help"} + * args::Matcher{"foo", 'f', 'F', "FoO"} + */ + Matcher(std::initializer_list<EitherFlag> in) : + Matcher(EitherFlag::GetShort(in), EitherFlag::GetLong(in)) {} + + Matcher(Matcher &&other) : shortFlags(std::move(other.shortFlags)), longFlags(std::move(other.longFlags)) + {} + + ~Matcher() {} + + /** (INTERNAL) Check if there is a match of a short flag + */ + bool Match(const char flag) const + { + return shortFlags.find(flag) != shortFlags.end(); + } + + /** (INTERNAL) Check if there is a match of a long flag + */ + bool Match(const std::string &flag) const + { + return longFlags.find(flag) != longFlags.end(); + } + + /** (INTERNAL) Check if there is a match of a flag + */ + bool Match(const EitherFlag &flag) const + { + return flag.isShort ? Match(flag.shortFlag) : Match(flag.longFlag); + } + + /** (INTERNAL) Get all flag strings as a vector, with the prefixes embedded + */ + std::vector<EitherFlag> GetFlagStrings() const + { + std::vector<EitherFlag> flagStrings; + flagStrings.reserve(shortFlags.size() + longFlags.size()); + for (const char flag: shortFlags) + { + flagStrings.emplace_back(flag); + } + for (const std::string &flag: longFlags) + { + flagStrings.emplace_back(flag); + } + return flagStrings; + } + + /** (INTERNAL) Get long flag if it exists or any short flag + */ + EitherFlag GetLongOrAny() const + { + if (!longFlags.empty()) + { + return *longFlags.begin(); + } + + if (!shortFlags.empty()) + { + return *shortFlags.begin(); + } + + // should be unreachable + return ' '; + } + + /** (INTERNAL) Get short flag if it exists or any long flag + */ + EitherFlag GetShortOrAny() const + { + if (!shortFlags.empty()) + { + return *shortFlags.begin(); + } + + if (!longFlags.empty()) + { + return *longFlags.begin(); + } + + // should be unreachable + return ' '; + } 
+ }; + + /** Attributes for flags. + */ + enum class Options + { + /** Default options. + */ + None = 0x0, + + /** Flag can't be passed multiple times. + */ + Single = 0x01, + + /** Flag can't be omitted. + */ + Required = 0x02, + + /** Flag is excluded from usage line. + */ + HiddenFromUsage = 0x04, + + /** Flag is excluded from options help. + */ + HiddenFromDescription = 0x08, + + /** Flag is global and can be used in any subcommand. + */ + Global = 0x10, + + /** Flag stops a parser. + */ + KickOut = 0x20, + + /** Flag is excluded from auto completion. + */ + HiddenFromCompletion = 0x40, + + /** Flag is excluded from options help and usage line + */ + Hidden = HiddenFromUsage | HiddenFromDescription | HiddenFromCompletion, + }; + + inline Options operator | (Options lhs, Options rhs) + { + return static_cast<Options>(static_cast<int>(lhs) | static_cast<int>(rhs)); + } + + inline Options operator & (Options lhs, Options rhs) + { + return static_cast<Options>(static_cast<int>(lhs) & static_cast<int>(rhs)); + } + + class FlagBase; + class PositionalBase; + class Command; + class ArgumentParser; + + /** A simple structure of parameters for easy user-modifyable help menus + */ + struct HelpParams + { + /** The width of the help menu + */ + unsigned int width = 80; + /** The indent of the program line + */ + unsigned int progindent = 2; + /** The indent of the program trailing lines for long parameters + */ + unsigned int progtailindent = 4; + /** The indent of the description and epilogs + */ + unsigned int descriptionindent = 4; + /** The indent of the flags + */ + unsigned int flagindent = 6; + /** The indent of the flag descriptions + */ + unsigned int helpindent = 40; + /** The additional indent each group adds + */ + unsigned int eachgroupindent = 2; + + /** The minimum gutter between each flag and its help + */ + unsigned int gutter = 1; + + /** Show the terminator when both options and positional parameters are present + */ + bool showTerminator = true; + + 
/** Show the {OPTIONS} on the prog line when this is true + */ + bool showProglineOptions = true; + + /** Show the positionals on the prog line when this is true + */ + bool showProglinePositionals = true; + + /** The prefix for short flags + */ + std::string shortPrefix; + + /** The prefix for long flags + */ + std::string longPrefix; + + /** The separator for short flags + */ + std::string shortSeparator; + + /** The separator for long flags + */ + std::string longSeparator; + + /** The program name for help generation + */ + std::string programName; + + /** Show command's flags + */ + bool showCommandChildren = false; + + /** Show command's descriptions and epilog + */ + bool showCommandFullHelp = false; + + /** The postfix for progline when showProglineOptions is true and command has any flags + */ + std::string proglineOptions = "{OPTIONS}"; + + /** The prefix for progline when command has any subcommands + */ + std::string proglineCommand = "COMMAND"; + + /** The prefix for progline value + */ + std::string proglineValueOpen = " <"; + + /** The postfix for progline value + */ + std::string proglineValueClose = ">"; + + /** The prefix for progline required argument + */ + std::string proglineRequiredOpen = ""; + + /** The postfix for progline required argument + */ + std::string proglineRequiredClose = ""; + + /** The prefix for progline non-required argument + */ + std::string proglineNonrequiredOpen = "["; + + /** The postfix for progline non-required argument + */ + std::string proglineNonrequiredClose = "]"; + + /** Show flags in program line + */ + bool proglineShowFlags = false; + + /** Use short flags in program lines when possible + */ + bool proglinePreferShortFlags = false; + + /** Program line prefix + */ + std::string usageString; + + /** String shown in help before flags descriptions + */ + std::string optionsString = "OPTIONS:"; + + /** Display value name after all the long and short flags + */ + bool useValueNameOnce = false; + + /** Show value 
name + */ + bool showValueName = true; + + /** Add newline before flag description + */ + bool addNewlineBeforeDescription = false; + + /** The prefix for option value + */ + std::string valueOpen = "["; + + /** The postfix for option value + */ + std::string valueClose = "]"; + + /** Add choices to argument description + */ + bool addChoices = false; + + /** The prefix for choices + */ + std::string choiceString = "\nOne of: "; + + /** Add default values to argument description + */ + bool addDefault = false; + + /** The prefix for default values + */ + std::string defaultString = "\nDefault: "; + }; + + /** A number of arguments which can be consumed by an option. + * + * Represents a closed interval [min, max]. + */ + struct Nargs + { + const size_t min; + const size_t max; + + Nargs(size_t min_, size_t max_) : min{min_}, max{max_} + { +#ifndef ARGS_NOEXCEPT + if (max < min) + { + throw UsageError("Nargs: max > min"); + } +#endif + } + + Nargs(size_t num_) : min{num_}, max{num_} + { + } + + friend bool operator == (const Nargs &lhs, const Nargs &rhs) + { + return lhs.min == rhs.min && lhs.max == rhs.max; + } + + friend bool operator != (const Nargs &lhs, const Nargs &rhs) + { + return !(lhs == rhs); + } + }; + + /** Base class for all match types + */ + class Base + { + private: + Options options = {}; + + protected: + bool matched = false; + const std::string help; +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + mutable Error error = Error::None; + mutable std::string errorMsg; +#endif + + public: + Base(const std::string &help_, Options options_ = {}) : options(options_), help(help_) {} + virtual ~Base() {} + + Options GetOptions() const noexcept + { + return options; + } + + bool IsRequired() const noexcept + { + return (GetOptions() & Options::Required) != Options::None; + } + + virtual bool Matched() const noexcept + { + return matched; + } + + virtual void Validate(const std::string &, const std::string &) const + { + } + + operator bool() const 
noexcept + { + return Matched(); + } + + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &, const unsigned indentLevel) const + { + std::tuple<std::string, std::string, unsigned> description; + std::get<1>(description) = help; + std::get<2>(description) = indentLevel; + return { std::move(description) }; + } + + virtual std::vector<Command*> GetCommands() + { + return {}; + } + + virtual bool IsGroup() const + { + return false; + } + + virtual FlagBase *Match(const EitherFlag &) + { + return nullptr; + } + + virtual PositionalBase *GetNextPositional() + { + return nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() + { + return {}; + } + + virtual bool HasFlag() const + { + return false; + } + + virtual bool HasPositional() const + { + return false; + } + + virtual bool HasCommand() const + { + return false; + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &) const + { + return {}; + } + + /// Sets a kick-out value for building subparsers + void KickOut(bool kickout_) noexcept + { + if (kickout_) + { + options = options | Options::KickOut; + } + else + { + options = static_cast<Options>(static_cast<int>(options) & ~static_cast<int>(Options::KickOut)); + } + } + + /// Gets the kick-out value for building subparsers + bool KickOut() const noexcept + { + return (options & Options::KickOut) != Options::None; + } + + virtual void Reset() noexcept + { + matched = false; +#ifdef ARGS_NOEXCEPT + error = Error::None; + errorMsg.clear(); +#endif + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const + { + return error; + } + + /// Only for ARGS_NOEXCEPT + std::string GetErrorMsg() const + { + return errorMsg; + } +#endif + }; + + /** Base class for all match types that have a name + */ + class NamedBase : public Base + { + protected: + const std::string name; + bool kickout = false; + std::string defaultString; + bool defaultStringManual = false; + 
std::vector<std::string> choicesStrings; + bool choicesStringManual = false; + + virtual std::string GetDefaultString(const HelpParams&) const { return {}; } + + virtual std::vector<std::string> GetChoicesStrings(const HelpParams&) const { return {}; } + + virtual std::string GetNameString(const HelpParams&) const { return Name(); } + + void AddDescriptionPostfix(std::string &dest, const bool isManual, const std::string &manual, bool isGenerated, const std::string &generated, const std::string &str) const + { + if (isManual && !manual.empty()) + { + dest += str; + dest += manual; + } + else if (!isManual && isGenerated && !generated.empty()) + { + dest += str; + dest += generated; + } + } + + public: + NamedBase(const std::string &name_, const std::string &help_, Options options_ = {}) : Base(help_, options_), name(name_) {} + virtual ~NamedBase() {} + + /** Sets default value string that will be added to argument description. + * Use empty string to disable it for this argument. + */ + void HelpDefault(const std::string &str) + { + defaultStringManual = true; + defaultString = str; + } + + /** Gets default value string that will be added to argument description. + */ + std::string HelpDefault(const HelpParams &params) const + { + return defaultStringManual ? defaultString : GetDefaultString(params); + } + + /** Sets choices strings that will be added to argument description. + * Use empty vector to disable it for this argument. + */ + void HelpChoices(const std::vector<std::string> &array) + { + choicesStringManual = true; + choicesStrings = array; + } + + /** Gets choices strings that will be added to argument description. + */ + std::vector<std::string> HelpChoices(const HelpParams &params) const + { + return choicesStringManual ? 
choicesStrings : GetChoicesStrings(params); + } + + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &params, const unsigned indentLevel) const override + { + std::tuple<std::string, std::string, unsigned> description; + std::get<0>(description) = GetNameString(params); + std::get<1>(description) = help; + std::get<2>(description) = indentLevel; + + AddDescriptionPostfix(std::get<1>(description), choicesStringManual, detail::Join(choicesStrings, ", "), params.addChoices, detail::Join(GetChoicesStrings(params), ", "), params.choiceString); + AddDescriptionPostfix(std::get<1>(description), defaultStringManual, defaultString, params.addDefault, GetDefaultString(params), params.defaultString); + + return { std::move(description) }; + } + + virtual std::string Name() const + { + return name; + } + }; + + namespace detail + { + template<typename T> + using vector = std::vector<T, std::allocator<T>>; + + template<typename K, typename T> + using unordered_map = std::unordered_map<K, T, std::hash<K>, + std::equal_to<K>, std::allocator<std::pair<const K, T> > >; + + template<typename S, typename T> + class is_streamable + { + template<typename SS, typename TT> + static auto test(int) + -> decltype( std::declval<SS&>() << std::declval<TT>(), std::true_type() ); + + template<typename, typename> + static auto test(...) 
-> std::false_type; + + public: + using type = decltype(test<S,T>(0)); + }; + + template <typename T> + using IsConvertableToString = typename is_streamable<std::ostringstream, T>::type; + + template <typename T> + typename std::enable_if<IsConvertableToString<T>::value, std::string>::type + ToString(const T &value) + { + std::ostringstream s; + s << value; + return s.str(); + } + + template <typename T> + typename std::enable_if<!IsConvertableToString<T>::value, std::string>::type + ToString(const T &) + { + return {}; + } + + template <typename T> + std::vector<std::string> MapKeysToStrings(const T &map) + { + std::vector<std::string> res; + using K = typename std::decay<decltype(std::begin(map)->first)>::type; + if (IsConvertableToString<K>::value) + { + for (const auto &p : map) + { + res.push_back(detail::ToString(p.first)); + } + + std::sort(res.begin(), res.end()); + } + return res; + } + } + + /** Base class for all flag options + */ + class FlagBase : public NamedBase + { + protected: + const Matcher matcher; + + virtual std::string GetNameString(const HelpParams &params) const override + { + const std::string postfix = !params.showValueName || NumberOfArguments() == 0 ? std::string() : Name(); + std::string flags; + const auto flagStrings = matcher.GetFlagStrings(); + const bool useValueNameOnce = flagStrings.size() == 1 ? false : params.useValueNameOnce; + for (auto it = flagStrings.begin(); it != flagStrings.end(); ++it) + { + auto &flag = *it; + if (it != flagStrings.begin()) + { + flags += ", "; + } + + flags += flag.isShort ? params.shortPrefix : params.longPrefix; + flags += flag.str(); + + if (!postfix.empty() && (!useValueNameOnce || it + 1 == flagStrings.end())) + { + flags += flag.isShort ? 
params.shortSeparator : params.longSeparator; + flags += params.valueOpen + postfix + params.valueClose; + } + } + + return flags; + } + + public: + FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false) : NamedBase(name_, help_, extraError_ ? Options::Single : Options()), matcher(std::move(matcher_)) {} + + FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : NamedBase(name_, help_, options_), matcher(std::move(matcher_)) {} + + virtual ~FlagBase() {} + + virtual FlagBase *Match(const EitherFlag &flag) override + { + if (matcher.Match(flag)) + { + if ((GetOptions() & Options::Single) != Options::None && matched) + { + std::ostringstream problem; + problem << "Flag '" << flag.str() << "' was passed multiple times, but is only allowed to be passed once"; +#ifdef ARGS_NOEXCEPT + error = Error::Extra; + errorMsg = problem.str(); +#else + throw ExtraError(problem.str()); +#endif + } + matched = true; + return this; + } + return nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() override + { + return { this }; + } + + const Matcher &GetMatcher() const + { + return matcher; + } + + virtual void Validate(const std::string &shortPrefix, const std::string &longPrefix) const override + { + if (!Matched() && IsRequired()) + { + std::ostringstream problem; + problem << "Flag '" << matcher.GetLongOrAny().str(shortPrefix, longPrefix) << "' is required"; +#ifdef ARGS_NOEXCEPT + error = Error::Required; + errorMsg = problem.str(); +#else + throw RequiredError(problem.str()); +#endif + } + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + if (!params.proglineShowFlags) + { + return {}; + } + + const std::string postfix = NumberOfArguments() == 0 ? std::string() : Name(); + const EitherFlag flag = params.proglinePreferShortFlags ? 
matcher.GetShortOrAny() : matcher.GetLongOrAny(); + std::string res = flag.str(params.shortPrefix, params.longPrefix); + if (!postfix.empty()) + { + res += params.proglineValueOpen + postfix + params.proglineValueClose; + } + + return { IsRequired() ? params.proglineRequiredOpen + res + params.proglineRequiredClose + : params.proglineNonrequiredOpen + res + params.proglineNonrequiredClose }; + } + + virtual bool HasFlag() const override + { + return true; + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const override + { + const auto nargs = NumberOfArguments(); + if (nargs.min > nargs.max) + { + return Error::Usage; + } + + const auto matcherError = matcher.GetError(); + if (matcherError != Error::None) + { + return matcherError; + } + + return error; + } +#endif + + /** Defines how many values can be consumed by this option. + * + * \return closed interval [min, max] + */ + virtual Nargs NumberOfArguments() const noexcept = 0; + + /** Parse values of this option. + * + * \param value Vector of values. It's size must be in NumberOfArguments() interval. 
+ */ + virtual void ParseValue(const std::vector<std::string> &value) = 0; + }; + + /** Base class for value-accepting flag options + */ + class ValueFlagBase : public FlagBase + { + public: + ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false) : FlagBase(name_, help_, std::move(matcher_), extraError_) {} + ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : FlagBase(name_, help_, std::move(matcher_), options_) {} + virtual ~ValueFlagBase() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return 1; + } + }; + + class CompletionFlag : public ValueFlagBase + { + public: + std::vector<std::string> reply; + size_t cword = 0; + std::string syntax; + + template <typename GroupClass> + CompletionFlag(GroupClass &group_, Matcher &&matcher_): ValueFlagBase("completion", "completion flag", std::move(matcher_), Options::Hidden) + { + group_.AddCompletion(*this); + } + + virtual ~CompletionFlag() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return 2; + } + + virtual void ParseValue(const std::vector<std::string> &value_) override + { + syntax = value_.at(0); + std::istringstream(value_.at(1)) >> cword; + } + + /** Get the completion reply + */ + std::string Get() noexcept + { + return detail::Join(reply, "\n"); + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + cword = 0; + syntax.clear(); + reply.clear(); + } + }; + + + /** Base class for positional options + */ + class PositionalBase : public NamedBase + { + protected: + bool ready; + + public: + PositionalBase(const std::string &name_, const std::string &help_, Options options_ = {}) : NamedBase(name_, help_, options_), ready(true) {} + virtual ~PositionalBase() {} + + bool Ready() + { + return ready; + } + + virtual void ParseValue(const std::string &value_) = 0; + + virtual void Reset() noexcept override + { + matched = false; + 
ready = true; +#ifdef ARGS_NOEXCEPT + error = Error::None; + errorMsg.clear(); +#endif + } + + virtual PositionalBase *GetNextPositional() override + { + return Ready() ? this : nullptr; + } + + virtual bool HasPositional() const override + { + return true; + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + return { IsRequired() ? params.proglineRequiredOpen + Name() + params.proglineRequiredClose + : params.proglineNonrequiredOpen + Name() + params.proglineNonrequiredClose }; + } + + virtual void Validate(const std::string &, const std::string &) const override + { + if (IsRequired() && !Matched()) + { + std::ostringstream problem; + problem << "Option '" << Name() << "' is required"; +#ifdef ARGS_NOEXCEPT + error = Error::Required; + errorMsg = problem.str(); +#else + throw RequiredError(problem.str()); +#endif + } + } + }; + + /** Class for all kinds of validating groups, including ArgumentParser + */ + class Group : public Base + { + private: + std::vector<Base*> children; + std::function<bool(const Group &)> validator; + + public: + /** Default validators + */ + struct Validators + { + static bool Xor(const Group &group) + { + return group.MatchedChildren() == 1; + } + + static bool AtLeastOne(const Group &group) + { + return group.MatchedChildren() >= 1; + } + + static bool AtMostOne(const Group &group) + { + return group.MatchedChildren() <= 1; + } + + static bool All(const Group &group) + { + return group.Children().size() == group.MatchedChildren(); + } + + static bool AllOrNone(const Group &group) + { + return (All(group) || None(group)); + } + + static bool AllChildGroups(const Group &group) + { + return std::none_of(std::begin(group.Children()), std::end(group.Children()), [](const Base* child) -> bool { + return child->IsGroup() && !child->Matched(); + }); + } + + static bool DontCare(const Group &) + { + return true; + } + + static bool CareTooMuch(const Group &) + { + return false; + } + + static 
bool None(const Group &group) + { + return group.MatchedChildren() == 0; + } + }; + /// If help is empty, this group will not be printed in help output + Group(const std::string &help_ = std::string(), const std::function<bool(const Group &)> &validator_ = Validators::DontCare, Options options_ = {}) : Base(help_, options_), validator(validator_) {} + /// If help is empty, this group will not be printed in help output + Group(Group &group_, const std::string &help_ = std::string(), const std::function<bool(const Group &)> &validator_ = Validators::DontCare, Options options_ = {}) : Base(help_, options_), validator(validator_) + { + group_.Add(*this); + } + virtual ~Group() {} + + /** Append a child to this Group. + */ + void Add(Base &child) + { + children.emplace_back(&child); + } + + /** Get all this group's children + */ + const std::vector<Base *> &Children() const + { + return children; + } + + /** Return the first FlagBase that matches flag, or nullptr + * + * \param flag The flag with prefixes stripped + * \return the first matching FlagBase pointer, or nullptr if there is no match + */ + virtual FlagBase *Match(const EitherFlag &flag) override + { + for (Base *child: Children()) + { + if (FlagBase *match = child->Match(flag)) + { + return match; + } + } + return nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() override + { + std::vector<FlagBase*> res; + for (Base *child: Children()) + { + auto childRes = child->GetAllFlags(); + res.insert(res.end(), childRes.begin(), childRes.end()); + } + return res; + } + + virtual void Validate(const std::string &shortPrefix, const std::string &longPrefix) const override + { + for (Base *child: Children()) + { + child->Validate(shortPrefix, longPrefix); + } + } + + /** Get the next ready positional, or nullptr if there is none + * + * \return the first ready PositionalBase pointer, or nullptr if there is no match + */ + virtual PositionalBase *GetNextPositional() override + { + for (Base *child: Children()) 
+ { + if (auto next = child->GetNextPositional()) + { + return next; + } + } + return nullptr; + } + + /** Get whether this has any FlagBase children + * + * \return Whether or not there are any FlagBase children + */ + virtual bool HasFlag() const override + { + return std::any_of(Children().begin(), Children().end(), [](Base *child) { return child->HasFlag(); }); + } + + /** Get whether this has any PositionalBase children + * + * \return Whether or not there are any PositionalBase children + */ + virtual bool HasPositional() const override + { + return std::any_of(Children().begin(), Children().end(), [](Base *child) { return child->HasPositional(); }); + } + + /** Get whether this has any Command children + * + * \return Whether or not there are any Command children + */ + virtual bool HasCommand() const override + { + return std::any_of(Children().begin(), Children().end(), [](Base *child) { return child->HasCommand(); }); + } + + /** Count the number of matched children this group has + */ + std::vector<Base *>::size_type MatchedChildren() const + { + // Cast to avoid warnings from -Wsign-conversion + return static_cast<std::vector<Base *>::size_type>( + std::count_if(std::begin(Children()), std::end(Children()), [](const Base *child){return child->Matched();})); + } + + /** Whether or not this group matches validation + */ + virtual bool Matched() const noexcept override + { + return validator(*this); + } + + /** Get validation + */ + bool Get() const + { + return Matched(); + } + + /** Get all the child descriptions for help generation + */ + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &params, const unsigned int indent) const override + { + std::vector<std::tuple<std::string, std::string, unsigned int>> descriptions; + + // Push that group description on the back if not empty + unsigned addindent = 0; + if (!help.empty()) + { + descriptions.emplace_back(help, "", indent); + addindent = 1; + } + + for 
(Base *child: Children()) + { + if ((child->GetOptions() & Options::HiddenFromDescription) != Options::None) + { + continue; + } + + auto groupDescriptions = child->GetDescription(params, indent + addindent); + descriptions.insert( + std::end(descriptions), + std::make_move_iterator(std::begin(groupDescriptions)), + std::make_move_iterator(std::end(groupDescriptions))); + } + return descriptions; + } + + /** Get the names of positional parameters + */ + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + std::vector <std::string> names; + for (Base *child: Children()) + { + if ((child->GetOptions() & Options::HiddenFromUsage) != Options::None) + { + continue; + } + + auto groupNames = child->GetProgramLine(params); + names.insert( + std::end(names), + std::make_move_iterator(std::begin(groupNames)), + std::make_move_iterator(std::end(groupNames))); + } + return names; + } + + virtual std::vector<Command*> GetCommands() override + { + std::vector<Command*> res; + for (const auto &child : Children()) + { + auto subparsers = child->GetCommands(); + res.insert(std::end(res), std::begin(subparsers), std::end(subparsers)); + } + return res; + } + + virtual bool IsGroup() const override + { + return true; + } + + virtual void Reset() noexcept override + { + Base::Reset(); + + for (auto &child: Children()) + { + child->Reset(); + } +#ifdef ARGS_NOEXCEPT + error = Error::None; + errorMsg.clear(); +#endif + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const override + { + if (error != Error::None) + { + return error; + } + + auto it = std::find_if(Children().begin(), Children().end(), [](const Base *child){return child->GetError() != Error::None;}); + if (it == Children().end()) + { + return Error::None; + } else + { + return (*it)->GetError(); + } + } +#endif + + }; + + /** Class for using global options in ArgumentParser. 
+ */ + class GlobalOptions : public Group + { + public: + // Wraps options_ in a group marked Options::Global, so its flags remain matchable even while a subcommand is selected (see Command::Match). + GlobalOptions(Group &base, Base &options_) : Group(base, {}, Group::Validators::DontCare, Options::Global) + { + Add(options_); + } + }; + + /** Utility class for building subparsers with coroutines/callbacks. + * + * Brief example: + * \code + * Command command(argumentParser, "command", "my command", [](args::Subparser &s) + * { + * // your command flags/positionals + * s.Parse(); //required + * //your command code + * }); + * \endcode + * + * For ARGS_NOEXCEPT mode don't forget to check `s.GetError()` after `s.Parse()` + * and return if it isn't equal to args::Error::None. + * + * \sa Command + */ + class Subparser : public Group + { + private: + std::vector<std::string> args; // remaining command-line arguments handed to this subparser + std::vector<std::string> kicked; // arguments kicked out during Parse (see Base::KickOut) + ArgumentParser *parser = nullptr; // stays nullptr when constructed for help generation only + const HelpParams &helpParams; + const Command &command; + bool isParsed = false; // set once Parse() has been called + + public: + Subparser(std::vector<std::string> args_, ArgumentParser &parser_, const Command &command_, const HelpParams &helpParams_) + : Group({}, Validators::AllChildGroups), args(std::move(args_)), parser(&parser_), helpParams(helpParams_), command(command_) + { + } + + // Help-generation constructor: no argument list and no parser are attached (parser stays nullptr). + Subparser(const Command &command_, const HelpParams &helpParams_) : Group({}, Validators::AllChildGroups), helpParams(helpParams_), command(command_) + { + } + + Subparser(const Subparser&) = delete; + Subparser(Subparser&&) = delete; + Subparser &operator = (const Subparser&) = delete; + Subparser &operator = (Subparser&&) = delete; + + const Command &GetCommand() + { + return command; + } + + /** (INTERNAL) Determines whether Parse was called or not. + */ + bool IsParsed() const + { + return isParsed; + } + + /** Continue parsing arguments for new command. + */ + void Parse(); // only declared here; defined out of line + + /** Returns a vector of kicked out arguments. + * + * \sa Base::KickOut + */ + const std::vector<std::string> &KickedOut() const noexcept + { + return kicked; + } + }; + + /** Main class for building subparsers. 
+ * + * /sa Subparser + */ + class Command : public Group + { + private: + friend class Subparser; + + std::string name; + std::string help; + std::string description; + std::string epilog; + std::string proglinePostfix; + + std::function<void(Subparser&)> parserCoroutine; + bool commandIsRequired = true; + Command *selectedCommand = nullptr; + + mutable std::vector<std::tuple<std::string, std::string, unsigned>> subparserDescription; + mutable std::vector<std::string> subparserProgramLine; + mutable bool subparserHasFlag = false; + mutable bool subparserHasPositional = false; + mutable bool subparserHasCommand = false; +#ifdef ARGS_NOEXCEPT + mutable Error subparserError = Error::None; +#endif + mutable Subparser *subparser = nullptr; + + protected: + + class RaiiSubparser + { + public: + RaiiSubparser(ArgumentParser &parser_, std::vector<std::string> args_); + RaiiSubparser(const Command &command_, const HelpParams &params_); + + ~RaiiSubparser() + { + command.subparser = oldSubparser; + } + + Subparser &Parser() + { + return parser; + } + + private: + const Command &command; + Subparser parser; + Subparser *oldSubparser; + }; + + Command() = default; + + std::function<void(Subparser&)> &GetCoroutine() + { + return selectedCommand != nullptr ? 
selectedCommand->GetCoroutine() : parserCoroutine; + } + + Command &SelectedCommand() + { + Command *res = this; + while (res->selectedCommand != nullptr) + { + res = res->selectedCommand; + } + + return *res; + } + + const Command &SelectedCommand() const + { + const Command *res = this; + while (res->selectedCommand != nullptr) + { + res = res->selectedCommand; + } + + return *res; + } + + void UpdateSubparserHelp(const HelpParams &params) const + { + if (parserCoroutine) + { + RaiiSubparser coro(*this, params); +#ifndef ARGS_NOEXCEPT + try + { + parserCoroutine(coro.Parser()); + } + catch (args::SubparserError&) + { + } +#else + parserCoroutine(coro.Parser()); +#endif + } + } + + public: + Command(Group &base_, std::string name_, std::string help_, std::function<void(Subparser&)> coroutine_ = {}) + : name(std::move(name_)), help(std::move(help_)), parserCoroutine(std::move(coroutine_)) + { + base_.Add(*this); + } + + /** The description that appears on the prog line after options + */ + const std::string &ProglinePostfix() const + { return proglinePostfix; } + + /** The description that appears on the prog line after options + */ + void ProglinePostfix(const std::string &proglinePostfix_) + { this->proglinePostfix = proglinePostfix_; } + + /** The description that appears above options + */ + const std::string &Description() const + { return description; } + /** The description that appears above options + */ + + void Description(const std::string &description_) + { this->description = description_; } + + /** The description that appears below options + */ + const std::string &Epilog() const + { return epilog; } + + /** The description that appears below options + */ + void Epilog(const std::string &epilog_) + { this->epilog = epilog_; } + + /** The name of command + */ + const std::string &Name() const + { return name; } + + /** The description of command + */ + const std::string &Help() const + { return help; } + + /** If value is true, parser will fail if no 
command was parsed. + * + * Default: true. + */ + void RequireCommand(bool value) + { commandIsRequired = value; } + + virtual bool IsGroup() const override + { return false; } + + virtual bool Matched() const noexcept override + { return Base::Matched(); } + + operator bool() const noexcept + { return Matched(); } + + void Match() noexcept + { matched = true; } + + void SelectCommand(Command *c) noexcept + { + selectedCommand = c; + + if (c != nullptr) + { + c->Match(); + } + } + + virtual FlagBase *Match(const EitherFlag &flag) override + { + if (selectedCommand != nullptr) + { + if (auto *res = selectedCommand->Match(flag)) + { + return res; + } + + for (auto *child: Children()) + { + if ((child->GetOptions() & Options::Global) != Options::None) + { + if (auto *res = child->Match(flag)) + { + return res; + } + } + } + + return nullptr; + } + + if (subparser != nullptr) + { + return subparser->Match(flag); + } + + return Matched() ? Group::Match(flag) : nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() override + { + std::vector<FlagBase*> res; + + if (!Matched()) + { + return res; + } + + for (auto *child: Children()) + { + if (selectedCommand == nullptr || (child->GetOptions() & Options::Global) != Options::None) + { + auto childFlags = child->GetAllFlags(); + res.insert(res.end(), childFlags.begin(), childFlags.end()); + } + } + + if (selectedCommand != nullptr) + { + auto childFlags = selectedCommand->GetAllFlags(); + res.insert(res.end(), childFlags.begin(), childFlags.end()); + } + + if (subparser != nullptr) + { + auto childFlags = subparser->GetAllFlags(); + res.insert(res.end(), childFlags.begin(), childFlags.end()); + } + + return res; + } + + virtual PositionalBase *GetNextPositional() override + { + if (selectedCommand != nullptr) + { + if (auto *res = selectedCommand->GetNextPositional()) + { + return res; + } + + for (auto *child: Children()) + { + if ((child->GetOptions() & Options::Global) != Options::None) + { + if (auto *res = 
child->GetNextPositional()) + { + return res; + } + } + } + + return nullptr; + } + + if (subparser != nullptr) + { + return subparser->GetNextPositional(); + } + + return Matched() ? Group::GetNextPositional() : nullptr; + } + + virtual bool HasFlag() const override + { + return subparserHasFlag || Group::HasFlag(); + } + + virtual bool HasPositional() const override + { + return subparserHasPositional || Group::HasPositional(); + } + + virtual bool HasCommand() const override + { + return true; + } + + std::vector<std::string> GetCommandProgramLine(const HelpParams &params) const + { + UpdateSubparserHelp(params); + + auto res = Group::GetProgramLine(params); + res.insert(res.end(), subparserProgramLine.begin(), subparserProgramLine.end()); + + if (!params.proglineCommand.empty() && (Group::HasCommand() || subparserHasCommand)) + { + res.insert(res.begin(), commandIsRequired ? params.proglineCommand : "[" + params.proglineCommand + "]"); + } + + if (!Name().empty()) + { + res.insert(res.begin(), Name()); + } + + if ((subparserHasFlag || Group::HasFlag()) && params.showProglineOptions && !params.proglineShowFlags) + { + res.push_back(params.proglineOptions); + } + + if (!ProglinePostfix().empty()) + { + std::string line; + for (char c : ProglinePostfix()) + { + if (isspace(c)) + { + if (!line.empty()) + { + res.push_back(line); + line.clear(); + } + + if (c == '\n') + { + res.push_back("\n"); + } + } + else + { + line += c; + } + } + + if (!line.empty()) + { + res.push_back(line); + } + } + + return res; + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + if (!Matched()) + { + return {}; + } + + return GetCommandProgramLine(params); + } + + virtual std::vector<Command*> GetCommands() override + { + if (selectedCommand != nullptr) + { + return selectedCommand->GetCommands(); + } + + if (Matched()) + { + return Group::GetCommands(); + } + + return { this }; + } + + virtual std::vector<std::tuple<std::string, 
std::string, unsigned>> GetDescription(const HelpParams &params, const unsigned int indent) const override + { + std::vector<std::tuple<std::string, std::string, unsigned>> descriptions; + unsigned addindent = 0; + + UpdateSubparserHelp(params); + + if (!Matched()) + { + if (params.showCommandFullHelp) + { + std::ostringstream s; + bool empty = true; + for (const auto &progline: GetCommandProgramLine(params)) + { + if (!empty) + { + s << ' '; + } + else + { + empty = false; + } + + s << progline; + } + + descriptions.emplace_back(s.str(), "", indent); + } + else + { + descriptions.emplace_back(Name(), help, indent); + } + + if (!params.showCommandChildren && !params.showCommandFullHelp) + { + return descriptions; + } + + addindent = 1; + } + + if (params.showCommandFullHelp && !Matched()) + { + descriptions.emplace_back("", "", indent + addindent); + descriptions.emplace_back(Description().empty() ? Help() : Description(), "", indent + addindent); + descriptions.emplace_back("", "", indent + addindent); + } + + for (Base *child: Children()) + { + if ((child->GetOptions() & Options::HiddenFromDescription) != Options::None) + { + continue; + } + + auto groupDescriptions = child->GetDescription(params, indent + addindent); + descriptions.insert( + std::end(descriptions), + std::make_move_iterator(std::begin(groupDescriptions)), + std::make_move_iterator(std::end(groupDescriptions))); + } + + for (auto childDescription: subparserDescription) + { + std::get<2>(childDescription) += indent + addindent; + descriptions.push_back(std::move(childDescription)); + } + + if (params.showCommandFullHelp && !Matched()) + { + descriptions.emplace_back("", "", indent + addindent); + if (!Epilog().empty()) + { + descriptions.emplace_back(Epilog(), "", indent + addindent); + descriptions.emplace_back("", "", indent + addindent); + } + } + + return descriptions; + } + + virtual void Validate(const std::string &shortprefix, const std::string &longprefix) const override + { + if 
(!Matched()) + { + return; + } + + auto onValidationError = [&] + { + std::ostringstream problem; + problem << "Group validation failed somewhere!"; +#ifdef ARGS_NOEXCEPT + error = Error::Validation; + errorMsg = problem.str(); +#else + throw ValidationError(problem.str()); +#endif + }; + + for (Base *child: Children()) + { + if (child->IsGroup() && !child->Matched()) + { + onValidationError(); + } + + child->Validate(shortprefix, longprefix); + } + + if (subparser != nullptr) + { + subparser->Validate(shortprefix, longprefix); + if (!subparser->Matched()) + { + onValidationError(); + } + } + + if (selectedCommand == nullptr && commandIsRequired && (Group::HasCommand() || subparserHasCommand)) + { + std::ostringstream problem; + problem << "Command is required"; +#ifdef ARGS_NOEXCEPT + error = Error::Validation; + errorMsg = problem.str(); +#else + throw ValidationError(problem.str()); +#endif + } + } + + virtual void Reset() noexcept override + { + Group::Reset(); + selectedCommand = nullptr; + subparserProgramLine.clear(); + subparserDescription.clear(); + subparserHasFlag = false; + subparserHasPositional = false; + subparserHasCommand = false; +#ifdef ARGS_NOEXCEPT + subparserError = Error::None; +#endif + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const override + { + if (!Matched()) + { + return Error::None; + } + + if (error != Error::None) + { + return error; + } + + if (subparserError != Error::None) + { + return subparserError; + } + + return Group::GetError(); + } +#endif + }; + + /** The main user facing command line argument parser class + */ + class ArgumentParser : public Command + { + friend class Subparser; + + private: + std::string longprefix; + std::string shortprefix; + + std::string longseparator; + + std::string terminator; + + bool allowJoinedShortValue = true; + bool allowJoinedLongValue = true; + bool allowSeparateShortValue = true; + bool allowSeparateLongValue = true; + + CompletionFlag *completion 
= nullptr; + bool readCompletion = false; + + protected: + enum class OptionType + { + LongFlag, + ShortFlag, + Positional + }; + + OptionType ParseOption(const std::string &s, bool allowEmpty = false) + { + if (s.find(longprefix) == 0 && (allowEmpty || s.length() > longprefix.length())) + { + return OptionType::LongFlag; + } + + if (s.find(shortprefix) == 0 && (allowEmpty || s.length() > shortprefix.length())) + { + return OptionType::ShortFlag; + } + + return OptionType::Positional; + } + + template <typename It> + bool Complete(FlagBase &flag, It it, It end) + { + auto nextIt = it; + if (!readCompletion || (++nextIt != end)) + { + return false; + } + + const auto &chunk = *it; + for (auto &choice : flag.HelpChoices(helpParams)) + { + AddCompletionReply(chunk, choice); + } + +#ifndef ARGS_NOEXCEPT + throw Completion(completion->Get()); +#else + return true; +#endif + } + + /** (INTERNAL) Parse flag's values + * + * \param arg The string to display in error message as a flag name + * \param[in, out] it The iterator to first value. It will point to the last value + * \param end The end iterator + * \param joinedArg Joined value (e.g. 
bar in --foo=bar) + * \param canDiscardJoined If true joined value can be parsed as flag not as a value (as in -abcd) + * \param[out] values The vector to store parsed arg's values + */ + template <typename It> + std::string ParseArgsValues(FlagBase &flag, const std::string &arg, It &it, It end, + const bool allowSeparate, const bool allowJoined, + const bool hasJoined, const std::string &joinedArg, + const bool canDiscardJoined, std::vector<std::string> &values) + { + values.clear(); + + Nargs nargs = flag.NumberOfArguments(); + + if (hasJoined && !allowJoined && nargs.min != 0) + { + return "Flag '" + arg + "' was passed a joined argument, but these are disallowed"; + } + + if (hasJoined) + { + if (!canDiscardJoined || nargs.max != 0) + { + values.push_back(joinedArg); + } + } else if (!allowSeparate) + { + if (nargs.min != 0) + { + return "Flag '" + arg + "' was passed a separate argument, but these are disallowed"; + } + } else + { + auto valueIt = it; + ++valueIt; + + while (valueIt != end && + values.size() < nargs.max && + (nargs.min == nargs.max || ParseOption(*valueIt) == OptionType::Positional)) + { + if (Complete(flag, valueIt, end)) + { + it = end; + return ""; + } + + values.push_back(*valueIt); + ++it; + ++valueIt; + } + } + + if (values.size() > nargs.max) + { + return "Passed an argument into a non-argument flag: " + arg; + } else if (values.size() < nargs.min) + { + if (nargs.min == 1 && nargs.max == 1) + { + return "Flag '" + arg + "' requires an argument but received none"; + } else if (nargs.min == 1) + { + return "Flag '" + arg + "' requires at least one argument but received none"; + } else if (nargs.min != nargs.max) + { + return "Flag '" + arg + "' requires at least " + std::to_string(nargs.min) + + " arguments but received " + std::to_string(values.size()); + } else + { + return "Flag '" + arg + "' requires " + std::to_string(nargs.min) + + " arguments but received " + std::to_string(values.size()); + } + } + + return {}; + } + + template 
<typename It> + bool ParseLong(It &it, It end) + { + const auto &chunk = *it; + const auto argchunk = chunk.substr(longprefix.size()); + // Try to separate it, in case of a separator: + const auto separator = longseparator.empty() ? argchunk.npos : argchunk.find(longseparator); + // If the separator is in the argument, separate it. + const auto arg = (separator != argchunk.npos ? + std::string(argchunk, 0, separator) + : argchunk); + const auto joined = (separator != argchunk.npos ? + argchunk.substr(separator + longseparator.size()) + : std::string()); + + if (auto flag = Match(arg)) + { + std::vector<std::string> values; + const std::string errorMessage = ParseArgsValues(*flag, arg, it, end, allowSeparateLongValue, allowJoinedLongValue, + separator != argchunk.npos, joined, false, values); + if (!errorMessage.empty()) + { +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + + if (!readCompletion) + { + flag->ParseValue(values); + } + + if (flag->KickOut()) + { + ++it; + return false; + } + } else + { + const std::string errorMessage("Flag could not be matched: " + arg); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + + return true; + } + + template <typename It> + bool ParseShort(It &it, It end) + { + const auto &chunk = *it; + const auto argchunk = chunk.substr(shortprefix.size()); + for (auto argit = std::begin(argchunk); argit != std::end(argchunk); ++argit) + { + const auto arg = *argit; + + if (auto flag = Match(arg)) + { + const std::string value(argit + 1, std::end(argchunk)); + std::vector<std::string> values; + const std::string errorMessage = ParseArgsValues(*flag, std::string(1, arg), it, end, + allowSeparateShortValue, allowJoinedShortValue, + !value.empty(), value, !value.empty(), values); + + if (!errorMessage.empty()) + { +#ifndef ARGS_NOEXCEPT + throw 
ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + + if (!readCompletion) + { + flag->ParseValue(values); + } + + if (flag->KickOut()) + { + ++it; + return false; + } + + if (!values.empty()) + { + break; + } + } else + { + const std::string errorMessage("Flag could not be matched: '" + std::string(1, arg) + "'"); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + } + + return true; + } + + bool AddCompletionReply(const std::string &cur, const std::string &choice) + { + if (cur.empty() || choice.find(cur) == 0) + { + if (completion->syntax == "bash" && ParseOption(choice) == OptionType::LongFlag && choice.find(longseparator) != std::string::npos) + { + completion->reply.push_back(choice.substr(choice.find(longseparator) + 1)); + } else + { + completion->reply.push_back(choice); + } + return true; + } + + return false; + } + + template <typename It> + bool Complete(It it, It end) + { + auto nextIt = it; + if (!readCompletion || (++nextIt != end)) + { + return false; + } + + const auto &chunk = *it; + auto pos = GetNextPositional(); + std::vector<Command *> commands = GetCommands(); + const auto optionType = ParseOption(chunk, true); + + if (!commands.empty() && (chunk.empty() || optionType == OptionType::Positional)) + { + for (auto &cmd : commands) + { + if ((cmd->GetOptions() & Options::HiddenFromCompletion) == Options::None) + { + AddCompletionReply(chunk, cmd->Name()); + } + } + } else + { + bool hasPositionalCompletion = true; + + if (!commands.empty()) + { + for (auto &cmd : commands) + { + if ((cmd->GetOptions() & Options::HiddenFromCompletion) == Options::None) + { + AddCompletionReply(chunk, cmd->Name()); + } + } + } else if (pos) + { + if ((pos->GetOptions() & Options::HiddenFromCompletion) == Options::None) + { + auto choices = pos->HelpChoices(helpParams); + hasPositionalCompletion = !choices.empty() 
|| optionType != OptionType::Positional; + for (auto &choice : choices) + { + AddCompletionReply(chunk, choice); + } + } + } + + if (hasPositionalCompletion) + { + auto flags = GetAllFlags(); + for (auto flag : flags) + { + if ((flag->GetOptions() & Options::HiddenFromCompletion) != Options::None) + { + continue; + } + + auto &matcher = flag->GetMatcher(); + if (!AddCompletionReply(chunk, matcher.GetShortOrAny().str(shortprefix, longprefix))) + { + for (auto &flagName : matcher.GetFlagStrings()) + { + if (AddCompletionReply(chunk, flagName.str(shortprefix, longprefix))) + { + break; + } + } + } + } + + if (optionType == OptionType::LongFlag && allowJoinedLongValue) + { + const auto separator = longseparator.empty() ? chunk.npos : chunk.find(longseparator); + if (separator != chunk.npos) + { + std::string arg(chunk, 0, separator); + if (auto flag = this->Match(arg.substr(longprefix.size()))) + { + for (auto &choice : flag->HelpChoices(helpParams)) + { + AddCompletionReply(chunk, arg + longseparator + choice); + } + } + } + } else if (optionType == OptionType::ShortFlag && allowJoinedShortValue) + { + if (chunk.size() > shortprefix.size() + 1) + { + auto arg = chunk.at(shortprefix.size()); + //TODO: support -abcVALUE where a and b take no value + if (auto flag = this->Match(arg)) + { + for (auto &choice : flag->HelpChoices(helpParams)) + { + AddCompletionReply(chunk, shortprefix + arg + choice); + } + } + } + } + } + } + +#ifndef ARGS_NOEXCEPT + throw Completion(completion->Get()); +#else + return true; +#endif + } + + template <typename It> + It Parse(It begin, It end) + { + bool terminated = false; + std::vector<Command *> commands = GetCommands(); + + // Check all arg chunks + for (auto it = begin; it != end; ++it) + { + if (Complete(it, end)) + { + return end; + } + + const auto &chunk = *it; + + if (!terminated && chunk == terminator) + { + terminated = true; + } else if (!terminated && ParseOption(chunk) == OptionType::LongFlag) + { + if (!ParseLong(it, end)) + 
{ + return it; + } + } else if (!terminated && ParseOption(chunk) == OptionType::ShortFlag) + { + if (!ParseShort(it, end)) + { + return it; + } + } else if (!terminated && !commands.empty()) + { + auto itCommand = std::find_if(commands.begin(), commands.end(), [&chunk](Command *c) { return c->Name() == chunk; }); + if (itCommand == commands.end()) + { + const std::string errorMessage("Unknown command: " + chunk); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return it; +#endif + } + + SelectCommand(*itCommand); + + if (const auto &coroutine = GetCoroutine()) + { + ++it; + RaiiSubparser coro(*this, std::vector<std::string>(it, end)); + coroutine(coro.Parser()); +#ifdef ARGS_NOEXCEPT + error = GetError(); + if (error != Error::None) + { + return end; + } + + if (!coro.Parser().IsParsed()) + { + error = Error::Usage; + return end; + } +#else + if (!coro.Parser().IsParsed()) + { + throw UsageError("Subparser::Parse was not called"); + } +#endif + + break; + } + + commands = GetCommands(); + } else + { + auto pos = GetNextPositional(); + if (pos) + { + pos->ParseValue(chunk); + + if (pos->KickOut()) + { + return ++it; + } + } else + { + const std::string errorMessage("Passed in argument, but no positional arguments were ready to receive it: " + chunk); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return it; +#endif + } + } + + if (!readCompletion && completion != nullptr && completion->Matched()) + { +#ifdef ARGS_NOEXCEPT + error = Error::Completion; +#endif + readCompletion = true; + ++it; + const auto argsLeft = static_cast<size_t>(std::distance(it, end)); + if (completion->cword == 0 || argsLeft <= 1 || completion->cword >= argsLeft) + { +#ifndef ARGS_NOEXCEPT + throw Completion(""); +#endif + } + + std::vector<std::string> curArgs(++it, end); + curArgs.resize(completion->cword); + + if (completion->syntax == "bash") + { + // 
bash tokenizes --flag=value as --flag=value + for (size_t idx = 0; idx < curArgs.size(); ) + { + if (idx > 0 && curArgs[idx] == "=") + { + curArgs[idx - 1] += "="; + // Avoid warnings from -Wsign-conversion + const auto signedIdx = static_cast<std::ptrdiff_t>(idx); + if (idx + 1 < curArgs.size()) + { + curArgs[idx - 1] += curArgs[idx + 1]; + curArgs.erase(curArgs.begin() + signedIdx, curArgs.begin() + signedIdx + 2); + } else + { + curArgs.erase(curArgs.begin() + signedIdx); + } + } else + { + ++idx; + } + } + + } +#ifndef ARGS_NOEXCEPT + try + { + Parse(curArgs.begin(), curArgs.end()); + throw Completion(""); + } + catch (Completion &) + { + throw; + } + catch (args::Error&) + { + throw Completion(""); + } +#else + return Parse(curArgs.begin(), curArgs.end()); +#endif + } + } + + Validate(shortprefix, longprefix); + return end; + } + + public: + HelpParams helpParams; + + ArgumentParser(const std::string &description_, const std::string &epilog_ = std::string()) + { + Description(description_); + Epilog(epilog_); + LongPrefix("--"); + ShortPrefix("-"); + LongSeparator("="); + Terminator("--"); + SetArgumentSeparations(true, true, true, true); + matched = true; + } + + void AddCompletion(CompletionFlag &completionFlag) + { + completion = &completionFlag; + Add(completionFlag); + } + + /** The program name for help generation + */ + const std::string &Prog() const + { return helpParams.programName; } + /** The program name for help generation + */ + void Prog(const std::string &prog_) + { this->helpParams.programName = prog_; } + + /** The prefix for long flags + */ + const std::string &LongPrefix() const + { return longprefix; } + /** The prefix for long flags + */ + void LongPrefix(const std::string &longprefix_) + { + this->longprefix = longprefix_; + this->helpParams.longPrefix = longprefix_; + } + + /** The prefix for short flags + */ + const std::string &ShortPrefix() const + { return shortprefix; } + /** The prefix for short flags + */ + void 
ShortPrefix(const std::string &shortprefix_) + { + this->shortprefix = shortprefix_; + this->helpParams.shortPrefix = shortprefix_; + } + + /** The separator for long flags + */ + const std::string &LongSeparator() const + { return longseparator; } + /** The separator for long flags + */ + void LongSeparator(const std::string &longseparator_) + { + if (longseparator_.empty()) + { + const std::string errorMessage("longseparator can not be set to empty"); +#ifdef ARGS_NOEXCEPT + error = Error::Usage; + errorMsg = errorMessage; +#else + throw UsageError(errorMessage); +#endif + } else + { + this->longseparator = longseparator_; + this->helpParams.longSeparator = allowJoinedLongValue ? longseparator_ : " "; + } + } + + /** The terminator that forcibly separates flags from positionals + */ + const std::string &Terminator() const + { return terminator; } + /** The terminator that forcibly separates flags from positionals + */ + void Terminator(const std::string &terminator_) + { this->terminator = terminator_; } + + /** Get the current argument separation parameters. + * + * See SetArgumentSeparations for details on what each one means. + */ + void GetArgumentSeparations( + bool &allowJoinedShortValue_, + bool &allowJoinedLongValue_, + bool &allowSeparateShortValue_, + bool &allowSeparateLongValue_) const + { + allowJoinedShortValue_ = this->allowJoinedShortValue; + allowJoinedLongValue_ = this->allowJoinedLongValue; + allowSeparateShortValue_ = this->allowSeparateShortValue; + allowSeparateLongValue_ = this->allowSeparateLongValue; + } + + /** Change allowed option separation. + * + * \param allowJoinedShortValue_ Allow a short flag that accepts an argument to be passed its argument immediately next to it (ie. in the same argv field) + * \param allowJoinedLongValue_ Allow a long flag that accepts an argument to be passed its argument separated by the longseparator (ie. 
in the same argv field) + * \param allowSeparateShortValue_ Allow a short flag that accepts an argument to be passed its argument separated by whitespace (ie. in the next argv field) + * \param allowSeparateLongValue_ Allow a long flag that accepts an argument to be passed its argument separated by whitespace (ie. in the next argv field) + */ + void SetArgumentSeparations( + const bool allowJoinedShortValue_, + const bool allowJoinedLongValue_, + const bool allowSeparateShortValue_, + const bool allowSeparateLongValue_) + { + this->allowJoinedShortValue = allowJoinedShortValue_; + this->allowJoinedLongValue = allowJoinedLongValue_; + this->allowSeparateShortValue = allowSeparateShortValue_; + this->allowSeparateLongValue = allowSeparateLongValue_; + + this->helpParams.longSeparator = allowJoinedLongValue ? longseparator : " "; + this->helpParams.shortSeparator = allowJoinedShortValue ? "" : " "; + } + + /** Pass the help menu into an ostream + */ + void Help(std::ostream &help_) const + { + auto &command = SelectedCommand(); + const auto &commandDescription = command.Description().empty() ? 
command.Help() : command.Description(); + const auto description_text = Wrap(commandDescription, helpParams.width - helpParams.descriptionindent); + const auto epilog_text = Wrap(command.Epilog(), helpParams.width - helpParams.descriptionindent); + + const bool hasoptions = command.HasFlag(); + const bool hasarguments = command.HasPositional(); + + std::vector<std::string> prognameline; + prognameline.push_back(helpParams.usageString); + prognameline.push_back(Prog()); + auto commandProgLine = command.GetProgramLine(helpParams); + prognameline.insert(prognameline.end(), commandProgLine.begin(), commandProgLine.end()); + + const auto proglines = Wrap(prognameline.begin(), prognameline.end(), + helpParams.width - (helpParams.progindent + helpParams.progtailindent), + helpParams.width - helpParams.progindent); + auto progit = std::begin(proglines); + if (progit != std::end(proglines)) + { + help_ << std::string(helpParams.progindent, ' ') << *progit << '\n'; + ++progit; + } + for (; progit != std::end(proglines); ++progit) + { + help_ << std::string(helpParams.progtailindent, ' ') << *progit << '\n'; + } + + help_ << '\n'; + + if (!description_text.empty()) + { + for (const auto &line: description_text) + { + help_ << std::string(helpParams.descriptionindent, ' ') << line << "\n"; + } + help_ << "\n"; + } + + bool lastDescriptionIsNewline = false; + + if (!helpParams.optionsString.empty()) + { + help_ << std::string(helpParams.progindent, ' ') << helpParams.optionsString << "\n\n"; + } + + for (const auto &desc: command.GetDescription(helpParams, 0)) + { + lastDescriptionIsNewline = std::get<0>(desc).empty() && std::get<1>(desc).empty(); + const auto groupindent = std::get<2>(desc) * helpParams.eachgroupindent; + const auto flags = Wrap(std::get<0>(desc), helpParams.width - (helpParams.flagindent + helpParams.helpindent + helpParams.gutter)); + const auto info = Wrap(std::get<1>(desc), helpParams.width - (helpParams.helpindent + groupindent)); + + 
std::string::size_type flagssize = 0; + for (auto flagsit = std::begin(flags); flagsit != std::end(flags); ++flagsit) + { + if (flagsit != std::begin(flags)) + { + help_ << '\n'; + } + help_ << std::string(groupindent + helpParams.flagindent, ' ') << *flagsit; + flagssize = Glyphs(*flagsit); + } + + auto infoit = std::begin(info); + // groupindent is on both sides of this inequality, and therefore can be removed + if ((helpParams.flagindent + flagssize + helpParams.gutter) > helpParams.helpindent || infoit == std::end(info) || helpParams.addNewlineBeforeDescription) + { + help_ << '\n'; + } else + { + // groupindent is on both sides of the minus sign, and therefore doesn't actually need to be in here + help_ << std::string(helpParams.helpindent - (helpParams.flagindent + flagssize), ' ') << *infoit << '\n'; + ++infoit; + } + for (; infoit != std::end(info); ++infoit) + { + help_ << std::string(groupindent + helpParams.helpindent, ' ') << *infoit << '\n'; + } + } + if (hasoptions && hasarguments && helpParams.showTerminator) + { + lastDescriptionIsNewline = false; + for (const auto &item: Wrap(std::string("\"") + terminator + "\" can be used to terminate flag options and force all following arguments to be treated as positional options", helpParams.width - helpParams.flagindent)) + { + help_ << std::string(helpParams.flagindent, ' ') << item << '\n'; + } + } + + if (!lastDescriptionIsNewline) + { + help_ << "\n"; + } + + for (const auto &line: epilog_text) + { + help_ << std::string(helpParams.descriptionindent, ' ') << line << "\n"; + } + } + + /** Generate a help menu as a string. + * + * \return the help text as a single string + */ + std::string Help() const + { + std::ostringstream help_; + Help(help_); + return help_.str(); + } + + virtual void Reset() noexcept override + { + Command::Reset(); + matched = true; + readCompletion = false; + } + + /** Parse all arguments. 
+ * + * \param begin an iterator to the beginning of the argument list + * \param end an iterator to the past-the-end element of the argument list + * \return the iterator after the last parsed value. Only useful for kick-out + */ + template <typename It> + It ParseArgs(It begin, It end) + { + // Reset all Matched statuses and errors + Reset(); +#ifdef ARGS_NOEXCEPT + error = GetError(); + if (error != Error::None) + { + return end; + } +#endif + return Parse(begin, end); + } + + /** Parse all arguments. + * + * \param args an iterable of the arguments + * \return the iterator after the last parsed value. Only useful for kick-out + */ + template <typename T> + auto ParseArgs(const T &args) -> decltype(std::begin(args)) + { + return ParseArgs(std::begin(args), std::end(args)); + } + + /** Convenience function to parse the CLI from argc and argv + * + * Just assigns the program name and vectorizes arguments for passing into ParseArgs() + * + * \return whether or not all arguments were parsed. This works for detecting kick-out, but is generally useless as it can't do anything with it. 
+ */ + bool ParseCLI(const int argc, const char * const * argv) + { + if (Prog().empty()) + { + Prog(argv[0]); + } + const std::vector<std::string> args(argv + 1, argv + argc); + return ParseArgs(args) == std::end(args); + } + + template <typename T> + bool ParseCLI(const T &args) + { + return ParseArgs(args) == std::end(args); + } + }; + + inline Command::RaiiSubparser::RaiiSubparser(ArgumentParser &parser_, std::vector<std::string> args_) + : command(parser_.SelectedCommand()), parser(std::move(args_), parser_, command, parser_.helpParams), oldSubparser(command.subparser) + { + command.subparser = &parser; + } + + inline Command::RaiiSubparser::RaiiSubparser(const Command &command_, const HelpParams &params_): command(command_), parser(command, params_), oldSubparser(command.subparser) + { + command.subparser = &parser; + } + + inline void Subparser::Parse() + { + isParsed = true; + Reset(); + command.subparserDescription = GetDescription(helpParams, 0); + command.subparserHasFlag = HasFlag(); + command.subparserHasPositional = HasPositional(); + command.subparserHasCommand = HasCommand(); + command.subparserProgramLine = GetProgramLine(helpParams); + if (parser == nullptr) + { +#ifndef ARGS_NOEXCEPT + throw args::SubparserError(); +#else + error = Error::Subparser; + return; +#endif + } + + auto it = parser->Parse(args.begin(), args.end()); + command.Validate(parser->ShortPrefix(), parser->LongPrefix()); + kicked.assign(it, args.end()); + +#ifdef ARGS_NOEXCEPT + command.subparserError = GetError(); +#endif + } + + inline std::ostream &operator<<(std::ostream &os, const ArgumentParser &parser) + { + parser.Help(os); + return os; + } + + /** Boolean argument matcher + */ + class Flag : public FlagBase + { + public: + Flag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_): FlagBase(name_, help_, std::move(matcher_), options_) + { + group_.Add(*this); + } + + Flag(Group &group_, const std::string &name_, const 
std::string &help_, Matcher &&matcher_, const bool extraError_ = false): Flag(group_, name_, help_, std::move(matcher_), extraError_ ? Options::Single : Options::None) + { + } + + virtual ~Flag() {} + + /** Get whether this was matched + */ + bool Get() const + { + return Matched(); + } + + virtual Nargs NumberOfArguments() const noexcept override + { + return 0; + } + + virtual void ParseValue(const std::vector<std::string>&) override + { + } + }; + + /** Help flag class + * + * Works like a regular flag, but throws an instance of Help when it is matched + */ + class HelpFlag : public Flag + { + public: + HelpFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_ = {}): Flag(group_, name_, help_, std::move(matcher_), options_) {} + + virtual ~HelpFlag() {} + + virtual void ParseValue(const std::vector<std::string> &) + { +#ifdef ARGS_NOEXCEPT + error = Error::Help; + errorMsg = Name(); +#else + throw Help(Name()); +#endif + } + + /** Get whether this was matched + */ + bool Get() const noexcept + { + return Matched(); + } + }; + + /** A flag class that simply counts the number of times it's matched + */ + class CounterFlag : public Flag + { + private: + const int startcount; + int count; + + public: + CounterFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const int startcount_ = 0, Options options_ = {}): + Flag(group_, name_, help_, std::move(matcher_), options_), startcount(startcount_), count(startcount_) {} + + virtual ~CounterFlag() {} + + virtual FlagBase *Match(const EitherFlag &arg) override + { + auto me = FlagBase::Match(arg); + if (me) + { + ++count; + } + return me; + } + + /** Get the count + */ + int &Get() noexcept + { + return count; + } + + virtual void Reset() noexcept override + { + FlagBase::Reset(); + count = startcount; + } + }; + + /** A flag class that calls a function when it's matched + */ + class ActionFlag : public FlagBase + { + private: + 
std::function<void(const std::vector<std::string> &)> action; + Nargs nargs; + + public: + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Nargs nargs_, std::function<void(const std::vector<std::string> &)> action_, Options options_ = {}): + FlagBase(name_, help_, std::move(matcher_), options_), action(std::move(action_)), nargs(nargs_) + { + group_.Add(*this); + } + + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void(const std::string &)> action_, Options options_ = {}): + FlagBase(name_, help_, std::move(matcher_), options_), nargs(1) + { + group_.Add(*this); + action = [action_](const std::vector<std::string> &a) { return action_(a.at(0)); }; + } + + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void()> action_, Options options_ = {}): + FlagBase(name_, help_, std::move(matcher_), options_), nargs(0) + { + group_.Add(*this); + action = [action_](const std::vector<std::string> &) { return action_(); }; + } + + virtual Nargs NumberOfArguments() const noexcept override + { return nargs; } + + virtual void ParseValue(const std::vector<std::string> &value) override + { action(value); } + }; + + /** A default Reader class for argument classes + * + * If destination type is assignable to std::string it uses an assignment to std::string. + * Otherwise ValueReader simply uses a std::istringstream to read into the destination type, and + * raises a ParseError if there are any characters left. 
+ */ + struct ValueReader + { + template <typename T> + typename std::enable_if<!std::is_assignable<T, std::string>::value, bool>::type + operator ()(const std::string &name, const std::string &value, T &destination) + { + std::istringstream ss(value); + bool failed = !(ss >> destination); + + if (!failed) + { + ss >> std::ws; + } + + if (ss.rdbuf()->in_avail() > 0 || failed) + { +#ifdef ARGS_NOEXCEPT + (void)name; + return false; +#else + std::ostringstream problem; + problem << "Argument '" << name << "' received invalid value type '" << value << "'"; + throw ParseError(problem.str()); +#endif + } + return true; + } + + template <typename T> + typename std::enable_if<std::is_assignable<T, std::string>::value, bool>::type + operator()(const std::string &, const std::string &value, T &destination) + { + destination = value; + return true; + } + }; + + /** An argument-accepting flag class + * + * \tparam T the type to extract the argument as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + typename Reader = ValueReader> + class ValueFlag : public ValueFlagBase + { + protected: + T value; + T defaultValue; + + virtual std::string GetDefaultString(const HelpParams&) const override + { + return detail::ToString(defaultValue); + } + + private: + Reader reader; + + public: + + ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_, Options options_): ValueFlagBase(name_, help_, std::move(matcher_), options_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_ = T(), const bool extraError_ = false): ValueFlag(group_, name_, help_, std::move(matcher_), defaultValue_, extraError_ ? 
Options::Single : Options::None) + { + } + + ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_): ValueFlag(group_, name_, help_, std::move(matcher_), T(), options_) + { + } + + virtual ~ValueFlag() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value_ = values_.at(0); + +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, this->value)) + { + error = Error::Parse; + } +#else + reader(name, value_, this->value); +#endif + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + value = defaultValue; + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + /** Get the default value + */ + const T &GetDefault() noexcept + { + return defaultValue; + } + }; + + /** An optional argument-accepting flag class + * + * \tparam T the type to extract the argument as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + typename Reader = ValueReader> + class ImplicitValueFlag : public ValueFlag<T, Reader> + { + protected: + T implicitValue; + + public: + + ImplicitValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &implicitValue_, const T &defaultValue_ = T(), Options options_ = {}) + : ValueFlag<T, Reader>(group_, name_, help_, std::move(matcher_), defaultValue_, options_), implicitValue(implicitValue_) + { + } + + ImplicitValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_ = T(), Options options_ = {}) + : ValueFlag<T, Reader>(group_, name_, help_, std::move(matcher_), defaultValue_, options_), implicitValue(defaultValue_) + { + } + + ImplicitValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, 
Options options_) + : ValueFlag<T, Reader>(group_, name_, help_, std::move(matcher_), {}, options_), implicitValue() + { + } + + virtual ~ImplicitValueFlag() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return {0, 1}; + } + + virtual void ParseValue(const std::vector<std::string> &value_) override + { + if (value_.empty()) + { + this->value = implicitValue; + } else + { + ValueFlag<T, Reader>::ParseValue(value_); + } + } + }; + + /** A variadic arguments accepting flag class + * + * \tparam T the type to extract the argument as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader> + class NargsValueFlag : public FlagBase + { + protected: + + List<T> values; + const List<T> defaultValues; + Nargs nargs; + Reader reader; + + public: + + typedef List<T> Container; + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + NargsValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Nargs nargs_, const List<T> &defaultValues_ = {}, Options options_ = {}) + : FlagBase(name_, help_, std::move(matcher_), options_), values(defaultValues_), 
defaultValues(defaultValues_),nargs(nargs_) + { + group_.Add(*this); + } + + virtual ~NargsValueFlag() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return nargs; + } + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + values.clear(); + + for (const std::string &value : values_) + { + T v; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value, v)) + { + error = Error::Parse; + } +#else + reader(name, value, v); +#endif + values.insert(std::end(values), v); + } + } + + List<T> &Get() noexcept + { + return values; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + + virtual void Reset() noexcept override + { + FlagBase::Reset(); + values = defaultValues; + } + + virtual FlagBase *Match(const EitherFlag &arg) override + { + const bool wasMatched = Matched(); + auto me = FlagBase::Match(arg); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + }; + + /** An argument-accepting flag class that pushes the found values into a list + * + * \tparam T the type to extract the argument as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader> + class ValueFlagList : public ValueFlagBase + { + private: + using Container = List<T>; + Container values; + const Container defaultValues; + Reader reader; + + public: + + typedef T value_type; + typedef 
typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + ValueFlagList(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Container &defaultValues_ = Container(), Options options_ = {}): + ValueFlagBase(name_, help_, std::move(matcher_), options_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + virtual ~ValueFlagList() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value_ = values_.at(0); + + T v; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, v)) + { + error = Error::Parse; + } +#else + reader(name, value_, v); +#endif + values.insert(std::end(values), v); + } + + /** Get the values + */ + Container &Get() noexcept + { + return values; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + values = defaultValues; + } + + virtual FlagBase *Match(const EitherFlag &arg) override + { + const bool wasMatched = Matched(); + auto me = FlagBase::Match(arg); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + 
const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; + + /** A mapping value flag class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapFlag : public ValueFlagBase + { + private: + const Map<K, T> map; + T value; + const T defaultValue; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + + MapFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, const T &defaultValue_, Options options_): ValueFlagBase(name_, help_, std::move(matcher_), options_), map(map_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + MapFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, const T &defaultValue_ = T(), const bool extraError_ = false): MapFlag(group_, name_, help_, std::move(matcher_), map_, defaultValue_, extraError_ ? 
Options::Single : Options::None) + { + } + + MapFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, Options options_): MapFlag(group_, name_, help_, std::move(matcher_), map_, T(), options_) + { + } + + virtual ~MapFlag() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value_ = values_.at(0); + + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, key)) + { + error = Error::Parse; + } +#else + reader(name, value_, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->value = it->second; + } + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + value = defaultValue; + } + }; + + /** A mapping value flag list class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. 
Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapFlagList : public ValueFlagBase + { + private: + using Container = List<T>; + const Map<K, T> map; + Container values; + const Container defaultValues; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + MapFlagList(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, const Container &defaultValues_ = Container()): ValueFlagBase(name_, help_, std::move(matcher_)), map(map_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + virtual ~MapFlagList() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value = values_.at(0); + + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value, key)) + { + error = Error::Parse; + } +#else + reader(name, value, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef 
ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->values.emplace_back(it->second); + } + } + + /** Get the value + */ + Container &Get() noexcept + { + return values; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + values = defaultValues; + } + + virtual FlagBase *Match(const EitherFlag &arg) override + { + const bool wasMatched = Matched(); + auto me = FlagBase::Match(arg); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; + + /** A positional argument class + * + * \tparam T the type to extract the argument as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + typename Reader = ValueReader> + class Positional : public PositionalBase + { + private: + T value; + const T defaultValue; + Reader reader; + public: + Positional(Group &group_, const std::string &name_, const std::string &help_, const T &defaultValue_ = T(), Options options_ = {}): PositionalBase(name_, help_, options_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + Positional(Group &group_, const std::string &name_, const std::string &help_, Options options_): Positional(group_, name_, help_, T(), options_) + { + } + + virtual ~Positional() {} + + virtual void 
ParseValue(const std::string &value_) override + { +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, this->value)) + { + error = Error::Parse; + } +#else + reader(name, value_, this->value); +#endif + ready = false; + matched = true; + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + value = defaultValue; + } + }; + + /** A positional argument class that pushes the found values into a list + * + * \tparam T the type to extract the argument as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader> + class PositionalList : public PositionalBase + { + private: + using Container = List<T>; + Container values; + const Container defaultValues; + Reader reader; + + public: + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + PositionalList(Group &group_, const std::string &name_, const std::string &help_, const Container &defaultValues_ = Container(), Options options_ = {}): PositionalBase(name_, help_, options_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + PositionalList(Group &group_, 
const std::string &name_, const std::string &help_, Options options_): PositionalList(group_, name_, help_, {}, options_) + { + } + + virtual ~PositionalList() {} + + virtual void ParseValue(const std::string &value_) override + { + T v; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, v)) + { + error = Error::Parse; + } +#else + reader(name, value_, v); +#endif + values.insert(std::end(values), v); + matched = true; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + /** Get the values + */ + Container &Get() noexcept + { + return values; + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + values = defaultValues; + } + + virtual PositionalBase *GetNextPositional() override + { + const bool wasMatched = Matched(); + auto me = PositionalBase::GetNextPositional(); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; + + /** A positional argument mapping class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. 
Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapPositional : public PositionalBase + { + private: + const Map<K, T> map; + T value; + const T defaultValue; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + + MapPositional(Group &group_, const std::string &name_, const std::string &help_, const Map<K, T> &map_, const T &defaultValue_ = T(), Options options_ = {}): + PositionalBase(name_, help_, options_), map(map_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + virtual ~MapPositional() {} + + virtual void ParseValue(const std::string &value_) override + { + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, key)) + { + error = Error::Parse; + } +#else + reader(name, value_, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->value = it->second; + ready = false; + matched = true; + } + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + value = defaultValue; + } + }; + + /** A positional argument mapping list class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. 
Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapPositionalList : public PositionalBase + { + private: + using Container = List<T>; + + const Map<K, T> map; + Container values; + const Container defaultValues; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + MapPositionalList(Group &group_, const std::string &name_, const std::string &help_, const Map<K, T> &map_, const Container &defaultValues_ = Container(), Options options_ = {}): + PositionalBase(name_, help_, options_), map(map_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + virtual ~MapPositionalList() {} + + virtual void ParseValue(const std::string &value_) override + { + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, key)) + { + error = Error::Parse; + } +#else + reader(name, value_, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = 
problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->values.emplace_back(it->second); + matched = true; + } + } + + /** Get the value + */ + Container &Get() noexcept + { + return values; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + values = defaultValues; + } + + virtual PositionalBase *GetNextPositional() override + { + const bool wasMatched = Matched(); + auto me = PositionalBase::GetNextPositional(); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; +} + +#endif
CTPUG__wafer-657
icalendar 5.0 breaks the tests With icalendar 5.0, the test_ics_view test fails with ``` File "/home/runner/work/wafer/wafer/wafer/schedule/tests/test_views.py", line 1526, in test_ics_view 20 self.assertEqual(event['dtstart'].params['value'], 'DATE-TIME') 21 File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/icalendar/caselessdict.py", line 40, in __getitem__ 22 return super().__getitem__(key.upper()) 23 KeyError: 'VALUE' ``` but it works fine with 4.1 There's nothing obvious in the icalendar changelog about this behaviour change, so more investriagtion is needed.
[ { "content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<5',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-regi...
[ { "content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<5',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-regi...
diff --git a/setup.py b/setup.py index 94d66192..ff944903 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ 'django-select2', 'djangorestframework', 'drf-extensions>=0.5.0', - 'icalendar>=4.0,<5.0', + 'icalendar>=4.0', 'jsonfield', 'markdown>=2.5', 'pillow', diff --git a/wafer/schedule/tests/test_views.py b/wafer/schedule/tests/test_views.py index 8656e727..0f1e1cdd 100644 --- a/wafer/schedule/tests/test_views.py +++ b/wafer/schedule/tests/test_views.py @@ -1523,7 +1523,6 @@ def test_ics_view(self): self.assertEqual(len(calendar.walk(name='VEVENT')), 9) # Check we have the right time in places event = calendar.walk(name='VEVENT')[0] - self.assertEqual(event['dtstart'].params['value'], 'DATE-TIME') self.assertEqual(event['dtstart'].dt, D.datetime(2013, 9, 22, 10, 0, 0, tzinfo=D.timezone.utc)) # Check that we have the page slug in the ical event self.assertTrue('/test0/' in event['url'])
translate__translate-4646
rc file parser doesn't treat all whitespace similarly e.g. res.rc ``` IDD_DIALOG DIALOG 0, 0, 340, 180 CAPTION "Caption" BEGIN LTEXT "Right",IDC_STATIC_HEADER,7,0,258,8,NOT WS_GROUP LTEXT "Wrong",IDC_STATIC_HEADER ,7,0,258,8,NOT WS_GROUP END ``` running `rc2po res.rc res.po`, produces res.po containing: ``` #: DIALOG.IDD_DIALOG.CAPTION msgid "Caption" msgstr "" #: DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER msgid "Right" msgstr "" #: DIALOG.IDD_DIALOG.LTEXT.[%27IDC_STATIC_HEADER%27] msgid "Wrong" msgstr "" ```
[ { "content": "#\n# Copyright 2004-2006,2008-2009 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either v...
[ { "content": "#\n# Copyright 2004-2006,2008-2009 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either v...
diff --git a/translate/storage/rc.py b/translate/storage/rc.py index f3fd76a0c1..4c4a2e2f75 100644 --- a/translate/storage/rc.py +++ b/translate/storage/rc.py @@ -189,7 +189,7 @@ def rc_statement(): join_string=" ", ) - combined_constants = delimited_list(constant, "|") + combined_constants = delimited_list(constant, "|", min=2) concatenated_string = OneOrMore(quoted_string) diff --git a/translate/storage/test_rc.py b/translate/storage/test_rc.py index 463524e642..52a095f8f2 100644 --- a/translate/storage/test_rc.py +++ b/translate/storage/test_rc.py @@ -523,3 +523,22 @@ def test_textinclude_appstudio(self): assert len(rc_file.units) == 2 assert rc_file.units[0].source == "Copied" assert rc_file.units[1].source == "Other" + + def test_id_whitespace(self): + rc_source = """ +IDD_DIALOG DIALOG 0, 0, 340, 180 +CAPTION "Caption" +BEGIN + LTEXT "Right",IDC_STATIC_HEADER,7,0,258,8,NOT WS_GROUP + LTEXT "Wrong",IDC_STATIC_HEADER2 + ,7,0,258,8,NOT WS_GROUP +END +""" + rc_file = self.source_parse(rc_source, encoding="utf-16") + assert len(rc_file.units) == 3 + assert rc_file.units[0].source == "Caption" + assert rc_file.units[0].name == "DIALOG.IDD_DIALOG.CAPTION" + assert rc_file.units[1].source == "Right" + assert rc_file.units[1].name == "DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER" + assert rc_file.units[2].source == "Wrong" + assert rc_file.units[2].name == "DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER2"
streamlit__streamlit-1682
Error during exception handling in st.write - TypeError: exception() takes 3 positional arguments but 4 were given # Summary I'm trying to display a dataframe with `st.write` and it fails with a streamlit error while trying to handle an error from rendering the object. ```python TypeError: exception() takes 3 positional arguments but 4 were given Traceback: File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/ScriptRunner.py", line 322, in _run_script exec(code, module.__dict__) File "/scratch/bs3639/bosch-urban-sound/boschurbansnd/app.py", line 256, in <module> data_summary(dflabels, dffiles) File "/scratch/bs3639/bosch-urban-sound/boschurbansnd/app.py", line 130, in data_summary st.write(dffiles.reset_index(drop=True).head()) File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/__init__.py", line 411, in write exception(exc, exc_tb) # noqa: F821 File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/DeltaGenerator.py", line 122, in wrapped_method return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/DeltaGenerator.py", line 367, in _enqueue_new_element_delta rv = marshall_element(msg.delta.new_element) File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/DeltaGenerator.py", line 120, in marshall_element return method(dg, element, *args, **kwargs) ``` # Steps to reproduce It happens when you raise an exception during type conversion in `st.write`. Here's the simplest example I could think of that throws the same error. ```python import streamlit as st class breakstuff: def __str__(self): raise ValueError st.write(breakstuff()) ``` ## Expected behavior: It should display the `ValueError` ## Actual behavior: Instead it throws a streamlit internal `TypeError` error (see above traceback). It means that I can't actually debug the exception that is throwing that code. 
# Debug info - Streamlit version: `Streamlit, version 0.60.0` (get it with `$ streamlit version`) - Python version: `Python 3.6.10 :: Anaconda, Inc.` (get it with `$ python --version`) - Using Conda? PipEnv? PyEnv? Pex? - Conda - OS version: - Browser version: # Additional information From a quick spin following the traceback, I believe I can see the issue. The traceback says that the error originates here: https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/DeltaGenerator.py#L120 The wrapper provides 2 args and it says it's receiving 4 while expecting 3, so that means that the wrapped method is being called with 2 instead of an expected 1 argument. Earlier in the traceback, it says that it's being raised in `st.write` by `exception` (notice it's being called with 2 arguments): https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/__init__.py#L409-L411 Looking at its definition, `exception` is wrapped with `_with_element` and takes 3 arguments, 2 of which are provided by the wrapper. https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/DeltaGenerator.py#L788-L789 tl;dr - you probably shouldn't be passing the traceback into the exception function (or perhaps you meant to and you haven't finished implementing it on the other side.) Either way, it's broken rn. # Possible Solution? From a cursory glance, it looks like you should just simplify to this and this issue will go away: ```python # change here: https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/__init__.py#L409-L411 try: ... except Exception as exc: exception(exc) ```
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
diff --git a/lib/streamlit/__init__.py b/lib/streamlit/__init__.py index 1a306d74c095..8f5dbb534aec 100644 --- a/lib/streamlit/__init__.py +++ b/lib/streamlit/__init__.py @@ -407,9 +407,8 @@ def flush_buffer(): flush_buffer() - except Exception: - _, exc, exc_tb = _sys.exc_info() - exception(exc, exc_tb) # noqa: F821 + except Exception as exc: + exception(exc) def experimental_show(*args): diff --git a/lib/tests/streamlit/write_test.py b/lib/tests/streamlit/write_test.py index 85af335d8bed..69d1ab849234 100644 --- a/lib/tests/streamlit/write_test.py +++ b/lib/tests/streamlit/write_test.py @@ -184,7 +184,12 @@ def __str__(self): def test_exception(self): """Test st.write that raises an exception.""" - with patch("streamlit.markdown") as m, patch("streamlit.exception") as e: + # We patch streamlit.exception to observe it, but we also make sure + # it's still called (via side_effect). This ensures that it's called + # with the proper arguments. + with patch("streamlit.markdown") as m, patch( + "streamlit.exception", side_effect=st.exception + ) as e: m.side_effect = Exception("some exception") st.write("some text")
Lightning-AI__torchmetrics-1384
module 'torchmetrics.classification' has no attribute 'AUC' ## 🐛 Bug Importing all the classification metrics causes the `AttributeError`: `from torchmetrics.classification import *` `AttributeError: module 'torchmetrics.classification' has no attribute 'AUC'` Environment torchmetrics 0.11.0 pytorch 1.13.0 In order to fix it someone should remove AUC from the list __all__ (src/torchmetrics/classification/__init__.py)
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required ...
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required ...
diff --git a/src/torchmetrics/classification/__init__.py b/src/torchmetrics/classification/__init__.py index ad9c1e39e36..60069403e8a 100644 --- a/src/torchmetrics/classification/__init__.py +++ b/src/torchmetrics/classification/__init__.py @@ -119,7 +119,6 @@ "BinaryAccuracy", "MulticlassAccuracy", "MultilabelAccuracy", - "AUC", "AUROC", "BinaryAUROC", "MulticlassAUROC",
vllm-project__vllm-1212
[v0.2.0] Release Tracker ## Major changes * Up to 60% performance improvement by optimizing de-tokenization and sampler * Initial support for AWQ (performance not optimized) * Support for RoPE scaling and LongChat * Support for Mistral-7B ## PRs to be merged before the release - [x] Vectorized sampler: #1048, #820 - [x] LongChat: #555 - [x] `TORCH_CUDA_ARCH_LIST` build option: #1074 - [x] Support for Mistral-7B: #1196 - [x] #1198 - ~~[ ] FP32 RoPE kernel: #1061~~ (deferred to the next PR)
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_c...
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_c...
diff --git a/vllm/__init__.py b/vllm/__init__.py index b7b019f57b2..6a8b7c8fb9b 100644 --- a/vllm/__init__.py +++ b/vllm/__init__.py @@ -8,7 +8,7 @@ from vllm.outputs import CompletionOutput, RequestOutput from vllm.sampling_params import SamplingParams -__version__ = "0.1.7" +__version__ = "0.2.0" __all__ = [ "LLM",
python-pillow__Pillow-3912
Crash on trying to load corrupted font as file handle ### What did you do? When loading corrupted file by handle, PIL crashes: ```console # python -c "from PIL import ImageFont; print(ImageFont.truetype(open('setup.py', 'rb')))" double free or corruption (top) Aborted ``` (Originally observed on actually corrupt TTF file, but it as well triggered by non font) ### What did you expect to happen? Raise an exception ### What actually happened? Crash (SIGABRT) ### What are your OS, Python and Pillow versions? * OS: Linux * Python: 3.7.3 * Pillow: 6.0.0, reproduced with current git master
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# PIL raster font management\n#\n# History:\n# 1996-08-07 fl created (experimental)\n# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3\n# 1999-02-06 fl rewrote most font management stuff in C\n# 1999-03-17 fl take pth files into...
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# PIL raster font management\n#\n# History:\n# 1996-08-07 fl created (experimental)\n# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3\n# 1999-02-06 fl rewrote most font management stuff in C\n# 1999-03-17 fl take pth files into...
diff --git a/Tests/test_imagefont.py b/Tests/test_imagefont.py index 3388c205579..0ee3b979e83 100644 --- a/Tests/test_imagefont.py +++ b/Tests/test_imagefont.py @@ -420,6 +420,10 @@ def test_load_path_not_found(self): self.assertRaises(IOError, ImageFont.load_path, filename) self.assertRaises(IOError, ImageFont.truetype, filename) + def test_load_non_font_bytes(self): + with open("Tests/images/hopper.jpg", "rb") as f: + self.assertRaises(IOError, ImageFont.truetype, f) + def test_default_font(self): # Arrange txt = 'This is a "better than nothing" default font.' diff --git a/src/PIL/ImageFont.py b/src/PIL/ImageFont.py index 7074a70c01a..f43f95b9ac9 100644 --- a/src/PIL/ImageFont.py +++ b/src/PIL/ImageFont.py @@ -545,6 +545,8 @@ def freetype(font): try: return freetype(font) except IOError: + if not isPath(font): + raise ttf_filename = os.path.basename(font) dirs = [] diff --git a/src/_imagingft.c b/src/_imagingft.c index f6bd787ef5c..28e6d2b5e01 100644 --- a/src/_imagingft.c +++ b/src/_imagingft.c @@ -315,6 +315,7 @@ getfont(PyObject* self_, PyObject* args, PyObject* kw) if (error) { if (self->font_bytes) { PyMem_Free(self->font_bytes); + self->font_bytes = NULL; } Py_DECREF(self); return geterror(error);
vllm-project__vllm-2337
[v0.2.7] Release Tracker **ETA**: Jan 3rd - 4th ## Major changes TBD ## PRs to be merged before the release - [x] #2221 - [ ] ~~#2293~~ (deferred)
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_c...
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_c...
diff --git a/vllm/__init__.py b/vllm/__init__.py index e5cd1c2f333..327dfad0635 100644 --- a/vllm/__init__.py +++ b/vllm/__init__.py @@ -8,7 +8,7 @@ from vllm.outputs import CompletionOutput, RequestOutput from vllm.sampling_params import SamplingParams -__version__ = "0.2.6" +__version__ = "0.2.7" __all__ = [ "LLM",
meltano__meltano-7022
bug: Integration tests failing on main ### Meltano Version N/A ### Python Version NA ### Bug scope Other ### Operating System N/A ### Description Example failures on `main`: - https://github.com/meltano/meltano/actions/runs/3534445738 - https://github.com/meltano/meltano/actions/runs/3534480620 Example success on `release/2.10.0`: - https://github.com/meltano/meltano/actions/runs/3534468951 Affects all integration tests, and may be responsible for failures in the Pytest workflow: - https://github.com/meltano/meltano/actions/runs/3534001638/jobs/5930358463 - https://github.com/meltano/meltano/actions/runs/3534001638/jobs/5930359021 - https://github.com/meltano/meltano/actions/runs/3534001638/jobs/5930359587 ### Code _No response_
[ { "content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collec...
[ { "content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collec...
diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py index cec36f627d..956bdc1315 100644 --- a/src/meltano/core/venv_service.py +++ b/src/meltano/core/venv_service.py @@ -57,7 +57,7 @@ def venv_platform_specs(): raise Exception(f"Platform {system!r} not supported.") from ex -PIP_PACKAGES = ("pip", "setuptools", "wheel") +PIP_PACKAGES = ("pip", "setuptools==57.5.0", "wheel") class VirtualEnv:
ivy-llc__ivy-13924
atan2 Implementing atan2 functionality for TensorFlow frontend. Solves https://github.com/unifyai/ivy/issues/1545
[ { "content": "# global\nimport ivy\nfrom ivy import with_supported_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n to_ivy_dtype,\n)\n\...
[ { "content": "# global\nimport ivy\nfrom ivy import with_supported_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n to_ivy_dtype,\n)\n\...
diff --git a/ivy/functional/frontends/tensorflow/math.py b/ivy/functional/frontends/tensorflow/math.py index 71aa26155de7f..27eaf525ed1cd 100644 --- a/ivy/functional/frontends/tensorflow/math.py +++ b/ivy/functional/frontends/tensorflow/math.py @@ -512,6 +512,11 @@ def atan(x, name=None): return ivy.atan(x) +@to_ivy_arrays_and_back +def atan2(y, x, name=None): + return ivy.atan2(y, x) + + @to_ivy_arrays_and_back def log(x, name=None): return ivy.log(x) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index 6752026cce8bd..35c44ccce1a3d 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -2082,6 +2082,35 @@ def test_tensorflow_cosh( ) +# atan2 +@handle_frontend_test( + fn_tree="tensorflow.math.atan2", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True + ), + test_with_out=st.just(False), +) +def test_tensorflow_atan2( + *, + dtype_and_x, + frontend, + test_flags, + fn_tree, + on_device, +): + input_dtype, x = dtype_and_x + assume(not np.any(np.isclose(x[1], 0))) + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + y=x[0], + x=x[1], + ) + + # less_equal @handle_frontend_test( fn_tree="tensorflow.math.less_equal",
encode__uvicorn-324
TypeError: __init__() when run "uvicorn app:App" I'm working on Mac Os Sierra 10.12.6, python 3.7.2 and uvicorn via pip3 0.5.1. When I run the example uvicorn app:App get the following error: Traceback (most recent call last): File "/usr/local/bin/uvicorn", line 11, in <module> load_entry_point('uvicorn==0.5.1', 'console_scripts', 'uvicorn')() File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 489, in load_entry_point return get_distribution(dist).load_entry_point(group, name) File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2793, in load_entry_point return ep.load() File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2411, in load return self.resolve() File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2417, in resolve module = __import__(self.module_name, fromlist=['__name__'], level=0) File "/usr/local/lib/python3.7/site-packages/uvicorn/__init__.py", line 2, in <module> from uvicorn.main import Server, main, run File "/usr/local/lib/python3.7/site-packages/uvicorn/main.py", line 212, in <module> ssl_ciphers: str, File "/usr/local/lib/python3.7/site-packages/click/decorators.py", line 170, in decorator _param_memo(f, OptionClass(param_decls, **attrs)) File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1460, in __init__ Parameter.__init__(self, param_decls, type=type, **attrs) TypeError: __init__() got an unexpected keyword argument 'hidden' Thank you
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init_...
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init_...
diff --git a/setup.py b/setup.py index 3123b4a70..802cda43d 100755 --- a/setup.py +++ b/setup.py @@ -41,11 +41,11 @@ def get_packages(package): ) requirements = [ - "click", - "h11", - "websockets>=6.0", - "httptools;" + env_marker, - "uvloop;" + env_marker, + "click==7.*", + "h11==0.8.*", + "websockets==7.*", + "httptools==0.0.13 ;" + env_marker, + "uvloop==0.12.* ;" + env_marker, ]
django-cms__django-cms-2207
Fixture loading in Postgres Get the following error when loading json fixtures with Postgres and django 1.3.1 IntegrityError: duplicate key value violates unique constraint "cms_placeholder_pkey" Forked repository and created test case for this on https://github.com/mthornhill/django-cms to recreate 1. clone directory git clone https://mthornhill@github.com/mthornhill/django-cms.git 2. make a virtual environment cd django-cms virtualenv . --no-site-packages 3. run FixtureTestCase ./runtests.sh -d 13 --rebuild-env FixtureTestCase
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.utils.conf import get_cms_setting\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import signals\nfrom django.dispatch import Signal\n\nfrom cms.cache.permissions import clear_user_permission_cache, clear_permission_cache\nfrom cms.mo...
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.utils.conf import get_cms_setting\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import signals\nfrom django.dispatch import Signal\n\nfrom cms.cache.permissions import clear_user_permission_cache, clear_permission_cache\nfrom cms.mo...
diff --git a/cms/signals.py b/cms/signals.py index 9bfd860128d..8b7ade88b31 100644 --- a/cms/signals.py +++ b/cms/signals.py @@ -196,7 +196,8 @@ def post_save_page(instance, **kwargs): def update_placeholders(instance, **kwargs): - instance.rescan_placeholders() + if not kwargs.get('raw'): + instance.rescan_placeholders() def invalidate_menu_cache(instance, **kwargs): diff --git a/cms/tests/__init__.py b/cms/tests/__init__.py index 6abf5811f45..f78fa4e62f4 100644 --- a/cms/tests/__init__.py +++ b/cms/tests/__init__.py @@ -31,8 +31,9 @@ from cms.tests.urlutils import * from cms.tests.views import * from cms.tests.management import * +from cms.tests.fixture_loading import * from cms.tests.menu_page_viewperm import * from cms.tests.menu_page_viewperm_staff import * from cms.tests.nested_plugins import * from cms.tests.check import * -from cms.tests.no_i18n import * \ No newline at end of file +from cms.tests.no_i18n import * diff --git a/cms/tests/fixture_loading.py b/cms/tests/fixture_loading.py new file mode 100644 index 00000000000..b5aa7db1623 --- /dev/null +++ b/cms/tests/fixture_loading.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +import tempfile +import codecs + +try: + from cStringIO import StringIO +except: + from io import StringIO + +from django.core.management import call_command + +from cms.test_utils.fixtures.navextenders import NavextendersFixture +from cms.test_utils.testcases import SettingsOverrideTestCase +from cms.models import Page + + +class FixtureTestCase(NavextendersFixture, SettingsOverrideTestCase): + + def test_fixture_load(self): + """ + This test dumps a live set of pages, cleanup the database and load it + again. + This makes fixtures unnecessary and it's easier to maintain. 
+ """ + output = StringIO() + dump = tempfile.mkstemp(".json") + call_command('dumpdata', 'cms', indent=3, stdout=output) + Page.objects.all().delete() + output.seek(0) + with codecs.open(dump[1], 'w', 'utf-8') as dumpfile: + dumpfile.write(output.read()) + + self.assertEqual(0, Page.objects.count()) + # Transaction disable, otherwise the connection it the test would be + # isolated from the data loaded in the different command connection + call_command('loaddata', dump[1], commit=False, stdout=output) + self.assertEqual(10, Page.objects.count())
kubeflow__pipelines-2610
kfp 0.1.35 tar.gz in pypi.org is missing diagnose_me directory **What happened:** The 0.1.35 release of kfp available on pypi.org (i.e. what is installed via `pip3 install kfp`) seems to be missing the `kfp/cli/diagnose_me` directory containing the diagnose_me modules required by the cli. The release hosted on github contains these files. This is the tar.gz file hosted on pypi: https://files.pythonhosted.org/packages/e8/02/51dbeae211ddf1c931b2d1613db90856b7d94a53c1d9f704593dfa6253ae/kfp-0.1.35.tar.gz If you try to install and run kfp 0.1.35 via pip it causes an error: ``` Traceback (most recent call last): File "/Users/shenderson/venvs/kubeflow/bin/kfp", line 5, in <module> from kfp.__main__ import main File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module> from .cli.cli import main File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/cli.py", line 21, in <module> from .diagnose_me_cli import diagnose_me File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/diagnose_me_cli.py", line 6, in <module> from .diagnose_me import dev_env ModuleNotFoundError: No module named 'kfp.cli.diagnose_me' ``` **What did you expect to happen:** All kfp modules including the diagnose_me package to be installed. **What steps did you take:** * Run `pip3 install --upgrade --force --no-cache-dir kfp` * Run `kfp`
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab...
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab...
diff --git a/sdk/python/setup.py b/sdk/python/setup.py index 46f51e5c08c..f656fa72808 100644 --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -64,6 +64,7 @@ def find_version(*file_path_parts): packages=[ 'kfp', 'kfp.cli', + 'kfp.cli.diagnose_me', 'kfp.compiler', 'kfp.components', 'kfp.components.structures',
pyqtgraph__pyqtgraph-1242
Bugfix: PlotCurveItem.sigClicked emits MouseClickEvent Hi all, currently `PlotCurveItem.sigClicked` emits `self`, a `PlotCurveItem`: ``` def mouseClickEvent(self, ev): if not self.clickable or ev.button() != QtCore.Qt.LeftButton: return if self.mouseShape().contains(ev.pos()): ev.accept() self.sigClicked.emit(self) ``` Since it can be useful to get the MouseClickEvent in the GraphicsView I suggest to either emit `self.sigClicked.emit(ev)` or `self.sigClicked.emit(self, ev)` cheers!
[ { "content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\ntry:\n from ..Qt import QtOpenGL\n HAVE_OPENGL = True\nexcept:\n HAVE_OPENGL = False\n\nimport numpy as np\nfrom .GraphicsObject import GraphicsObject\nfrom .. import functions as fn\nfrom ..Point import Point\nimport struct, sys\nf...
[ { "content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\ntry:\n from ..Qt import QtOpenGL\n HAVE_OPENGL = True\nexcept:\n HAVE_OPENGL = False\n\nimport numpy as np\nfrom .GraphicsObject import GraphicsObject\nfrom .. import functions as fn\nfrom ..Point import Point\nimport struct, sys\nf...
diff --git a/pyqtgraph/graphicsItems/PlotCurveItem.py b/pyqtgraph/graphicsItems/PlotCurveItem.py index c3a58da2ed..b6c6d21653 100644 --- a/pyqtgraph/graphicsItems/PlotCurveItem.py +++ b/pyqtgraph/graphicsItems/PlotCurveItem.py @@ -613,7 +613,7 @@ def mouseClickEvent(self, ev): return if self.mouseShape().contains(ev.pos()): ev.accept() - self.sigClicked.emit(self) + self.sigClicked.emit(self, ev)
rasterio__rasterio-1692
more explicit NotImplementedError messages in read mode ? In wanting to set a GeoTIFF's CRS, I encountered [this](https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx#L516) NotImplementedError when trying to run the following code: ``` with rasterio.open(filepath) as src: src.crs = "EPSG:3857" ``` Though in retrospect it is obvious the above will fail without explicitly specifying the proper mode , i.e. `'r+'` in this case, I was momentarily thrown off by the error and assumed something was wrong with my approach. Would a more explicit error message be useful here?
[ { "content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised wh...
[ { "content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised wh...
diff --git a/CHANGES.txt b/CHANGES.txt index cf785e5e0..6216af653 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -4,6 +4,9 @@ Changes 1.0.23 (TBD) ------------ +- Attempts to set attributes of datasets opened in "r" mode now raise a custom + DatasetAttributeError. This exception derives from both RasterioError and + NotImplementedError, which maintains backwards compatibility (#1676). - Block sizes are no longer guarded when creating untiled datasets (#1689). - CRS objects are now hashable and equivalent CRS objects have the same hash value (#1684). diff --git a/rasterio/_base.pyx b/rasterio/_base.pyx index 91d778a11..2a828386e 100644 --- a/rasterio/_base.pyx +++ b/rasterio/_base.pyx @@ -26,6 +26,7 @@ from rasterio.enums import ( ColorInterp, Compression, Interleaving, MaskFlags, PhotometricInterp) from rasterio.env import Env, env_ctx_if_needed from rasterio.errors import ( + DatasetAttributeError, RasterioIOError, CRSError, DriverRegistrationError, NotGeoreferencedWarning, RasterBlockError, BandOverviewError) from rasterio.profiles import Profile @@ -450,7 +451,7 @@ cdef class DatasetBase(object): return self.get_nodatavals() def _set_nodatavals(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property nodata: """The dataset's single nodata value @@ -513,7 +514,7 @@ cdef class DatasetBase(object): for x in self._mask_flags()) def _set_crs(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property crs: """The dataset's coordinate reference system @@ -533,16 +534,16 @@ cdef class DatasetBase(object): self._set_crs(value) def _set_all_descriptions(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") def _set_all_scales(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") def _set_all_offsets(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") def 
_set_all_units(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property descriptions: """Descriptions for each dataset band @@ -563,7 +564,7 @@ cdef class DatasetBase(object): self._set_all_descriptions(value) def write_transform(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property transform: """The dataset's georeferencing transformation matrix @@ -1184,7 +1185,7 @@ cdef class DatasetBase(object): for i in range(num_gcps)], crs) def _set_gcps(self, values): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property gcps: """ground control points and their coordinate reference system. diff --git a/rasterio/errors.py b/rasterio/errors.py index 97f63303f..cf91f8456 100644 --- a/rasterio/errors.py +++ b/rasterio/errors.py @@ -102,3 +102,7 @@ class UnsupportedOperation(RasterioError): class OverviewCreationError(RasterioError): """Raised when creation of an overview fails""" + + +class DatasetAttributeError(RasterioError, NotImplementedError): + """Raised when dataset attributes are misused""" diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 2b106caf5..0086b7e99 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -11,7 +11,7 @@ import rasterio from rasterio.enums import Compression -from rasterio.errors import RasterioIOError +from rasterio.errors import RasterioIOError, DatasetAttributeError from rasterio.transform import Affine @@ -65,3 +65,16 @@ def test_tiled_dataset_blocksize_guard(tmp_path): rasterio.open( tmp_file, "w", driver="GTiff", count=1, height=13, width=13, dtype="uint8", crs="epsg:3857", transform=Affine.identity(), tiled=True, blockxsize=256, blockysize=256) + +def test_dataset_readonly_attributes(path_rgb_byte_tif): + """Attempts to set read-only attributes fail with DatasetAttributeError""" + with pytest.raises(DatasetAttributeError): + with rasterio.open(path_rgb_byte_tif) as dataset: + 
dataset.crs = "foo" + + +def test_dataset_readonly_attributes(path_rgb_byte_tif): + """Attempts to set read-only attributes still fail with NotImplementedError""" + with pytest.raises(NotImplementedError): + with rasterio.open(path_rgb_byte_tif) as dataset: + dataset.crs = "foo"
pyjanitor-devs__pyjanitor-1175
[BUG] pandas 1.5.x `_MergeOperation` doesn't have `copy` keyword anymore Raised errors from [the latest testing env](https://github.com/pyjanitor-devs/pyjanitor/actions/runs/3255090961/jobs/5344044127#step:5:1909) which pandas version is 1.5.0. The pandas version of [environment-dev.yml](https://github.com/pyjanitor-devs/pyjanitor/blob/dev/environment-dev.yml#L36) is 1.3.5, so it would raise any errors. ```python ___________________________ test_extension_array_eq ____________________________ [gw1] linux -- Python 3.10.6 /usr/share/miniconda3/envs/test/bin/python def test_extension_array_eq(): """Extension arrays when matching on equality.""" df1 = pd.DataFrame( {"id": [1, 1, 1, 2, 2, 3], "value_1": [2, 5, 7, 1, 3, 4]} ) df1 = df1.astype({"value_1": "Int64"}) df2 = pd.DataFrame( { "id": [1, 1, 1, 1, 2, 2, 2, 3], "value_2A": [0, 3, 7, 12, 0, 2, 3, 1], "value_2B": [1, 5, 9, 15, 1, 4, 6, 3], } ) df2 = df2.astype({"value_2A": "Int64"}) > expected = df1.conditional_join( df2, ("id", "id", "=="), ("value_1", "value_2A", ">"), use_numba=False, sort_by_appearance=False, ) tests/functions/test_conditional_join.py:2962: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /usr/share/miniconda3/envs/test/lib/python3.10/site-packages/pandas_flavor/register.py:29: in __call__ return method(self._obj, *args, **kwargs) janitor/functions/conditional_join.py:150: in conditional_join return _conditional_join_compute( janitor/functions/conditional_join.py:419: in _conditional_join_compute result = _multiple_conditional_join_eq(df, right, conditions, keep) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ df = id value_1 0 1 2 1 1 5 2 1 7 3 2 1 4 2 3 5 3 4 right = id value_2A value_2B 0 1 0 1 1 1 3 5 2 1 7 9 3 1 12 15 4 2 0 1 5 2 2 4 6 2 3 6 7 3 1 3 conditions = (('id', 'id', '=='), ('value_1', 'value_2A', '>')), keep = 'all' def _multiple_conditional_join_eq( df: pd.DataFrame, right: pd.DataFrame, conditions: list, keep: str ) 
-> tuple: """ Get indices for multiple conditions, if any of the conditions has an `==` operator. Returns a tuple of (df_index, right_index) """ eqs = [ (left_on, right_on) for left_on, right_on, op in conditions if op == _JoinOperator.STRICTLY_EQUAL.value ] left_on, right_on = zip(*eqs) left_on = [*left_on] right_on = [*right_on] rest = ( (df[left_on], right[right_on], op) for left_on, right_on, op in conditions if op != _JoinOperator.STRICTLY_EQUAL.value ) > left_index, right_index = _MergeOperation( df, right, left_on=left_on, right_on=right_on, sort=False, copy=False, )._get_join_indexers() E TypeError: _MergeOperation.__init__() got an unexpected keyword argument 'copy' janitor/functions/conditional_join.py:899: TypeError ``` closed to #1143
[ { "content": "import operator\nfrom enum import Enum\nfrom typing import Union, Any, Optional, Hashable, Literal\n\nimport numpy as np\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_dtype_equal,\n is_exten...
[ { "content": "import operator\nfrom enum import Enum\nfrom typing import Union, Any, Optional, Hashable, Literal\n\nimport numpy as np\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_dtype_equal,\n is_exten...
diff --git a/CHANGELOG.md b/CHANGELOG.md index c92fee966..16744781d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,7 +27,7 @@ - [ENH] The parameter `column_name` of `change_type` totally supports inputing multi-column now. #1163 @Zeroto521 - [ENH] Fix error when `sort_by_appearance=True` is combined with `dropna=True`. Issue #1168 @samukweku - [ENH] Add explicit default parameter to `case_when` function. Issue #1159 @samukweku - +- [BUG] pandas 1.5.x `_MergeOperation` doesn't have `copy` keyword anymore. Issue #1174 @Zeroto521 ## [v0.23.1] - 2022-05-03 diff --git a/janitor/functions/conditional_join.py b/janitor/functions/conditional_join.py index 417d7c902..2f8438166 100644 --- a/janitor/functions/conditional_join.py +++ b/janitor/functions/conditional_join.py @@ -902,7 +902,6 @@ def _multiple_conditional_join_eq( left_on=left_on, right_on=right_on, sort=False, - copy=False, )._get_join_indexers() if not left_index.size: diff --git a/tests/functions/test_case_when.py b/tests/functions/test_case_when.py index 846072be9..6c239a09f 100644 --- a/tests/functions/test_case_when.py +++ b/tests/functions/test_case_when.py @@ -175,7 +175,7 @@ def test_case_when_default_array(df): ) expected = np.where(df.numbers > 1, df.numbers + 10, default) expected = df.assign(bleh=expected) - assert_frame_equal(result, expected) + assert_frame_equal(result, expected, check_dtype=False) @given(df=categoricaldf_strategy())
kivy__kivy-4149
ModalView background size is not updated Since https://github.com/kivy/kivy/pull/4136 the ModalView background is not resized when the window size changes, run `kivy/uix/modalview.py`, then resize the window. ![capture](https://cloud.githubusercontent.com/assets/7513068/14303295/159f0434-fbb2-11e5-93b3-8de02ffd4e49.PNG)
[ { "content": "'''\nModalView\n=========\n\n.. versionadded:: 1.4.0\n\nThe :class:`ModalView` widget is used to create modal views. By default, the\nview will cover the whole \"parent\" window.\n\nRemember that the default size of a Widget is size_hint=(1, 1). If you don't\nwant your view to be fullscreen, eithe...
[ { "content": "'''\nModalView\n=========\n\n.. versionadded:: 1.4.0\n\nThe :class:`ModalView` widget is used to create modal views. By default, the\nview will cover the whole \"parent\" window.\n\nRemember that the default size of a Widget is size_hint=(1, 1). If you don't\nwant your view to be fullscreen, eithe...
diff --git a/kivy/uix/modalview.py b/kivy/uix/modalview.py index 504f8deb42..8c7e20ce99 100644 --- a/kivy/uix/modalview.py +++ b/kivy/uix/modalview.py @@ -143,7 +143,7 @@ class ModalView(AnchorLayout): _anim_duration = NumericProperty(.1) - _window = ObjectProperty(None, allownone=True) + _window = ObjectProperty(None, allownone=True, rebind=True) __events__ = ('on_open', 'on_dismiss')
deis__deis-4163
bug(client): deis apps only shows 100 apps The client is not using the pagination to return all the apps
[ { "content": "\"\"\"\nDjango settings for the Deis project.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport os.path\nimport random\nimport string\nimport sys\nimport tempfile\nimport ldap\n\nfrom django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n\nPROJECT_ROOT = os.path.normpath(os.path....
[ { "content": "\"\"\"\nDjango settings for the Deis project.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport os.path\nimport random\nimport string\nimport sys\nimport tempfile\nimport ldap\n\nfrom django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n\nPROJECT_ROOT = os.path.normpath(os.path....
diff --git a/client-go/cmd/apps.go b/client-go/cmd/apps.go index c05aca0829..f6ecb687f5 100644 --- a/client-go/cmd/apps.go +++ b/client-go/cmd/apps.go @@ -55,20 +55,24 @@ func AppCreate(id string, buildpack string, remote string, noRemote bool) error } // AppsList lists apps on the Deis controller. -func AppsList() error { +func AppsList(results int) error { c, err := client.New() if err != nil { return err } - apps, err := apps.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + apps, count, err := apps.List(c, results) if err != nil { return err } - fmt.Println("=== Apps") + fmt.Printf("=== Apps%s", limitCount(len(apps), count)) for _, app := range apps { fmt.Println(app.ID) diff --git a/client-go/cmd/builds.go b/client-go/cmd/builds.go index d1f1006fcf..2f6a93881d 100644 --- a/client-go/cmd/builds.go +++ b/client-go/cmd/builds.go @@ -9,20 +9,24 @@ import ( ) // BuildsList lists an app's builds. -func BuildsList(appID string) error { +func BuildsList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - builds, err := builds.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + builds, count, err := builds.List(c, appID, results) if err != nil { return err } - fmt.Printf("=== %s Builds\n", appID) + fmt.Printf("=== %s Builds%s", appID, limitCount(len(builds), count)) for _, build := range builds { fmt.Println(build.UUID, build.Created) diff --git a/client-go/cmd/certs.go b/client-go/cmd/certs.go index 5e787c1564..9f63705f2b 100644 --- a/client-go/cmd/certs.go +++ b/client-go/cmd/certs.go @@ -12,14 +12,18 @@ import ( ) // CertsList lists certs registered with the controller. 
-func CertsList() error { +func CertsList(results int) error { c, err := client.New() if err != nil { return err } - certList, err := certs.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + certList, _, err := certs.List(c, results) if err != nil { return err diff --git a/client-go/cmd/domains.go b/client-go/cmd/domains.go index 13f3b0ee95..13e95a5965 100644 --- a/client-go/cmd/domains.go +++ b/client-go/cmd/domains.go @@ -7,20 +7,24 @@ import ( ) // DomainsList lists domains registered with an app. -func DomainsList(appID string) error { +func DomainsList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - domains, err := domains.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + domains, count, err := domains.List(c, appID, results) if err != nil { return err } - fmt.Printf("=== %s Domains\n", appID) + fmt.Printf("=== %s Domains%s", appID, limitCount(len(domains), count)) for _, domain := range domains { fmt.Println(domain.Domain) diff --git a/client-go/cmd/keys.go b/client-go/cmd/keys.go index 3b5d67b44c..b1855a719d 100644 --- a/client-go/cmd/keys.go +++ b/client-go/cmd/keys.go @@ -14,20 +14,24 @@ import ( ) // KeysList lists a user's keys. -func KeysList() error { +func KeysList(results int) error { c, err := client.New() if err != nil { return err } - keys, err := keys.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + keys, count, err := keys.List(c, results) if err != nil { return err } - fmt.Printf("=== %s Keys\n", c.Username) + fmt.Printf("=== %s Keys%s", c.Username, limitCount(len(keys), count)) for _, key := range keys { fmt.Printf("%s %s...%s\n", key.ID, key.Public[:16], key.Public[len(key.Public)-10:]) diff --git a/client-go/cmd/perms.go b/client-go/cmd/perms.go index 7930bf0c32..a8fbdd9e45 100644 --- a/client-go/cmd/perms.go +++ b/client-go/cmd/perms.go @@ -9,7 +9,7 @@ import ( ) // PermsList prints which users have permissions. 
-func PermsList(appID string, admin bool) error { +func PermsList(appID string, admin bool, results int) error { c, appID, err := permsLoad(appID, admin) if err != nil { @@ -17,9 +17,13 @@ func PermsList(appID string, admin bool) error { } var users []string + var count int if admin { - users, err = perms.ListAdmins(c) + if results == defaultLimit { + results = c.ResponseLimit + } + users, count, err = perms.ListAdmins(c, results) } else { users, err = perms.List(c, appID) } @@ -29,7 +33,7 @@ func PermsList(appID string, admin bool) error { } if admin { - fmt.Println("=== Administrators") + fmt.Printf("=== Administrators%s", limitCount(len(users), count)) } else { fmt.Printf("=== %s's Users\n", appID) } diff --git a/client-go/cmd/ps.go b/client-go/cmd/ps.go index f950f0f4b8..4bd6ab77ca 100644 --- a/client-go/cmd/ps.go +++ b/client-go/cmd/ps.go @@ -12,20 +12,24 @@ import ( ) // PsList lists an app's processes. -func PsList(appID string) error { +func PsList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - processes, err := ps.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + processes, count, err := ps.List(c, appID, results) if err != nil { return err } - printProcesses(appID, processes) + printProcesses(appID, processes, count) return nil } @@ -69,13 +73,13 @@ func PsScale(appID string, targets []string) error { fmt.Printf("done in %ds\n", int(time.Since(startTime).Seconds())) - processes, err := ps.List(c, appID) + processes, count, err := ps.List(c, appID, c.ResponseLimit) if err != nil { return err } - printProcesses(appID, processes) + printProcesses(appID, processes, count) return nil } @@ -119,20 +123,20 @@ func PsRestart(appID, target string) error { fmt.Printf("done in %ds\n", int(time.Since(startTime).Seconds())) - processes, err := ps.List(c, appID) + processes, count, err := ps.List(c, appID, c.ResponseLimit) if err != nil { return err } - printProcesses(appID, processes) + 
printProcesses(appID, processes, count) return nil } -func printProcesses(appID string, processes []api.Process) { +func printProcesses(appID string, processes []api.Process, count int) { psMap := ps.ByType(processes) - fmt.Printf("=== %s Processes\n", appID) + fmt.Printf("=== %s Processes%s", appID, limitCount(len(processes), count)) for psType, procs := range psMap { fmt.Printf("--- %s:\n", psType) diff --git a/client-go/cmd/releases.go b/client-go/cmd/releases.go index 4350799e45..366164c413 100644 --- a/client-go/cmd/releases.go +++ b/client-go/cmd/releases.go @@ -9,16 +9,20 @@ import ( ) // ReleasesList lists an app's releases. -func ReleasesList(appID string) error { +func ReleasesList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - releases, err := releases.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + releases, count, err := releases.List(c, appID, results) - fmt.Printf("=== %s Releases\n", appID) + fmt.Printf("=== %s Releases%s", appID, limitCount(len(releases), count)) w := new(tabwriter.Writer) diff --git a/client-go/cmd/users.go b/client-go/cmd/users.go index 87e97e43ea..8b79ac4657 100644 --- a/client-go/cmd/users.go +++ b/client-go/cmd/users.go @@ -8,20 +8,24 @@ import ( ) // UsersList lists users registered with the controller. 
-func UsersList() error { +func UsersList(results int) error { c, err := client.New() if err != nil { return err } - users, err := users.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + users, count, err := users.List(c, results) if err != nil { return err } - fmt.Println("=== Users") + fmt.Printf("=== Users%s", limitCount(len(users), count)) for _, user := range users { fmt.Println(user.Username) diff --git a/client-go/cmd/utils.go b/client-go/cmd/utils.go index 4e712c1214..8e3fbf4f5b 100644 --- a/client-go/cmd/utils.go +++ b/client-go/cmd/utils.go @@ -10,6 +10,8 @@ import ( "github.com/deis/deis/client-go/pkg/git" ) +var defaultLimit = -1 + func progress() chan bool { frames := []string{"...", "o..", ".o.", "..o"} backspaces := strings.Repeat("\b", 3) @@ -78,3 +80,11 @@ func drinkOfChoice() string { return drink } + +func limitCount(objs, total int) string { + if objs == total { + return "\n" + } + + return fmt.Sprintf(" (%d of %d)\n", objs, total) +} diff --git a/client-go/controller/api/apps.go b/client-go/controller/api/apps.go index d76a7b9e65..600a4696f9 100644 --- a/client-go/controller/api/apps.go +++ b/client-go/controller/api/apps.go @@ -10,14 +10,6 @@ type App struct { UUID string `json:"uuid"` } -// Apps is the definition of GET /v1/apps/. -type Apps struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Apps []App `json:"results"` -} - // AppCreateRequest is the definition of POST /v1/apps/. type AppCreateRequest struct { ID string `json:"id,omitempty"` diff --git a/client-go/controller/api/builds.go b/client-go/controller/api/builds.go index 19fc94c333..1e239a115b 100644 --- a/client-go/controller/api/builds.go +++ b/client-go/controller/api/builds.go @@ -13,14 +13,6 @@ type Build struct { UUID string `json:"uuid"` } -// Builds is the structure of GET /v1/apps/<app id>/builds/. 
-type Builds struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Builds []Build `json:"results"` -} - // CreateBuildRequest is the structure of POST /v1/apps/<app id>/builds/. type CreateBuildRequest struct { Image string `json:"image"` diff --git a/client-go/controller/api/certs.go b/client-go/controller/api/certs.go index 5cea946e86..53dc84711d 100644 --- a/client-go/controller/api/certs.go +++ b/client-go/controller/api/certs.go @@ -12,14 +12,6 @@ type Cert struct { ID int `json:"id,omitempty"` } -// Certs is the definition of GET /v1/certs/. -type Certs struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Certs []Cert `json:"results"` -} - // CertCreateRequest is the definition of POST /v1/certs/. type CertCreateRequest struct { Certificate string `json:"certificate"` diff --git a/client-go/controller/api/domains.go b/client-go/controller/api/domains.go index bb13fad87a..542e1699f6 100644 --- a/client-go/controller/api/domains.go +++ b/client-go/controller/api/domains.go @@ -9,14 +9,6 @@ type Domain struct { Updated string `json:"updated"` } -// Domains is the structure of GET /v1/app/<app id>/domains/. -type Domains struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Domains []Domain `json:"results"` -} - // DomainCreateRequest is the structure of POST /v1/app/<app id>/domains/. type DomainCreateRequest struct { Domain string `json:"domain"` diff --git a/client-go/controller/api/keys.go b/client-go/controller/api/keys.go index ff89e03a2e..eb5b11c7d7 100644 --- a/client-go/controller/api/keys.go +++ b/client-go/controller/api/keys.go @@ -10,14 +10,6 @@ type Key struct { UUID string `json:"uuid"` } -// Keys is the definition of GET /v1/keys/. 
-type Keys struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Keys []Key `json:"results"` -} - // KeyCreateRequest is the definition of POST /v1/keys/. type KeyCreateRequest struct { ID string `json:"id"` diff --git a/client-go/controller/api/perms.go b/client-go/controller/api/perms.go index 707bf9ff06..04b71753a9 100644 --- a/client-go/controller/api/perms.go +++ b/client-go/controller/api/perms.go @@ -5,16 +5,6 @@ type PermsAppResponse struct { Users []string `json:"users"` } -// PermsAdminResponse is the definition of GET /v1/admin/perms/. -type PermsAdminResponse struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Users []struct { - Username string `json:"username"` - } `json:"results"` -} - // PermsRequest is the definition of a requst on /perms/. type PermsRequest struct { Username string `json:"username"` diff --git a/client-go/controller/api/ps.go b/client-go/controller/api/ps.go index 1127424a0d..56c7841aad 100644 --- a/client-go/controller/api/ps.go +++ b/client-go/controller/api/ps.go @@ -12,11 +12,3 @@ type Process struct { Num int `json:"num"` State string `json:"state"` } - -// Processes defines the structure of processes. -type Processes struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Processes []Process `json:"results"` -} diff --git a/client-go/controller/api/releases.go b/client-go/controller/api/releases.go index 0274982467..3917a6c3e3 100644 --- a/client-go/controller/api/releases.go +++ b/client-go/controller/api/releases.go @@ -13,14 +13,6 @@ type Release struct { Version int `json:"version"` } -// Releases is the definition of GET /v1/apps/<app id>/releases/. -type Releases struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Releases []Release `json:"results"` -} - // ReleaseRollback is the defenition of POST /v1/apps/<app id>/releases/. 
type ReleaseRollback struct { Version int `json:"version"` diff --git a/client-go/controller/api/users.go b/client-go/controller/api/users.go index 307127c855..e83a63787d 100644 --- a/client-go/controller/api/users.go +++ b/client-go/controller/api/users.go @@ -13,11 +13,3 @@ type User struct { IsActive bool `json:"is_active"` DateJoined string `json:"date_joined"` } - -// Users is the definition of GET /v1/users. -type Users struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Users []User `json:"results"` -} diff --git a/client-go/controller/client/client.go b/client-go/controller/client/client.go index ebb2683360..d13d63bfe3 100644 --- a/client-go/controller/client/client.go +++ b/client-go/controller/client/client.go @@ -26,13 +26,21 @@ type Client struct { // Username is the name of the user performing requests against the API. Username string + + // ResponseLimit is the number of results to return on requests that can be limited. + ResponseLimit int } +// DefaultResponseLimit is the default number of responses to return on requests that can +// be limited. +var DefaultResponseLimit = 100 + type settingsFile struct { Username string `json:"username"` SslVerify bool `json:"ssl_verify"` Controller string `json:"controller"` Token string `json:"token"` + Limit int `json:"response_limit"` } // New creates a new client from a settings file. 
@@ -62,15 +70,23 @@ func New() (*Client, error) { return nil, err } + if settings.Limit <= 0 { + settings.Limit = DefaultResponseLimit + } + return &Client{HTTPClient: CreateHTTPClient(settings.SslVerify), SSLVerify: settings.SslVerify, - ControllerURL: *u, Token: settings.Token, Username: settings.Username}, nil + ControllerURL: *u, Token: settings.Token, Username: settings.Username, + ResponseLimit: settings.Limit}, nil } // Save settings to a file func (c Client) Save() error { - settings := settingsFile{Username: c.Username, - SslVerify: c.SSLVerify, - Controller: c.ControllerURL.String(), Token: c.Token} + settings := settingsFile{Username: c.Username, SslVerify: c.SSLVerify, + Controller: c.ControllerURL.String(), Token: c.Token, Limit: c.ResponseLimit} + + if settings.Limit <= 0 { + settings.Limit = DefaultResponseLimit + } settingsContents, err := json.Marshal(settings) diff --git a/client-go/controller/client/client_test.go b/client-go/controller/client/client_test.go index 952082b06e..73259520ed 100644 --- a/client-go/controller/client/client_test.go +++ b/client-go/controller/client/client_test.go @@ -8,7 +8,7 @@ import ( "testing" ) -const sFile string = `{"username":"t","ssl_verify":false,"controller":"http://d.t","token":"a"}` +const sFile string = `{"username":"t","ssl_verify":false,"controller":"http://d.t","token":"a","response_limit": 50}` func createTempProfile(contents string) error { name, err := ioutil.TempDir("", "client") @@ -62,9 +62,15 @@ func TestLoadSave(t *testing.T) { t.Errorf("Expected %s, Got %s", expected, client.ControllerURL.String()) } + expectedI := 50 + if client.ResponseLimit != expectedI { + t.Errorf("Expected %d, Got %d", expectedI, client.ResponseLimit) + } + client.SSLVerify = true client.Token = "b" client.Username = "c" + client.ResponseLimit = 0 u, err := url.Parse("http://deis.test") @@ -99,6 +105,11 @@ func TestLoadSave(t *testing.T) { if client.ControllerURL.String() != expected { t.Errorf("Expected %s, Got %s", 
expected, client.ControllerURL.String()) } + + expectedI = 100 + if client.ResponseLimit != expectedI { + t.Errorf("Expected %d, Got %d", expectedI, client.ResponseLimit) + } } func TestDeleteSettings(t *testing.T) { diff --git a/client-go/controller/client/http.go b/client-go/controller/client/http.go index 03f0e43ab5..67433666d0 100644 --- a/client-go/controller/client/http.go +++ b/client-go/controller/client/http.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "reflect" + "strconv" "strings" "github.com/deis/deis/version" @@ -60,6 +61,28 @@ func (c Client) Request(method string, path string, body []byte) (*http.Response return res, nil } +// LimitedRequest allows limiting the number of responses in a request. +func (c Client) LimitedRequest(path string, results int) (string, int, error) { + body, err := c.BasicRequest("GET", path+"?page_size="+strconv.Itoa(results), nil) + + if err != nil { + return "", -1, err + } + + res := make(map[string]interface{}) + if err = json.Unmarshal([]byte(body), &res); err != nil { + return "", -1, err + } + + out, err := json.Marshal(res["results"].([]interface{})) + + if err != nil { + return "", -1, err + } + + return string(out), int(res["count"].(float64)), nil +} + // BasicRequest makes a simple http request on the controller. 
func (c Client) BasicRequest(method string, path string, body []byte) (string, error) { res, err := c.Request(method, path, body) diff --git a/client-go/controller/client/http_test.go b/client-go/controller/client/http_test.go index 4144e1ac4e..67381cdd5c 100644 --- a/client-go/controller/client/http_test.go +++ b/client-go/controller/client/http_test.go @@ -13,6 +13,22 @@ import ( type fakeHTTPServer struct{} +const limitedFixture string = ` +{ + "count": 4, + "next": "http://replaced.com/limited2/", + "previous": null, + "results": [ + { + "test": "foo" + }, + { + "test": "bar" + } + ] +} +` + func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { res.Header().Add("DEIS_API_VERSION", version.APIVersion) @@ -31,6 +47,11 @@ func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { return } + if req.URL.Path == "/limited/" && req.Method == "GET" && req.URL.RawQuery == "page_size=2" { + res.Write([]byte(limitedFixture)) + return + } + if req.URL.Path == "/basic/" && req.Method == "POST" { eT := "token abc" if req.Header.Get("Authorization") != eT { @@ -180,3 +201,38 @@ func TestCheckErrorsReturnsNil(t *testing.T) { } } } + +func TestLimitedRequest(t *testing.T) { + t.Parallel() + + handler := fakeHTTPServer{} + server := httptest.NewServer(handler) + defer server.Close() + + u, err := url.Parse(server.URL) + + if err != nil { + t.Fatal(err) + } + + httpClient := CreateHTTPClient(false) + + client := Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} + + expected := `[{"test":"foo"},{"test":"bar"}]` + expectedC := 4 + + actual, count, err := client.LimitedRequest("/limited/", 2) + + if err != nil { + t.Fatal(err) + } + + if count != expectedC { + t.Errorf("Expected %d, Got %d", expectedC, count) + } + + if actual != expected { + t.Errorf("Expected %s, Got %s", expected, actual) + } +} diff --git a/client-go/controller/models/apps/apps.go b/client-go/controller/models/apps/apps.go index 57bd2f915a..bc705a8f84 
100644 --- a/client-go/controller/models/apps/apps.go +++ b/client-go/controller/models/apps/apps.go @@ -11,19 +11,19 @@ import ( ) // List lists apps on a Deis controller. -func List(c *client.Client) ([]api.App, error) { - body, err := c.BasicRequest("GET", "/v1/apps/", nil) +func List(c *client.Client, results int) ([]api.App, int, error) { + body, count, err := c.LimitedRequest("/v1/apps/", results) if err != nil { - return []api.App{}, err + return []api.App{}, -1, err } - apps := api.Apps{} + var apps []api.App if err = json.Unmarshal([]byte(body), &apps); err != nil { - return []api.App{}, err + return []api.App{}, -1, err } - return apps.Apps, nil + return apps, count, nil } // New creates a new app. diff --git a/client-go/controller/models/apps/apps_test.go b/client-go/controller/models/apps/apps_test.go index 886ce44ddb..d4d3112877 100644 --- a/client-go/controller/models/apps/apps_test.go +++ b/client-go/controller/models/apps/apps_test.go @@ -291,7 +291,7 @@ func TestAppsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/builds/builds.go b/client-go/controller/models/builds/builds.go index 885251f941..b2f60e27fd 100644 --- a/client-go/controller/models/builds/builds.go +++ b/client-go/controller/models/builds/builds.go @@ -9,20 +9,20 @@ import ( ) // List lists an app's builds. 
-func List(c *client.Client, appID string) ([]api.Build, error) { +func List(c *client.Client, appID string, results int) ([]api.Build, int, error) { u := fmt.Sprintf("/v1/apps/%s/builds/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Build{}, err + return []api.Build{}, -1, err } - builds := api.Builds{} + var builds []api.Build if err = json.Unmarshal([]byte(body), &builds); err != nil { - return []api.Build{}, err + return []api.Build{}, -1, err } - return builds.Builds, nil + return builds, count, nil } // New creates a build for an app. diff --git a/client-go/controller/models/builds/builds_test.go b/client-go/controller/models/builds/builds_test.go index 49a38eb04a..a50f4ec33d 100644 --- a/client-go/controller/models/builds/builds_test.go +++ b/client-go/controller/models/builds/builds_test.go @@ -122,7 +122,7 @@ func TestBuildsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/certs/certs.go b/client-go/controller/models/certs/certs.go index dac5f6f1d2..e6ea69e434 100644 --- a/client-go/controller/models/certs/certs.go +++ b/client-go/controller/models/certs/certs.go @@ -9,19 +9,19 @@ import ( ) // List certs registered with the controller. 
-func List(c *client.Client) ([]api.Cert, error) { - body, err := c.BasicRequest("GET", "/v1/certs/", nil) +func List(c *client.Client, results int) ([]api.Cert, int, error) { + body, count, err := c.LimitedRequest("/v1/certs/", results) if err != nil { - return []api.Cert{}, err + return []api.Cert{}, -1, err } - res := api.Certs{} + var res []api.Cert if err = json.Unmarshal([]byte(body), &res); err != nil { - return []api.Cert{}, err + return []api.Cert{}, -1, err } - return res.Certs, nil + return res, count, nil } // New creates a new cert. diff --git a/client-go/controller/models/certs/certs_test.go b/client-go/controller/models/certs/certs_test.go index 909dde06f7..78e10ad6e8 100644 --- a/client-go/controller/models/certs/certs_test.go +++ b/client-go/controller/models/certs/certs_test.go @@ -104,7 +104,7 @@ func TestCertsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/domains/domains.go b/client-go/controller/models/domains/domains.go index aa98adce3b..793f8fe673 100644 --- a/client-go/controller/models/domains/domains.go +++ b/client-go/controller/models/domains/domains.go @@ -9,20 +9,20 @@ import ( ) // List domains registered with an app. 
-func List(c *client.Client, appID string) ([]api.Domain, error) { +func List(c *client.Client, appID string, results int) ([]api.Domain, int, error) { u := fmt.Sprintf("/v1/apps/%s/domains/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Domain{}, err + return []api.Domain{}, -1, err } - domains := api.Domains{} + var domains []api.Domain if err = json.Unmarshal([]byte(body), &domains); err != nil { - return []api.Domain{}, err + return []api.Domain{}, -1, err } - return domains.Domains, nil + return domains, count, nil } // New adds a domain to an app. diff --git a/client-go/controller/models/domains/domains_test.go b/client-go/controller/models/domains/domains_test.go index 83724c9d40..7c2a9a1676 100644 --- a/client-go/controller/models/domains/domains_test.go +++ b/client-go/controller/models/domains/domains_test.go @@ -110,7 +110,7 @@ func TestDomainsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/keys/keys.go b/client-go/controller/models/keys/keys.go index e164cf55d6..c82b6f39ed 100644 --- a/client-go/controller/models/keys/keys.go +++ b/client-go/controller/models/keys/keys.go @@ -9,19 +9,19 @@ import ( ) // List keys on a controller. 
-func List(c *client.Client) ([]api.Key, error) { - body, err := c.BasicRequest("GET", "/v1/keys/", nil) +func List(c *client.Client, results int) ([]api.Key, int, error) { + body, count, err := c.LimitedRequest("/v1/keys/", results) if err != nil { - return []api.Key{}, err + return []api.Key{}, -1, err } - keys := api.Keys{} + var keys []api.Key if err = json.Unmarshal([]byte(body), &keys); err != nil { - return []api.Key{}, err + return []api.Key{}, -1, err } - return keys.Keys, nil + return keys, count, nil } // New creates a new key. diff --git a/client-go/controller/models/keys/keys_test.go b/client-go/controller/models/keys/keys_test.go index 698f024118..e5f93777a3 100644 --- a/client-go/controller/models/keys/keys_test.go +++ b/client-go/controller/models/keys/keys_test.go @@ -113,7 +113,7 @@ func TestKeysList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/perms/perms.go b/client-go/controller/models/perms/perms.go index fe01ce974d..a5d497f2d5 100644 --- a/client-go/controller/models/perms/perms.go +++ b/client-go/controller/models/perms/perms.go @@ -10,13 +10,13 @@ import ( // List users that can access an app. func List(c *client.Client, appID string) ([]string, error) { - body, err := doList(c, fmt.Sprintf("/v1/apps/%s/perms/", appID)) + body, err := c.BasicRequest("GET", fmt.Sprintf("/v1/apps/%s/perms/", appID), nil) if err != nil { return []string{}, err } - users := api.PermsAppResponse{} + var users api.PermsAppResponse if err = json.Unmarshal([]byte(body), &users); err != nil { return []string{}, err } @@ -25,35 +25,25 @@ func List(c *client.Client, appID string) ([]string, error) { } // ListAdmins lists administrators. 
-func ListAdmins(c *client.Client) ([]string, error) { - body, err := doList(c, "/v1/admin/perms/") +func ListAdmins(c *client.Client, results int) ([]string, int, error) { + body, count, err := c.LimitedRequest("/v1/admin/perms/", results) if err != nil { - return []string{}, err + return []string{}, -1, err } - users := api.PermsAdminResponse{} + var users []api.PermsRequest if err = json.Unmarshal([]byte(body), &users); err != nil { - return []string{}, err + return []string{}, -1, err } usersList := []string{} - for _, user := range users.Users { + for _, user := range users { usersList = append(usersList, user.Username) } - return usersList, nil -} - -func doList(c *client.Client, u string) (string, error) { - body, err := c.BasicRequest("GET", u, nil) - - if err != nil { - return "", err - } - - return body, nil + return usersList, count, nil } // New adds a user to an app. diff --git a/client-go/controller/models/perms/perms_test.go b/client-go/controller/models/perms/perms_test.go index f02d3f4c75..a96cb8bc37 100644 --- a/client-go/controller/models/perms/perms_test.go +++ b/client-go/controller/models/perms/perms_test.go @@ -170,7 +170,7 @@ func TestListAdmins(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := ListAdmins(&client) + actual, _, err := ListAdmins(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/ps/ps.go b/client-go/controller/models/ps/ps.go index d5e732df18..4ba8a9f8d3 100644 --- a/client-go/controller/models/ps/ps.go +++ b/client-go/controller/models/ps/ps.go @@ -10,20 +10,20 @@ import ( ) // List an app's processes. 
-func List(c *client.Client, appID string) ([]api.Process, error) { +func List(c *client.Client, appID string, results int) ([]api.Process, int, error) { u := fmt.Sprintf("/v1/apps/%s/containers/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Process{}, err + return []api.Process{}, -1, err } - procs := api.Processes{} + var procs []api.Process if err = json.Unmarshal([]byte(body), &procs); err != nil { - return []api.Process{}, err + return []api.Process{}, -1, err } - return procs.Processes, nil + return procs, count, nil } // Scale an app's processes. diff --git a/client-go/controller/models/ps/ps_test.go b/client-go/controller/models/ps/ps_test.go index 3b376f326d..9a91f928ad 100644 --- a/client-go/controller/models/ps/ps_test.go +++ b/client-go/controller/models/ps/ps_test.go @@ -163,7 +163,7 @@ func TestProcessesList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/releases/releases.go b/client-go/controller/models/releases/releases.go index 968128ecaa..2d7ca6f105 100644 --- a/client-go/controller/models/releases/releases.go +++ b/client-go/controller/models/releases/releases.go @@ -9,21 +9,21 @@ import ( ) // List lists an app's releases. 
-func List(c *client.Client, appID string) ([]api.Release, error) { +func List(c *client.Client, appID string, results int) ([]api.Release, int, error) { u := fmt.Sprintf("/v1/apps/%s/releases/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Release{}, err + return []api.Release{}, -1, err } - releases := api.Releases{} + var releases []api.Release if err = json.Unmarshal([]byte(body), &releases); err != nil { - return []api.Release{}, err + return []api.Release{}, -1, err } - return releases.Releases, nil + return releases, count, nil } // Get a release of an app. diff --git a/client-go/controller/models/releases/releases_test.go b/client-go/controller/models/releases/releases_test.go index 3c8eb5fbfb..1f6030ebf8 100644 --- a/client-go/controller/models/releases/releases_test.go +++ b/client-go/controller/models/releases/releases_test.go @@ -151,7 +151,7 @@ func TestReleasesList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/users/users.go b/client-go/controller/models/users/users.go index 68edd9d4dd..8ca60e5508 100644 --- a/client-go/controller/models/users/users.go +++ b/client-go/controller/models/users/users.go @@ -8,17 +8,17 @@ import ( ) // List users registered with the controller. 
-func List(c *client.Client) ([]api.User, error) { - body, err := c.BasicRequest("GET", "/v1/users/", nil) +func List(c *client.Client, results int) ([]api.User, int, error) { + body, count, err := c.LimitedRequest("/v1/users/", results) if err != nil { - return []api.User{}, err + return []api.User{}, -1, err } - users := api.Users{} + var users []api.User if err = json.Unmarshal([]byte(body), &users); err != nil { - return []api.User{}, err + return []api.User{}, -1, err } - return users.Users, nil + return users, count, nil } diff --git a/client-go/controller/models/users/users_test.go b/client-go/controller/models/users/users_test.go index 7d34d5962f..ecca96465a 100644 --- a/client-go/controller/models/users/users_test.go +++ b/client-go/controller/models/users/users_test.go @@ -83,7 +83,7 @@ func TestUsersList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/parser/apps.go b/client-go/parser/apps.go index aed575cd99..f678e170cd 100644 --- a/client-go/parser/apps.go +++ b/client-go/parser/apps.go @@ -91,13 +91,25 @@ func appsList(argv []string) error { usage := ` Lists applications visible to the current user. 
-Usage: deis apps:list +Usage: deis apps:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.AppsList() + return cmd.AppsList(results) } func appInfo(argv []string) error { diff --git a/client-go/parser/builds.go b/client-go/parser/builds.go index d1737069f3..3516a7286a 100644 --- a/client-go/parser/builds.go +++ b/client-go/parser/builds.go @@ -44,6 +44,8 @@ Usage: deis builds:list [options] Options: -a --app=<app> the uniquely identifiable name for the application. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -52,7 +54,13 @@ Options: return err } - return cmd.BuildsList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.BuildsList(safeGetValue(args, "--app"), results) } func buildsCreate(argv []string) error { diff --git a/client-go/parser/certs.go b/client-go/parser/certs.go index 245ecdbdb5..f33f5d3dee 100644 --- a/client-go/parser/certs.go +++ b/client-go/parser/certs.go @@ -42,14 +42,26 @@ func certsList(argv []string) error { usage := ` Show certificate information for an SSL application. 
-Usage: deis certs:list +Usage: deis certs:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.CertsList() + return cmd.CertsList(results) } func certAdd(argv []string) error { diff --git a/client-go/parser/domains.go b/client-go/parser/domains.go index 3522989d9f..ace96edcf5 100644 --- a/client-go/parser/domains.go +++ b/client-go/parser/domains.go @@ -69,8 +69,10 @@ Lists domains bound to an application. Usage: deis domains:list [options] Options: - -a --app=<app> - the uniquely identifiable name for the application. + -a --app=<app> + the uniquely identifiable name for the application. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -79,7 +81,13 @@ Options: return err } - return cmd.DomainsList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.DomainsList(safeGetValue(args, "--app"), results) } func domainsRemove(argv []string) error { diff --git a/client-go/parser/keys.go b/client-go/parser/keys.go index 0b694947a8..0a73adad94 100644 --- a/client-go/parser/keys.go +++ b/client-go/parser/keys.go @@ -42,14 +42,26 @@ func keysList(argv []string) error { usage := ` Lists SSH keys for the logged in user. 
-Usage: deis keys:list +Usage: deis keys:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.KeysList() + return cmd.KeysList(results) } func keyAdd(argv []string) error { diff --git a/client-go/parser/perms.go b/client-go/parser/perms.go index b665648411..dc41fa3d66 100644 --- a/client-go/parser/perms.go +++ b/client-go/parser/perms.go @@ -43,7 +43,7 @@ func permsList(argv []string) error { Lists all users with permission to use an app, or lists all users with system administrator privileges. -Usage: deis perms:list [-a --app=<app>|--admin] +Usage: deis perms:list [-a --app=<app>|--admin|--admin --limit=<num>] Options: -a --app=<app> @@ -51,6 +51,8 @@ Options: for the application. --admin lists all users with system administrator privileges. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -61,7 +63,13 @@ Options: admin := args["--admin"].(bool) - return cmd.PermsList(safeGetValue(args, "--app"), admin) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.PermsList(safeGetValue(args, "--app"), admin, results) } func permCreate(argv []string) error { diff --git a/client-go/parser/ps.go b/client-go/parser/ps.go index 34d894955c..3cc5c8fca1 100644 --- a/client-go/parser/ps.go +++ b/client-go/parser/ps.go @@ -47,6 +47,8 @@ Usage: deis ps:list [options] Options: -a --app=<app> the uniquely identifiable name for the application. 
+ -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -55,7 +57,13 @@ Options: return err } - return cmd.PsList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.PsList(safeGetValue(args, "--app"), results) } func psRestart(argv []string) error { diff --git a/client-go/parser/releases.go b/client-go/parser/releases.go index 6d3a96cb17..dbe319e3b6 100644 --- a/client-go/parser/releases.go +++ b/client-go/parser/releases.go @@ -48,6 +48,8 @@ Usage: deis releases:list [options] Options: -a --app=<app> the uniquely identifiable name for the application. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -56,7 +58,13 @@ Options: return err } - return cmd.ReleasesList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.ReleasesList(safeGetValue(args, "--app"), results) } func releasesInfo(argv []string) error { diff --git a/client-go/parser/users.go b/client-go/parser/users.go index a6bc489a67..30c98d3253 100644 --- a/client-go/parser/users.go +++ b/client-go/parser/users.go @@ -37,12 +37,24 @@ func usersList(argv []string) error { Lists all registered users. Requires admin privilages. 
-Usage: deis users:list +Usage: deis users:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.UsersList() + return cmd.UsersList(results) } diff --git a/client-go/parser/utils.go b/client-go/parser/utils.go index 3ed100067b..7bcb2707b8 100644 --- a/client-go/parser/utils.go +++ b/client-go/parser/utils.go @@ -2,6 +2,7 @@ package parser import ( "fmt" + "strconv" ) // docopt expects commands to be in the proper format, but we split them apart for @@ -21,6 +22,14 @@ func safeGetValue(args map[string]interface{}, key string) string { return args[key].(string) } +func responseLimit(limit string) (int, error) { + if limit == "" { + return -1, nil + } + + return strconv.Atoi(limit) +} + // PrintUsage runs if no matching command is found. func PrintUsage() { fmt.Println("Found no matching command, try 'deis help'") diff --git a/controller/deis/settings.py b/controller/deis/settings.py index 3bfbf45d74..abbec4c92e 100644 --- a/controller/deis/settings.py +++ b/controller/deis/settings.py @@ -199,6 +199,7 @@ 'rest_framework.renderers.JSONRenderer', ), 'PAGINATE_BY': 100, + 'PAGINATE_BY_PARAM': 'page_size', 'TEST_REQUEST_DEFAULT_FORMAT': 'json', } diff --git a/docs/reference/api-v1.6.rst b/docs/reference/api-v1.6.rst index 2037f897eb..00dd0920a5 100644 --- a/docs/reference/api-v1.6.rst +++ b/docs/reference/api-v1.6.rst @@ -16,6 +16,8 @@ What's New **New!** administrators no longer have to supply a password when deleting another user. +**New!** ``?page_size`` query parameter for paginated requests to set the number of results per page. + Authentication --------------
paperless-ngx__paperless-ngx-4602
[BUG] Unable to delete notes in 2.00 beta rc1 ### Description Error delete notes in 2.00 beta rc1 ### Steps to reproduce Existing or newly created notes cannot be deleted Newly created note overwrites existing old note ### Webserver logs ```bash {"headers":{"normalizedNames":{},"lazyUpdate":null},"status":404,"statusText":"Not Found","url":"http://192.168.0.110:8777/api/documents/1812/notes/?id=421","ok":false,"name":"HttpErrorResponse","message":"Http failure response for http://192.168.0.110:8777/api/documents/1812/notes/?id=421: 404 Not Found","error":{"detail":"Nicht gefunden."}} ``` ### Browser logs _No response_ ### Paperless-ngx version 2.00 beta rc1 ### Host OS Synology ### Installation method Docker - official image ### Browser Firefox ### Configuration changes _No response_ ### Other _No response_
[ { "content": "import itertools\nimport json\nimport logging\nimport os\nimport re\nimport tempfile\nimport urllib\nimport zipfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom time import mktime\nfrom unicodedata import normalize\nfrom urllib.parse import quote\n\nimport pathvalidate\nfrom djan...
[ { "content": "import itertools\nimport json\nimport logging\nimport os\nimport re\nimport tempfile\nimport urllib\nimport zipfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom time import mktime\nfrom unicodedata import normalize\nfrom urllib.parse import quote\n\nimport pathvalidate\nfrom djan...
diff --git a/src/documents/views.py b/src/documents/views.py index 00d16022f4b..386f8740441 100644 --- a/src/documents/views.py +++ b/src/documents/views.py @@ -583,7 +583,7 @@ def notes(self, request, pk=None): from documents import index - index.add_or_update_document(self.get_object()) + index.add_or_update_document(doc) return Response(self.getNotes(doc))
mozilla__pontoon-3090
Document DDoS mitigation The `BLOCKED_IPS` env variable is not documented here: https://mozilla-pontoon.readthedocs.io/en/latest/admin/deployment.html. We should also add a paragraph here on DDoS mitigation: https://mozilla-pontoon.readthedocs.io/en/latest/admin/maintenance.html
[ { "content": "\"\"\"Django settings for Pontoon.\"\"\"\nimport re\nimport os\nimport socket\n\nfrom django.utils.functional import lazy\n\nimport dj_database_url\n\n\n_dirname = os.path.dirname\n\nROOT = _dirname(_dirname(_dirname(os.path.abspath(__file__))))\n\n\ndef path(*args):\n return os.path.join(ROOT,...
[ { "content": "\"\"\"Django settings for Pontoon.\"\"\"\nimport re\nimport os\nimport socket\n\nfrom django.utils.functional import lazy\n\nimport dj_database_url\n\n\n_dirname = os.path.dirname\n\nROOT = _dirname(_dirname(_dirname(os.path.abspath(__file__))))\n\n\ndef path(*args):\n return os.path.join(ROOT,...
diff --git a/docs/admin/deployment.rst b/docs/admin/deployment.rst index 0c1f56164a..02bd4e7430 100644 --- a/docs/admin/deployment.rst +++ b/docs/admin/deployment.rst @@ -72,6 +72,10 @@ you create: Set to 'gitlab' if you want to use 'GitLab' (corresponding GITLAB_* settings must be set if required). Set to 'google' if you want to use 'Google' (corresponding GOOGLE_* settings must be set). +``BLOCKED_IPS`` + A comma-separated list of IP addresses to be blocked from accessing the app, + for example because they are DDoS'ing the server. + ``CELERY_ALWAYS_EAGER`` Controls whether asynchronous tasks (mainly used during sync) are sent to Celery or executed immediately and synchronously. Set this to ``False`` on diff --git a/docs/admin/maintenance.rst b/docs/admin/maintenance.rst index 9790d4e8d2..8f0ff28d27 100644 --- a/docs/admin/maintenance.rst +++ b/docs/admin/maintenance.rst @@ -40,3 +40,16 @@ Finally, you need to simply access the worker: # Replace my-app-name with your Heroku app's name. celery --broker=`heroku config:get RABBITMQ_URL --app=my-app-name` worker + +Mitigating DDoS attacks +----------------------- +In a distributed denial-of-service attack (`DDoS`_ attack), the incoming traffic +flooding the victim originates from many different sources. This stops everyone +else from accessing the website as there is too much traffic flowing to it. + +One way to mitigate DDoS attacks is to identify the IP addresses of the +attackers and block them. Find the attacking IP addresses in the Log +Management Add-On (Papertrail) and add them to the BLOCKED_IPs config variable +in Heroku Settings. + +.. 
_DDoS: https://en.wikipedia.org/wiki/Denial-of-service_attack diff --git a/pontoon/settings/base.py b/pontoon/settings/base.py index 453be98510..1f45038f7a 100644 --- a/pontoon/settings/base.py +++ b/pontoon/settings/base.py @@ -267,6 +267,7 @@ def _default_from_email(): "django_ace", ) +# A list of IP addresses to be blocked from accessing the app, because they are DDoS'ing the server BLOCKED_IPS = os.environ.get("BLOCKED_IPS", "").split(",") MIDDLEWARE = (
open-telemetry__opentelemetry-python-1653
Consider renaming Resource.create_empty() to Resource.get_empty() Specially given the fact a cached instance is returned, i.e. no actual creation happens.
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by...
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ddc4e879b4..339d4b86741 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Rename `IdsGenerator` to `IdGenerator` ([#1651])(https://github.com/open-telemetry/opentelemetry-python/pull/1651) +- Rename Resource's `create_empty` to `get_empty` + ([#1653])(https://github.com/open-telemetry/opentelemetry-python/pull/1653) ## [0.18b0](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.18b0) - 2021-02-16 diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py index c840e5f298e..a7fa6dd54c3 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py @@ -188,7 +188,7 @@ def create(attributes: typing.Optional[Attributes] = None) -> "Resource": return resource @staticmethod - def create_empty() -> "Resource": + def get_empty() -> "Resource": return _EMPTY_RESOURCE @property diff --git a/opentelemetry-sdk/tests/resources/test_resources.py b/opentelemetry-sdk/tests/resources/test_resources.py index 3effa5d2452..4151c2fbd8d 100644 --- a/opentelemetry-sdk/tests/resources/test_resources.py +++ b/opentelemetry-sdk/tests/resources/test_resources.py @@ -59,7 +59,7 @@ def test_create(self): self.assertEqual(resource.attributes, expected_with_envar) os.environ[resources.OTEL_RESOURCE_ATTRIBUTES] = "" - resource = resources.Resource.create_empty() + resource = resources.Resource.get_empty() self.assertEqual(resource, resources._EMPTY_RESOURCE) resource = resources.Resource.create(None) @@ -140,9 +140,7 @@ def test_service_name_using_process_name(self): def test_aggregated_resources_no_detectors(self): aggregated_resources = resources.get_aggregated_resources([]) - self.assertEqual( - aggregated_resources, resources.Resource.create_empty() - ) + 
self.assertEqual(aggregated_resources, resources.Resource.get_empty()) def test_aggregated_resources_with_static_resource(self): static_resource = resources.Resource({"static_key": "static_value"}) @@ -208,7 +206,7 @@ def test_resource_detector_ignore_error(self): resource_detector.raise_on_error = False self.assertEqual( resources.get_aggregated_resources([resource_detector]), - resources.Resource.create_empty(), + resources.Resource.get_empty(), ) def test_resource_detector_raise_error(self): @@ -245,7 +243,7 @@ def tearDown(self) -> None: def test_empty(self): detector = resources.OTELResourceDetector() os.environ[resources.OTEL_RESOURCE_ATTRIBUTES] = "" - self.assertEqual(detector.detect(), resources.Resource.create_empty()) + self.assertEqual(detector.detect(), resources.Resource.get_empty()) def test_one(self): detector = resources.OTELResourceDetector()
googleapis__python-spanner-django-109
djangotests: ERROR: test_extra_method_select_argument_with_dashes (basic.tests.ModelTest) ```shell ====================================================================== ERROR: test_extra_method_select_argument_with_dashes (basic.tests.ModelTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 79, in next return six.next(self._wrapped) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/grpc/_channel.py", line 364, in __next__ return self._next() File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/grpc/_channel.py", line 358, in _next raise self grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with: status = StatusCode.INVALID_ARGUMENT details = "Syntax error: Unexpected \"-\" [at 1:21]\nSELECT (1) AS dashed-value, (2) AS undashedvalue, basic_article.id, basic_art...\n ^" debug_error_string = "{"created":"@1575261818.820579000","description":"Error received from peer ipv4:172.217.11.170:443","file":"src/core/lib/surface/call.cc","file_line":1046,"grpc_message":"Syntax error: Unexpected \"-\" [at 1:21]\nSELECT (1) AS dashed-value, (2) AS undashedvalue, basic_article.id, basic_art...\n ^","grpc_status":3}" > The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/emmanuelodeke/Desktop/spanner-orm/django_tests/django/tests/basic/tests.py", line 265, in test_extra_method_select_argument_with_dashes self.assertEqual(articles[0].undashedvalue, 2) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/query.py", line 308, in __getitem__ qs._fetch_all() File 
"/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/query.py", line 1242, in _fetch_all self._result_cache = list(self._iterable_class(self)) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/query.py", line 55, in __iter__ results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/sql/compiler.py", line 1133, in execute_sql return list(result) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/sql/compiler.py", line 1512, in cursor_iter for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/sql/compiler.py", line 1512, in <lambda> for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/utils.py", line 96, in inner return func(*args, **kwargs) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/spanner/dbapi/cursor.py", line 218, in fetchmany items.append(tuple(self.__next__())) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/spanner/dbapi/cursor.py", line 186, in __next__ return next(self.__itr) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/cloud/spanner_v1/streamed.py", line 143, in __iter__ self._consume_next() File 
"/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/cloud/spanner_v1/streamed.py", line 116, in _consume_next response = six.next(self._response_iterator) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/cloud/spanner_v1/snapshot.py", line 45, in _restart_on_unavailable for item in iterator: File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 81, in next six.raise_from(exceptions.from_grpc_error(exc), exc) File "<string>", line 3, in raise_from google.api_core.exceptions.InvalidArgument: 400 Syntax error: Unexpected \"-\" [at 1:21]\nSELECT (1) AS dashed-value, (2) AS undashedvalue, basic_article.id, basic_art...\n ^ ``` But really Cloud Spanner doesn't support dashed values as per ```SQL SELECT (1) AS dashed-value, blogpost.post_id FROM blogpost ```
[ { "content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's loo...
[ { "content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's loo...
diff --git a/spanner/django/operations.py b/spanner/django/operations.py index afd8f1b89c..72edac74d9 100644 --- a/spanner/django/operations.py +++ b/spanner/django/operations.py @@ -17,6 +17,8 @@ class DatabaseOperations(BaseDatabaseOperations): } def quote_name(self, name): + if '-' in name: + return '`' + name + '`' return name def bulk_insert_sql(self, fields, placeholder_rows):
tobymao__sqlglot-2800
ParseError when using LIKE/ILIKE on an element in an object in Snowflake I'm getting `ParseError: Invalid expression / Unexpected token` when using `LIKE` or `ILIKE` on an element within an object in Snowflake. Example: ``` import sqlglot sqlglot.parse(""" select parse_json('{"x": "hello"}'):x like 'hello' """, read="snowflake") sqlglot.parse(""" select data:x like 'hello' from some_table """, read="snowflake") ``` Both of these cause the parsing error, but both are valid Snowflake statements.
[ { "content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot._typing import E\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n binary_from_function,\n date_delta_sql,\n date_trun...
[ { "content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot._typing import E\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n binary_from_function,\n date_delta_sql,\n date_trun...
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index ad14e6ee74..454df94c9e 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -328,6 +328,9 @@ def _parse_colon_get_path( if not self._match(TokenType.COLON): break + if self._match_set(self.RANGE_PARSERS): + this = self.RANGE_PARSERS[self._prev.token_type](self, this) or this + return this diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 602bc63027..39963b2817 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -78,6 +78,14 @@ def test_snowflake(self): self.validate_identity( "SELECT a FROM test PIVOT(SUM(x) FOR y IN ('z', 'q')) AS x TABLESAMPLE (0.1)" ) + self.validate_identity( + """SELECT PARSE_JSON('{"x": "hello"}'):x LIKE 'hello'""", + """SELECT GET_PATH(PARSE_JSON('{"x": "hello"}'), 'x') LIKE 'hello'""", + ) + self.validate_identity( + """SELECT data:x LIKE 'hello' FROM some_table""", + """SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table""", + ) self.validate_identity( "SELECT SUM({ fn CONVERT(123, SQL_DOUBLE) })", "SELECT SUM(CAST(123 AS DOUBLE))",
urllib3__urllib3-2655
Problem with urllib3.HTTPResponse.geturl() type hint ### Subject Why the return type hint for `urllib3.response.BaseHTTPResponse.geturl()` is `Optional[Union[str, "Literal[False]"]]` but not `Optional[str]` ?
[ { "content": "import io\nimport json as _json\nimport logging\nimport re\nimport zlib\nfrom contextlib import contextmanager\nfrom http.client import HTTPMessage as _HttplibHTTPMessage\nfrom http.client import HTTPResponse as _HttplibHTTPResponse\nfrom socket import timeout as SocketTimeout\nfrom typing import ...
[ { "content": "import io\nimport json as _json\nimport logging\nimport re\nimport zlib\nfrom contextlib import contextmanager\nfrom http.client import HTTPMessage as _HttplibHTTPMessage\nfrom http.client import HTTPResponse as _HttplibHTTPResponse\nfrom socket import timeout as SocketTimeout\nfrom typing import ...
diff --git a/src/urllib3/response.py b/src/urllib3/response.py index 8690d2d7a8..448627c79e 100644 --- a/src/urllib3/response.py +++ b/src/urllib3/response.py @@ -411,7 +411,7 @@ def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]: def info(self) -> HTTPHeaderDict: return self.headers - def geturl(self) -> Optional[Union[str, "Literal[False]"]]: + def geturl(self) -> Optional[str]: return self.url
python-pillow__Pillow-4788
PSD Plugin does not register a MIME type The [`PSDImagePlugin`](https://github.com/python-pillow/Pillow/blob/master/src/PIL/PsdImagePlugin.py) does not register a MIME type as I'd expect it to. The correct MIME for PSD images, according to IANA, is ["image/vnd.adobe.photoshop"](https://www.iana.org/assignments/media-types/image/vnd.adobe.photoshop). Is there a reason this isn't registered? PSD Plugin does not register a MIME type The [`PSDImagePlugin`](https://github.com/python-pillow/Pillow/blob/master/src/PIL/PsdImagePlugin.py) does not register a MIME type as I'd expect it to. The correct MIME for PSD images, according to IANA, is ["image/vnd.adobe.photoshop"](https://www.iana.org/assignments/media-types/image/vnd.adobe.photoshop). Is there a reason this isn't registered?
[ { "content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secre...
[ { "content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secre...
diff --git a/Tests/test_file_psd.py b/Tests/test_file_psd.py index 011efc9773e..6b26fe44288 100644 --- a/Tests/test_file_psd.py +++ b/Tests/test_file_psd.py @@ -12,6 +12,7 @@ def test_sanity(): assert im.mode == "RGB" assert im.size == (128, 128) assert im.format == "PSD" + assert im.get_format_mimetype() == "image/vnd.adobe.photoshop" im2 = hopper() assert_image_similar(im, im2, 4.8) diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py index f019bb64eb1..80bc116fc14 100644 --- a/src/PIL/PsdImagePlugin.py +++ b/src/PIL/PsdImagePlugin.py @@ -307,3 +307,5 @@ def _maketile(file, mode, bbox, channels): Image.register_open(PsdImageFile.format, PsdImageFile, _accept) Image.register_extension(PsdImageFile.format, ".psd") + +Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop")
getredash__redash-2501
Non blocking widget refresh indicator When refreshing a dashboard widget the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long period of times. To solve this we can keep showing the old data until new one is available, while showing some indication that refresh is in progress. Is the following animation enough? ![](http://g.recordit.co/CyccMD6dFc.gif) After refreshing a dashboard, widgets become draggable even when not in edit mode
[ { "content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n...
[ { "content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n...
diff --git a/client/app/components/dashboards/add-widget-dialog.js b/client/app/components/dashboards/add-widget-dialog.js index 12796e938c..055055928a 100644 --- a/client/app/components/dashboards/add-widget-dialog.js +++ b/client/app/components/dashboards/add-widget-dialog.js @@ -94,11 +94,10 @@ const AddWidgetDialog = { widget.options.position.col = position.col; widget.options.position.row = position.row; - widget.$save() - .then((response) => { - // update dashboard layout - this.dashboard.version = response.version; - this.dashboard.widgets.push(new Widget(response.widget)); + widget + .save() + .then(() => { + this.dashboard.widgets.push(widget); this.close(); }) .catch(() => { diff --git a/client/app/components/dashboards/widget.html b/client/app/components/dashboards/widget.html index 8f84c98373..ccfdb36353 100644 --- a/client/app/components/dashboards/widget.html +++ b/client/app/components/dashboards/widget.html @@ -10,7 +10,7 @@ </div> <div class="dropdown pull-right widget-menu-regular" ng-if="!$ctrl.public" uib-dropdown> <div class="actions"> - <a data-toggle="dropdown" uib-dropdown-toggle><i class="zmdi zmdi-more"></i></a> + <a data-toggle="dropdown" uib-dropdown-toggle><i class="zmdi zmdi-more-vert"></i></a> </div> <ul class="dropdown-menu pull-right" uib-dropdown-menu style="z-index:1000000"> @@ -51,8 +51,10 @@ </div> <div class="body-row clearfix tile__bottom-control"> - <a class="small hidden-print" ng-click="$ctrl.reload(true)" ng-if="!$ctrl.public"> - <i class="zmdi zmdi-time-restore"></i> <span am-time-ago="$ctrl.widget.getQueryResult().getUpdatedAt()"></span> + <a class="small hidden-print" ng-click="$ctrl.refresh()" ng-if="!$ctrl.public"> + <i ng-class='{"zmdi-hc-spin": $ctrl.widget.loading}' class="zmdi zmdi-refresh"></i> + <span am-time-ago="$ctrl.widget.getQueryResult().getUpdatedAt()" ng-if="!$ctrl.widget.loading"></span> + <rd-timer timestamp="$ctrl.widget.refreshStartedAt" ng-if="$ctrl.widget.loading"></rd-timer> </a> <span 
class="small hidden-print" ng-if="$ctrl.public"> <i class="zmdi zmdi-time-restore"></i> <span am-time-ago="$ctrl.widget.getQueryResult().getUpdatedAt()"></span> @@ -61,7 +63,7 @@ <i class="zmdi zmdi-time-restore"></i> {{$ctrl.widget.getQueryResult().getUpdatedAt() | dateTime}} </span> - <button class="btn btn-sm btn-default pull-right hidden-print btn-transparent btn__refresh" ng-click="$ctrl.reload(true)" ng-if="!$ctrl.public"><i class="zmdi zmdi-refresh"></i></button> + <button class="btn btn-sm btn-default pull-right hidden-print btn-transparent btn__refresh" ng-click="$ctrl.refresh()" ng-if="!$ctrl.public"><i class="zmdi zmdi-refresh"></i></button> </div> </div> diff --git a/client/app/components/dashboards/widget.js b/client/app/components/dashboards/widget.js index 5b6fdde0a8..ddefe33f00 100644 --- a/client/app/components/dashboards/widget.js +++ b/client/app/components/dashboards/widget.js @@ -19,7 +19,7 @@ const EditTextBoxComponent = { if (this.widget.new_text !== this.widget.existing_text) { this.widget.text = this.widget.new_text; this.widget - .$save() + .save() .then(() => { this.close(); }) @@ -67,9 +67,7 @@ function DashboardWidgetCtrl($location, $uibModal, $window, Events, currentUser) Events.record('delete', 'widget', this.widget.id); - this.widget.$delete((response) => { - this.dashboard.widgets = this.dashboard.widgets.filter(w => w.id !== undefined && w.id !== this.widget.id); - this.dashboard.version = response.version; + this.widget.delete().then(() => { if (this.deleted) { this.deleted({}); } @@ -78,18 +76,21 @@ function DashboardWidgetCtrl($location, $uibModal, $window, Events, currentUser) Events.record('view', 'widget', this.widget.id); - this.reload = (force) => { + this.load = (refresh = false) => { const maxAge = $location.search().maxAge; - this.widget.load(force, maxAge); + this.widget.load(refresh, maxAge); + }; + + this.refresh = () => { + this.load(true); }; if (this.widget.visualization) { Events.record('view', 'query', 
this.widget.visualization.query.id, { dashboard: true }); Events.record('view', 'visualization', this.widget.visualization.id, { dashboard: true }); - this.reload(false); - this.type = 'visualization'; + this.load(); } else if (this.widget.restricted) { this.type = 'restricted'; } else { diff --git a/client/app/pages/dashboards/dashboard.html b/client/app/pages/dashboards/dashboard.html index 74e04a8e81..bd49593bc4 100644 --- a/client/app/pages/dashboards/dashboard.html +++ b/client/app/pages/dashboards/dashboard.html @@ -31,7 +31,7 @@ <h3> <div class="btn-group" uib-dropdown ng-if="!$ctrl.layoutEditing"> <button id="split-button" type="button" ng-class="{'btn-default btn-sm': $ctrl.refreshRate === null,'btn-primary btn-sm':$ctrl.refreshRate !== null}" - class="btn btn-sm" ng-click="$ctrl.loadDashboard(true)"> + class="btn btn-sm" ng-click="$ctrl.refreshDashboard()"> <i class="zmdi zmdi-refresh"></i> {{$ctrl.refreshRate === null ? 'Refresh' : $ctrl.refreshRate.name}} </button> <button type="button" class="btn" uib-dropdown-toggle @@ -92,7 +92,7 @@ <h3> ng-repeat="widget in $ctrl.dashboard.widgets track by widget.id" gridstack-item="widget.options.position" gridstack-item-id="{{ widget.id }}"> <div class="grid-stack-item-content"> - <dashboard-widget widget="widget" dashboard="$ctrl.dashboard" on-delete="$ctrl.removeWidget()"></dashboard-widget> + <dashboard-widget widget="widget" dashboard="$ctrl.dashboard" on-delete="$ctrl.removeWidget(widget.id)"></dashboard-widget> </div> </div> </div> diff --git a/client/app/pages/dashboards/dashboard.js b/client/app/pages/dashboards/dashboard.js index 45d3dcd9fe..b0ed9fdfc6 100644 --- a/client/app/pages/dashboards/dashboard.js +++ b/client/app/pages/dashboards/dashboard.js @@ -46,7 +46,7 @@ function DashboardCtrl( this.saveInProgress = true; const showMessages = true; return $q - .all(_.map(widgets, widget => widget.$save())) + .all(_.map(widgets, widget => widget.save())) .then(() => { if (showMessages) { 
toastr.success('Changes saved.'); @@ -83,7 +83,7 @@ function DashboardCtrl( this.refreshRate = rate; if (rate !== null) { if (load) { - this.loadDashboard(true); + this.refreshDashboard(); } this.autoRefresh(); } @@ -118,7 +118,7 @@ function DashboardCtrl( }; const collectFilters = (dashboard, forceRefresh) => { - const queryResultPromises = _.compact(this.dashboard.widgets.map(widget => widget.loadPromise(forceRefresh))); + const queryResultPromises = _.compact(this.dashboard.widgets.map(widget => widget.load(forceRefresh))); $q.all(queryResultPromises).then((queryResults) => { const filters = {}; @@ -206,9 +206,13 @@ function DashboardCtrl( this.loadDashboard(); + this.refreshDashboard = () => { + renderDashboard(this.dashboard, true); + }; + this.autoRefresh = () => { $timeout(() => { - this.loadDashboard(true); + this.refreshDashboard(); }, this.refreshRate.rate * 1000).then(() => this.autoRefresh()); }; @@ -319,12 +323,13 @@ function DashboardCtrl( // Save position of newly added widget (but not entire layout) const widget = _.last(this.dashboard.widgets); if (_.isObject(widget)) { - return widget.$save(); + return widget.save(); } }); }; - this.removeWidget = () => { + this.removeWidget = (widgetId) => { + this.dashboard.widgets = this.dashboard.widgets.filter(w => w.id !== undefined && w.id !== widgetId); this.extractGlobalParameters(); if (!this.layoutEditing) { // We need to wait a bit while `angular` updates widgets, and only then save new layout diff --git a/client/app/services/widget.js b/client/app/services/widget.js index 4efef4dfd0..a0dc6f8434 100644 --- a/client/app/services/widget.js +++ b/client/app/services/widget.js @@ -1,142 +1,166 @@ +import moment from 'moment'; import { truncate } from 'underscore.string'; -import { pick, flatten, extend, isObject } from 'underscore'; +import { each, pick, extend, isObject } from 'underscore'; + +function calculatePositionOptions(Visualization, dashboardGridOptions, widget) { + widget.width = 1; // Backward 
compatibility, user on back-end + + const visualizationOptions = { + autoHeight: false, + sizeX: Math.round(dashboardGridOptions.columns / 2), + sizeY: dashboardGridOptions.defaultSizeY, + minSizeX: dashboardGridOptions.minSizeX, + maxSizeX: dashboardGridOptions.maxSizeX, + minSizeY: dashboardGridOptions.minSizeY, + maxSizeY: dashboardGridOptions.maxSizeY, + }; -function Widget($resource, $http, Query, Visualization, dashboardGridOptions) { - function prepareForSave(data) { - return pick(data, 'options', 'text', 'id', 'width', 'dashboard_id', 'visualization_id'); - } + const visualization = widget.visualization ? Visualization.visualizations[widget.visualization.type] : null; + if (isObject(visualization)) { + const options = extend({}, visualization.defaultOptions); - const WidgetResource = $resource( - 'api/widgets/:id', - { id: '@id' }, - { - get: { method: 'GET' }, - save: { - method: 'POST', - transformRequest: flatten([prepareForSave, $http.defaults.transformRequest]), - }, - query: { method: 'GET', isArray: true }, - remove: { method: 'DELETE' }, - delete: { method: 'DELETE' }, - }, - ); - - WidgetResource.prototype.getQuery = function getQuery() { - if (!this.query && this.visualization) { - this.query = new Query(this.visualization.query); + if (Object.prototype.hasOwnProperty.call(options, 'autoHeight')) { + visualizationOptions.autoHeight = options.autoHeight; } - return this.query; - }; + // Width constraints + const minColumns = parseInt(options.minColumns, 10); + if (isFinite(minColumns) && minColumns >= 0) { + visualizationOptions.minSizeX = minColumns; + } + const maxColumns = parseInt(options.maxColumns, 10); + if (isFinite(maxColumns) && maxColumns >= 0) { + visualizationOptions.maxSizeX = Math.min(maxColumns, dashboardGridOptions.columns); + } - WidgetResource.prototype.getQueryResult = function getQueryResult(force, maxAge) { - return this.load(force, maxAge); - }; + // Height constraints + // `minRows` is preferred, but it should be kept for 
backward compatibility + const height = parseInt(options.height, 10); + if (isFinite(height)) { + visualizationOptions.minSizeY = Math.ceil(height / dashboardGridOptions.rowHeight); + } + const minRows = parseInt(options.minRows, 10); + if (isFinite(minRows)) { + visualizationOptions.minSizeY = minRows; + } + const maxRows = parseInt(options.maxRows, 10); + if (isFinite(maxRows) && maxRows >= 0) { + visualizationOptions.maxSizeY = maxRows; + } - WidgetResource.prototype.load = function load(force, maxAge) { - if (!this.visualization) { - return undefined; + // Default dimensions + const defaultWidth = parseInt(options.defaultColumns, 10); + if (isFinite(defaultWidth) && defaultWidth > 0) { + visualizationOptions.sizeX = defaultWidth; + } + const defaultHeight = parseInt(options.defaultRows, 10); + if (isFinite(defaultHeight) && defaultHeight > 0) { + visualizationOptions.sizeY = defaultHeight; } + } - if (force || this.queryResult === undefined) { - if (maxAge === undefined || force) { - maxAge = force ? 
0 : undefined; + return visualizationOptions; +} + +function WidgetFactory($http, Query, Visualization, dashboardGridOptions) { + class Widget { + constructor(data) { + // Copy properties + each(data, (v, k) => { + this[k] = v; + }); + + const visualizationOptions = calculatePositionOptions(Visualization, dashboardGridOptions, this); + + this.options = this.options || {}; + this.options.position = extend( + {}, + visualizationOptions, + pick(this.options.position, ['col', 'row', 'sizeX', 'sizeY', 'autoHeight']), + ); + + if (this.options.position.sizeY < 0) { + this.options.position.autoHeight = true; } - this.queryResult = this.getQuery().getQueryResult(maxAge); + + // Save original position (create a shallow copy) + this.$originalPosition = extend({}, this.options.position); } - return this.queryResult; - }; + getQuery() { + if (!this.query && this.visualization) { + this.query = new Query(this.visualization.query); + } - WidgetResource.prototype.loadPromise = function loadPromise(force, maxAge) { - return this.load(force, maxAge).toPromise(); - }; + return this.query; + } - WidgetResource.prototype.getName = function getName() { - if (this.visualization) { - return `${this.visualization.query.name} (${this.visualization.name})`; + getQueryResult() { + return this.data; } - return truncate(this.text, 20); - }; - function WidgetConstructor(widget) { - widget.width = 1; // Backward compatibility, user on back-end - - const visualizationOptions = { - autoHeight: false, - sizeX: Math.round(dashboardGridOptions.columns / 2), - sizeY: dashboardGridOptions.defaultSizeY, - minSizeX: dashboardGridOptions.minSizeX, - maxSizeX: dashboardGridOptions.maxSizeX, - minSizeY: dashboardGridOptions.minSizeY, - maxSizeY: dashboardGridOptions.maxSizeY, - }; - const visualization = widget.visualization ? 
Visualization.visualizations[widget.visualization.type] : null; - if (isObject(visualization)) { - const options = extend({}, visualization.defaultOptions); - - if (Object.prototype.hasOwnProperty.call(options, 'autoHeight')) { - visualizationOptions.autoHeight = options.autoHeight; + getName() { + if (this.visualization) { + return `${this.visualization.query.name} (${this.visualization.name})`; } + return truncate(this.text, 20); + } - // Width constraints - const minColumns = parseInt(options.minColumns, 10); - if (isFinite(minColumns) && minColumns >= 0) { - visualizationOptions.minSizeX = minColumns; - } - const maxColumns = parseInt(options.maxColumns, 10); - if (isFinite(maxColumns) && maxColumns >= 0) { - visualizationOptions.maxSizeX = Math.min(maxColumns, dashboardGridOptions.columns); - } + load(force, maxAge) { + this.loading = true; + this.refreshStartedAt = moment(); - // Height constraints - // `minRows` is preferred, but it should be kept for backward compatibility - const height = parseInt(options.height, 10); - if (isFinite(height)) { - visualizationOptions.minSizeY = Math.ceil(height / dashboardGridOptions.rowHeight); - } - const minRows = parseInt(options.minRows, 10); - if (isFinite(minRows)) { - visualizationOptions.minSizeY = minRows; - } - const maxRows = parseInt(options.maxRows, 10); - if (isFinite(maxRows) && maxRows >= 0) { - visualizationOptions.maxSizeY = maxRows; + if (!this.visualization) { + return undefined; } - // Default dimensions - const defaultWidth = parseInt(options.defaultColumns, 10); - if (isFinite(defaultWidth) && defaultWidth > 0) { - visualizationOptions.sizeX = defaultWidth; - } - const defaultHeight = parseInt(options.defaultRows, 10); - if (isFinite(defaultHeight) && defaultHeight > 0) { - visualizationOptions.sizeY = defaultHeight; + if (force || this.queryResult === undefined) { + if (maxAge === undefined || force) { + maxAge = force ? 
0 : undefined; + } + this.queryResult = this.getQuery().getQueryResult(maxAge); + + this.queryResult.toPromise().then( + (queryResult) => { + this.data = queryResult; + this.loading = false; + }, + () => { + this.loading = false; + this.data = null; + }, + ); } + + return this.queryResult.toPromise(); } - widget.options = widget.options || {}; - widget.options.position = extend( - {}, - visualizationOptions, - pick(widget.options.position, ['col', 'row', 'sizeX', 'sizeY', 'autoHeight']), - ); + save() { + const data = pick(this, 'options', 'text', 'id', 'width', 'dashboard_id', 'visualization_id'); - if (widget.options.position.sizeY < 0) { - widget.options.position.autoHeight = true; - } + let url = 'api/widgets'; + if (this.id) { + url = `${url}/${this.id}`; + } - const result = new WidgetResource(widget); + return $http.post(url, data).then((response) => { + each(response.data, (v, k) => { + this[k] = v; + }); - // Save original position (create a shallow copy) - result.$originalPosition = extend({}, result.options.position); + return this; + }); + } - return result; + delete() { + const url = `api/widgets/${this.id}`; + return $http.delete(url); + } } - return WidgetConstructor; + return Widget; } export default function init(ngModule) { - ngModule.factory('Widget', Widget); + ngModule.factory('Widget', WidgetFactory); } diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py index 939f2fbc0f..5376a8074b 100644 --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -44,7 +44,7 @@ def post(self): models.db.session.commit() models.db.session.commit() - return {'widget': widget.to_dict()} + return widget.to_dict() class WidgetResource(BaseResource): diff --git a/tests/handlers/test_widgets.py b/tests/handlers/test_widgets.py index f928c3d0c1..702ef6f828 100644 --- a/tests/handlers/test_widgets.py +++ b/tests/handlers/test_widgets.py @@ -54,7 +54,7 @@ def test_create_text_widget(self): rv = self.make_request('post', '/api/widgets', 
data=data) self.assertEquals(rv.status_code, 200) - self.assertEquals(rv.json['widget']['text'], 'Sample text.') + self.assertEquals(rv.json['text'], 'Sample text.') def test_delete_widget(self): widget = self.factory.create_widget() diff --git a/webpack.config.js b/webpack.config.js index c3a3ddf320..2af1d7376e 100644 --- a/webpack.config.js +++ b/webpack.config.js @@ -1,94 +1,87 @@ /* eslint-disable */ -const fs = require('fs'); -const webpack = require('webpack'); -const HtmlWebpackPlugin = require('html-webpack-plugin'); +const fs = require("fs"); +const webpack = require("webpack"); +const HtmlWebpackPlugin = require("html-webpack-plugin"); const ExtractTextPlugin = require("extract-text-webpack-plugin"); -const WebpackBuildNotifierPlugin = require('webpack-build-notifier'); -const ManifestPlugin = require('webpack-manifest-plugin'); -const CopyWebpackPlugin = require('copy-webpack-plugin'); -const LessPluginAutoPrefix = require('less-plugin-autoprefix'); -const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin; -const path = require('path'); +const WebpackBuildNotifierPlugin = require("webpack-build-notifier"); +const ManifestPlugin = require("webpack-manifest-plugin"); +const CopyWebpackPlugin = require("copy-webpack-plugin"); +const LessPluginAutoPrefix = require("less-plugin-autoprefix"); +const BundleAnalyzerPlugin = require("webpack-bundle-analyzer").BundleAnalyzerPlugin; +const path = require("path"); -const redashBackend = process.env.REDASH_BACKEND || 'http://localhost:5000'; +const redashBackend = process.env.REDASH_BACKEND || "http://localhost:5000"; -const basePath = fs.realpathSync(path.join(__dirname, 'client')); -const appPath = fs.realpathSync(path.join(__dirname, 'client', 'app')); +const basePath = fs.realpathSync(path.join(__dirname, "client")); +const appPath = fs.realpathSync(path.join(__dirname, "client", "app")); const config = { entry: { - app: [ - './client/app/index.js', - 
'./client/app/assets/less/main.less', - ], - server: [ - './client/app/assets/less/server.less', - ], + app: ["./client/app/index.js", "./client/app/assets/less/main.less"], + server: ["./client/app/assets/less/server.less"] }, output: { - path: path.join(basePath, './dist'), - filename: '[name].js', - publicPath: '/static/' + path: path.join(basePath, "./dist"), + filename: "[name].js", + publicPath: "/static/" }, resolve: { alias: { - '@': appPath, + "@": appPath, // Currently `lodash` is used only by `gridstack.js`, but it can work // with `underscore` as well, so set an alias to avoid bundling both `lodash` and // `underscore`. When adding new libraries, check if they can work // with `underscore`, otherwise remove this line - 'lodash': 'underscore', + lodash: "underscore" } }, plugins: [ - new WebpackBuildNotifierPlugin({title: 'Redash'}), + new WebpackBuildNotifierPlugin({ title: "Redash" }), new webpack.DefinePlugin({ - ON_TEST: process.env.NODE_ENV === 'test' + ON_TEST: process.env.NODE_ENV === "test" }), // Enforce angular to use jQuery instead of jqLite - new webpack.ProvidePlugin({'window.jQuery': 'jquery'}), + new webpack.ProvidePlugin({ "window.jQuery": "jquery" }), // bundle only default `moment` locale (`en`) new webpack.ContextReplacementPlugin(/moment[\/\\]locale$/, /en/), new webpack.optimize.CommonsChunkPlugin({ - name: 'vendor', - minChunks: function (module, count) { + name: "vendor", + minChunks: function(module, count) { // any required modules inside node_modules are extracted to vendor return ( module.resource && /\.js$/.test(module.resource) && - module.resource.indexOf( - path.join(__dirname, './node_modules') - ) === 0 - ) + module.resource.indexOf(path.join(__dirname, "./node_modules")) === 0 + ); } }), // extract webpack runtime and module manifest to its own file in order to // prevent vendor hash from being updated whenever app bundle is updated new webpack.optimize.CommonsChunkPlugin({ - name: 'manifest', - chunks: ['vendor'] + 
name: "manifest", + chunks: ["vendor"] }), new HtmlWebpackPlugin({ - template: './client/app/index.html', - filename: 'index.html', - excludeChunks: ['server'], + template: "./client/app/index.html", + filename: "index.html", + excludeChunks: ["server"] }), new HtmlWebpackPlugin({ - template: './client/app/multi_org.html', - filename: 'multi_org.html', - excludeChunks: ['server'], + template: "./client/app/multi_org.html", + filename: "multi_org.html", + excludeChunks: ["server"] }), new ExtractTextPlugin({ - filename: '[name].[chunkhash].css', + filename: "[name].[chunkhash].css" }), new ManifestPlugin({ - fileName: 'asset-manifest.json' + fileName: "asset-manifest.json" }), new CopyWebpackPlugin([ - { from: 'client/app/assets/robots.txt' }, - { from: 'client/app/assets/css/login.css', to: 'styles/login.css' }, - { from: 'node_modules/jquery/dist/jquery.min.js', to: 'js/jquery.min.js' }, + { from: "client/app/assets/robots.txt" }, + { from: "client/app/assets/css/login.css", to: "styles/login.css" }, + { from: "node_modules/jquery/dist/jquery.min.js", to: "js/jquery.min.js" } ]) ], @@ -97,113 +90,122 @@ const config = { { test: /\.js$/, exclude: /node_modules/, - use: ['babel-loader', 'eslint-loader'] + use: ["babel-loader", "eslint-loader"] }, { test: /\.html$/, exclude: [/node_modules/, /index\.html/], - use: [{ - loader: 'raw-loader' - }] + use: [ + { + loader: "raw-loader" + } + ] }, { test: /\.css$/, - use: ExtractTextPlugin.extract([{ - loader: 'css-loader', - options: { - minimize: process.env.NODE_ENV === 'production' + use: ExtractTextPlugin.extract([ + { + loader: "css-loader", + options: { + minimize: process.env.NODE_ENV === "production" + } } - }]) + ]) }, { test: /\.less$/, use: ExtractTextPlugin.extract([ { - loader: 'css-loader', + loader: "css-loader", options: { - minimize: process.env.NODE_ENV === 'production' + minimize: process.env.NODE_ENV === "production" } - }, { - loader: 'less-loader', + }, + { + loader: "less-loader", options: { - 
plugins: [ - new LessPluginAutoPrefix({browsers: ['last 3 versions']}) - ] + plugins: [new LessPluginAutoPrefix({ browsers: ["last 3 versions"] })] } } ]) }, { test: /\.(png|jpe?g|gif|svg)(\?.*)?$/, - use: [{ - loader: 'file-loader', - options: { - context: path.resolve(appPath, './assets/images/'), - outputPath: 'images/', - name: '[path][name].[ext]', + use: [ + { + loader: "file-loader", + options: { + context: path.resolve(appPath, "./assets/images/"), + outputPath: "images/", + name: "[path][name].[ext]" + } } - }] + ] }, { test: /\.geo\.json$/, - use: [{ - loader: 'file-loader', - options: { - outputPath: 'data/', - name: '[hash:7].[name].[ext]', + use: [ + { + loader: "file-loader", + options: { + outputPath: "data/", + name: "[hash:7].[name].[ext]" + } } - }] + ] }, { test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/, - use: [{ - loader: 'url-loader', - options: { - limit: 10000, - name: 'fonts/[name].[hash:7].[ext]' + use: [ + { + loader: "url-loader", + options: { + limit: 10000, + name: "fonts/[name].[hash:7].[ext]" + } } - }] + ] } ] }, - devtool: 'cheap-eval-module-source-map', + devtool: "cheap-eval-module-source-map", stats: { modules: false, - chunkModules: false, + chunkModules: false }, watchOptions: { - ignored: /\.sw.$/, + ignored: /\.sw.$/ }, devServer: { inline: true, - index: '/static/index.html', + index: "/static/index.html", historyApiFallback: { - index: '/static/index.html', - rewrites: [{from: /./, to: '/static/index.html'}], + index: "/static/index.html", + rewrites: [{ from: /./, to: "/static/index.html" }] }, contentBase: false, - publicPath: '/static/', + publicPath: "/static/", proxy: [ { - context: ['/login', '/logout', '/invite', '/setup', '/status.json', '/api', '/oauth'], - target: redashBackend + '/', - changeOrigin: true, - secure: false, + context: ["/login", "/logout", "/invite", "/setup", "/status.json", "/api", "/oauth"], + target: redashBackend + "/", + changeOrigin: false, + secure: false }, { - context: (path) => { + context: 
path => { // CSS/JS for server-rendered pages should be served from backend return /^\/static\/[a-z]+\.[0-9a-fA-F]+\.(css|js)$/.test(path); }, - target: redashBackend + '/', + target: redashBackend + "/", changeOrigin: true, - secure: false, + secure: false } ], stats: { modules: false, - chunkModules: false, - }, + chunkModules: false + } } }; @@ -211,15 +213,17 @@ if (process.env.DEV_SERVER_HOST) { config.devServer.host = process.env.DEV_SERVER_HOST; } -if (process.env.NODE_ENV === 'production') { - config.output.filename = '[name].[chunkhash].js'; - config.plugins.push(new webpack.optimize.UglifyJsPlugin({ - sourceMap: true, - compress: { - warnings: true - } - })); - config.devtool = 'source-map'; +if (process.env.NODE_ENV === "production") { + config.output.filename = "[name].[chunkhash].js"; + config.plugins.push( + new webpack.optimize.UglifyJsPlugin({ + sourceMap: true, + compress: { + warnings: true + } + }) + ); + config.devtool = "source-map"; } if (process.env.BUNDLE_ANALYZER) {
pantsbuild__pants-20802
jvm_exclude with Group Only Fails Parsing by Coursier **Describe the bug** Running `pants generate-lockfiles` when a `jvm_artifact` contains a `jvm_exclude` that only specifies a group will fail with a "Failed to parse [group-name]" message from Coursier. This is contrary to the documentation for `jvm_exclude` which states "`jvm_exclude`: Exclude the given `artifact` and `group`, or all artifacts from the given `group`." **Pants version** 2.20.0rc2 **OS** MacOS **Additional info** Example Repo https://github.com/NGustafson/pants-examples/blob/main/3rdparty/jvm/BUILD This repo has a single jvm_artifact with nothing else configured. Attempting to run `pants generate-lockfiles` will cause this error: ``` pants generate-lockfiles [ERROR] 1 Exception encountered: Engine traceback: in `generate-lockfiles` goal ProcessExecutionFailure: Process 'Running `coursier fetch` against 1 requirement: org.slf4j:slf4j-log4j12:2.0.12' failed with exit code 1. stdout: stderr: + coursier_exe=__coursier/./cs-aarch64-apple-darwin + shift + json_output_file=coursier_report.json + shift ++ pwd + working_dir=/private/var/folders/cm/gmrdwxcn7tv_cct4dzg38w91kjyl1q/T/pants-sandbox-aM4FVB + __coursier/./cs-aarch64-apple-darwin fetch -r=https://maven-central.storage-download.googleapis.com/maven2 -r=https://repo1.maven.org/maven2 --no-default --json-output-file=coursier_report.json org.slf4j:slf4j-log4j12:2.0.12 --local-exclude-file PANTS_RESOLVE_EXCLUDES Failed to parse org.slf4j Failed to parse org.slf4j ```
[ { "content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport re\nimport xml.etree.ElementTree as ET\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom datacla...
[ { "content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport re\nimport xml.etree.ElementTree as ET\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom datacla...
diff --git a/docs/notes/2.22.x.md b/docs/notes/2.22.x.md index 86beb82ff1b..018defbb47b 100644 --- a/docs/notes/2.22.x.md +++ b/docs/notes/2.22.x.md @@ -36,6 +36,8 @@ docs [here](https://www.pantsbuild.org/2.22/docs/sql). for [`jvm_artifacts`](https://www.pantsbuild.org/2.22/reference/targets/jvm_artifacts) targets generator from `pom.xml`. +Exclusions for `jvm_artifact` and `scala_artifact` now correctly handle a `jvm_exclude` with only the group defined. + ##### Scala Setting the `orphan_files_behaviour = "ignore"` option for [`pants.backend.experimental.scala.lint.scalafix`](https://www.pantsbuild.org/2.22/reference/subsystems/scalafix#orphan_files_behavior) or [`pants.backend.experimental.scala.lint.scalafmt`](https://www.pantsbuild.org/2.22/reference/subsystems/scalafmt#orphan_files_behavior) backend is now properly silent. It previously showed spurious warnings. diff --git a/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py b/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py index 8167db55952..7e8efb87db5 100644 --- a/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py +++ b/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py @@ -18,7 +18,11 @@ from pants.jvm.resolve.coordinate import Coordinate, Coordinates from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules -from pants.jvm.target_types import JvmArtifactJarSourceField, JvmArtifactTarget +from pants.jvm.target_types import ( + JvmArtifactExclusion, + JvmArtifactJarSourceField, + JvmArtifactTarget, +) from pants.jvm.testutil import maybe_skip_jdk_test from pants.jvm.util_rules import ExtractFileDigest from pants.jvm.util_rules import rules as util_rules @@ -662,6 +666,25 @@ def test_transitive_excludes(rule_runner: RuleRunner) -> None: assert not any(i for i in entries if i.coord.artifact == "jackson-core") +@maybe_skip_jdk_test +def 
test_transitive_group_only_excludes(rule_runner: RuleRunner) -> None: + group_only_excludes = JvmArtifactExclusion(group="com.fasterxml.jackson.core", artifact=None) + + requirement = ArtifactRequirement( + coordinate=Coordinate( + group="com.fasterxml.jackson.module", + artifact="jackson-module-jaxb-annotations", + version="2.17.1", + ), + excludes=frozenset([group_only_excludes.to_coord_str()]), + ) + + resolve = rule_runner.request(CoursierResolvedLockfile, [ArtifactRequirements([requirement])]) + + entries = resolve.entries + assert not any(i for i in entries if i.coord.group == "com.fasterxml.jackson.core") + + @maybe_skip_jdk_test def test_missing_entry_for_transitive_dependency(rule_runner: RuleRunner) -> None: requirement = ArtifactRequirement( diff --git a/src/python/pants/jvm/target_types.py b/src/python/pants/jvm/target_types.py index ea7e3e6a5fc..c696eacbf65 100644 --- a/src/python/pants/jvm/target_types.py +++ b/src/python/pants/jvm/target_types.py @@ -309,6 +309,8 @@ def to_coord_str(self) -> str: result = self.group if self.artifact: result += f":{self.artifact}" + else: + result += ":*" return result
iterative__dvc-10208
dvc push: Unexpected error when pushing to Google Cloud storage or S3 # Bug Report dvc push: "Unexpected error" when pushing to Google Cloud storage or S3 ### Reproduce ``` dvc init dvc remote add -d s3 s3://bucket # or gcs gs://bucket dvc import-url https://data.dvc.org/get-started/data.xml dvc push -v ``` output (s3): ``` 2023-12-27 19:56:42,605 DEBUG: v3.36.1 (pip), CPython 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 2023-12-27 19:56:42,605 DEBUG: command: /path/bin/dvc push -v Collecting |0.00 [00:00, ?entry/s] Pushing |0.00 [00:00, ?file/s] Collecting my.bucket/key on s3 |3.00 [00:00, 4.84entry/s] 2023-12-27 19:56:43,676 ERROR: unexpected error Traceback (most recent call last): File "/path/lib/python3.9/site-packages/dvc/cli/__init__.py", line 211, in main ret = cmd.do_run() File "/path/lib/python3.9/site-packages/dvc/cli/command.py", line 27, in do_run return self.run() File "/path/lib/python3.9/site-packages/dvc/commands/data_sync.py", line 64, in run processed_files_count = self.repo.push( File "/path/lib/python3.9/site-packages/dvc/repo/__init__.py", line 65, in wrapper return f(repo, *args, **kwargs) File "/path/lib/python3.9/site-packages/dvc/repo/push.py", line 144, in push push_transferred, push_failed = ipush( File "/path/lib/python3.9/site-packages/dvc_data/index/push.py", line 101, in push old = build(data.path, data.fs) File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 90, in build for entry in build_entries(path, fs, ignore=ignore): File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 55, in build_entries walk_iter = fs.walk(path, detail=detail) File "/path/lib/python3.9/site-packages/dvc_http/__init__.py", line 162, in walk raise NotImplementedError NotImplementedError 2023-12-27 19:56:43,752 DEBUG: link type reflink is not available ([Errno 95] no more link types left to try out) 2023-12-27 19:56:43,755 DEBUG: Removing '/path/.MHVNkr3eAijD7Q5aau3NRK.tmp' 2023-12-27 19:56:43,755 
DEBUG: Removing '/path/.MHVNkr3eAijD7Q5aau3NRK.tmp' 2023-12-27 19:56:43,757 DEBUG: Removing '/path/.MHVNkr3eAijD7Q5aau3NRK.tmp' 2023-12-27 19:56:43,757 DEBUG: Removing '/path/bkw-9036/.dvc/cache/files/md5/.mnnSioPUuXvRUCqUV2ug87.tmp' 2023-12-27 19:56:43,777 DEBUG: Version info for developers: DVC version: 3.36.1 (pip) ------------------------- Platform: Python 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 Subprojects: dvc_data = 3.3.0 dvc_objects = 3.0.0 dvc_render = 1.0.0 dvc_task = 0.3.0 scmrepo = 2.0.2 Supports: gs (gcsfs = 2023.12.2.post1), http (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), https (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), s3 (s3fs = 2023.12.2, boto3 = 1.33.13) Config: Global: /home/jdt/.config/dvc System: /etc/xdg/dvc Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme1n1p1 Caches: local Remotes: s3 Workspace directory: ext4 on /dev/nvme1n1p1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/9d9135fb99d9d827364c4dc5a42cdc60 Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! 2023-12-27 19:56:43,781 DEBUG: Analytics is enabled. 
2023-12-27 19:56:43,860 DEBUG: Trying to spawn ['daemon', 'analytics', '/tmp/tmpccxiwrmd', '-v'] 2023-12-27 19:56:43,871 DEBUG: Spawned ['daemon', 'analytics', '/tmp/tmpccxiwrmd', '-v'] with pid 22406 ``` output (gcs): ``` 2023-12-27 19:47:22,768 DEBUG: v3.36.1 (pip), CPython 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 2023-12-27 19:47:22,769 DEBUG: command: /path/bin/dvc push -v Collecting |0.00 [00:00, ?entry/s] Pushing |0.00 [00:00, ?file/s] Collecting bucket/path on gs |3.00 [00:01, 2.84entry/s] 2023-12-27 19:47:24,328 ERROR: unexpected error Traceback (most recent call last): File "/path/lib/python3.9/site-packages/dvc/cli/__init__.py", line 211, in main ret = cmd.do_run() File "/path/lib/python3.9/site-packages/dvc/cli/command.py", line 27, in do_run return self.run() File "/path/lib/python3.9/site-packages/dvc/commands/data_sync.py", line 64, in run processed_files_count = self.repo.push( File "/path/lib/python3.9/site-packages/dvc/repo/__init__.py", line 65, in wrapper return f(repo, *args, **kwargs) File "/path/lib/python3.9/site-packages/dvc/repo/push.py", line 144, in push push_transferred, push_failed = ipush( File "/path/lib/python3.9/site-packages/dvc_data/index/push.py", line 101, in push old = build(data.path, data.fs) File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 90, in build for entry in build_entries(path, fs, ignore=ignore): File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 55, in build_entries walk_iter = fs.walk(path, detail=detail) File "/path/lib/python3.9/site-packages/dvc_http/__init__.py", line 162, in walk raise NotImplementedError NotImplementedError 2023-12-27 19:47:24,370 DEBUG: link type reflink is not available ([Errno 95] no more link types left to try out) 2023-12-27 19:47:24,371 DEBUG: Removing '/path/.fJ4uXqQznknWmbrzzUTXLQ.tmp' 2023-12-27 19:47:24,371 DEBUG: Removing '/path/.fJ4uXqQznknWmbrzzUTXLQ.tmp' 2023-12-27 19:47:24,371 DEBUG: Removing 
'/path/.fJ4uXqQznknWmbrzzUTXLQ.tmp' 2023-12-27 19:47:24,371 DEBUG: Removing '/path/bkw-9036/.dvc/cache/files/md5/.M6iwnJkjQgKzg54kN6chVi.tmp' 2023-12-27 19:47:24,377 DEBUG: Version info for developers: DVC version: 3.36.1 (pip) ------------------------- Platform: Python 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 Subprojects: dvc_data = 3.3.0 dvc_objects = 3.0.0 dvc_render = 1.0.0 dvc_task = 0.3.0 scmrepo = 2.0.2 Supports: gs (gcsfs = 2023.12.2.post1), http (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), https (aiohttp = 3.9.1, aiohttp-retry = 2.8.3) Config: Global: /home/jdt/.config/dvc System: /etc/xdg/dvc Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme1n1p1 Caches: local Remotes: gs Workspace directory: ext4 on /dev/nvme1n1p1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/9d9135fb99d9d827364c4dc5a42cdc60 Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! 2023-12-27 19:47:24,379 DEBUG: Analytics is enabled. 2023-12-27 19:47:24,445 DEBUG: Trying to spawn ['daemon', 'analytics', '/tmp/tmpk_30nnlt', '-v'] 2023-12-27 19:47:24,455 DEBUG: Spawned ['daemon', 'analytics', '/tmp/tmpk_30nnlt', '-v'] with pid 15755 ``` ### Expected Successful push ### Environment information <!-- This is required to ensure that we can reproduce the bug. 
--> ``` DVC version: 3.36.1 (pip) ------------------------- Platform: Python 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 Subprojects: dvc_data = 3.3.0 dvc_objects = 3.0.0 dvc_render = 1.0.0 dvc_task = 0.3.0 scmrepo = 2.0.2 Supports: gs (gcsfs = 2023.12.2.post1), http (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), https (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), s3 (s3fs = 2023.12.2, boto3 = 1.33.13) Config: Global: /home/jdt/.config/dvc System: /etc/xdg/dvc Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme1n1p1 Caches: local Remotes: s3 Workspace directory: ext4 on /dev/nvme1n1p1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/c9c73dbc105eb09a15137f49a60e6a5b ``` **Additional Information (if any):**
[ { "content": "import logging\nimport time\nfrom collections import defaultdict\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n NamedTuple,\n Optional,\n Set,\n Tuple,\...
[ { "content": "import logging\nimport time\nfrom collections import defaultdict\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n NamedTuple,\n Optional,\n Set,\n Tuple,\...
diff --git a/dvc/repo/index.py b/dvc/repo/index.py index b3dfb47ee5..bac2bfa36b 100644 --- a/dvc/repo/index.py +++ b/dvc/repo/index.py @@ -221,7 +221,7 @@ def _load_storage_from_out(storage_map, key, out): ), ) ) - storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path)) + storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path, read_only=True)) class Index: diff --git a/pyproject.toml b/pyproject.toml index 54c1b4614e..95e30fe919 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ dependencies = [ "configobj>=5.0.6", "distro>=1.3", "dpath<3,>=2.1.0", - "dvc-data>=3.4,<3.5", + "dvc-data>=3.5,<3.6", "dvc-http>=2.29.0", "dvc-render>=1.0.0,<2", "dvc-studio-client>=0.17.1,<1", diff --git a/tests/func/test_repo_index.py b/tests/func/test_repo_index.py index 9a0b06ea4a..42a5123a27 100644 --- a/tests/func/test_repo_index.py +++ b/tests/func/test_repo_index.py @@ -1,3 +1,4 @@ +import os from itertools import chain import pytest @@ -338,3 +339,53 @@ def test_param_keys_top_level_params(tmp_dir, dvc): tmp_dir.gen("dvc.yaml", top_level_params) index = Index.from_repo(dvc) assert index.param_keys == {"repo": {("classifier", "custom_params_file.yaml")}} + + +def test_data_index(tmp_dir, dvc, local_cloud, erepo_dir): + tmp_dir.dvc_gen( + { + "foo": b"foo", + "dir": {"bar": b"bar", "subdir": {"baz": b"baz"}}, + } + ) + + with erepo_dir.chdir(): + erepo_dir.dvc_gen("efoo", b"efoo", commit="create efoo") + erepo_dir.dvc_gen( + "edir", + {"ebar": b"ebar", "esubdir": {"ebaz": b"ebaz"}}, + commit="create edir", + ) + + dvc.imp(os.fspath(erepo_dir), "efoo") + dvc.imp(os.fspath(erepo_dir), "edir") + + local_cloud.gen("ifoo", b"ifoo") + local_cloud.gen("idir", {"ibar": b"ibar", "isubdir": {"ibaz": b"ibaz"}}) + + dvc.imp_url(str(local_cloud / "ifoo")) + dvc.imp_url(str(local_cloud / "idir")) + + index = Index.from_repo(dvc) + assert index.data_keys == { + "local": set(), + "repo": {("dir",), ("edir",), ("efoo",), ("foo",), ("idir",), ("ifoo",)}, + } + + data = 
index.data["repo"] + assert set(data.keys()) == { + ("dir",), + ("edir",), + ("efoo",), + ("foo",), + ("idir",), + ("ifoo",), + } + + assert not data.storage_map[("foo",)].remote + assert not data.storage_map[("dir",)].remote + + assert data.storage_map[("efoo",)].remote.read_only + assert data.storage_map[("edir",)].remote.read_only + assert data.storage_map[("ifoo",)].remote.read_only + assert data.storage_map[("idir",)].remote.read_only
getmoto__moto-1840
Cryptography Package has a Security Vulnerability Discovered using pipenv's security check feature that there's a vulnerability in the cryptography package versions<2.3. > Checking installed package safety... 36351: cryptography >=1.9.0,<2.3 resolved (2.2.2 installed)! python-cryptography versions >=1.9.0 and <2.3 did not enforce a minimum tag length for finalize_with_tag API. If a user did not validate the input length prior to passing it to finalize_with_tag an attacker could craft an invalid payload with a shortened tag (e.g. 1 byte) such that they would have a 1 in 256 chance of passing the MAC check. GCM tag forgeries can cause key leakage. More details here: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10903 setup.py should be updated to require cryptography>=2.3.0.
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cryptography>=2.0...
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cryptography>=2.3...
diff --git a/setup.py b/setup.py index dad9ab9bb173..98780dd5a2e2 100755 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ "boto>=2.36.0", "boto3>=1.6.16,<1.8", "botocore>=1.9.16,<1.11", - "cryptography>=2.0.0", + "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9",
microsoft__DeepSpeed-2611
[BUG] pydantic DeepSpeedConfigModel has no validator for <class:object> **Describe the bug** During ```from deepspeed.inference.config import DtypeEnum```, got error ``` File "pydantic/main.py", line 299, in pydantic.main.ModelMetaclass.__new__ File "pydantic/fields.py", line 411, in pydantic.fields.ModelField.infer File "pydantic/fields.py", line 342, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 456, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 670, in pydantic.fields.ModelField.populate_validators File "pydantic/validators.py", line 715, in find_validators RuntimeError: no validator found for <class 'object'>, see `arbitrary_types_allowed` in Config ``` **To Reproduce** Steps to reproduce the behavior: 1. Simple inference script to reproduce ```from deepspeed.inference.config import DtypeEnum``` 2. pydantic 1.8.2, deepspeed 0.8.0+384f17b **Expected behavior** successful import with no error **ds_report output** cannot produce due to the same import error **System info (please complete the following information):** - OS: Red Hat Enterprise Linux Server 7.9 (Maipo) - GPU count and types: one machine with 8 A100s, three machines with 8 A100s each - Hugging Face Transformers 4.19.2, no accelerate - Python version 3.8.13
[ { "content": "\"\"\"\nCopyright (c) Microsoft Corporation\nLicensed under the MIT license.\n\"\"\"\n\"\"\"\nCollection of DeepSpeed configuration utilities\n\"\"\"\nimport json\nimport collections\nimport collections.abc\nfrom functools import reduce\nfrom pydantic import BaseModel\nfrom deepspeed.utils import ...
[ { "content": "\"\"\"\nCopyright (c) Microsoft Corporation\nLicensed under the MIT license.\n\"\"\"\n\"\"\"\nCollection of DeepSpeed configuration utilities\n\"\"\"\nimport json\nimport collections\nimport collections.abc\nfrom functools import reduce\nfrom pydantic import BaseModel\nfrom deepspeed.utils import ...
diff --git a/deepspeed/runtime/config_utils.py b/deepspeed/runtime/config_utils.py index 81ef972ac0c4..08a50785ceb9 100755 --- a/deepspeed/runtime/config_utils.py +++ b/deepspeed/runtime/config_utils.py @@ -109,6 +109,7 @@ class Config: use_enum_values = True allow_population_by_field_name = True extra = "forbid" + arbitrary_types_allowed = True class pp_int(int):
PlasmaPy__PlasmaPy-1369
Make test order deterministic so we can use `pytest-xdist` in `plasmapy.particles` The order that tests are run in (what will soon be) `plasmapy.particles` is not deterministic. Some of the functionality in that subpackage uses [set](https://docs.python.org/3/tutorial/datastructures.html#sets) operations, which do not preserve order. Since the order of our tests sometimes depends on set operations, the order of tests changes. Nominally, unit tests should be able to be run in any order. However, I ran into a problem when trying to use [`pytest-xdist`](https://docs.pytest.org/en/3.0.1/xdist.html) to run tests in parallel, as this package currently requires test order to be deterministic when figuring out which tests to send to which processor. Since our test order will only get bigger with time, it would be helpful to make our test order deterministic so that we have the capability of running tests in parallel. The two possibilities that I can think of are: - Use [OrderedSets](https://pypi.org/project/ordered-set/) instead of regular sets (with the disadvantage that this would require adding another dependency to PlasmaPy) - Use sorting in the operations in which tests get selected (probably using `sorted`). The files that are affected include: - [ ] `test_ionization_state.py` - [ ] `test_ionization_states.py` - [ ] `test_parsing.py` - [ ] `test_particle_class.py` - [ ] `test_special_particles.py` The quick way to test this is to install `pytest-xdist`, go to the tests directory, and then try running in the command prompt: ```pytest -n 4 test_parsing.py``` Thanks! Make `IonizationState` and `IonizationStateCollection` tests independent of each other I tried running our tests with the pytest extension pytest-randomly, and it turns out that many of the tests in `plasmapy/particles/test_ionization_state.py` and `plasmapy/particles/test_ionization_collection.py` fail when they are run in random order. 
This is because the tests depend on each other, which is generally something to avoid, which I only learned recently. We should modify these tests so that they don't depend on the order of test execution. That is to say, we should make these tests completely independent of each other. I found this out by running these lines in the command line: ```bash pip install pytest-randomly pytest --randomly-seed=1235 ``` These tests might be a good place to use pytest fixtures.
[ { "content": "\"\"\"Utilities to help with testing.\"\"\"\n\n__all__ = [\n \"assert_can_handle_nparray\",\n \"run_test\",\n \"run_test_equivalent_calls\",\n]\n\nimport astropy.constants as const\nimport astropy.tests.helper as astrohelper\nimport astropy.units as u\nimport collections\nimport functools...
[ { "content": "\"\"\"Utilities to help with testing.\"\"\"\n\n__all__ = [\n \"assert_can_handle_nparray\",\n \"run_test\",\n \"run_test_equivalent_calls\",\n]\n\nimport astropy.constants as const\nimport astropy.tests.helper as astrohelper\nimport astropy.units as u\nimport collections\nimport functools...
diff --git a/changelog/1369.trivial.rst b/changelog/1369.trivial.rst new file mode 100644 index 0000000000..be90a5f509 --- /dev/null +++ b/changelog/1369.trivial.rst @@ -0,0 +1 @@ +Refactored tests in `plasmapy.particles`. diff --git a/plasmapy/particles/tests/test_atomic.py b/plasmapy/particles/tests/test_atomic.py index 576b99b9e5..dae299f780 100644 --- a/plasmapy/particles/tests/test_atomic.py +++ b/plasmapy/particles/tests/test_atomic.py @@ -3,6 +3,7 @@ from astropy import constants as const from astropy import units as u +from astropy.tests.helper import assert_quantity_allclose from plasmapy.particles.exceptions import ( ChargeError, @@ -45,345 +46,207 @@ # The following lists (with the name of a function -atomic_symbol_table = [ - [1, "H"], - [1, "H"], - ["H", "H"], - ["p", "H"], - ["T", "H"], - ["deuterium", "H"], - ["deuteron", "H"], - ["Tritium", "H"], - ["triton", "H"], - ["H-2", "H"], - ["D", "H"], - ["T", "H"], - ["H-3", "H"], - ["Hydrogen-3", "H"], - ["helium", "He"], - [2, "He"], - ["alpha", "He"], - ["gold", "Au"], - ["Gold", "Au"], - [79, "Au"], - ["79", "Au"], - ["P", "P"], - [118, "Og"], - ["N-14", "N"], - ["N", "N"], - ["H +1", "H"], - ["H 1+", "H"], - ["hydrogen 1+", "H"], - ["deuterium 1+", "H"], - ["Fe 24+", "Fe"], - ["Fe +24", "Fe"], - ["Fe 2-", "Fe"], - ["Fe -2", "Fe"], - ["Fe+", "Fe"], - ["Fe++", "Fe"], - ["Fe-", "Fe"], - ["Fe++++++++++++++", "Fe"], - ["H-0", InvalidParticleError], - [3.14159, TypeError], - ["Og-294b", InvalidParticleError], - ["H-934361079326356530741942970523610389", InvalidParticleError], - ["Fe 2+4", InvalidParticleError], - ["Fe+24", InvalidParticleError], - ["Fe +59", InvalidParticleError], - ["C++++++++++++++++", InvalidParticleError], - ["C-++++", InvalidParticleError], - ["neutron", InvalidElementError], - ["n", InvalidElementError], - ["n-1", InvalidElementError], - ["h", InvalidParticleError], - ["d", InvalidParticleError], - ["he", InvalidParticleError], - ["au", InvalidParticleError], - ["p-", 
InvalidElementError], - [0, InvalidParticleError], - [119, InvalidParticleError], - ["antiproton", InvalidElementError], -] - -isotope_symbol_table = [ - [("He", 4), "He-4"], - [("helium-4",), "He-4"], - [("H-2",), "D"], - [("Deuterium",), "D"], - [("deuterium",), "D"], - [("deuteron",), "D"], - [("tritium",), "T"], - [("triton",), "T"], - [("Hydrogen-3",), "T"], - [("hydrogen-3",), "T"], - [("H-3",), "T"], - [(1, 2), "D"], - [("Hydrogen", 3), "T"], - [("tritium",), "T"], - [("H", 2), "D"], - [("Alpha",), "He-4"], - [("alpha",), "He-4"], - [(79, 197), "Au-197"], - [("p",), "H-1"], - [("beryllium-8",), "Be-8"], - [("N-13",), "N-13"], - [("p",), "H-1"], - [("proton",), "H-1"], - [("protium",), "H-1"], - [("N-13 2+",), "N-13"], - [("Hydrogen-3 +1",), "T"], - ["Md-260", {"mass_numb": 261}, InvalidParticleError], - ["protium", {"mass_numb": 2}, InvalidParticleError], - ["alpha", {"mass_numb": 3}, InvalidParticleError], - ["O-18", {"mass_numb": 19}, InvalidParticleError], - ["lead-209", {"mass_numb": 511}, InvalidParticleError], - ["He-1", {}, InvalidParticleError], - [24, {"mass_numb": 23}, InvalidParticleError], - ["H", {"mass_numb": 0}, InvalidParticleError], - ["H-1", {"mass_numb": 2}, InvalidParticleError], - ["P", {}, InvalidIsotopeError], - [1, {}, InvalidIsotopeError], - [4, {}, InvalidIsotopeError], - ["hydrogen-444444", {}, InvalidParticleError], - ["Fe", {"mass_numb": 2.1}, TypeError], - ["He", {"mass_numb": "c"}, TypeError], - ["He-3", {"mass_numb": 4}, InvalidParticleError], - ["D", {"mass_numb": 3}, InvalidParticleError], - ["T", {"mass_numb": 2}, InvalidParticleError], - ["Fe", {"mass_numb": None}, InvalidIsotopeError], - ["He", {"mass_numb": 99}, InvalidParticleError], - ["d", {}, InvalidParticleError], - ["h-3", {}, InvalidParticleError], - ["h", {}, InvalidParticleError], - ["d+", {}, InvalidParticleError], - ["H-1", {"mass_numb": 1}, ParticleWarning], - ["H-2", {"mass_numb": 2}, ParticleWarning], - ["T", {"mass_numb": 3}, ParticleWarning], - ["Li-6", 
{"mass_numb": 6}, ParticleWarning], - ["lithium-6", {"mass_numb": 6}, ParticleWarning], - ["alpha", {"mass_numb": 4}, ParticleWarning], - ["p", {"mass_numb": 1}, ParticleWarning], -] - -atomic_number_table = [ - ["H", 1], - ["D", 1], - ["deuterium", 1], - ["Deuterium", 1], - ["tritium", 1], - ["p", 1], - ["P", 15], - ["Alpha", 2], - ["C-12", 6], - ["Argon", 18], - ["protium", 1], - ["H-3", 1], - ["p+", 1], - ["Be-8", 4], - ["N", 7], - ["N 2+", 7], - ["N +1", 7], - ["N+++", 7], - ["H-3934", InvalidParticleError], - ["C-12b", InvalidParticleError], - [-1.5, TypeError], - ["n", InvalidElementError], - ["n-1", InvalidElementError], - ["neutron", InvalidElementError], - ["Neutron", InvalidElementError], - ["d", InvalidParticleError], - ["t", InvalidParticleError], - ["s-36", InvalidParticleError], -] - -mass_number_table = [ - ["helium-3", 3], - ["Au-197", 197], - ["deuterium", 2], - ["D", 2], - ["H-2", 2], - ["tritium", 3], - ["T", 3], - ["alpha", 4], - ["p", 1], - ["Be-8", 8], - ["N-13", 13], - ["N-13 2+", 13], - ["N-13 +2", 13], - ["N-13+++", 13], - ["H-359", InvalidParticleError], - ["C-12b", InvalidParticleError], - [-1.5, TypeError], - ["N-13+-+-", InvalidParticleError], - ["h-3", InvalidParticleError], - ["n", InvalidIsotopeError], - ["n-1", InvalidIsotopeError], -] - - -element_name_table = [ - ["D", "hydrogen"], - ["deuterium", "hydrogen"], - ["Au", "gold"], - ["alpha", "helium"], - ["helium-4", "helium"], - ["H-2", "hydrogen"], - ["Deuterium", "hydrogen"], - ["Hydrogen-3", "hydrogen"], - ["hydrogen-3", "hydrogen"], - ["H-3", "hydrogen"], - ["tritium", "hydrogen"], - ["Alpha", "helium"], - ["alpha", "helium"], - [1, "hydrogen"], - [26, "iron"], - [79, "gold"], - ["p", "hydrogen"], - ["P", "phosphorus"], - ["Be-8", "beryllium"], - ["Li-7", "lithium"], - ["N", "nitrogen"], - ["N+++", "nitrogen"], - ["D-", "hydrogen"], - ["vegancupcakes", InvalidParticleError], - ["C-+-", InvalidParticleError], - [1.24, TypeError], - ["n", InvalidElementError], - ["neutron", 
InvalidElementError], - [0, InvalidParticleError], - ["H++", InvalidParticleError], - ["t", InvalidParticleError], - ["pb", InvalidParticleError], - ["d", InvalidParticleError], - ["h-3", InvalidParticleError], - ["Pb-9", InvalidParticleError], - ["H 2+", InvalidParticleError], -] - -standard_atomic_weight_table = [ - ["H", (1.008 * u.u).to(u.kg)], - [1, (1.008 * u.u).to(u.kg)], - ["Hydrogen", (1.008 * u.u).to(u.kg)], - ["Au", u.kg], - ["H-1", ParticleError], - ["help i'm trapped in a unit test", InvalidParticleError], - [1.1, TypeError], - ["n", InvalidElementError], - ["p", ParticleError], - ["alpha", ParticleError], - ["deuteron", ParticleError], - ["tritium", ParticleError], - ["Au+", ParticleError], - ["Fe -2", ParticleError], - ["Og 2+", ParticleError], - ["h", InvalidParticleError], - ["fe", InvalidParticleError], -] - -particle_mass_table = [ - ["proton", const.m_p], - ["H-1+", const.m_p], - ["H-1 +1", const.m_p], - ["H-1 1+", const.m_p], - ["H-1", {"Z": 1}, const.m_p], - ["hydrogen-1", {"Z": 1}, const.m_p], - ["p+", const.m_p], - ["F-19", {"Z": 3}, u.kg], - ["Og 1+", {}, MissingParticleDataError], - ["Fe-56", {"Z": 1.4}, TypeError], - ["H-1 +1", {"Z": 0}, InvalidParticleError], - [26, {"Z": 1, "mass_numb": "a"}, TypeError], - [26, {"Z": 27, "mass_numb": 56}, InvalidParticleError], - ["Og", {"Z": 1}, MissingParticleDataError], - ["Og", {"mass_numb": 696, "Z": 1}, InvalidParticleError], - ["He 1+", {"mass_numb": 99}, InvalidParticleError], - ["fe-56 1+", {}, InvalidParticleError], - ["H-1", {"mass_numb": 1, "Z": 1}, ParticleWarning], - ["H", standard_atomic_weight("H")], -] - -is_stable_table = [ - ["H-1", True], - [(1, 1), True], - ["N-14", True], - [("N", 14), True], - ["P-31", True], - [("P", 31), True], - ["p", True], - ["alpha", True], - ["Xe-124", True], - ["Fe", {"mass_numb": 56}, True], - ["Fe-56", True], - ["iron-56", True], - ["Iron-56", True], - [(26, 56), True], - ["Be-8", False], - ["U-235", False], - ["uranium-235", False], - ["T", False], - 
[(4, 8), False], - ["tritium", False], - ["Pb-209", False], - ["lead-209", False], - ["Lead-209", False], - ["Pb", {"mass_numb": 209}, False], - [(82, 209), False], - [("hydrogen-444444",), InvalidParticleError], - [("hydrogen", 0), InvalidParticleError], - [("",), InvalidParticleError], - [("pb-209",), InvalidParticleError], - [("h",), InvalidParticleError], - [("He",), InvalidIsotopeError], - [("B",), InvalidIsotopeError], -] - -charge_number_table = [ - ["H+", 1], - ["D +1", 1], - ["tritium 1+", 1], - ["H-", -1], - ["Fe -2", -2], - ["Fe 2-", -2], - ["N--", -2], - ["N++", 2], - ["alpha", 2], - ["proton", 1], - ["deuteron", 1], - ["triton", 1], - ["electron", -1], - ["e-", -1], - ["e+", 1], - ["positron", 1], - ["n", 0], - ["neutron", 0], - ["p-", -1], - ["antiproton", -1], - ["fads", InvalidParticleError], - ["H++", InvalidParticleError], - ["h+", InvalidParticleError], - ["fe 1+", InvalidParticleError], - ["d+", InvalidParticleError], - ["Fe 29+", InvalidParticleError], - ["H-1", ChargeError], - ["H---", ParticleWarning], - ["Fe -26", ParticleWarning], - ["Og 10-", ParticleWarning], +table_functions_args_kwargs_output = [ + [ + atomic_symbol, + [ + 1, + ], + {}, + "H", + ], + [atomic_symbol, [1], {}, "H"], + [atomic_symbol, ["H"], {}, "H"], + [atomic_symbol, ["p"], {}, "H"], + [atomic_symbol, ["T"], {}, "H"], + [atomic_symbol, ["deuterium"], {}, "H"], + [atomic_symbol, ["deuteron"], {}, "H"], + [atomic_symbol, ["Tritium"], {}, "H"], + [atomic_symbol, ["triton"], {}, "H"], + [atomic_symbol, ["H-2"], {}, "H"], + [atomic_symbol, ["D"], {}, "H"], + [atomic_symbol, ["T"], {}, "H"], + [atomic_symbol, ["H-3"], {}, "H"], + [atomic_symbol, ["Hydrogen-3"], {}, "H"], + [atomic_symbol, ["helium"], {}, "He"], + [atomic_symbol, [2], {}, "He"], + [atomic_symbol, ["alpha"], {}, "He"], + [atomic_symbol, ["gold"], {}, "Au"], + [atomic_symbol, ["Gold"], {}, "Au"], + [atomic_symbol, [79], {}, "Au"], + [atomic_symbol, ["79"], {}, "Au"], + [atomic_symbol, ["P"], {}, "P"], + 
[atomic_symbol, [118], {}, "Og"], + [atomic_symbol, ["N-14"], {}, "N"], + [atomic_symbol, ["N"], {}, "N"], + [atomic_symbol, ["H +1"], {}, "H"], + [atomic_symbol, ["H 1+"], {}, "H"], + [atomic_symbol, ["hydrogen 1+"], {}, "H"], + [atomic_symbol, ["deuterium 1+"], {}, "H"], + [atomic_symbol, ["Fe 24+"], {}, "Fe"], + [atomic_symbol, ["Fe +24"], {}, "Fe"], + [atomic_symbol, ["Fe 2-"], {}, "Fe"], + [atomic_symbol, ["Fe -2"], {}, "Fe"], + [atomic_symbol, ["Fe+"], {}, "Fe"], + [atomic_symbol, ["Fe++"], {}, "Fe"], + [atomic_symbol, ["Fe-"], {}, "Fe"], + [atomic_symbol, ["Fe++++++++++++++"], {}, "Fe"], + [isotope_symbol, ("He", 4), {}, "He-4"], + [isotope_symbol, ("helium-4",), {}, "He-4"], + [isotope_symbol, ("H-2",), {}, "D"], + [isotope_symbol, ("Deuterium",), {}, "D"], + [isotope_symbol, ("deuterium",), {}, "D"], + [isotope_symbol, ("deuteron",), {}, "D"], + [isotope_symbol, ("tritium",), {}, "T"], + [isotope_symbol, ("triton",), {}, "T"], + [isotope_symbol, ("Hydrogen-3",), {}, "T"], + [isotope_symbol, ("hydrogen-3",), {}, "T"], + [isotope_symbol, ("H-3",), {}, "T"], + [isotope_symbol, (1, 2), {}, "D"], + [isotope_symbol, ("Hydrogen", 3), {}, "T"], + [isotope_symbol, ("tritium",), {}, "T"], + [isotope_symbol, ("H", 2), {}, "D"], + [isotope_symbol, ("Alpha",), {}, "He-4"], + [isotope_symbol, ("alpha",), {}, "He-4"], + [isotope_symbol, (79, 197), {}, "Au-197"], + [isotope_symbol, ("p",), {}, "H-1"], + [isotope_symbol, ("beryllium-8",), {}, "Be-8"], + [isotope_symbol, ("N-13",), {}, "N-13"], + [isotope_symbol, ("p",), {}, "H-1"], + [isotope_symbol, ("proton",), {}, "H-1"], + [isotope_symbol, ("protium",), {}, "H-1"], + [isotope_symbol, ("N-13 2+",), {}, "N-13"], + [isotope_symbol, ("Hydrogen-3 +1",), {}, "T"], + [atomic_number, ["H"], {}, 1], + [atomic_number, ["D"], {}, 1], + [atomic_number, ["deuterium"], {}, 1], + [atomic_number, ["Deuterium"], {}, 1], + [atomic_number, ["tritium"], {}, 1], + [atomic_number, ["p"], {}, 1], + [atomic_number, ["P"], {}, 15], + 
[atomic_number, ["Alpha"], {}, 2], + [atomic_number, ["C-12"], {}, 6], + [atomic_number, ["Argon"], {}, 18], + [atomic_number, ["protium"], {}, 1], + [atomic_number, ["H-3"], {}, 1], + [atomic_number, ["p+"], {}, 1], + [atomic_number, ["Be-8"], {}, 4], + [atomic_number, ["N"], {}, 7], + [atomic_number, ["N 2+"], {}, 7], + [atomic_number, ["N +1"], {}, 7], + [atomic_number, ["N+++"], {}, 7], + [mass_number, ["helium-3"], {}, 3], + [mass_number, ["Au-197"], {}, 197], + [mass_number, ["deuterium"], {}, 2], + [mass_number, ["D"], {}, 2], + [mass_number, ["H-2"], {}, 2], + [mass_number, ["tritium"], {}, 3], + [mass_number, ["T"], {}, 3], + [mass_number, ["alpha"], {}, 4], + [mass_number, ["p"], {}, 1], + [mass_number, ["Be-8"], {}, 8], + [mass_number, ["N-13"], {}, 13], + [mass_number, ["N-13 2+"], {}, 13], + [mass_number, ["N-13 +2"], {}, 13], + [mass_number, ["N-13+++"], {}, 13], + [element_name, ["D"], {}, "hydrogen"], + [element_name, ["deuterium"], {}, "hydrogen"], + [element_name, ["Au"], {}, "gold"], + [element_name, ["alpha"], {}, "helium"], + [element_name, ["helium-4"], {}, "helium"], + [element_name, ["H-2"], {}, "hydrogen"], + [element_name, ["Deuterium"], {}, "hydrogen"], + [element_name, ["Hydrogen-3"], {}, "hydrogen"], + [element_name, ["hydrogen-3"], {}, "hydrogen"], + [element_name, ["H-3"], {}, "hydrogen"], + [element_name, ["tritium"], {}, "hydrogen"], + [element_name, ["Alpha"], {}, "helium"], + [element_name, ["alpha"], {}, "helium"], + [element_name, [1], {}, "hydrogen"], + [element_name, [26], {}, "iron"], + [element_name, [79], {}, "gold"], + [element_name, ["p"], {}, "hydrogen"], + [element_name, ["P"], {}, "phosphorus"], + [element_name, ["Be-8"], {}, "beryllium"], + [element_name, ["Li-7"], {}, "lithium"], + [element_name, ["N"], {}, "nitrogen"], + [element_name, ["N+++"], {}, "nitrogen"], + [element_name, ["D-"], {}, "hydrogen"], + [standard_atomic_weight, ["H"], {}, (1.008 * u.u).to(u.kg)], + [standard_atomic_weight, [1], {}, (1.008 * 
u.u).to(u.kg)], + [standard_atomic_weight, ["Hydrogen"], {}, (1.008 * u.u).to(u.kg)], + [standard_atomic_weight, ["Au"], {}, u.kg], + [particle_mass, ["proton"], {}, const.m_p], + [particle_mass, ["H-1+"], {}, const.m_p], + [particle_mass, ["H-1 +1"], {}, const.m_p], + [particle_mass, ["H-1 1+"], {}, const.m_p], + [particle_mass, ["H-1"], {"Z": 1}, const.m_p], + [particle_mass, ["hydrogen-1"], {"Z": 1}, const.m_p], + [particle_mass, ["p+"], {}, const.m_p], + [particle_mass, ["F-19"], {"Z": 3}, u.kg], + [particle_mass, ["H"], {}, standard_atomic_weight("H")], + [is_stable, ["H-1"], {}, True], + [is_stable, [1, 1], {}, True], + [is_stable, ["N-14"], {}, True], + [is_stable, ["N", 14], {}, True], + [is_stable, ["P-31"], {}, True], + [is_stable, ["P", 31], {}, True], + [is_stable, ["p"], {}, True], + [is_stable, ["alpha"], {}, True], + [is_stable, ["Xe-124"], {}, True], + [is_stable, ("Fe",), {"mass_numb": 56}, True], + [is_stable, ["Fe-56"], {}, True], + [is_stable, ["iron-56"], {}, True], + [is_stable, ["Iron-56"], {}, True], + [is_stable, [26, 56], {}, True], + [is_stable, ["Be-8"], {}, False], + [is_stable, ["U-235"], {}, False], + [is_stable, ["uranium-235"], {}, False], + [is_stable, ["T"], {}, False], + [is_stable, [4, 8], {}, False], + [is_stable, ["tritium"], {}, False], + [is_stable, ["Pb-209"], {}, False], + [is_stable, ["lead-209"], {}, False], + [is_stable, ["Lead-209"], {}, False], + [is_stable, ("Pb",), {"mass_numb": 209}, False], + [is_stable, [82, 209], {}, False], + [charge_number, ["H+"], {}, 1], + [charge_number, ["D +1"], {}, 1], + [charge_number, ["tritium 1+"], {}, 1], + [charge_number, ["H-"], {}, -1], + [charge_number, ["Fe -2"], {}, -2], + [charge_number, ["Fe 2-"], {}, -2], + [charge_number, ["N--"], {}, -2], + [charge_number, ["N++"], {}, 2], + [charge_number, ["alpha"], {}, 2], + [charge_number, ["proton"], {}, 1], + [charge_number, ["deuteron"], {}, 1], + [charge_number, ["triton"], {}, 1], + [charge_number, ["electron"], {}, -1], + 
[charge_number, ["e-"], {}, -1], + [charge_number, ["e+"], {}, 1], + [charge_number, ["positron"], {}, 1], + [charge_number, ["n"], {}, 0], + [charge_number, ["neutron"], {}, 0], + [charge_number, ["p-"], {}, -1], + [charge_number, ["antiproton"], {}, -1], + [electric_charge, ["p"], {}, u.C], + [electric_charge, ["p"], {}, 1.6021766208e-19 * u.C], + [electric_charge, ["e"], {}, -1.6021766208e-19 * u.C], + [electric_charge, ["alpha"], {}, 3.2043532416e-19 * u.C], + [electric_charge, ["n"], {}, 0 * u.C], + [half_life, ["H-1"], {}, u.s], + [half_life, ["tritium"], {}, u.s], + [half_life, ["H-1"], {}, np.inf * u.s], ] -electric_charge_table = [ - ["p", u.C], - ["p", 1.6021766208e-19 * u.C], - ["e", -1.6021766208e-19 * u.C], - ["alpha", 3.2043532416e-19 * u.C], - ["n", 0 * u.C], - ["badinput", InvalidParticleError], - ["h+", InvalidParticleError], - ["Au 81+", InvalidParticleError], - ["Au 81-", ParticleWarning], - ["H---", ParticleWarning], -] -half_life_table = [["H-1", u.s], ["tritium", u.s], ["H-1", np.inf * u.s]] +@pytest.mark.parametrize( + "tested_function, args, kwargs, expected_output", + table_functions_args_kwargs_output, +) +def test_functions_and_values(tested_function, args, kwargs, expected_output): + run_test(tested_function, args, kwargs, expected_output) class TestInvalidPeriodicElement: @@ -404,112 +267,6 @@ def test_periodic_table_group(self): periodic_table_group(("B", "Ti", "Ge")) -# The tables above do not include the function to be tested in order to -# avoid cluttering up the code. The following block of code prepends -# the correct function to each list containing args, kwargs, and the -# expected outcome prior to being passed through to run_test. 
- - -tables_and_functions = [ - (atomic_symbol, atomic_symbol_table), - (isotope_symbol, isotope_symbol_table), - (atomic_number, atomic_number_table), - (mass_number, mass_number_table), - (element_name, element_name_table), - (standard_atomic_weight, standard_atomic_weight_table), - (is_stable, is_stable_table), - (particle_mass, particle_mass_table), - (charge_number, charge_number_table), - (electric_charge, electric_charge_table), - (half_life, half_life_table), -] - -all_tests = [] - -for func, table in tables_and_functions: - for inputs in table: - inputs.insert(0, func) - if len(inputs) == 3: - inputs.insert(2, {}) - all_tests += table - -# Set up tests for a variety of atomic functions to make sure that bad -# inputs lead to the expected errors. - -atomic_TypeError_funcs_table = [ - atomic_symbol, - isotope_symbol, - atomic_number, - is_stable, - half_life, - mass_number, - element_name, - standard_atomic_weight, - nuclear_binding_energy, - nuclear_reaction_energy, -] - -atomic_TypeError_badargs = [1.1, {"cats": "bats"}, 1 + 1j] - -atomic_ParticleErrors_funcs_table = [ - atomic_symbol, - isotope_symbol, - atomic_number, - is_stable, - half_life, - mass_number, - element_name, - standard_atomic_weight, - particle_mass, - known_isotopes, - stable_isotopes, - common_isotopes, - isotopic_abundance, - charge_number, - electric_charge, -] - -atomic_ParticleError_badargs = [ - -1, - 119, - "grumblemuffins", - "H-0", - "Og-294b", - "H-9343610", - "Fe 2+4", - "Fe+24", - "Fe +59", - "C++++++++++++++++", - "C-++++", - "h", - "d", - "he", - "au", - "alpha 1+", - "alpha-4", -] - -metatable = [ - (atomic_TypeError_funcs_table, atomic_TypeError_badargs, TypeError), - ( - atomic_ParticleErrors_funcs_table, - atomic_ParticleError_badargs, - InvalidParticleError, - ), -] - -for funcs, badargs, error in metatable: - for func in funcs: - for badarg in badargs: - all_tests += [[func, badarg, error]] - - -@pytest.mark.parametrize("inputs", all_tests) -def 
test_atomic_functions(inputs): - print(inputs) - run_test(inputs) - - # Next we have tests that do not fall nicely into equality comparisons. diff --git a/plasmapy/particles/tests/test_exceptions.py b/plasmapy/particles/tests/test_exceptions.py new file mode 100644 index 0000000000..f2ff0cc82d --- /dev/null +++ b/plasmapy/particles/tests/test_exceptions.py @@ -0,0 +1,1077 @@ +import itertools +import numpy as np +import pytest + +from astropy import units as u + +from plasmapy.particles import ( + atomic_symbol, + IonizationState, + IonizationStateCollection, + nuclear_binding_energy, + nuclear_reaction_energy, +) +from plasmapy.particles.atomic import ( + atomic_number, + common_isotopes, + electric_charge, + half_life, + integer_charge, + is_stable, + isotopic_abundance, + known_isotopes, + mass_number, + particle_mass, + stable_isotopes, + standard_atomic_weight, +) +from plasmapy.particles.exceptions import ( + ChargeError, + InvalidElementError, + InvalidIsotopeError, + InvalidParticleError, + MissingParticleDataError, + ParticleError, + ParticleWarning, +) +from plasmapy.particles.nuclear import nuclear_binding_energy, nuclear_reaction_energy +from plasmapy.particles.symbols import atomic_symbol, element_name, isotope_symbol +from plasmapy.utils.exceptions import PlasmaPyFutureWarning + +tests_for_exceptions = { + "too few nstates": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [1.0]}, + ParticleError, + ), + "too many nstates": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [1, 0, 0, 0]}, + ParticleError, + ), + "ionic fraction < 0": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [-0.1, 0.1, 1]}, + ParticleError, + ), + "ionic fraction > 1": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [1.1, 0.0, 0.0]}, + ParticleError, + ), + "invalid ionic fraction": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [1.0, 0.0, "a"]}, + ParticleError, + ), + "bad n_elem units": 
( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [0, 1], "n_elem": 3 * u.m ** 3}, + u.UnitTypeError, + ), + "bad T_e units": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [0, 1], "T_e": 1 * u.m}, + u.UnitTypeError, + ), + "negative n_elem": ( + IonizationState, + [], + { + "particle": "He", + "ionic_fractions": [1.0, 0.0, 0.0], + "n_elem": -1 * u.m ** -3, + }, + ParticleError, + ), + "negative T_e": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [1.0, 0.0, 0.0], "T_e": -1 * u.K}, + ParticleError, + ), + "redundant ndens": ( + IonizationState, + [], + { + "particle": "H", + "ionic_fractions": np.array([3, 4]) * u.m ** -3, + "n_elem": 4 * u.m ** -3, + }, + ParticleError, + ), + "wrong type": (IonizationStateCollection, [], {"inputs": None}, ParticleError), + "not normalized": ( + IonizationStateCollection, + [], + {"inputs": {"He": [0.4, 0.5, 0.0]}, "tol": 1e-9}, + ParticleError, + ), + "negative ionfrac": ( + IonizationStateCollection, + [], + {"inputs": {"H": [-0.1, 1.1]}}, + ParticleError, + ), + "ion": ( + IonizationStateCollection, + [], + {"inputs": {"H": [0.1, 0.9], "He+": [0.0, 0.9, 0.1]}}, + ParticleError, + ), + "repeat elements": ( + IonizationStateCollection, + [], + {"inputs": {"H": [0.1, 0.9], "hydrogen": [0.2, 0.8]}}, + ParticleError, + ), + "isotope of element": ( + IonizationStateCollection, + [], + {"inputs": {"H": [0.1, 0.9], "D": [0.2, 0.8]}}, + ParticleError, + ), + "negative abundance": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, + "abundances": {"H": 1, "He": -0.1}, + }, + ParticleError, + ), + "imaginary abundance": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, + "abundances": {"H": 1, "He": 0.1j}, + }, + ParticleError, + ), + "wrong density units": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -2}, + "abundances": {"H": 1, 
"He": 0.1}, + }, + ParticleError, + ), + "abundance redundance": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, + "abundances": {"H": 1, "He": 0.1}, + }, + ParticleError, + ), + "abundance contradiction": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, + "abundances": {"H": 1, "He": 0.11}, + }, + ParticleError, + ), + "kappa too small": ( + IonizationStateCollection, + [], + {"inputs": ["H"], "kappa": 1.499999}, + ParticleError, + ), + "negative n": ( + IonizationStateCollection, + [], + {"inputs": ["H"], "n0": -1 * u.cm ** -3}, + ParticleError, + ), + "negative T_e for collection": ( + IonizationStateCollection, + [], + {"inputs": ["H-1"], "T_e": -1 * u.K}, + ParticleError, + ), +} + + +@pytest.mark.parametrize( + ["tested_object", "args", "kwargs", "expected_exception"], + list(tests_for_exceptions.values()), + ids=list(tests_for_exceptions.keys()), +) +def test_named_tests_for_exceptions(tested_object, args, kwargs, expected_exception): + """ + Test that appropriate exceptions are raised for inappropriate inputs + to `IonizationState` or `IonizationStateCollection` + """ + with pytest.raises(expected_exception) as exc_info: + tested_object(*args, **kwargs) + + assert expected_exception == exc_info.type + + +tests_from_nuclear = [ + [ + nuclear_reaction_energy, + [], + {"reactants": ["n"], "products": 3}, + pytest.raises(TypeError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["n"], "products": ["He-4"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["h"], "products": ["H-1"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["e-", "n"], "products": ["p+"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["e+", "n"], "products": ["p-"]}, + pytest.raises(ParticleError), + ], + [ + 
nuclear_reaction_energy, + [], + {"reactants": ["ksdf"], "products": ["H-3"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["H"], "products": ["H-1"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["p"], "products": ["n", "n", "e-"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + ["p --> p"], + {"reactants": "p", "products": "p"}, + pytest.raises(ParticleError), + ], + [nuclear_binding_energy, ["H"], {}, pytest.raises(ParticleError)], + [nuclear_binding_energy, ["He-99"], {}, pytest.raises(InvalidParticleError)], + [ + nuclear_binding_energy, + ["He"], + {"mass_numb": 99}, + pytest.raises(InvalidParticleError), + ], + [nuclear_binding_energy, [3.1415926535j], {}, pytest.raises(TypeError)], +] + +tests_from_atomic = [ + [ + atomic_symbol, + [ + "H-0", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + 3.14159, + ], + {}, + pytest.raises(TypeError), + ], + [ + atomic_symbol, + [ + "Og-294b", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "H-934361079326356530741942970523610389", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "Fe 2+4", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "Fe+24", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "Fe +59", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "C++++++++++++++++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "C-++++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + "n-1", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + "h", + 
], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "d", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "he", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "au", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "p-", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + 0, + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + 119, + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "antiproton", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "H-3934", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + "C-12b", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + -1.5, + ], + {}, + pytest.raises(TypeError), + ], + [ + atomic_number, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "n-1", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "Neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "d", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + "t", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + "s-36", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "H-359", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "C-12b", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + -1.5, + ], + {}, + pytest.raises(TypeError), + ], + [ + mass_number, + [ + "N-13+-+-", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "h-3", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + 
mass_number, + [ + "n", + ], + {}, + pytest.raises(InvalidIsotopeError), + ], + [ + mass_number, + [ + "n-1", + ], + {}, + pytest.raises(InvalidIsotopeError), + ], + [ + element_name, + [ + "vegancupcakes", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "C-+-", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + 1.24, + ], + {}, + pytest.raises(TypeError), + ], + [ + element_name, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + element_name, + [ + "neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + element_name, + [ + 0, + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "H++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "t", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "pb", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "d", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "h-3", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "Pb-9", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "H 2+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + standard_atomic_weight, + [ + "H-1", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "help i'm trapped in a unit test", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + standard_atomic_weight, + [ + 1.1, + ], + {}, + pytest.raises(TypeError), + ], + [ + standard_atomic_weight, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + standard_atomic_weight, + [ + "p", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "alpha", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "deuteron", + ], + {}, + pytest.raises(ParticleError), + ], + [ + 
standard_atomic_weight, + [ + "tritium", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "Au+", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "Fe -2", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "Og 2+", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "h", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + standard_atomic_weight, + [ + "fe", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "badinput", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "h+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "Au 81+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "Au 81-", + ], + {}, + pytest.warns(ParticleWarning), + ], + [ + electric_charge, + [ + "H---", + ], + {}, + pytest.warns(ParticleWarning), + ], + [ + integer_charge, + [ + "fads", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "H++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "h+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "fe 1+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "d+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "Fe 29+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "H-1", + ], + {}, + pytest.raises(ChargeError), + ], + [ + integer_charge, + [ + "H---", + ], + {}, + pytest.warns(PlasmaPyFutureWarning), + ], + [ + integer_charge, + [ + "Fe -26", + ], + {}, + pytest.warns(PlasmaPyFutureWarning), + ], + [ + integer_charge, + [ + "Og 10-", + ], + {}, + pytest.warns(PlasmaPyFutureWarning), + ], + [ + isotope_symbol, + ("Md-260",), + {"mass_numb": 261}, + 
pytest.raises(InvalidParticleError), + ], + [ + isotope_symbol, + ("protium",), + {"mass_numb": 2}, + pytest.raises(InvalidParticleError), + ], + [isotope_symbol, ("alpha",), {"mass_numb": 3}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("O-18",), {"mass_numb": 19}, pytest.raises(InvalidParticleError)], + [ + isotope_symbol, + ("lead-209",), + {"mass_numb": 511}, + pytest.raises(InvalidParticleError), + ], + [isotope_symbol, ("He-1",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, [24], {"mass_numb": 23}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("H",), {"mass_numb": 0}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("H-1",), {"mass_numb": 2}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("P",), {}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, [1], {}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, [4], {}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, ("hydrogen-444444",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("Fe",), {"mass_numb": 2.1}, pytest.raises(TypeError)], + [isotope_symbol, ("He",), {"mass_numb": "c"}, pytest.raises(TypeError)], + [isotope_symbol, ("He-3",), {"mass_numb": 4}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("D",), {"mass_numb": 3}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("T",), {"mass_numb": 2}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("Fe",), {"mass_numb": None}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, ("He",), {"mass_numb": 99}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("d",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("h-3",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("h",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("d+",), {}, pytest.raises(InvalidParticleError)], + [particle_mass, ["Og 1+"], {}, pytest.raises(MissingParticleDataError)], + [particle_mass, ["Fe-56"], {"Z": 1.4}, 
pytest.raises(TypeError)], + [particle_mass, ["H-1 +1"], {"Z": 0}, pytest.raises(InvalidParticleError)], + [particle_mass, [26], {"Z": 1, "mass_numb": "a"}, pytest.raises(TypeError)], + [ + particle_mass, + [26], + {"Z": 27, "mass_numb": 56}, + pytest.raises(InvalidParticleError), + ], + [particle_mass, ["Og"], {"Z": 1}, pytest.raises(MissingParticleDataError)], + [ + particle_mass, + ["Og"], + {"mass_numb": 696, "Z": 1}, + pytest.raises(InvalidParticleError), + ], + [particle_mass, ["He 1+"], {"mass_numb": 99}, pytest.raises(InvalidParticleError)], + [particle_mass, ["fe-56 1+"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["hydrogen-444444"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["hydrogen", 0], {}, pytest.raises(InvalidParticleError)], + [is_stable, [""], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["pb-209"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["h"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["He"], {}, pytest.raises(InvalidIsotopeError)], + [is_stable, ["B"], {}, pytest.raises(InvalidIsotopeError)], + [particle_mass, ["H-1"], {"mass_numb": 1, "Z": 1}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("H-1",), {"mass_numb": 1}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("H-2",), {"mass_numb": 2}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("T",), {"mass_numb": 3}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("Li-6",), {"mass_numb": 6}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("lithium-6",), {"mass_numb": 6}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("alpha",), {"mass_numb": 4}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("p",), {"mass_numb": 1}, pytest.warns(ParticleWarning)], +] + + +atomic_TypeError_funcs_table = [ + atomic_symbol, + isotope_symbol, + atomic_number, + is_stable, + half_life, + mass_number, + element_name, + standard_atomic_weight, + nuclear_binding_energy, + nuclear_reaction_energy, +] + 
+atomic_TypeError_badargs = [1.1, {"cats": "bats"}, 1 + 1j] + +atomic_ParticleErrors_funcs_table = [ + atomic_symbol, + isotope_symbol, + atomic_number, + is_stable, + half_life, + mass_number, + element_name, + standard_atomic_weight, + particle_mass, + known_isotopes, + stable_isotopes, + common_isotopes, + isotopic_abundance, + integer_charge, + electric_charge, +] + +atomic_ParticleError_badargs = [ + -1, + 119, + "grumblemuffins", + "H-0", + "Og-294b", + "H-9343610", + "Fe 2+4", + "Fe+24", + "Fe +59", + "C++++++++++++++++", + "C-++++", + "h", + "d", + "he", + "au", + "alpha 1+", + "alpha-4", +] + +particle_error_tests = [ + (function, [bad_argument], {}, pytest.raises(InvalidParticleError)) + for function, bad_argument in itertools.product( + atomic_ParticleErrors_funcs_table, atomic_ParticleError_badargs + ) +] +type_error_tests = [ + (function, [bad_argument], {}, pytest.raises(TypeError)) + for function, bad_argument in itertools.product( + atomic_TypeError_funcs_table, atomic_TypeError_badargs + ) +] + + +@pytest.mark.parametrize( + ["tested_object", "args", "kwargs", "expectation"], + tests_from_nuclear + tests_from_atomic + particle_error_tests + type_error_tests, +) +def test_unnamed_tests_exceptions(tested_object, args, kwargs, expectation): + """ + Test that appropriate exceptions are raised for inappropriate inputs + to `IonizationState`. 
+ """ + with expectation as exc_info: + tested_object(*args, **kwargs) + + if hasattr(expectation, "expected_exception"): + assert type(expectation.expected_exception()) == exc_info.type + + # TODO tbh given how ugly this is I don't think we should even be doing this check + if hasattr(expectation, "expected_warning"): + for expected_warning, recorded_warning in zip( + exc_info.expected_warning, exc_info.list + ): + assert expected_warning == recorded_warning.category diff --git a/plasmapy/particles/tests/test_ionization_collection.py b/plasmapy/particles/tests/test_ionization_collection.py index 6a85784f02..0ffc217672 100644 --- a/plasmapy/particles/tests/test_ionization_collection.py +++ b/plasmapy/particles/tests/test_ionization_collection.py @@ -16,9 +16,9 @@ mass_number, Particle, particle_symbol, + ParticleList, ) from plasmapy.particles.exceptions import InvalidIsotopeError, ParticleError -from plasmapy.particles.particle_collections import ParticleList from plasmapy.utils.pytest_helpers import run_test @@ -743,75 +743,6 @@ def test_base_particles_equal_ionic_fraction_particles(self): ) -IE = collections.namedtuple("IE", ["inputs", "expected_exception"]) - -tests_for_exceptions = { - "wrong type": IE({"inputs": None}, ParticleError), - "not normalized": IE( - {"inputs": {"He": [0.4, 0.5, 0.0]}, "tol": 1e-9}, ParticleError - ), - "negative ionfrac": IE({"inputs": {"H": [-0.1, 1.1]}}, ParticleError), - "ion": IE({"inputs": {"H": [0.1, 0.9], "He+": [0.0, 0.9, 0.1]}}, ParticleError), - "repeat elements": IE( - {"inputs": {"H": [0.1, 0.9], "hydrogen": [0.2, 0.8]}}, ParticleError - ), - "isotope of element": IE( - {"inputs": {"H": [0.1, 0.9], "D": [0.2, 0.8]}}, ParticleError - ), - "negative abundance": IE( - { - "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, - "abundances": {"H": 1, "He": -0.1}, - }, - ParticleError, - ), - "imaginary abundance": IE( - { - "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, - "abundances": {"H": 1, "He": 0.1j}, - }, - 
ParticleError, - ), - "wrong density units": IE( - { - "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -2}, - "abundances": {"H": 1, "He": 0.1}, - }, - ParticleError, - ), - "abundance redundance": IE( - { - "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, - "abundances": {"H": 1, "He": 0.1}, - }, - ParticleError, - ), - "abundance contradiction": IE( - { - "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, - "abundances": {"H": 1, "He": 0.11}, - }, - ParticleError, - ), - "kappa too small": IE({"inputs": ["H"], "kappa": 1.499999}, ParticleError), - "negative n": IE({"inputs": ["H"], "n0": -1 * u.cm ** -3}, ParticleError), - "negative T_e": IE({"inputs": ["H-1"], "T_e": -1 * u.K}, ParticleError), -} - - -@pytest.mark.parametrize("test_name", tests_for_exceptions.keys()) -def test_exceptions_upon_instantiation(test_name): - """ - Test that appropriate exceptions are raised for inappropriate inputs - to IonizationStateCollection when first instantiated. 
- """ - run_test( - IonizationStateCollection, - kwargs=tests_for_exceptions[test_name].inputs, - expected_outcome=tests_for_exceptions[test_name].expected_exception, - ) - - class TestIonizationStateCollectionDensityEqualities: """ Test that IonizationStateCollection instances are equal or not equal to each diff --git a/plasmapy/particles/tests/test_ionization_state.py b/plasmapy/particles/tests/test_ionization_state.py index 683518705a..2a79409f59 100644 --- a/plasmapy/particles/tests/test_ionization_state.py +++ b/plasmapy/particles/tests/test_ionization_state.py @@ -456,52 +456,6 @@ def test_State_equality_and_getitem(self): assert result_from_charge == result_from_symbol -IE = collections.namedtuple("IE", ["inputs", "expected_exception"]) - -tests_for_exceptions = { - "too few nstates": IE({"particle": "H", "ionic_fractions": [1.0]}, ParticleError), - "too many nstates": IE( - {"particle": "H", "ionic_fractions": [1, 0, 0, 0]}, ParticleError - ), - "ionic fraction < 0": IE( - {"particle": "He", "ionic_fractions": [-0.1, 0.1, 1]}, ParticleError - ), - "ionic fraction > 1": IE( - {"particle": "He", "ionic_fractions": [1.1, 0.0, 0.0]}, ParticleError - ), - "invalid ionic fraction": IE( - {"particle": "He", "ionic_fractions": [1.0, 0.0, "a"]}, ParticleError - ), - "bad n_elem units": IE( - {"particle": "H", "ionic_fractions": [0, 1], "n_elem": 3 * u.m ** 3}, - u.UnitTypeError, - ), - "bad T_e units": IE( - {"particle": "H", "ionic_fractions": [0, 1], "T_e": 1 * u.m}, u.UnitTypeError - ), - "negative n_elem": IE( - { - "particle": "He", - "ionic_fractions": [1.0, 0.0, 0.0], - "n_elem": -1 * u.m ** -3, - }, - ParticleError, - ), - "negative T_e": IE( - {"particle": "He", "ionic_fractions": [1.0, 0.0, 0.0], "T_e": -1 * u.K}, - ParticleError, - ), - "redundant ndens": IE( - { - "particle": "H", - "ionic_fractions": np.array([3, 4]) * u.m ** -3, - "n_elem": 4 * u.m ** -3, - }, - ParticleError, - ), -} - - ions = ["Fe 6+", "p", "He-4 0+", "triton", "alpha", "Ne +0"] @@ 
-515,12 +469,14 @@ def test_IonizationState_ionfracs_from_ion_input(ion): expected_ionic_fractions = np.zeros(ion_particle.atomic_number + 1) expected_ionic_fractions[ion_particle.charge_number] = 1.0 - if not np.allclose(expected_ionic_fractions, actual_ionic_fractions, atol=1e-16): - pytest.fail( - f"The returned ionic fraction for IonizationState({repr(ion)}) " - f"should have entirely been in the Z = {ion_particle.charge_number} " - f"level, but was instead: {ionization_state.ionic_fractions}." - ) + np.testing.assert_allclose( + expected_ionic_fractions, + actual_ionic_fractions, + atol=1e-16, + err_msg=f"The returned ionic fraction for IonizationState({repr(ion)}) " + f"should have entirely been in the Z = {ion_particle.integer_charge} " + f"level.", + ) @pytest.mark.parametrize("ion", ions) @@ -546,19 +502,6 @@ def test_IonizationState_base_particles_from_ion_input(ion): ) -@pytest.mark.parametrize("test", tests_for_exceptions.keys()) -def test_IonizationState_exceptions(test): - """ - Test that appropriate exceptions are raised for inappropriate inputs - to `IonizationState`. 
- """ - run_test( - IonizationState, - kwargs=tests_for_exceptions[test].inputs, - expected_outcome=tests_for_exceptions[test].expected_exception, - ) - - expected_properties = { "T_e": 5000.0 * u.K, "tol": 2e-14, diff --git a/plasmapy/particles/tests/test_nuclear.py b/plasmapy/particles/tests/test_nuclear.py index ca6f607e5b..f0bd68227c 100644 --- a/plasmapy/particles/tests/test_nuclear.py +++ b/plasmapy/particles/tests/test_nuclear.py @@ -3,6 +3,7 @@ from astropy import constants as const from astropy import units as u +from astropy.tests.helper import assert_quantity_allclose from plasmapy.particles.exceptions import InvalidParticleError, ParticleError from plasmapy.particles.nuclear import ( @@ -12,81 +13,6 @@ ) from plasmapy.utils.pytest_helpers import run_test, run_test_equivalent_calls -test_nuclear_table = [ - [nuclear_binding_energy, "p", {}, 0 * u.J], - [nuclear_binding_energy, "n", {}, 0 * u.J], - [nuclear_binding_energy, "p", {}, 0 * u.J], - [nuclear_binding_energy, "H", {}, ParticleError], - [nuclear_binding_energy, "He-99", {}, InvalidParticleError], - [nuclear_binding_energy, "He", {"mass_numb": 99}, InvalidParticleError], - [nuclear_binding_energy, 3.1415926535j, {}, TypeError], - [mass_energy, "e-", {}, (const.m_e * const.c ** 2).to(u.J)], - [mass_energy, "p+", {}, (const.m_p * const.c ** 2).to(u.J)], - [mass_energy, "H-1", {}, (const.m_p * const.c ** 2).to(u.J)], - [mass_energy, "H-1 0+", {}, (const.m_p * const.c ** 2).to(u.J)], - [mass_energy, "n", {}, (const.m_n * const.c ** 2).to(u.J)], - [nuclear_reaction_energy, (), {"reactants": ["n"], "products": 3}, TypeError], - [ - nuclear_reaction_energy, - (), - {"reactants": ["n"], "products": ["He-4"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["h"], "products": ["H-1"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["e-", "n"], "products": ["p+"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["e+", "n"], 
"products": ["p-"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["ksdf"], "products": ["H-3"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["H"], "products": ["H-1"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["p"], "products": ["n", "n", "e-"]}, - ParticleError, - ], - [nuclear_reaction_energy, "H + H --> H", {}, ParticleError], - [nuclear_reaction_energy, "H + H", {}, ParticleError], - [nuclear_reaction_energy, 1, {}, TypeError], - [nuclear_reaction_energy, "H-1 + H-1 --> H-1", {}, ParticleError], - [nuclear_reaction_energy, "p --> n", {}, ParticleError], - [ - nuclear_reaction_energy, - "p --> p", - {"reactants": "p", "products": "p"}, - ParticleError, - ], -] - - -@pytest.mark.parametrize("test_inputs", test_nuclear_table) -def test_nuclear(test_inputs): - run_test(*test_inputs, rtol=1e-3) - - test_nuclear_equivalent_calls_table = [ [nuclear_binding_energy, ["He-4", {}], ["alpha", {}], ["He", {"mass_numb": 4}]] ] @@ -178,3 +104,22 @@ def test_nuclear_reaction_energy(): expected = 17.6 * u.MeV actual = nuclear_reaction_energy(reactants=reactants, products=products) assert u.isclose(actual, expected, rtol=1e-3) + + +table_of_nuclear_tests = [ + [nuclear_binding_energy, ["p"], {}, 0 * u.J], + [nuclear_binding_energy, ["n"], {}, 0 * u.J], + [nuclear_binding_energy, ["p"], {}, 0 * u.J], + [mass_energy, ["e-"], {}, (const.m_e * const.c ** 2).to(u.J)], + [mass_energy, ["p+"], {}, (const.m_p * const.c ** 2).to(u.J)], + [mass_energy, ["H-1"], {}, (const.m_p * const.c ** 2).to(u.J)], + [mass_energy, ["H-1 0+"], {}, (const.m_p * const.c ** 2).to(u.J)], + [mass_energy, ["n"], {}, (const.m_n * const.c ** 2).to(u.J)], +] + + +@pytest.mark.parametrize( + ["tested_object", "args", "kwargs", "expected_value"], table_of_nuclear_tests +) +def test_nuclear_table(tested_object, args, kwargs, expected_value): + run_test(tested_object, args, kwargs, expected_value, rtol=1e-3) diff --git 
a/plasmapy/utils/pytest_helpers/pytest_helpers.py b/plasmapy/utils/pytest_helpers/pytest_helpers.py index 9c18930eb2..8eb2d75cb3 100644 --- a/plasmapy/utils/pytest_helpers/pytest_helpers.py +++ b/plasmapy/utils/pytest_helpers/pytest_helpers.py @@ -218,7 +218,7 @@ def test_func(inputs): if kwargs is None: kwargs = {} - if not isinstance(args, tuple): + if not type(args) in [tuple, list]: args = (args,) if not callable(func): diff --git a/plasmapy/utils/tests/test_roman.py b/plasmapy/utils/tests/test_roman.py index 963f6934f3..c661fd91f2 100644 --- a/plasmapy/utils/tests/test_roman.py +++ b/plasmapy/utils/tests/test_roman.py @@ -139,17 +139,16 @@ (np.int64(14), "XIV"), ] -toRoman_exceptions_table = [ - ("X", TypeError), - (-1, roman.OutOfRangeError), - (0, roman.OutOfRangeError), - (5000, roman.OutOfRangeError), -] - -fromRoman_exceptions_table = [ - ("asdfasd", roman.InvalidRomanNumeralError), - (1, TypeError), - ("xi", roman.InvalidRomanNumeralError), +exceptions_table = [ + (roman.to_roman, "X", TypeError), + (roman.to_roman, -1, roman.OutOfRangeError), + (roman.to_roman, 0, roman.OutOfRangeError), + (roman.to_roman, 5000, roman.OutOfRangeError), + (roman.from_roman, "asdfasd", roman.InvalidRomanNumeralError), + (roman.from_roman, 1, TypeError), + (roman.from_roman, "xi", roman.InvalidRomanNumeralError), + (roman.is_roman_numeral, 1, TypeError), # TODO: tbh I would just return False here? + (roman.is_roman_numeral, ("I", "II"), TypeError), ] @@ -157,50 +156,31 @@ def test_to_roman(integer, roman_numeral): """ Test that `~plasmapy.utils.roman.to_roman` correctly converts - integers to Roman numerals. + integers to Roman numerals, and that the inverse is true as well. 
""" run_test(func=roman.to_roman, args=integer, expected_outcome=roman_numeral) - - -@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals) -def test_from_roman(integer, roman_numeral): - """ - Test that `~plasmapy.utils.roman.from_roman` correctly converts - Roman numerals to integers. - """ run_test(func=roman.from_roman, args=roman_numeral, expected_outcome=int(integer)) -@pytest.mark.parametrize("input, expected_exception", toRoman_exceptions_table) -def test_to_roman_exceptions(input, expected_exception): - """ - Test that `~plasmapy.utils.roman.to_roman` raises the correct - exceptions when necessary. - """ - run_test(func=roman.to_roman, args=input, expected_outcome=expected_exception) - - -@pytest.mark.parametrize("input, expected_exception", fromRoman_exceptions_table) -def test_from_roman_exceptions(input, expected_exception): +@pytest.mark.parametrize("function, argument, expected_exception", exceptions_table) +def test_to_roman_exceptions(function, argument, expected_exception): """ - Test that `~plasmapy.utils.roman.from_roman` raises the correct + Test that `~plasmapy.utils.roman` functions raise the correct exceptions when necessary. """ - run_test(func=roman.from_roman, args=input, expected_outcome=expected_exception) + run_test(func=function, args=argument, expected_outcome=expected_exception) test_is_roman_numeral_table = [ ("I", True), ("i", False), ("CLXXXVIII", True), - (1, TypeError), ("khjfda", False), ("VIIII", False), ("IXX", False), - (("I", "II"), TypeError), ] -@pytest.mark.parametrize("input, expected", test_is_roman_numeral_table) -def test_is_roman_numeral(input, expected): - run_test(func=roman.is_roman_numeral, args=input, expected_outcome=expected) +@pytest.mark.parametrize("argument, expected", test_is_roman_numeral_table) +def test_is_roman_numeral(argument, expected): + run_test(func=roman.is_roman_numeral, args=argument, expected_outcome=expected)
graspologic-org__graspologic-428
update requirements to scipy>=1.4 Scipy 1.4 - has much faster linear assignment problem, making FAQ way faster - has MGC, which we eventually want for new nonpar, signal subgraph
[ { "content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chu...
[ { "content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chu...
diff --git a/requirements.txt b/requirements.txt index 55d70431f..68b2d9af3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ networkx>=2.1 numpy>=1.8.1 scikit-learn>=0.19.1 -scipy>=1.1.0 +scipy>=1.4.0 seaborn>=0.9.0 matplotlib>=3.0.0,<=3.3.0 hyppo>=0.1.2 diff --git a/setup.py b/setup.py index 7378a02c1..8f7696ef3 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ "networkx>=2.1", "numpy>=1.8.1", "scikit-learn>=0.19.1", - "scipy>=1.1.0", + "scipy>=1.4.0", "seaborn>=0.9.0", "matplotlib>=3.0.0", "hyppo>=0.1.3",
quantopian__zipline-1625
conflicting CLI flags for `clean` Dear Zipline Maintainers, Before I tell you about my issue, let me describe my environment: # Environment * Operating System: `OS X 10.12.1` * Python Version: `2.7` * Python Bitness: `64` * How did you install Zipline: `conda` * Python packages: _default_ Now that you know a little about me, let me tell you about the issue I am having: # Description of Issue The CLI command `zipline clean` has conflicting flags for `-b, --bundle` and `-b, --before`. ``` (zipline-venv) pgeez$ zipline clean --help Usage: zipline clean [OPTIONS] Clean up data downloaded with the ingest command. Options: -b, --bundle BUNDLE-NAME The data bundle to clean. [default: quantopian- quandl] -b, --before TIMESTAMP Clear all data before TIMESTAMP. This may not be passed with -k / --keep-last -a, --after TIMESTAMP Clear all data after TIMESTAMP This may not be passed with -k / --keep-last -k, --keep-last N Clear all but the last N downloads. This may not be passed with -b / --before or -a / --after --help Show this message and exit. ``` * What happened instead? Because of the conflict, `-b` defaults to `--before`. Sincerely, pgeez
[ { "content": "import errno\nimport os\nfrom functools import wraps\n\nimport click\nimport logbook\nimport pandas as pd\nfrom six import text_type\n\nfrom zipline.data import bundles as bundles_module\nfrom zipline.utils.cli import Date, Timestamp\nfrom zipline.utils.run_algo import _run, load_extensions\n\ntry...
[ { "content": "import errno\nimport os\nfrom functools import wraps\n\nimport click\nimport logbook\nimport pandas as pd\nfrom six import text_type\n\nfrom zipline.data import bundles as bundles_module\nfrom zipline.utils.cli import Date, Timestamp\nfrom zipline.utils.run_algo import _run, load_extensions\n\ntry...
diff --git a/docs/source/whatsnew/1.0.3.txt b/docs/source/whatsnew/1.0.3.txt index fb0cac6f05..e21f02a7ac 100644 --- a/docs/source/whatsnew/1.0.3.txt +++ b/docs/source/whatsnew/1.0.3.txt @@ -56,4 +56,6 @@ None Miscellaneous ~~~~~~~~~~~~~ -None +* Changed the short-opt for ``--before`` in the ``zipline clean`` + entrypoint. The new argument is ``-e``. The old argument, ``-b``, conflicted + with the ``--bundle`` short-opt (:issue:`1625`). diff --git a/zipline/__main__.py b/zipline/__main__.py index 8e719690dc..b42a90ce52 100644 --- a/zipline/__main__.py +++ b/zipline/__main__.py @@ -323,7 +323,7 @@ def ingest(bundle, assets_version, show_progress): help='The data bundle to clean.', ) @click.option( - '-b', + '-e', '--before', type=Timestamp(), help='Clear all data before TIMESTAMP.'
safe-global__safe-config-service-76
Serve static files with Nginx When running the application with Nginx as reverse-proxy, static files (such as Admin CSS) are not correctly collected and served
[ { "content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djang...
[ { "content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djang...
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index b551593e..bacaa58b 100644 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -2,6 +2,11 @@ set -euo pipefail +echo "==> $(date +%H:%M:%S) ==> Collecting static files..." +python src/manage.py collectstatic --noinput +rm -rf ${DOCKER_NGINX_VOLUME_ROOT}/* +cp -r staticfiles/ ${DOCKER_NGINX_VOLUME_ROOT}/ + echo "==> $(date +%H:%M:%S) ==> Migrating Django models..." python src/manage.py migrate --noinput diff --git a/src/config/settings.py b/src/config/settings.py index aa3da790..ece8cb88 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -171,6 +171,8 @@ STATIC_URL = "/static/" +STATIC_ROOT = "staticfiles" + # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
modin-project__modin-3440
Remove inheritance of Modin DMatrix from xgb.DMatrix Inheritance of Modin DMatrix from xgb.DMatrix doesn't include any benefits. Wrong documentation is provided to user using `help(modin.experimtenal.xgboost.DMatrix)` command.
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the ...
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the ...
diff --git a/modin/experimental/xgboost/xgboost.py b/modin/experimental/xgboost/xgboost.py index 4d569ce5158..769c68b1b39 100644 --- a/modin/experimental/xgboost/xgboost.py +++ b/modin/experimental/xgboost/xgboost.py @@ -25,7 +25,7 @@ LOGGER = logging.getLogger("[modin.xgboost]") -class DMatrix(xgb.DMatrix): +class DMatrix: """ DMatrix holds references to partitions of Modin DataFrame.
facebookresearch__habitat-lab-347
DD-PPO does not all reduce gradients ## 🐛 Bug DD-PPO does not all reduce gradients during the backward call, because `reducer.prepare_for_backward` is not being called during training process. The problem is in this line: https://github.com/facebookresearch/habitat-api/blob/v0.1.4/habitat_baselines/rl/ddppo/algo/ddppo.py#L96 ``` class DecentralizedDistributedMixin: ... def before_backward(self, loss): # ... self.reducer.prepare_for_backward(..) # Mixin goes second that way the PPO __init__ will still be called class DDPPO(PPO, DecentralizedDistributedMixin): # Here PPO and Mixin both have "before_backward" method, # DDPPO will call PPO's not the Mixin's. pass ``` And here is a quick fix: ``` class DecentralizedDistributedMixin: ... # Mixin goes second that way the PPO __init__ will still be called class DDPPO(PPO, DecentralizedDistributedMixin): # Move before_backward to DDPPO def before_backward(self, loss): # ... self.reducer.prepare_for_backward(..) ```
[ { "content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Tuple\n\nimport torch\nimport torch.distributed as distrib\n\nfrom habita...
[ { "content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Tuple\n\nimport torch\nimport torch.distributed as distrib\n\nfrom habita...
diff --git a/habitat_baselines/rl/ddppo/algo/ddppo.py b/habitat_baselines/rl/ddppo/algo/ddppo.py index 4da9e8d6c3..9eb0c581be 100644 --- a/habitat_baselines/rl/ddppo/algo/ddppo.py +++ b/habitat_baselines/rl/ddppo/algo/ddppo.py @@ -92,6 +92,5 @@ def before_backward(self, loss): self.reducer.prepare_for_backward([]) -# Mixin goes second that way the PPO __init__ will still be called -class DDPPO(PPO, DecentralizedDistributedMixin): +class DDPPO(DecentralizedDistributedMixin, PPO): pass
translate__pootle-5666
Silence dev checks in the admin UI as well There's the following section in the Pootle dev config: ``` Python # Silence the DEBUG check on dev servers SILENCED_SYSTEM_CHECKS = [ 'pootle.W004', # python-levenstein not installed 'pootle.W005', # DEBUG = True 'pootle.W010', # DEFAULT_FROM_EMAIL has default setting 'pootle.W011', # POOTLE_CONTACT_EMAIL has default setting ] ``` When running Pootle, I still get these notifications in the admin UI among others: > `/!\` DEBUG mode is on. Do not do this in production! Set DEBUG = False in Pootle settings > > `/!\` POOTLE_CONTACT_EMAIL is using the following default setting 'info@YOUR_DOMAIN.com'. POOTLE_CONTACT_EMAIL is the address that will receive messages sent by the contact form. > > `/!\` DEFAULT_FROM_EMAIL is using the following default setting 'webmaster@localhost'. DEFAULT_FROM_EMAIL is used in all outgoing Pootle email. Don't forget to review your mail server settings. I think it might make sense to silence them consistently not only in the console, but in the admin UI as well.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\...
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\...
diff --git a/pootle/apps/pootle_app/views/admin/dashboard.py b/pootle/apps/pootle_app/views/admin/dashboard.py index e555c96df0d..7e501c6b23e 100644 --- a/pootle/apps/pootle_app/views/admin/dashboard.py +++ b/pootle/apps/pootle_app/views/admin/dashboard.py @@ -82,7 +82,7 @@ def rq_stats(): def checks(): from django.core.checks.registry import registry - return registry.run_checks() + return [e for e in registry.run_checks() if not e.is_silenced()] @admin_required
nilearn__nilearn-4306
Tests failing on main with pytest 8.1.0 See https://github.com/nilearn/nilearn/actions/runs/8136733065/job/22233621361 ``` test_plotting: install_deps> python -I -m pip install kaleido 'kaleido; platform_system != "Windows"' 'kaleido==0.1.0.post1; platform_system == "Windows"' 'matplotlib>=3.3.0' plotly .pkg: install_requires> python -I -m pip install hatch-vcs hatchling .pkg: _optional_hooks> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build .pkg: get_requires_for_build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build .pkg: freeze> python -m pip freeze --all .pkg: editables==0.5,hatch-vcs==0.4.0,hatchling==1.21.1,packaging==23.2,pathspec==0.12.1,pip==24.0,pluggy==1.4.0,setuptools==69.1.1,setuptools-scm==8.0.4,trove-classifiers==2024.3.3,typing_extensions==4.10.0 .pkg: build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build test_plotting: install_package_deps> python -I -m pip install coverage 'joblib>=1.0.0' lxml 'nibabel>=4.0.0' 'numpy>=1.19.0' packaging 'pandas>=1.1.5' pytest-cov 'pytest>=6.0.0' 'requests>=2.25.0' 'scikit-learn>=1.0.0' 'scipy>=1.8.0' test_plotting: install_package> python -I -m pip install --force-reinstall --no-deps /Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1+gf91de22.tar.gz test_plotting: freeze> python -m pip freeze --all test_plotting: certifi==2024.2.2,charset-normalizer==3.3.2,contourpy==1.2.0,coverage==7.4.3,cycler==0.12.1,fonttools==4.49.0,idna==3.6,iniconfig==2.0.0,joblib==1.3.2,kaleido==0.2.1,kiwisolver==1.4.5,lxml==5.1.0,matplotlib==3.8.3,nibabel==5.2.1,nilearn @ 
file:///Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1%2Bgf91de22.tar.gz#sha256=b29f617bbb5d9aa3a94aa7518f1006aea9f52d58d945a8b82ed6951bffccb22e,numpy==1.26.4,packaging==23.2,pandas==2.2.1,pillow==10.2.0,pip==24.0,plotly==5.19.0,pluggy==1.4.0,pyparsing==3.1.1,pytest==8.1.0,pytest-cov==4.1.0,python-dateutil==2.9.0.post0,pytz==2024.1,requests==2.31.0,scikit-learn==1.4.1.post1,scipy==1.12.0,six==1.16.0,tenacity==8.2.3,threadpoolctl==3.3.0,tzdata==2024.1,urllib3==2.2.1 test_plotting: commands[0]> pytest --cov=nilearn --cov-report=xml nilearn ============================= test session starts ============================== platform darwin -- Python 3.12.1, pytest-8.1.0, pluggy-1.4.0 -- /Users/runner/work/nilearn/nilearn/.tox/test_plotting/bin/python cachedir: .tox/test_plotting/.pytest_cache rootdir: /Users/runner/work/nilearn/nilearn configfile: pyproject.toml plugins: cov-4.1.0 collecting ... collected 3118 items / 1 error ==================================== ERRORS ==================================== ______________________ ERROR collecting nilearn/externals ______________________ .tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:167: in register self._verify_hook(hook, hookimpl) hook = <HookCaller 'pytest_ignore_collect'> hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>> hookimpl_opts = {'hookwrapper': False, 'optionalhook': False, 'specname': None, 'tryfirst': False, 'trylast': False, 'wrapper': False} method = <function pytest_ignore_collect at 0x12f044680> name = 'pytest_ignore_collect' plugin = <module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'> plugin_name = '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py' self = <_pytest.config.PytestPluginManager object at 0x10c395130> 
.tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:342: in _verify_hook raise PluginValidationError( E pluggy._manager.PluginValidationError: Plugin '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py' for hook 'pytest_ignore_collect' E hookimpl definition: pytest_ignore_collect(path, config) E Argument(s) {'path'} are declared in the hookimpl but can not be found in the hookspec hook = <HookCaller 'pytest_ignore_collect'> hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>> notinspec = {'path'} self = <_pytest.config.PytestPluginManager object at 0x10c395130> =============================== warnings summary =============================== nilearn/input_data/__init__.py:23 /Users/runner/work/nilearn/nilearn/nilearn/input_data/__init__.py:23: DeprecationWarning: The import path 'nilearn.input_data' is deprecated in version 0.9. Importing from 'nilearn.input_data' will be possible at least until release 0.13.0. Please import from 'nilearn.maskers' instead. warnings.warn(message, DeprecationWarning) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html ---------- coverage: platform darwin, python 3.12.1-final-0 ---------- Coverage XML written to file coverage.xml =========================== short test summary info ============================ ERROR nilearn/externals - pluggy._manager.PluginValidationError: Plugin '/Use... !!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!! 
========================= 1 warning, 1 error in 17.43s ========================= test_plotting: exit 2 (22.26 seconds) /Users/runner/work/nilearn/nilearn> pytest --cov=nilearn --cov-report=xml nilearn pid=7328 .pkg: _exit> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build test_plotting: FAIL code 2 (102.15=setup[79.89]+cmd[22.26] seconds) evaluation failed :( (102.25 seconds) ``` The tests run fine locally: - with pytest<8.1.0 - OR removing the conftest in nilearn/nilearn/externals/conftest.py (which suggest to ignore collecting tests, though there are no tests in that subfolder - EDIT: tests are in the __init__.py file).
[ { "content": "# Do not collect any tests in externals. This is more robust than using\n# --ignore because --ignore needs a path and it is not convenient to pass in\n# the externals path (very long install-dependent path in site-packages) when\n# using --pyargs\n\n\ndef pytest_ignore_collect(path, config):\n ...
[ { "content": null, "path": "nilearn/externals/conftest.py" } ]
diff --git a/nilearn/externals/conftest.py b/nilearn/externals/conftest.py deleted file mode 100644 index f3bb9d9e9a..0000000000 --- a/nilearn/externals/conftest.py +++ /dev/null @@ -1,8 +0,0 @@ -# Do not collect any tests in externals. This is more robust than using -# --ignore because --ignore needs a path and it is not convenient to pass in -# the externals path (very long install-dependent path in site-packages) when -# using --pyargs - - -def pytest_ignore_collect(path, config): - return True diff --git a/pyproject.toml b/pyproject.toml index b29619fe8d..3259956486 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,4 +159,5 @@ addopts = "-ra --strict-config --strict-markers --doctest-modules --showlocals - doctest_optionflags = "NORMALIZE_WHITESPACE ELLIPSIS" junit_family = "xunit2" minversion = "6.0" +norecursedirs = "tempita" xfail_strict = true
translate__pootle-5820
Adding a new languages forces you to specify special characters If you add a new language you are blocked with the UI insisting that you add special characters. ![screen shot 2017-01-16 at 16 01 52](https://cloud.githubusercontent.com/assets/647438/21990281/17dee6e4-dc06-11e6-910a-4b58c6c45192.png) The part in red is Afrikaans and say "This field is required" Not all languages require special characters. Also the model allows `blank=True` So somewhere we're blocking.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\nimp...
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\nimp...
diff --git a/pootle/apps/pootle_app/forms.py b/pootle/apps/pootle_app/forms.py index 87f94710d57..f538ea19836 100644 --- a/pootle/apps/pootle_app/forms.py +++ b/pootle/apps/pootle_app/forms.py @@ -25,7 +25,7 @@ class LanguageForm(forms.ModelForm): - specialchars = forms.CharField(strip=False) + specialchars = forms.CharField(strip=False, required=False) class Meta(object): model = Language diff --git a/tests/forms/language.py b/tests/forms/language.py index ebc47795b27..5dcbd5dc4af 100644 --- a/tests/forms/language.py +++ b/tests/forms/language.py @@ -12,6 +12,7 @@ @pytest.mark.parametrize('specialchars', [ + ' ', ' abcde ', ' ab cd', ' abcde', @@ -52,3 +53,18 @@ def test_clean_specialchars_unique(specialchars, count_char): form = LanguageForm(form_data) assert form.is_valid() assert form.cleaned_data['specialchars'].count(count_char) == 1 + + +@pytest.mark.django_db +def test_specialchars_can_be_blank(): + """Test that a blank special character field is valid.""" + form_data = { + 'code': 'foo', + 'fullname': 'Foo', + 'checkstyle': 'foo', + 'nplurals': '2', + 'specialchars': '', + } + form = LanguageForm(form_data) + assert form.is_valid() + assert form.cleaned_data['specialchars'] == ''
getmoto__moto-1801
Botocore sub-dependency mismatch Running `pipenv install moto` results in: ``` Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies. You can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation. Hint: try $ pipenv lock --pre if it is a pre-release dependency. Could not find a version that matches botocore<1.11,<1.12.0,<2.0.0,==1.10.84,>=1.11.3,>=1.3.0,>=1.9.16 Tried: 0.4.1, 0.4.2, 0.5.0, 0.5.1, 0.5.2, 0.5.3, 0.5.4, 0.6.0, 0.7.0, 0.8.0, 0.8.1, 0.8.2, 0.8.3, 0.9.0, 0.9.1, 0.9.2, 0.10.0, 0.11.0, 0.12.0, 0.13.0, 0.13.1, 0.14.0, 0.15.0, 0.15.1, 0.16.0, 0.17.0, 0.18.0, 0.19.0, 0.20.0, 0.21.0, 0.22.0, 0.23.0, 0.24.0, 0.25.0, 0.26.0, 0.27.0, 0.28.0, 0.29.0, 0.30.0, 0.31.0, 0.32.0, 0.33.0, 0.34.0, 0.35.0, 0.36.0, 0.37.0, 0.38.0, 0.39.0, 0.40.0, 0.41.0, 0.42.0, 0.43.0, 0.44.0, 0.45.0, 0.46.0, 0.47.0, 0.48.0, 0.49.0, 0.50.0, 0.51.0, 0.52.0, 0.53.0, 0.54.0, 0.55.0, 0.56.0, 0.57.0, 0.58.0, 0.59.0, 0.60.0, 0.61.0, 0.62.0, 0.63.0, 0.64.0, 0.65.0, 0.66.0, 0.67.0, 0.68.0, 0.69.0, 0.70.0, 0.71.0, 0.72.0, 0.73.0, 0.74.0, 0.75.0, 0.76.0, 0.77.0, 0.78.0, 0.79.0, 0.80.0, 0.81.0, 0.82.0, 0.83.0, 0.84.0, 0.85.0, 0.86.0, 0.87.0, 0.88.0, 0.89.0, 0.90.0, 0.91.0, 0.92.0, 0.93.0, 0.94.0, 0.95.0, 0.96.0, 0.97.0, 0.98.0, 0.99.0, 0.100.0, 0.101.0, 0.102.0, 0.103.0, 0.104.0, 0.105.0, 0.106.0, 0.107.0, 0.108.0, 0.109.0, 1.0.0, 1.0.0, 1.0.1, 1.0.1, 1.1.0, 1.1.0, 1.1.1, 1.1.1, 1.1.2, 1.1.2, 1.1.3, 1.1.3, 1.1.4, 1.1.4, 1.1.5, 1.1.5, 1.1.6, 1.1.6, 1.1.7, 1.1.7, 1.1.8, 1.1.8, 1.1.9, 1.1.9, 1.1.10, 1.1.10, 1.1.11, 1.1.11, 1.1.12, 1.1.12, 1.2.0, 1.2.0, 1.2.1, 1.2.1, 1.2.2, 1.2.2, 1.2.3, 1.2.3, 1.2.4, 1.2.4, 1.2.5, 1.2.5, 1.2.6, 1.2.6, 1.2.7, 1.2.7, 1.2.8, 1.2.8, 1.2.9, 1.2.9, 1.2.10, 1.2.10, 1.2.11, 1.2.11, 1.3.0, 1.3.0, 1.3.1, 1.3.1, 1.3.2, 1.3.2, 1.3.3, 1.3.3, 1.3.4, 1.3.4, 1.3.5, 1.3.5, 1.3.6, 1.3.6, 1.3.7, 1.3.7, 1.3.8, 1.3.8, 1.3.9, 1.3.9, 1.3.10, 1.3.10, 1.3.11, 1.3.11, 1.3.12, 1.3.12, 1.3.13, 
1.3.13, 1.3.14, 1.3.14, 1.3.15, 1.3.15, 1.3.16, 1.3.16, 1.3.17, 1.3.17, 1.3.18, 1.3.18, 1.3.19, 1.3.19, 1.3.20, 1.3.20, 1.3.21, 1.3.21, 1.3.22, 1.3.22, 1.3.23, 1.3.23, 1.3.24, 1.3.24, 1.3.25, 1.3.25, 1.3.26, 1.3.26, 1.3.27, 1.3.27, 1.3.28, 1.3.28, 1.3.29, 1.3.29, 1.3.30, 1.3.30, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.4.2, 1.4.2, 1.4.3, 1.4.3, 1.4.4, 1.4.4, 1.4.5, 1.4.5, 1.4.6, 1.4.6, 1.4.7, 1.4.7, 1.4.8, 1.4.8, 1.4.9, 1.4.9, 1.4.10, 1.4.10, 1.4.11, 1.4.11, 1.4.12, 1.4.12, 1.4.13, 1.4.13, 1.4.14, 1.4.14, 1.4.15, 1.4.15, 1.4.16, 1.4.16, 1.4.17, 1.4.17, 1.4.18, 1.4.18, 1.4.19, 1.4.19, 1.4.20, 1.4.20, 1.4.21, 1.4.21, 1.4.22, 1.4.22, 1.4.23, 1.4.23, 1.4.24, 1.4.24, 1.4.25, 1.4.25, 1.4.26, 1.4.26, 1.4.27, 1.4.27, 1.4.28, 1.4.28, 1.4.29, 1.4.29, 1.4.30, 1.4.30, 1.4.31, 1.4.31, 1.4.32, 1.4.32, 1.4.33, 1.4.33, 1.4.34, 1.4.34, 1.4.35, 1.4.35, 1.4.36, 1.4.36, 1.4.37, 1.4.37, 1.4.38, 1.4.38, 1.4.39, 1.4.39, 1.4.40, 1.4.40, 1.4.41, 1.4.41, 1.4.42, 1.4.42, 1.4.43, 1.4.43, 1.4.44, 1.4.44, 1.4.46, 1.4.46, 1.4.47, 1.4.47, 1.4.48, 1.4.48, 1.4.49, 1.4.49, 1.4.50, 1.4.50, 1.4.51, 1.4.51, 1.4.52, 1.4.52, 1.4.53, 1.4.53, 1.4.54, 1.4.54, 1.4.55, 1.4.55, 1.4.56, 1.4.56, 1.4.57, 1.4.57, 1.4.58, 1.4.58, 1.4.59, 1.4.59, 1.4.60, 1.4.60, 1.4.61, 1.4.61, 1.4.62, 1.4.62, 1.4.63, 1.4.63, 1.4.64, 1.4.64, 1.4.65, 1.4.65, 1.4.66, 1.4.66, 1.4.67, 1.4.67, 1.4.68, 1.4.68, 1.4.69, 1.4.69, 1.4.70, 1.4.70, 1.4.71, 1.4.71, 1.4.72, 1.4.72, 1.4.73, 1.4.73, 1.4.74, 1.4.74, 1.4.75, 1.4.75, 1.4.76, 1.4.76, 1.4.77, 1.4.77, 1.4.78, 1.4.78, 1.4.79, 1.4.79, 1.4.80, 1.4.80, 1.4.81, 1.4.81, 1.4.82, 1.4.82, 1.4.83, 1.4.83, 1.4.84, 1.4.84, 1.4.85, 1.4.85, 1.4.86, 1.4.86, 1.4.87, 1.4.87, 1.4.88, 1.4.88, 1.4.89, 1.4.89, 1.4.90, 1.4.90, 1.4.91, 1.4.91, 1.4.92, 1.4.92, 1.4.93, 1.4.93, 1.5.0, 1.5.0, 1.5.1, 1.5.1, 1.5.2, 1.5.2, 1.5.3, 1.5.3, 1.5.4, 1.5.4, 1.5.5, 1.5.5, 1.5.6, 1.5.6, 1.5.7, 1.5.7, 1.5.8, 1.5.8, 1.5.9, 1.5.9, 1.5.10, 1.5.10, 1.5.11, 1.5.11, 1.5.12, 1.5.12, 1.5.13, 1.5.13, 1.5.14, 1.5.14, 1.5.15, 1.5.15, 1.5.16, 1.5.16, 
1.5.17, 1.5.17, 1.5.18, 1.5.18, 1.5.19, 1.5.19, 1.5.20, 1.5.20, 1.5.21, 1.5.21, 1.5.22, 1.5.22, 1.5.23, 1.5.23, 1.5.24, 1.5.24, 1.5.25, 1.5.25, 1.5.26, 1.5.26, 1.5.27, 1.5.27, 1.5.28, 1.5.28, 1.5.29, 1.5.29, 1.5.30, 1.5.30, 1.5.31, 1.5.31, 1.5.32, 1.5.32, 1.5.33, 1.5.33, 1.5.34, 1.5.34, 1.5.35, 1.5.35, 1.5.36, 1.5.36, 1.5.37, 1.5.37, 1.5.38, 1.5.38, 1.5.39, 1.5.39, 1.5.40, 1.5.40, 1.5.41, 1.5.41, 1.5.42, 1.5.42, 1.5.43, 1.5.43, 1.5.44, 1.5.44, 1.5.45, 1.5.45, 1.5.46, 1.5.46, 1.5.47, 1.5.47, 1.5.48, 1.5.48, 1.5.49, 1.5.49, 1.5.50, 1.5.50, 1.5.51, 1.5.51, 1.5.52, 1.5.52, 1.5.53, 1.5.53, 1.5.54, 1.5.54, 1.5.55, 1.5.55, 1.5.56, 1.5.56, 1.5.57, 1.5.57, 1.5.58, 1.5.58, 1.5.59, 1.5.59, 1.5.60, 1.5.60, 1.5.61, 1.5.61, 1.5.62, 1.5.62, 1.5.63, 1.5.63, 1.5.64, 1.5.64, 1.5.65, 1.5.65, 1.5.66, 1.5.66, 1.5.67, 1.5.67, 1.5.68, 1.5.68, 1.5.69, 1.5.69, 1.5.70, 1.5.70, 1.5.71, 1.5.71, 1.5.72, 1.5.72, 1.5.73, 1.5.73, 1.5.74, 1.5.74, 1.5.75, 1.5.75, 1.5.76, 1.5.76, 1.5.77, 1.5.77, 1.5.78, 1.5.78, 1.5.79, 1.5.79, 1.5.80, 1.5.80, 1.5.81, 1.5.81, 1.5.82, 1.5.82, 1.5.83, 1.5.83, 1.5.84, 1.5.84, 1.5.85, 1.5.85, 1.5.86, 1.5.86, 1.5.87, 1.5.87, 1.5.88, 1.5.88, 1.5.89, 1.5.89, 1.5.90, 1.5.90, 1.5.91, 1.5.91, 1.5.92, 1.5.92, 1.5.93, 1.5.93, 1.5.94, 1.5.94, 1.5.95, 1.5.95, 1.6.0, 1.6.0, 1.6.1, 1.6.1, 1.6.2, 1.6.2, 1.6.3, 1.6.3, 1.6.4, 1.6.4, 1.6.5, 1.6.5, 1.6.6, 1.6.6, 1.6.7, 1.6.7, 1.6.8, 1.6.8, 1.7.0, 1.7.0, 1.7.1, 1.7.1, 1.7.2, 1.7.2, 1.7.3, 1.7.3, 1.7.4, 1.7.4, 1.7.5, 1.7.5, 1.7.6, 1.7.6, 1.7.7, 1.7.7, 1.7.8, 1.7.8, 1.7.9, 1.7.9, 1.7.10, 1.7.10, 1.7.11, 1.7.11, 1.7.12, 1.7.12, 1.7.13, 1.7.13, 1.7.14, 1.7.14, 1.7.15, 1.7.15, 1.7.16, 1.7.16, 1.7.17, 1.7.17, 1.7.18, 1.7.18, 1.7.19, 1.7.19, 1.7.20, 1.7.20, 1.7.21, 1.7.21, 1.7.22, 1.7.22, 1.7.23, 1.7.23, 1.7.24, 1.7.24, 1.7.25, 1.7.25, 1.7.26, 1.7.26, 1.7.27, 1.7.27, 1.7.28, 1.7.28, 1.7.29, 1.7.29, 1.7.30, 1.7.30, 1.7.31, 1.7.31, 1.7.32, 1.7.32, 1.7.33, 1.7.33, 1.7.34, 1.7.34, 1.7.35, 1.7.35, 1.7.36, 1.7.36, 1.7.37, 1.7.37, 1.7.38, 1.7.38, 
1.7.39, 1.7.39, 1.7.40, 1.7.40, 1.7.41, 1.7.41, 1.7.42, 1.7.42, 1.7.43, 1.7.43, 1.7.44, 1.7.44, 1.7.45, 1.7.45, 1.7.46, 1.7.46, 1.7.47, 1.7.47, 1.7.48, 1.7.48, 1.8.0, 1.8.0, 1.8.1, 1.8.1, 1.8.2, 1.8.2, 1.8.3, 1.8.3, 1.8.4, 1.8.4, 1.8.5, 1.8.5, 1.8.6, 1.8.6, 1.8.7, 1.8.7, 1.8.8, 1.8.8, 1.8.9, 1.8.9, 1.8.10, 1.8.10, 1.8.11, 1.8.11, 1.8.12, 1.8.12, 1.8.13, 1.8.13, 1.8.14, 1.8.14, 1.8.15, 1.8.15, 1.8.16, 1.8.16, 1.8.17, 1.8.17, 1.8.18, 1.8.18, 1.8.19, 1.8.19, 1.8.20, 1.8.20, 1.8.21, 1.8.21, 1.8.22, 1.8.22, 1.8.23, 1.8.23, 1.8.24, 1.8.24, 1.8.25, 1.8.25, 1.8.26, 1.8.26, 1.8.27, 1.8.27, 1.8.28, 1.8.28, 1.8.29, 1.8.29, 1.8.30, 1.8.30, 1.8.31, 1.8.31, 1.8.32, 1.8.32, 1.8.33, 1.8.33, 1.8.34, 1.8.34, 1.8.35, 1.8.35, 1.8.36, 1.8.36, 1.8.37, 1.8.37, 1.8.38, 1.8.38, 1.8.39, 1.8.39, 1.8.40, 1.8.40, 1.8.41, 1.8.41, 1.8.42, 1.8.42, 1.8.43, 1.8.43, 1.8.44, 1.8.44, 1.8.45, 1.8.45, 1.8.46, 1.8.46, 1.8.47, 1.8.47, 1.8.48, 1.8.48, 1.8.49, 1.8.49, 1.8.50, 1.8.50, 1.9.0, 1.9.0, 1.9.1, 1.9.1, 1.9.2, 1.9.2, 1.9.3, 1.9.3, 1.9.4, 1.9.4, 1.9.5, 1.9.5, 1.9.6, 1.9.6, 1.9.7, 1.9.7, 1.9.8, 1.9.8, 1.9.9, 1.9.9, 1.9.10, 1.9.10, 1.9.11, 1.9.11, 1.9.12, 1.9.12, 1.9.13, 1.9.13, 1.9.14, 1.9.14, 1.9.15, 1.9.15, 1.9.16, 1.9.16, 1.9.17, 1.9.17, 1.9.18, 1.9.18, 1.9.19, 1.9.19, 1.9.20, 1.9.20, 1.9.21, 1.9.21, 1.9.22, 1.9.22, 1.9.23, 1.9.23, 1.10.0, 1.10.0, 1.10.1, 1.10.1, 1.10.2, 1.10.2, 1.10.3, 1.10.3, 1.10.4, 1.10.4, 1.10.5, 1.10.5, 1.10.6, 1.10.6, 1.10.7, 1.10.7, 1.10.8, 1.10.8, 1.10.9, 1.10.9, 1.10.10, 1.10.10, 1.10.11, 1.10.11, 1.10.12, 1.10.12, 1.10.13, 1.10.13, 1.10.14, 1.10.14, 1.10.15, 1.10.15, 1.10.16, 1.10.16, 1.10.17, 1.10.17, 1.10.18, 1.10.18, 1.10.19, 1.10.19, 1.10.20, 1.10.20, 1.10.21, 1.10.21, 1.10.22, 1.10.22, 1.10.23, 1.10.23, 1.10.24, 1.10.24, 1.10.25, 1.10.25, 1.10.26, 1.10.26, 1.10.27, 1.10.27, 1.10.28, 1.10.28, 1.10.29, 1.10.29, 1.10.30, 1.10.30, 1.10.31, 1.10.31, 1.10.32, 1.10.32, 1.10.33, 1.10.33, 1.10.34, 1.10.34, 1.10.35, 1.10.35, 1.10.36, 1.10.36, 1.10.37, 1.10.37, 1.10.38, 
1.10.38, 1.10.39, 1.10.39, 1.10.40, 1.10.40, 1.10.41, 1.10.41, 1.10.42, 1.10.42, 1.10.43, 1.10.43, 1.10.44, 1.10.44, 1.10.45, 1.10.45, 1.10.46, 1.10.46, 1.10.47, 1.10.47, 1.10.48, 1.10.48, 1.10.49, 1.10.49, 1.10.50, 1.10.50, 1.10.51, 1.10.51, 1.10.52, 1.10.52, 1.10.53, 1.10.53, 1.10.54, 1.10.54, 1.10.55, 1.10.55, 1.10.56, 1.10.56, 1.10.57, 1.10.57, 1.10.58, 1.10.58, 1.10.59, 1.10.59, 1.10.60, 1.10.60, 1.10.61, 1.10.61, 1.10.62, 1.10.62, 1.10.63, 1.10.63, 1.10.64, 1.10.64, 1.10.65, 1.10.65, 1.10.66, 1.10.66, 1.10.67, 1.10.67, 1.10.68, 1.10.68, 1.10.69, 1.10.69, 1.10.70, 1.10.70, 1.10.71, 1.10.71, 1.10.72, 1.10.72, 1.10.73, 1.10.73, 1.10.74, 1.10.74, 1.10.75, 1.10.75, 1.10.76, 1.10.76, 1.10.77, 1.10.77, 1.10.78, 1.10.78, 1.10.79, 1.10.79, 1.10.80, 1.10.80, 1.10.81, 1.10.81, 1.10.82, 1.10.82, 1.10.83, 1.10.83, 1.10.84, 1.10.84, 1.11.0, 1.11.0, 1.11.1, 1.11.1, 1.11.2, 1.11.2, 1.11.3, 1.11.3 There are incompatible versions in the resolved dependencies. ``` This is due to the fact that [this line in moto](https://github.com/spulec/moto/blob/master/setup.py#L12) requires less than version 1.11 of botocore, and [this line in boto3](https://github.com/boto/boto3/blob/develop/setup.py#L17) requires greater than or equal to 1.11.3. This is installing moto 1.3.5, the expectation is no warning for mismatches in dependencies.
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.9.16,<1.11\",\n \"cookies\",\n \"cryp...
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cookies\",\n \...
diff --git a/setup.py b/setup.py index 16aaf145294f..bcb48a967142 100755 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ install_requires = [ "Jinja2>=2.7.3", "boto>=2.36.0", - "boto3>=1.6.16", + "boto3>=1.6.16,<1.8", "botocore>=1.9.16,<1.11", "cookies", "cryptography>=2.0.0",
geopandas__geopandas-372
bbox filter from read_file doesn't take advantage of fiona filtering In line: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L28 The function goes through the trouble of checking if `bbox` is not null, but just calls `f` in `from_features` just the same. Line 28 just needs to be changed to the intended `f_filt` to return filtered results or non-filtered if no bbox is passed in.
[ { "content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative pat...
[ { "content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative pat...
diff --git a/geopandas/io/file.py b/geopandas/io/file.py index d407615347..1ac2f88635 100644 --- a/geopandas/io/file.py +++ b/geopandas/io/file.py @@ -25,7 +25,7 @@ def read_file(filename, **kwargs): f_filt = f.filter(bbox=bbox) else: f_filt = f - gdf = GeoDataFrame.from_features(f, crs=crs) + gdf = GeoDataFrame.from_features(f_filt, crs=crs) return gdf diff --git a/geopandas/io/tests/test_io.py b/geopandas/io/tests/test_io.py index e8e87f9d73..c6a1cf5e94 100644 --- a/geopandas/io/tests/test_io.py +++ b/geopandas/io/tests/test_io.py @@ -54,3 +54,15 @@ def test_read_file(self): df = self.df.rename(columns=lambda x: x.lower()) validate_boro_df(self, df) self.assert_(df.crs == self.crs) + + def test_filtered_read_file(self): + full_df_shape = self.df.shape + nybb_filename, nybb_zip_path = download_nybb() + vfs = 'zip://' + nybb_filename + bbox = (1031051.7879884212, 224272.49231459625, 1047224.3104931959, 244317.30894023244) + filtered_df = read_file(nybb_zip_path, vfs=vfs, bbox=bbox) + filtered_df_shape = filtered_df.shape + assert(full_df_shape != filtered_df_shape) + assert(filtered_df_shape == (2, 5)) + +
ibis-project__ibis-8364
bug: `Scalar.isin(Column)` returns a Column, not a Scalar ### What happened? ```python import ibis needle = ibis.literal(2) haystack = ibis.memtable({"x": [1, 2, 3]}).x type(needle.isin(haystack)) # ibis.expr.types.logical.BooleanColumn ``` ### What version of ibis are you using? main ### What backend(s) are you using, if any? _No response_ ### Relevant log output _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "from __future__ import annotations\n\nfrom public import public\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis.common.annotations import attribute\nfrom ibis.common.exceptions import IntegrityError\nfrom ibis.expr.operations.core imp...
[ { "content": "from __future__ import annotations\n\nfrom public import public\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis.common.annotations import attribute\nfrom ibis.common.exceptions import IntegrityError\nfrom ibis.expr.operations.core imp...
diff --git a/ibis/expr/operations/subqueries.py b/ibis/expr/operations/subqueries.py index 76f3cd8a9cc6..229c5bb56c61 100644 --- a/ibis/expr/operations/subqueries.py +++ b/ibis/expr/operations/subqueries.py @@ -53,7 +53,7 @@ class InSubquery(Subquery): needle: Value dtype = dt.boolean - shape = ds.columnar + shape = rlz.shape_like("needle") def __init__(self, rel, needle): if len(rel.schema) != 1: diff --git a/ibis/tests/expr/test_value_exprs.py b/ibis/tests/expr/test_value_exprs.py index ebb86c7a4d11..aec143083ff5 100644 --- a/ibis/tests/expr/test_value_exprs.py +++ b/ibis/tests/expr/test_value_exprs.py @@ -1713,3 +1713,13 @@ def test_deferred_doesnt_convert_callables(): b=t.b.split(",").filter(lambda pp: ~pp.isin(("word1", "word2"))) ) assert expr.equals(expected) + + +def test_in_subquery_shape(): + t = ibis.table([("a", "int64"), ("b", "string")]) + + expr = t.a.cast("string").isin(t.b) + assert expr.op().shape.is_columnar() + + expr = ibis.literal(2).isin(t.a) + assert expr.op().shape.is_scalar()
mlcommons__GaNDLF-753
All training is failing with a `timm` error **Describe the bug** Unable to train on current master. **To Reproduce** Steps to reproduce the behavior: 1. Try to start any segmentation training. 2. See error: ```python-traceback Traceback (most recent call last): File "/software/gandlf_personal/gandlf_run", line 11, in <module> from GANDLF.cli import main_run, copyrightMessage File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/__init__.py", line 2, in <module> from .main_run import main_run File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/main_run.py", line 4, in <module> from GANDLF.training_manager import TrainingManager, TrainingManager_split File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/training_manager.py", line 6, in <module> from GANDLF.compute import training_loop File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/__init__.py", line 1, in <module> from .training_loop import training_loop File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/training_loop.py", line 30, in <module> from .generic import create_pytorch_objects File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/generic.py", line 3, in <module> from GANDLF.models import get_model File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/__init__.py", line 32, in <module> from .imagenet_unet import imagenet_unet_wrapper File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/imagenet_unet.py", line 7, in <module> from segmentation_models_pytorch.base import ( File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/__init__.py", line 2, in <module> from . 
import encoders File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/encoders/__init__.py", line 1, in <module> import timm File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/__init__.py", line 2, in <module> from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/__init__.py", line 28, in <module> from .maxxvit import * File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/maxxvit.py", line 225, in <module> @dataclass ^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 1230, in dataclass return wrap(cls) ^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 1220, in wrap return _process_class(cls, init, repr, eq, order, unsafe_hash, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 958, in _process_class cls_fields.append(_get_field(cls, name, type, kw_only)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 815, in _get_field raise ValueError(f'mutable default {type(f.default)} for field ' ValueError: mutable default <class 'timm.models.maxxvit.MaxxVitConvCfg'> for field conv_cfg is not allowed: use default_factory ``` **Expected behavior** It should work. **Screenshots** N.A. **GaNDLF Version** <!-- Put the output of the following command: python -c 'import GANDLF as g;print(g.__version__)' --> 0.0.18-dev **Desktop (please complete the following information):** N.A. **Additional context** N.A.
[ { "content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"REA...
[ { "content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"REA...
diff --git a/setup.py b/setup.py index 46323e207..464f7a603 100644 --- a/setup.py +++ b/setup.py @@ -110,7 +110,7 @@ def run(self): "pydicom", "onnx", "torchinfo==1.7.0", - "segmentation-models-pytorch==0.3.2", + "segmentation-models-pytorch==0.3.3", "ACSConv==0.1.1", "docker", "dicom-anonymizer",
Lightning-Universe__lightning-flash-1667
`ObjectDetectionData.from_images` raise an error ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> after pass the datamodule into finetune, at the end of the epoch it throws an error message: ``` /opt/conda/lib/python3.7/site-packages/flash/core/data/utilities/classification.py in _strip(x) 44 45 def _strip(x: str) -> str: ---> 46 return x.strip(", ") 47 48 AttributeError: 'int' object has no attribute 'strip' ``` ### To Reproduce #### Code sample datamodule snipset code was from documentation ```py from flash.image import ObjectDetectionData, ObjectDetector from PIL import Image import numpy as np from flash import Trainer datamodule = ObjectDetectionData.from_images( train_images=[ Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype="uint8")), Image.fromarray(np.random.randint(0, 255,(512, 512, 3), dtype="uint8")), Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype="uint8")), ], train_targets=[["cat"], ["cat"], ["cat"]], train_bboxes=[ [{"xmin": 10, "ymin": 20, "width": 5, "height": 10}], [{"xmin": 20, "ymin": 30, "width": 10, "height": 10}], [{"xmin": 10, "ymin": 20, "width": 5, "height": 25}], ], predict_images=[Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype="uint8"))], transform_kwargs=dict(image_size=(512, 512)), batch_size=2, ) model=ObjectDetector( head="efficientdet", backbone="d0", image_size=512, labels=datamodule.labels, ) trainer = Trainer(max_epochs=20,accelerator="gpu") trainer.finetune(model, datamodule=datamodule, strategy="freeze") ``` ### Expected behavior It should not throw an error after the end of an epoch ### Environment - OS (e.g., Linux): Ubuntu - Python version: 3.7.111 - PyTorch/Lightning/Flash Version : pytorch-lightning==1.7.0, lightning-flash==0.8.0 - GPU models and configuration: Quadro T2000, Cuda Version 11.4
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required ...
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required ...
diff --git a/src/flash/core/data/utilities/classification.py b/src/flash/core/data/utilities/classification.py index 19a40e0449..6bd6992a5a 100644 --- a/src/flash/core/data/utilities/classification.py +++ b/src/flash/core/data/utilities/classification.py @@ -42,8 +42,11 @@ def _as_list(x: Union[List, Tensor, np.ndarray]) -> List: return x -def _strip(x: str) -> str: - return x.strip(", ") +def _strip(x: Union[str, int]) -> str: + """Replace both ` ` and `,` from str.""" + if isinstance(x, str): + return x.strip(", ") + return str(x) @dataclass
scrapy__scrapy-742
Sort spider names on 'scrapy list' command Hey there, i think the spider names on the `scrapy list` command should be order by name in the console output.
[ { "content": "from __future__ import print_function\nfrom scrapy.command import ScrapyCommand\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def short_desc(self):\n return \"List available spiders\"\n\n def run(self, args, opts):\n...
[ { "content": "from __future__ import print_function\nfrom scrapy.command import ScrapyCommand\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def short_desc(self):\n return \"List available spiders\"\n\n def run(self, args, opts):\n...
diff --git a/scrapy/commands/list.py b/scrapy/commands/list.py index f08d57f6f8a..0ea9c2313e4 100644 --- a/scrapy/commands/list.py +++ b/scrapy/commands/list.py @@ -11,5 +11,5 @@ def short_desc(self): def run(self, args, opts): crawler = self.crawler_process.create_crawler() - for s in crawler.spiders.list(): + for s in sorted(crawler.spiders.list()): print(s)
scikit-image__scikit-image-3650
tifffile: try to use the one in the user's install first Should we try importing tifffile before using the one we versionned it?
[ { "content": "from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifie...
[ { "content": "try:\n from tifffile import TiffFile, imsave, parse_kwargs\nexcept ImportError:\n from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n...
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py index 21499ffb12d..92752d43c4e 100644 --- a/skimage/io/_plugins/tifffile_plugin.py +++ b/skimage/io/_plugins/tifffile_plugin.py @@ -1,4 +1,7 @@ -from ...external.tifffile import TiffFile, imsave, parse_kwargs +try: + from tifffile import TiffFile, imsave, parse_kwargs +except ImportError: + from ...external.tifffile import TiffFile, imsave, parse_kwargs def imread(fname, dtype=None, **kwargs):
Parsl__parsl-140
Do not import `parsl` before requirements are setup ``` [annawoodard@midway001 parsl]$ python setup.py install Traceback (most recent call last): File "setup.py", line 2, in <module> from parsl.version import VERSION File "/home/annawoodard/parsl/parsl/__init__.py", line 35, in <module> from parsl.executors.ipp import IPyParallelExecutor File "/home/annawoodard/parsl/parsl/executors/ipp.py", line 4, in <module> from ipyparallel import Client ModuleNotFoundError: No module named 'ipyparallel' ``` Setuptools is supposed to take care of dependencies for us, but importing parsl in `setup.py` breaks that (because we require the dependencies by importing the parsl version from `version.py` before they can be installed). We should avoid this.
[ { "content": "from setuptools import setup, find_packages\nfrom parsl.version import VERSION\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple d...
[ { "content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n ...
diff --git a/setup.py b/setup.py index c8f4f73db1..3127db154e 100755 --- a/setup.py +++ b/setup.py @@ -1,5 +1,7 @@ from setuptools import setup, find_packages -from parsl.version import VERSION + +with open('parsl/version.py') as f: + exec(f.read()) with open('requirements.txt') as f: install_requires = f.readlines()
pex-tool__pex-1905
AtomicDirectory masks UUID4 collisions Right now, either with an exclusive lock where file locking fails or with a non-exclusive lock, two attempts at creating an atomic directory might collide in the case UUID4 hits a collision: https://github.com/pantsbuild/pex/blob/9901a05d0ec8aee9b8a6e05c6f2a00999df3bab6/pex/common.py#L331-L335 If that ever happened, it would happen silently and the two processes would race each other filling out the work_dir leading to a final os.rename of the work_dir to the target_dir with unexpected contents: https://github.com/pantsbuild/pex/blob/9901a05d0ec8aee9b8a6e05c6f2a00999df3bab6/pex/common.py#L469-L471 Even though this scenario is highly unlikely, it would be better to get an Exception raised than to get directory corruption.
[ { "content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport atexit\nimport contextlib\nimport errno\nimport fcntl\nimport itertools\nimport os\nimport re\nimp...
[ { "content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport atexit\nimport contextlib\nimport errno\nimport fcntl\nimport itertools\nimport os\nimport re\nimp...
diff --git a/pex/common.py b/pex/common.py index 0cf30958a..36a768e11 100644 --- a/pex/common.py +++ b/pex/common.py @@ -466,7 +466,7 @@ def unlock(): return try: - safe_mkdir(atomic_dir.work_dir) + os.makedirs(atomic_dir.work_dir) yield atomic_dir atomic_dir.finalize(source=source) finally:
pre-commit__pre-commit-167
npmrc causes npm to install to home directory instead of nodeenv Here is what happened when I tried to get eslint installed: ``` $ pre-commit run --all-files eslint..............................................................................................................................................................................................................................................................................................................Failed hookid: eslint xargs: eslint: No such file or directory ``` Moving .npmrc to nope.npmrc fixed the issue.
[ { "content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2....
[ { "content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2....
diff --git a/setup.py b/setup.py index cff71ebe6..62ea21198 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ 'aspy.yaml', 'cached-property', 'jsonschema', - 'nodeenv>=0.9.4', + 'nodeenv>=0.11.1', 'ordereddict', 'plumbum', 'pyyaml',
conan-io__conan-8167
[bug] YCM generator uses deprecated FlagsForFile method instead of Settings <!-- Please don't forget to update the issue title. Include all applicable information to help us reproduce your problem. To help us debug your issue please explain: --> ### Environment Details (include every applicable attribute) * Operating System+version: macOS 10.14.5 * Compiler+version: clang 10.0.1 * Conan version: 1.31.4 * Python version: 3.9.0 ### Steps to reproduce (Include if Applicable) Follow instructions at https://docs.conan.io/en/latest/integrations/ide/youcompleteme.html#youcompleteme-integration to configure `.ycm_extra_conf` and `conan_ycm_flags.json`: conanfile.txt ``` [generators] ycm ``` ```bash # from your base folder $ cp build/conan_ycm_extra_conf.py .ycm_extra_conf.py $ ln -s build/conan_ycm_flags.json conan_ycm_flags.json ``` Install `gtest` as a package, and then import it in a source file. ### Logs (Executed commands with output) (Include/Attach if Applicable) <!-- Your log content should be related to the bug description, it can be: - Conan command output - Server output (Artifactory, conan_server) --> YCM was unable to find the gtest package as installed by conan. YCM Debug Info: ``` Printing YouCompleteMe debug information... 
-- Resolve completions: Up front -- Client logfile: /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/ycm_x9dk66na.log -- Server Python interpreter: /usr/local/opt/python@3.9/bin/python3.9 -- Server Python version: 3.9.0 -- Server has Clang support compiled in: True -- Clang version: clang version 10.0.0 -- Extra configuration file found and loaded -- Extra configuration path: /Users/username/home/projects/project/.ycm_extra_conf.py -- C-family completer debug information: -- Clangd running -- Clangd process ID: 56305 -- Clangd executable: ['/Users/username/.vim/plugged/YouCompleteMe/third_party/ycmd/third_party/clangd/output/bin/clangd', '-header-insertion-decorators=0', '-resource-dir=/Users/ username/.vim/plugged/YouCompleteMe/third_party/ycmd/third_party/clang/lib/clang/10.0.0', '-limit-results=500', '-log=verbose'] -- Clangd logfiles: -- /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/clangd_stderr615mhccn.log -- Clangd Server State: Initialized -- Clangd Project Directory: /Users/username/home/projects/project -- Clangd Settings: {} -- Clangd Compilation Command: False -- Server running at: http://127.0.0.1:50225 -- Server process ID: 56303 -- Server logfiles: -- /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/ycmd_50225_stdout_nstboyjy.log -- /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/ycmd_50225_stderr_ey11rfes.log ``` As can be seen, `clangd` is not using the flags `'-x', 'c++'` as defined in the default `flags` list in the generated `.ycm_extra_conf.py`, or the `gtest` package as installed by conan. The generated `conan_ycm_flags.json` file contains the following: ``` { "includes": [ "-isystem/Users/username/.conan/data/gtest/1.10.0/_/_/package/03ad53d73db1da068548d1d6a87ac3219077b5c0/include", "-isystem/Users/username/.conan/data/rapidjson/1.1.0/_/_/package/5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9/include" ], "defines": [], "flags": [] } ``` These flags are also not included in the compilation arguments. 
The issue appears to be caused by the fact that the [generator](https://github.com/conan-io/conan/blob/develop/conans/client/generators/ycm.py) uses the deprecated `FlagsForFile` method instead of it's replacement, `Settings`. This can be resolved by modifying line 143 from: ```python def FlagsForFile( filename, **kwargs ): ``` to ```python def Settings( filename, **kwargs): ``` As a new user of YCM and conan, this took an inordinate amount of time to troubleshoot, though it is relatively trivial.
[ { "content": "import json\n\nfrom conans.model import Generator\n\n\nclass YouCompleteMeGenerator(Generator):\n template = '''\n# This file is NOT licensed under the GPLv3, which is the license for the rest\n# of YouCompleteMe.\n#\n# Here's the license text for this file:\n#\n# This is free and unencumbered ...
[ { "content": "import json\n\nfrom conans.model import Generator\n\n\nclass YouCompleteMeGenerator(Generator):\n template = '''\n# This file is NOT licensed under the GPLv3, which is the license for the rest\n# of YouCompleteMe.\n#\n# Here's the license text for this file:\n#\n# This is free and unencumbered ...
diff --git a/conans/client/generators/ycm.py b/conans/client/generators/ycm.py index 1a147d01ca4..3e391be6345 100644 --- a/conans/client/generators/ycm.py +++ b/conans/client/generators/ycm.py @@ -140,7 +140,7 @@ def GetCompilationInfoForFile( filename ): return database.GetCompilationInfoForFile( filename ) -def FlagsForFile( filename, **kwargs ): +def Settings( filename, **kwargs ): relative_to = None compiler_flags = None
streamlit__streamlit-4724
streamlit's webserver not working when using pdm for installation ### Summary When I'm using [pdm](https://pdm.fming.dev/) to install streamlit, I cannot connect to streamlit's webserver. From pdm's web site: PDM is a modern Python package manager with PEP 582 support. ### Steps to reproduce 1. Install PDM: `pipx install pdm` 2. Create new directory and go there: `mkdir st_test; cd st_test` 3. Init pdm: `pdm init` (and use the defaults) 4. Install streamlit: `pdm add streamlit` 5. Use any minimal streamlit example and run streamlit: `pdm run streamlit run hello_world.py` **Expected behavior:** Browser opens correct web page served by streamlit. **Actual behavior:** Browser tries to connect to `localhost:3000` (which is also mentioned in the logs), but when I look at the logs, the server actually runs on port 8501. When I try this port, I get a 404, so I _can_ connect to the server on this port, but something's broken. ### Is this a regression? That is, did this use to work the way you expected in the past? never tried before ### Debug info - Streamlit version: 0.80.0 - Python version: 3.8.8 - Using pdm - OS version: Linux 5.11.11 - Browser version: Chrome 89.0.4389.114 (Official Build) (64-bit) ### Additional information jupyter-lab show a similar issue with pdm, but it can be fixed by running `pdm run jupyter-lab --core-mode` streamlit's webserver not working when using pdm for installation ### Summary When I'm using [pdm](https://pdm.fming.dev/) to install streamlit, I cannot connect to streamlit's webserver. From pdm's web site: PDM is a modern Python package manager with PEP 582 support. ### Steps to reproduce 1. Install PDM: `pipx install pdm` 2. Create new directory and go there: `mkdir st_test; cd st_test` 3. Init pdm: `pdm init` (and use the defaults) 4. Install streamlit: `pdm add streamlit` 5. 
Use any minimal streamlit example and run streamlit: `pdm run streamlit run hello_world.py` **Expected behavior:** Browser opens correct web page served by streamlit. **Actual behavior:** Browser tries to connect to `localhost:3000` (which is also mentioned in the logs), but when I look at the logs, the server actually runs on port 8501. When I try this port, I get a 404, so I _can_ connect to the server on this port, but something's broken. ### Is this a regression? That is, did this use to work the way you expected in the past? never tried before ### Debug info - Streamlit version: 0.80.0 - Python version: 3.8.8 - Using pdm - OS version: Linux 5.11.11 - Browser version: Chrome 89.0.4389.114 (Official Build) (64-bit) ### Additional information jupyter-lab show a similar issue with pdm, but it can be fixed by running `pdm run jupyter-lab --core-mode`
[ { "content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
[ { "content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
diff --git a/lib/streamlit/config.py b/lib/streamlit/config.py index b1733341d2f9..b763ff4ba4bc 100644 --- a/lib/streamlit/config.py +++ b/lib/streamlit/config.py @@ -263,6 +263,7 @@ def _global_development_mode() -> bool: not env_util.is_pex() and "site-packages" not in __file__ and "dist-packages" not in __file__ + and "__pypackages__" not in __file__ )
spyder-ide__spyder-4602
Move to support only Rope 0.10.5+ That's because 0.10.5 is the first version to support Python 2 and 3 in the same package.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific PYthon Development EnviRonment\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimp...
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific PYthon Development EnviRonment\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimp...
diff --git a/README.md b/README.md index ebdc37c1983..91636309080 100644 --- a/README.md +++ b/README.md @@ -143,7 +143,7 @@ a Python version greater than 2.7 (Python 3.2 is not supported anymore). * **Python** 2.7 or 3.3+ * **PyQt5** 5.2+ or **PyQt4** 4.6+: PyQt5 is recommended. * **qtconsole** 4.2.0+: Enhanced Python interpreter. -* **Rope** and **Jedi**: Editor code completion, calltips +* **Rope** 0.10.5+ and **Jedi** 0.9.0+: Editor code completion, calltips and go-to-definition. * **Pyflakes**: Real-time code analysis. * **Sphinx**: Rich text mode for the Help pane. diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 1d019baf85c..4f208a914bc 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -18,10 +18,9 @@ requirements: - python.app # [osx] - pyqt >=5.6.0 # [osx] - pyqt # [not osx] - - rope 0.9.* # [py34 or py35] - - rope # [py27] + - rope >=0.10.5 - pyflakes - - jedi + - jedi >=0.9.0 - qtconsole >=4.2.0 - nbconvert - pygments >=2.0 diff --git a/doc/installation.rst b/doc/installation.rst index 1b255ddae28..788107fd528 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -160,7 +160,7 @@ The requirements to run Spyder are: * `Qtconsole <http://jupyter.org/qtconsole/stable/>`_ >=4.2.0 -- for an enhanced Python interpreter. -* `Rope <http://rope.sourceforge.net/>`_ >=0.9.4 and +* `Rope <http://rope.sourceforge.net/>`_ >=0.10.5 and `Jedi <http://jedi.jedidjah.ch/en/latest/>`_ >=0.9.0 -- for code completion, go-to-definition and calltips on the Editor. 
diff --git a/requirements/requirements.txt b/requirements/requirements.txt index a94aacd1b16..5d335425ef0 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,4 +1,4 @@ -rope>=0.9.4 +rope>=0.10.5 jedi>=0.9.0 pyflakes pygments>=2.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000000..3c6e79cf31d --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/setup.py b/setup.py index a34462ed253..c340679da93 100644 --- a/setup.py +++ b/setup.py @@ -272,7 +272,7 @@ def run(self): import setuptools # analysis:ignore install_requires = [ - 'rope_py3k' if PY3 else 'rope>=0.9.4', + 'rope>=0.10.5', 'jedi>=0.9.0', 'pyflakes', 'pygments>=2.0',
google__flax-2407
Outdated `rich` dependency version The version of `rich` is currently limited to `rich~=11.1`, causing problems with `pip` dependency resolution when installing with other packages. https://github.com/google/flax/blob/cda7a4c85bbce744e412ab82e298ddf76d4770d2/setup.py#L33 Should be a trivial fix since `flax.linen.summary` doesn't seem to need any changes, I'll open a PR.
[ { "content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by ap...
[ { "content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by ap...
diff --git a/setup.py b/setup.py index 8e4a6e8ba..c0a508e8f 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ "matplotlib", # only needed for tensorboard export "msgpack", "optax", - "rich~=11.1", + "rich>=11.1", "typing_extensions>=4.1.1", "PyYAML>=5.4.1", ]
pyca__cryptography-1599
Update year in copyright notice for vectors Refs #1597
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__...
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__...
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py index aa6fce09e639..f17d7b8e2f8f 100644 --- a/vectors/cryptography_vectors/__about__.py +++ b/vectors/cryptography_vectors/__about__.py @@ -20,4 +20,4 @@ __email__ = "cryptography-dev@python.org" __license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2013-2014 %s" % __author__ +__copyright__ = "Copyright 2013-2015 %s" % __author__
googleapis__google-api-python-client-871
AttributeError: module 'googleapiclient' has no attribute '__version__' When importing new version of google-api-python-client `from apiclient import discovery` i'm getting the error `AttributeError: module 'googleapiclient' has no attribute '__version__'` https://github.com/googleapis/google-api-python-client/blob/84d45619d753cb04d957651886231034194058b6/apiclient/__init__.py#L22 i guess this happens since you have removed `__version__` var from `googleapiclient/__init__.py` https://github.com/googleapis/google-api-python-client/commit/f706cfd821ab7457e5db37abfc3619772657dd0e#diff-b926d296d4c856bcbf877809e4523562L15 can you please fix? @busunkim96 @mik-laj @crwilcox Traceback: ``` from apiclient import discovery File "/usr/local/lib/python3.7/site-packages/apiclient/__init__.py", line 22, in <module> __version__ = googleapiclient.__version__ AttributeError: module 'googleapiclient' has no attribute '__version__'
[ { "content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mime...
[ { "content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mime...
diff --git a/apiclient/__init__.py b/apiclient/__init__.py index 38dd24b111c..8d9c4ecb8f3 100644 --- a/apiclient/__init__.py +++ b/apiclient/__init__.py @@ -19,8 +19,6 @@ sample_tools = None from googleapiclient import schema -__version__ = googleapiclient.__version__ - _SUBMODULES = { "channel": channel, "discovery": discovery,
scikit-hep__awkward-1830
`ak.fill_none(axis=None)` does nothing ### Version of Awkward Array main ### Description and code to reproduce The `apply` function for this case does not return (or recurse)
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in whi...
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in whi...
diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py index 0e9c90d745..8b6e7ce3c8 100644 --- a/src/awkward/operations/ak_fill_none.py +++ b/src/awkward/operations/ak_fill_none.py @@ -107,8 +107,8 @@ def maybe_fillna(layout): if axis is None: - def action(layout, depth, depth_context, **kwargs): - layout = maybe_fillna(layout) + def action(layout, continuation, **kwargs): + return maybe_fillna(continuation()) else: diff --git a/tests/test_1823-fill-none-axis-none.py b/tests/test_1823-fill-none-axis-none.py new file mode 100644 index 0000000000..82ca883b57 --- /dev/null +++ b/tests/test_1823-fill-none-axis-none.py @@ -0,0 +1,15 @@ +# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE + +import numpy as np # noqa: F401 +import pytest # noqa: F401 + +import awkward as ak # noqa: F401 + + +def test(): + array = ak.Array([None, [1, 2, 3, [None, {"x": [None, 2], "y": [1, 4]}]]]) + + assert ak.fill_none(array, -1.0, axis=None).to_list() == [ + -1.0, + [1, 2, 3, [-1.0, {"x": [-1.0, 2], "y": [1, 4]}]], + ]
ibis-project__ibis-3710
bug: repr for interval literals doesn't show unit information `interval` literal values don't show any information about their units, so all values show up looking the same: ``` In [4]: import ibis In [5]: ibis.interval(1, unit="s") Out[5]: 1 In [6]: ibis.interval(1, unit="h") Out[6]: 1 ```
[ { "content": "from __future__ import annotations\n\nimport collections\nimport functools\nimport textwrap\nimport types\nfrom typing import Any, Callable, Deque, Iterable, Mapping, Tuple\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ...
[ { "content": "from __future__ import annotations\n\nimport collections\nimport functools\nimport textwrap\nimport types\nfrom typing import Any, Callable, Deque, Iterable, Mapping, Tuple\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ...
diff --git a/ibis/expr/format.py b/ibis/expr/format.py index a590de6455bb..abe0b7260d30 100644 --- a/ibis/expr/format.py +++ b/ibis/expr/format.py @@ -534,6 +534,8 @@ def _fmt_value_negate(op: ops.Negate, *, aliases: Aliases) -> str: @fmt_value.register def _fmt_value_literal(op: ops.Literal, **_: Any) -> str: + if isinstance(op.dtype, dt.Interval): + return f"{op.value} {op.dtype.unit}" return repr(op.value)
feast-dev__feast-1742
Dependency PyYAML 5.3.* has vulnerability issues ## Expected Behavior According to [CVE-2020-14343](https://nvd.nist.gov/vuln/detail/CVE-2020-14343): > A vulnerability was discovered in the PyYAML library in versions before 5.4, where it is susceptible to arbitrary code execution when it processes untrusted YAML files through the full_load method or with the FullLoader loader. Applications that use the library to process untrusted input may be vulnerable to this flaw. This flaw allows an attacker to execute arbitrary code on the system by abusing the python/object/new constructor. This flaw is due to an incomplete fix for CVE-2020-1747. See CVE-2020-14343. ## Current Behavior Feast Python SDK requires `PyYAML==5.3.*` version. This not only affects Feast, but also any app depending on it, since dependencies are shared. ## Steps to reproduce N/A ### Specifications N/A ## Possible Solution Bump PyYAML to a ">=5.4" version.
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
diff --git a/sdk/python/setup.py b/sdk/python/setup.py index cae6c1d802b..1b8cfc0e687 100644 --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -52,7 +52,7 @@ "protobuf>=3.10", "pyarrow>=2.0.0", "pydantic>=1.0.0", - "PyYAML==5.3.*", + "PyYAML>=5.4.*", "tabulate==0.8.*", "tenacity>=7.*", "toml==0.10.*",
bokeh__bokeh-10106
[BUG] `cd sphinx; make serve` doesn't work #### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages) Bokeh 2.0.2-76-ga417746c9 #### Description of expected behavior and the observed behavior The page at https://docs.bokeh.org/en/latest/docs/dev_guide/documentation.html mentions that it's possible to run `make serve` to serve the documentation locally. But running it results in: ``` Exception in thread Thread-2: Traceback (most recent call last): File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 917, in _bootstrap_inner self.run() File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 865, in run self._target(*self._args, **self._kwargs) File "docserver.py", line 43, in open_browser webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab") File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 78, in open if browser.open(url, new, autoraise): File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 251, in open "expected 0, 1, or 2, got %s" % new) webbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab ``` Not sure where `"tab"` has come from, but it has been there forever.
[ { "content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask...
[ { "content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask...
diff --git a/sphinx/docserver.py b/sphinx/docserver.py index 74c780c5fc5..fbfdd3ffff0 100644 --- a/sphinx/docserver.py +++ b/sphinx/docserver.py @@ -40,7 +40,7 @@ def send_docs(filename): def open_browser(): # Child process time.sleep(0.5) - webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab") + webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2) data = {}
getredash__redash-1118
When archiving a query, delete related alerts Related: #731 . When archiving a query, delete related alerts Related: #731 .
[ { "content": "import json\nfrom flask_login import UserMixin, AnonymousUserMixin\nimport hashlib\nimport logging\nimport os\nimport threading\nimport time\nimport datetime\nimport itertools\nfrom funcy import project\n\nimport peewee\nfrom passlib.apps import custom_app_context as pwd_context\nfrom playhouse.gf...
[ { "content": "import json\nfrom flask_login import UserMixin, AnonymousUserMixin\nimport hashlib\nimport logging\nimport os\nimport threading\nimport time\nimport datetime\nimport itertools\nfrom funcy import project\n\nimport peewee\nfrom passlib.apps import custom_app_context as pwd_context\nfrom playhouse.gf...
diff --git a/rd_ui/app/views/query.html b/rd_ui/app/views/query.html index 9be65037af..812d37076d 100644 --- a/rd_ui/app/views/query.html +++ b/rd_ui/app/views/query.html @@ -8,7 +8,7 @@ <h4 class="modal-title">Query Archive</h4> </div> <div class="modal-body"> Are you sure you want to archive this query? - <br/> All dashboard widgets created with its visualizations will be deleted. + <br/> All alerts and dashboard widgets created with its visualizations will be deleted. </div> <div class="modal-footer"> <button type="button" class="btn btn-default" data-dismiss="modal">No</button> diff --git a/redash/models.py b/redash/models.py index 5cd51abf21..9b9071d0fd 100644 --- a/redash/models.py +++ b/redash/models.py @@ -641,6 +641,9 @@ def archive(self): for w in vis.widgets: w.delete_instance() + for alert in self.alerts: + alert.delete_instance(recursive=True) + self.save() @classmethod diff --git a/tests/factories.py b/tests/factories.py index b3dfae46a4..c91c584293 100644 --- a/tests/factories.py +++ b/tests/factories.py @@ -185,6 +185,14 @@ def create_alert(self, **kwargs): args.update(**kwargs) return alert_factory.create(**args) + def create_alert_subscription(self, **kwargs): + args = { + 'user': self.user + } + + args.update(**kwargs) + return alert_subscription_factory.create(**args) + def create_data_source(self, **kwargs): args = { 'org': self.org @@ -274,6 +282,3 @@ def create_api_key(self, **kwargs): def create_destination(self, **kwargs): return destination_factory.create(**kwargs) - - def create_alert_subscription(self, **kwargs): - return alert_subscription_factory.create(**kwargs) diff --git a/tests/test_models.py b/tests/test_models.py index 12c5d5ae24..3304613391 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -276,6 +276,16 @@ def test_removes_scheduling(self): self.assertEqual(None, query.schedule) + def test_deletes_alerts(self): + subscription = self.factory.create_alert_subscription() + query = subscription.alert.query + + 
query.archive() + + self.assertRaises(models.Alert.DoesNotExist, models.Alert.get_by_id, subscription.alert.id) + self.assertRaises(models.AlertSubscription.DoesNotExist, models.AlertSubscription.get_by_id, subscription.id) + + class DataSourceTest(BaseTestCase): def test_get_schema(self): return_value = [{'name': 'table', 'columns': []}]
apache__airflow-14774
[Smart sensor] Runtime error: dictionary changed size during iteration <!-- Welcome to Apache Airflow! For a smooth issue process, try to answer the following questions. Don't worry if they're not all applicable; just try to include what you can :-) If you need to include code snippets or logs, please put them in fenced code blocks. If they're super-long, please use the details tag like <details><summary>super-long log</summary> lots of stuff </details> Please delete these comment blocks before submitting the issue. --> <!-- IMPORTANT!!! PLEASE CHECK "SIMILAR TO X EXISTING ISSUES" OPTION IF VISIBLE NEXT TO "SUBMIT NEW ISSUE" BUTTON!!! PLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!! Please complete the next sections or the issue will be closed. These questions are the first thing we need to know to understand the context. --> **What happened**: <!-- (please include exact error messages if you can) --> Smart Sensor TI crashes with a Runtime error. Here's the logs: ``` RuntimeError: dictionary changed size during iteration File "airflow/sentry.py", line 159, in wrapper return func(task_instance, *args, session=session, **kwargs) File "airflow/models/taskinstance.py", line 1112, in _run_raw_task self._prepare_and_execute_task_with_callbacks(context, task) File "airflow/models/taskinstance.py", line 1285, in _prepare_and_execute_task_with_callbacks result = self._execute_task(context, task_copy) File "airflow/models/taskinstance.py", line 1315, in _execute_task result = task_copy.execute(context=context) File "airflow/sensors/smart_sensor.py", line 736, in execute self.flush_cached_sensor_poke_results() File "airflow/sensors/smart_sensor.py", line 681, in flush_cached_sensor_poke_results for ti_key, sensor_exception in self.cached_sensor_exceptions.items(): ``` **What you expected to happen**: <!-- What do you think went wrong? --> Smart sensor should always execute without any runtime error. 
**How to reproduce it**: I haven't been able to reproduce it consistently since it sometimes works and sometimes errors. **Anything else we need to know**: It's a really noisy error in Sentry. In just 4 days, 3.8k events were reported in Sentry. <!-- How often does this problem occur? Once? Every time etc? Any relevant logs to include? Put them here in side a detail tag: <details><summary>x.log</summary> lots of stuff </details> -->
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (th...
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (th...
diff --git a/airflow/sensors/smart_sensor.py b/airflow/sensors/smart_sensor.py index 9d0a28c65ae00..6c1c16b98f78c 100644 --- a/airflow/sensors/smart_sensor.py +++ b/airflow/sensors/smart_sensor.py @@ -446,6 +446,7 @@ def mark_state(ti, sensor_instance): TI = TaskInstance count_marked = 0 + query_result = [] try: query_result = ( session.query(TI, SI)
tensorflow__tensor2tensor-198
wmt_encs_tokens_32k - Datagen error I am trying to start the d "datagen" for the "wmt_encs_tokens_32k" and I receive the following error: ``` INFO:tensorflow:Generating problems: * wmt_encs_tokens_32k INFO:tensorflow:Generating training data for wmt_encs_tokens_32k. Traceback (most recent call last): File "/home/ahmed/tensorflow/bin/t2t-datagen", line 290, in <module> tf.app.run() File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run _sys.exit(main(_sys.argv[:1] + flags_passthrough)) File "/home/ahmed/tensorflow/bin/t2t-datagen", line 239, in main generate_data_for_registered_problem(problem) File "/home/ahmed/tensorflow/bin/t2t-datagen", line 286, in generate_data_for_registered_problem task_id=task_id) File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/problem.py", line 383, in generate_data self.train_generator(data_dir, tmp_dir, True), File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/wmt.py", line 589, in train_generator data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size, File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/problem.py", line 371, in vocab_file return "%s.%d" % (self.vocab_name, self.targeted_vocab_size) File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/problem.py", line 343, in targeted_vocab_size raise NotImplementedError() # Not needed if self.is_character_level. NotImplementedError ```
[ { "content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#...
[ { "content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#...
diff --git a/README.md b/README.md index edd6460d0..bb0f6f534 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ t2t-datagen \ --problem=$PROBLEM # Train -# * If you run out of memory, add --hparams='batch_size=2048' or even 1024. +# * If you run out of memory, add --hparams='batch_size=1024'. t2t-trainer \ --data_dir=$DATA_DIR \ --problems=$PROBLEM \ @@ -166,7 +166,7 @@ python -c "from tensor2tensor.models.transformer import Transformer" with `Modality` objects, which are specified per-feature in the dataset/task specification. * Support for multi-GPU machines and synchronous (1 master, many workers) and - asynchrounous (independent workers synchronizing through a parameter server) + asynchronous (independent workers synchronizing through a parameter server) [distributed training](https://github.com/tensorflow/tensor2tensor/tree/master/docs/distributed_training.md). * Easily swap amongst datasets and models by command-line flag with the data generation script `t2t-datagen` and the training script `t2t-trainer`. diff --git a/tensor2tensor/data_generators/wmt.py b/tensor2tensor/data_generators/wmt.py index 456f36321..bcd29e1d4 100644 --- a/tensor2tensor/data_generators/wmt.py +++ b/tensor2tensor/data_generators/wmt.py @@ -574,7 +574,7 @@ class WMTEnCsTokens32k(WMTProblem): """Problem spec for WMT English-Czech translation.""" @property - def target_vocab_size(self): + def targeted_vocab_size(self): return 2**15 # 32768 @property
facebookresearch__xformers-819
TypeError: Trainer.__init__() got an unexpected keyword argument 'gpus' # 🐛 Bug When running [xformers_mingpt.ipynb](https://colab.research.google.com/github/facebookresearch/xformers/blob/main/docs/source/xformers_mingpt.ipynb) in colab there is an arror raised during creation an instance of `Trainer`. ## Command ```python trainer = Trainer( gpus=1, max_epochs=EPOCHS, precision=16, gradient_clip_val=1, log_every_n_steps=1, detect_anomaly=True, accumulate_grad_batches=REF_BATCH // BATCH, ) ``` ## To Reproduce Open [xformers_mingpt.ipynb](https://colab.research.google.com/github/facebookresearch/xformers/blob/main/docs/source/xformers_mingpt.ipynb) in colab and: "Kernel" -> "Run all cells" `TypeError: Trainer.__init__() got an unexpected keyword argument 'gpus'` ![image](https://github.com/facebookresearch/xformers/assets/36787333/9e6e64f6-54d9-4809-80ae-80eb91e2f414) ## Expected behavior `Trainer` object created successfully. ## Environment Default colab env: ``` Collecting environment information... 
PyTorch version: 2.0.1+cu118 Is debug build: False CUDA used to build PyTorch: 11.8 ROCM used to build PyTorch: N/A OS: Ubuntu 22.04.2 LTS (x86_64) GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 Clang version: 14.0.0-1ubuntu1.1 CMake version: version 3.25.2 Libc version: glibc-2.35 Python version: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] (64-bit runtime) Python platform: Linux-5.15.109+-x86_64-with-glibc2.35 Is CUDA available: True CUDA runtime version: 11.8.89 CUDA_MODULE_LOADING set to: LAZY GPU models and configuration: GPU 0: Tesla T4 Nvidia driver version: 525.105.17 cuDNN version: Probably one of the following: /usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.0 HIP runtime version: N/A MIOpen runtime version: N/A Is XNNPACK available: True CPU: Architecture: x86_64 CPU op-mode(s): 32-bit, 64-bit Address sizes: 46 bits physical, 48 bits virtual Byte Order: Little Endian CPU(s): 2 On-line CPU(s) list: 0,1 Vendor ID: GenuineIntel Model name: Intel(R) Xeon(R) CPU @ 2.30GHz CPU family: 6 Model: 63 Thread(s) per core: 2 Core(s) per socket: 1 Socket(s): 1 Stepping: 0 BogoMIPS: 4599.99 Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities Hypervisor vendor: KVM Virtualization type: full L1d cache: 32 KiB (1 instance) L1i cache: 32 KiB (1 instance) 
L2 cache: 256 KiB (1 instance) L3 cache: 45 MiB (1 instance) NUMA node(s): 1 NUMA node0 CPU(s): 0,1 Vulnerability Itlb multihit: Not affected Vulnerability L1tf: Mitigation; PTE Inversion Vulnerability Mds: Vulnerable; SMT Host state unknown Vulnerability Meltdown: Vulnerable Vulnerability Mmio stale data: Vulnerable Vulnerability Retbleed: Vulnerable Vulnerability Spec store bypass: Vulnerable Vulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers Vulnerability Spectre v2: Vulnerable, IBPB: disabled, STIBP: disabled, PBRSB-eIBRS: Not affected Vulnerability Srbds: Not affected Vulnerability Tsx async abort: Not affected Versions of relevant libraries: [pip3] mypy-extensions==1.0.0 [pip3] numpy==1.22.4 [pip3] pytorch-lightning==2.0.6 [pip3] torch==2.0.1+cu118 [pip3] torchaudio==2.0.2+cu118 [pip3] torchdata==0.6.1 [pip3] torchmetrics==1.0.2 [pip3] torchsummary==1.5.1 [pip3] torchtext==0.15.2 [pip3] torchvision==0.15.2+cu118 ``` ## Additional context The same error will arise in [microGPT.p](https://github.com/facebookresearch/xformers/blob/main/examples/microGPT.py#L313) in newer versions of pytorch_lightning
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# A MinGPT + Lightning + xFormers example Code from Sean Naren (@seannaren)\n# This is an homm...
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# A MinGPT + Lightning + xFormers example Code from Sean Naren (@seannaren)\n# This is an homm...
diff --git a/docs/source/xformers_mingpt.ipynb b/docs/source/xformers_mingpt.ipynb index 6f24372d4c..875fe10410 100644 --- a/docs/source/xformers_mingpt.ipynb +++ b/docs/source/xformers_mingpt.ipynb @@ -433,7 +433,7 @@ ")\n", "\n", "trainer = Trainer(\n", - " gpus=1,\n", + " devices=1, accelerator=\"gpu\",\n", " max_epochs=EPOCHS,\n", " precision=16,\n", " gradient_clip_val=1,\n", diff --git a/examples/microGPT.py b/examples/microGPT.py index 6f78643155..72831c0948 100644 --- a/examples/microGPT.py +++ b/examples/microGPT.py @@ -310,7 +310,7 @@ def top_k_logits(logits, k): print(model) trainer = Trainer( - gpus=1, + gpusdevices=1, accelerator="gpu", max_epochs=EPOCHS, precision=16, log_every_n_steps=1,
ocadotechnology__codeforlife-portal-412
Update models search field values in admin
[ { "content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of...
[ { "content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of...
diff --git a/portal/models.py b/portal/models.py index daa12713de..7dcc17e237 100644 --- a/portal/models.py +++ b/portal/models.py @@ -126,7 +126,7 @@ def class_(self): return None def __unicode__(self): - return '%s %s' % (self.user.first_name, self.user.last_name) + return '%s %s' % (self.new_user.first_name, self.new_user.last_name) class Class(models.Model):
dj-stripe__dj-stripe-1964
Creating WebhooksEndpoint locally raises tolerance constraint failed error **Describe the bug** Attempting to create new WebhookEndpoint via django admin fails with `djstripe.models.webhooks.WebhookEndpoint.DoesNotExist: WebhookEndpoint matching query does not exist.` above error is caused by failure to create local object: sqlite: `sqlite3.IntegrityError: NOT NULL constraint failed: djstripe_webhookendpoint.tolerance` mysql: `MySQLdb.IntegrityError: (1048, "Column 'tolerance' cannot be null")` The WebhookEndpoint is successfully created in stripe, but not in the local DB. **To Reproduce** Steps to reproduce the behavior: 1. Fresh django installation with dj-stripe 2. `python3 manage.py migrate && python3 manage.py runserver` 3. Add an API key via Django admin 4. [optionally] run `python3 manage.py djstripe_sync_models` 5. Add a new WebhookEndpoint via django admin **Expected behavior** A new WebhookEndpoint is created in local django app **Environment** - dj-stripe version: 2.8.1 - stripe API version: "2022-11-15" - Database: [any] - Python version: 3.11.4 - Django version: 4.2.3 **Can you reproduce the issue with the latest version of master?** Yes It appears that the API does not return the `tolerance` key, so somewhere along the line this field is explicitly set to `None`, which eventually causes the error `IntegrityError: NOT NULL constraint failed: djstripe_webhookendpoint.tolerance` (as opposed to the default value being used as defined in the model). This issue can be easily reproduced by modifying the webhook fixture (`tests/fixtures/webhook_endpoint_fake0001.json`) to remove the `tolerance` key, and running pytest. I'm very new to dj-stripe, but issue can be resolved by adding a check to `models.webhooks.WebhookEndpoint._attach_objects_hook`: ```python def _attach_objects_hook( self, cls, data, current_ids=None, api_key=djstripe_settings.STRIPE_SECRET_KEY ): ... 
self.djstripe_uuid = data.get("metadata", {}).get("djstripe_uuid") if not isinstance(data.get("tolerance", None), (int, float)): self.tolerance = djstripe_settings.WEBHOOK_TOLERANCE ``` If this is actually a bug, and the proposed fix looks good, I'd be happy to contribute a PR.
[ { "content": "\"\"\"\nModule for dj-stripe Webhook models\n\"\"\"\n\nimport json\nimport warnings\nfrom traceback import format_exc\nfrom uuid import uuid4\n\nimport stripe\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.datastructures import CaseInsensitiveMapping\nfrom djang...
[ { "content": "\"\"\"\nModule for dj-stripe Webhook models\n\"\"\"\n\nimport json\nimport warnings\nfrom traceback import format_exc\nfrom uuid import uuid4\n\nimport stripe\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.datastructures import CaseInsensitiveMapping\nfrom djang...
diff --git a/djstripe/models/webhooks.py b/djstripe/models/webhooks.py index 045ae48fa5..da2053407a 100644 --- a/djstripe/models/webhooks.py +++ b/djstripe/models/webhooks.py @@ -83,6 +83,7 @@ def _attach_objects_hook( ) self.djstripe_uuid = data.get("metadata", {}).get("djstripe_uuid") + self.tolerance = data.get("tolerance", djstripe_settings.WEBHOOK_TOLERANCE) def _get_version():
django-import-export__django-import-export-613
NumberWidget.is_empty() should strip the value if string type At the moment `NumberWidget.is_empty()` check doesn't strip the value before making the `value == ""` comparison. As a consequence, if the value happens to a be a string comprised entirely of spaces e.g `u' '`, the `is_empty()` check evaluates to False. This in effect can cause value errors (e.g `ValueError: could not convert string to float: ` ) in child widgets (`IntegerWidget`, etc) which do a type conversion. To resolve this, I think we should strip the value (if it's a string type) before the comparison. ```Python class NumberWidget(Widget): """ """ def is_empty(self, value): if isinstance(value, six.string_types): value = value.strip() # 0 is not empty return value is None or value == "" ```
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nfrom datetime import datetime, date\nfrom django.utils import datetime_safe, timezone, six\nfrom django.utils.encoding import smart_text\nfrom django.conf import settings\n\ntry:\n from django.utils...
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nfrom datetime import datetime, date\nfrom django.utils import datetime_safe, timezone, six\nfrom django.utils.encoding import smart_text\nfrom django.conf import settings\n\ntry:\n from django.utils...
diff --git a/import_export/widgets.py b/import_export/widgets.py index 72cf7b755..04cb98a61 100644 --- a/import_export/widgets.py +++ b/import_export/widgets.py @@ -56,6 +56,8 @@ class NumberWidget(Widget): """ def is_empty(self, value): + if isinstance(value, six.string_types): + value = value.strip() # 0 is not empty return value is None or value == "" diff --git a/tests/core/tests/widgets_tests.py b/tests/core/tests/widgets_tests.py index ee7c3fbfb..29aec7f70 100644 --- a/tests/core/tests/widgets_tests.py +++ b/tests/core/tests/widgets_tests.py @@ -131,6 +131,28 @@ def test_clean(self): self.assertEqual(self.widget.clean("1:57:00"), self.duration) +class FloatWidgetTest(TestCase): + + def setUp(self): + self.value = 11.111 + self.widget = widgets.FloatWidget() + + def test_clean(self): + self.assertEqual(self.widget.clean(11.111), self.value) + + def test_render(self): + self.assertEqual(self.widget.render(self.value), self.value) + + def test_clean_string_zero(self): + self.assertEqual(self.widget.clean("0"), 0.0) + self.assertEqual(self.widget.clean("0.0"), 0.0) + + def test_clean_empty_string(self): + self.assertEqual(self.widget.clean(""), None) + self.assertEqual(self.widget.clean(" "), None) + self.assertEqual(self.widget.clean("\r\n\t"), None) + + class DecimalWidgetTest(TestCase): def setUp(self): @@ -147,6 +169,11 @@ def test_clean_string_zero(self): self.assertEqual(self.widget.clean("0"), Decimal("0")) self.assertEqual(self.widget.clean("0.0"), Decimal("0")) + def test_clean_empty_string(self): + self.assertEqual(self.widget.clean(""), None) + self.assertEqual(self.widget.clean(" "), None) + self.assertEqual(self.widget.clean("\r\n\t"), None) + class IntegerWidgetTest(TestCase): @@ -161,6 +188,11 @@ def test_clean_string_zero(self): self.assertEqual(self.widget.clean("0"), self.value) self.assertEqual(self.widget.clean("0.0"), self.value) + def test_clean_empty_string(self): + self.assertEqual(self.widget.clean(""), None) + 
self.assertEqual(self.widget.clean(" "), None) + self.assertEqual(self.widget.clean("\n\t\r"), None) + class ForeignKeyWidgetTest(TestCase):
scrapy__scrapy-4311
Consider making METAREFRESH_IGNORE_TAGS an empty list by default As a way to allow users to fix #1422, #3768 introduced the `METAREFRESH_IGNORE_TAGS` setting. To keep backward compatibility, the setting was introduced with `['script', 'noscript']` as default value. However, to reproduce the behavior of web browsers, it seems the right value would be `[]`. Should we switch the default value of the `METAREFRESH_IGNORE_TAGS` setting to `[]`, even though the change breaks backward compatibility?
[ { "content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group...
[ { "content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group...
diff --git a/docs/topics/downloader-middleware.rst b/docs/topics/downloader-middleware.rst index 8a760e53be0..3ec6e0c17cc 100644 --- a/docs/topics/downloader-middleware.rst +++ b/docs/topics/downloader-middleware.rst @@ -868,7 +868,7 @@ Whether the Meta Refresh middleware will be enabled. METAREFRESH_IGNORE_TAGS ^^^^^^^^^^^^^^^^^^^^^^^ -Default: ``['script', 'noscript']`` +Default: ``[]`` Meta tags within these tags are ignored. diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py index c10dc1a1cb3..1a7d35b130e 100644 --- a/scrapy/settings/default_settings.py +++ b/scrapy/settings/default_settings.py @@ -225,7 +225,7 @@ MEMUSAGE_WARNING_MB = 0 METAREFRESH_ENABLED = True -METAREFRESH_IGNORE_TAGS = ['script', 'noscript'] +METAREFRESH_IGNORE_TAGS = [] METAREFRESH_MAXDELAY = 100 NEWSPIDER_MODULE = '' diff --git a/tests/test_downloadermiddleware_redirect.py b/tests/test_downloadermiddleware_redirect.py index e7faf14a7f0..e0f145d0efe 100644 --- a/tests/test_downloadermiddleware_redirect.py +++ b/tests/test_downloadermiddleware_redirect.py @@ -300,19 +300,21 @@ def test_ignore_tags_default(self): body = ('''<noscript><meta http-equiv="refresh" ''' '''content="0;URL='http://example.org/newpage'"></noscript>''') rsp = HtmlResponse(req.url, body=body.encode()) - response = self.mw.process_response(req, rsp, self.spider) - assert isinstance(response, Response) + req2 = self.mw.process_response(req, rsp, self.spider) + assert isinstance(req2, Request) + self.assertEqual(req2.url, 'http://example.org/newpage') - def test_ignore_tags_empty_list(self): - crawler = get_crawler(Spider, {'METAREFRESH_IGNORE_TAGS': []}) + def test_ignore_tags_1_x_list(self): + """Test that Scrapy 1.x behavior remains possible""" + settings = {'METAREFRESH_IGNORE_TAGS': ['script', 'noscript']} + crawler = get_crawler(Spider, settings) mw = MetaRefreshMiddleware.from_crawler(crawler) req = Request(url='http://example.org') body = ('''<noscript><meta 
http-equiv="refresh" ''' '''content="0;URL='http://example.org/newpage'"></noscript>''') rsp = HtmlResponse(req.url, body=body.encode()) - req2 = mw.process_response(req, rsp, self.spider) - assert isinstance(req2, Request) - self.assertEqual(req2.url, 'http://example.org/newpage') + response = mw.process_response(req, rsp, self.spider) + assert isinstance(response, Response) if __name__ == "__main__": unittest.main()
streamlit__streamlit-6507
pandas 2.0 support ### Checklist - [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues. - [X] I added a very descriptive title to this issue. - [X] I have provided sufficient information below to help reproduce this issue. ### Summary ``` The conflict is caused by: The user requested pandas==2.0.0 streamlit 1.20.0 depends on pandas<2 and >=0.25 ``` ### Reproducible Code Example ```Python pip install pandas==2.0.0 pip install streamlit==1.20.0 ``` ### Steps To Reproduce _No response_ ### Expected Behavior _No response_ ### Current Behavior _No response_ ### Is this a regression? - [ ] Yes, this used to work in a previous version. ### Debug info - Streamlit version: 1.20.0 - Python version: 3.11.1 - Operating System: - Browser: - Virtual environment: ### Additional Information the following line should be updated [https://github.com/streamlit/streamlit/blob/11950acfa537475109b421fea6da43c9d410542c/lib/setup.py#L40](url) ### Are you willing to submit a PR? - [ ] Yes, I am willing to submit a PR!
[ { "content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2...
[ { "content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2...
diff --git a/lib/setup.py b/lib/setup.py index 7e9a41d5acb9..74af9808bc31 100644 --- a/lib/setup.py +++ b/lib/setup.py @@ -37,7 +37,7 @@ "importlib-metadata>=1.4", "numpy", "packaging>=14.1", - "pandas<2,>=0.25", + "pandas<3,>=0.25", "pillow>=6.2.0", "protobuf<4,>=3.12", "pyarrow>=4.0", diff --git a/lib/tests/streamlit/elements/data_editor_test.py b/lib/tests/streamlit/elements/data_editor_test.py index e90fe44cbbc8..6540d6f49e41 100644 --- a/lib/tests/streamlit/elements/data_editor_test.py +++ b/lib/tests/streamlit/elements/data_editor_test.py @@ -420,7 +420,7 @@ def test_with_supported_index(self, index: pd.Index): self.assertIsInstance(return_df, pd.DataFrame) @unittest.skipIf( - is_pandas_version_less_than("2.0.0rc1") is False, + is_pandas_version_less_than("2.0.0") is False, "This test only runs if pandas is < 2.0.0", ) def test_with_old_supported_index(self):
dask__distributed-3672
Adding zoom tools to performance_report In some cases ["the devil is in the detail"]( https://en.wikipedia.org/wiki/The_devil_is_in_the_detail ), it would be useful to have the zoom tooltips included in other [`performance_report`]( https://distributed.dask.org/en/latest/diagnosing-performance.html#performance-reports ) panels to allow closer investigation of where time is being spent.
[ { "content": "\"\"\" This module contains utility functions to construct and manipulate counting\ndata structures for frames.\n\nWhen performing statistical profiling we obtain many call stacks. We aggregate\nthese call stacks into data structures that maintain counts of how many times\neach function in that c...
[ { "content": "\"\"\" This module contains utility functions to construct and manipulate counting\ndata structures for frames.\n\nWhen performing statistical profiling we obtain many call stacks. We aggregate\nthese call stacks into data structures that maintain counts of how many times\neach function in that c...
diff --git a/distributed/profile.py b/distributed/profile.py index 5bf071e20da..1bf81ad6ff0 100644 --- a/distributed/profile.py +++ b/distributed/profile.py @@ -383,7 +383,7 @@ def plot_figure(data, **kwargs): source = ColumnDataSource(data=data) - fig = figure(tools="tap", **kwargs) + fig = figure(tools="tap,box_zoom,xwheel_zoom,reset", **kwargs) r = fig.quad( "left", "right",
dask__dask-5627
Support chunksize parameter for read_parquet with a single file I'd like to be able to read a single parquet file into multiple partitions, determined by the chunksize. Without chunksize ```python import pandas as pd import dask.dataframe as dd ​ df = pd.DataFrame({"a":range(100000), "b":range(100000)}) df.to_parquet("out.parquet") ​ df = dd.read_parquet("out.parquet", gather_statistics=True, split_row_groups=True) df.npartitions 1 ``` With chunksize ```python import pandas as pd import dask.dataframe as dd df = pd.DataFrame({"a":range(100000), "b":range(100000)}) df.to_parquet("out.parquet") df = dd.read_parquet("out.parquet", chunksize="10 MiB", gather_statistics=True, split_row_groups=True) --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-20-25c43bb02cd0> in <module> 7 df = dd.read_parquet("out.parquet", 8 chunksize="10 MiB", ----> 9 gather_statistics=True, split_row_groups=True) /opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs) 229 # Parse dataset statistics from metadata (if available) 230 parts, divisions, index, index_in_columns = process_statistics( --> 231 parts, statistics, filters, index, chunksize 232 ) 233 /opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in process_statistics(parts, statistics, filters, index, chunksize) 619 # Aggregate parts/statistics if we are splitting by row-group 620 if chunksize: --> 621 parts, statistics = aggregate_row_groups(parts, statistics, chunksize) 622 623 out = sorted_columns(statistics) /opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in aggregate_row_groups(parts, stats, chunksize) 722 723 def aggregate_row_groups(parts, stats, chunksize): --> 724 if not stats[0]["file_path_0"]: 725 return 
parts, stats 726 KeyError: 'file_path_0' ```
[ { "content": "from distutils.version import LooseVersion\n\nimport toolz\nimport warnings\nfrom ....bytes import core # noqa\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\n\nfrom ...core import DataFrame, new_dd_object\nfrom ....base import tokenize\nfrom ....utils import...
[ { "content": "from distutils.version import LooseVersion\n\nimport toolz\nimport warnings\nfrom ....bytes import core # noqa\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\n\nfrom ...core import DataFrame, new_dd_object\nfrom ....base import tokenize\nfrom ....utils import...
diff --git a/dask/dataframe/io/parquet/core.py b/dask/dataframe/io/parquet/core.py index 7cee9105cb3..1032fe30a6a 100644 --- a/dask/dataframe/io/parquet/core.py +++ b/dask/dataframe/io/parquet/core.py @@ -721,7 +721,7 @@ def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed def aggregate_row_groups(parts, stats, chunksize): - if not stats[0]["file_path_0"]: + if not stats[0].get("file_path_0", None): return parts, stats parts_agg = [] diff --git a/dask/dataframe/io/tests/test_parquet.py b/dask/dataframe/io/tests/test_parquet.py index c43fd66d898..5a7abdbb7a7 100644 --- a/dask/dataframe/io/tests/test_parquet.py +++ b/dask/dataframe/io/tests/test_parquet.py @@ -2170,3 +2170,22 @@ def test_chunksize(tmpdir, chunksize, engine, metadata): remainder = (df_byte_size % parse_bytes(chunksize)) > 0 expected += int(remainder) * nparts assert ddf2.npartitions == max(nparts, expected) + + +@write_read_engines() +def test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine): + path = str(tmpdir.join("test.parquet")) + pdf = df.copy() + pdf.index.name = "index" + pdf.to_parquet(path, engine=write_engine) + + ddf_read = dd.read_parquet( + path, + engine=read_engine, + chunksize="10 kiB", + gather_statistics=True, + split_row_groups=True, + index="index", + ) + + assert_eq(pdf, ddf_read)
django-json-api__django-rest-framework-json-api-833
Add DRF 3.12 support See https://www.django-rest-framework.org/community/3.12-announcement/
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths an...
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths an...
diff --git a/.travis.yml b/.travis.yml index ad495df9..65266132 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,40 +20,34 @@ matrix: env: TOXENV=docs - python: 3.5 - env: TOXENV=py35-django22-drf310 - - python: 3.5 - env: TOXENV=py35-django22-drf311 + env: TOXENV=py35-django22-drf312 - python: 3.5 env: TOXENV=py35-django22-drfmaster - python: 3.6 - env: TOXENV=py36-django22-drf310 - - python: 3.6 - env: TOXENV=py36-django22-drf311 + env: TOXENV=py36-django22-drf312 - python: 3.6 env: TOXENV=py36-django22-drfmaster - python: 3.6 - env: TOXENV=py36-django30-drf311 + env: TOXENV=py36-django30-drf312 - python: 3.6 env: TOXENV=py36-django30-drfmaster - python: 3.7 - env: TOXENV=py37-django22-drf310 - - python: 3.7 - env: TOXENV=py37-django22-drf311 + env: TOXENV=py37-django22-drf312 - python: 3.7 env: TOXENV=py37-django22-drfmaster - python: 3.7 - env: TOXENV=py37-django30-drf311 + env: TOXENV=py37-django30-drf312 - python: 3.7 env: TOXENV=py37-django30-drfmaster - python: 3.8 - env: TOXENV=py38-django22-drf311 + env: TOXENV=py38-django22-drf312 - python: 3.8 env: TOXENV=py38-django22-drfmaster - python: 3.8 - env: TOXENV=py38-django30-drf311 + env: TOXENV=py38-django30-drf312 - python: 3.8 env: TOXENV=py38-django30-drfmaster diff --git a/CHANGELOG.md b/CHANGELOG.md index f4b110ee..4b872109 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,10 +14,15 @@ any parts of the framework not mentioned in the documentation should generally b * Removed support for Django 1.11. * Removed support for Django 2.1. +* Removed support for Django REST framework 3.10, 3.11 + +### Added +* Added support for Django REST framework 3.12 + ## [3.2.0] - 2020-08-26 -This is the last release supporting Django 1.11 and Django 2.1. +This is the last release supporting Django 1.11, Django 2.1, DRF 3.10 and DRF 3.11. ### Added diff --git a/README.rst b/README.rst index 07f18a8d..89656f22 100644 --- a/README.rst +++ b/README.rst @@ -89,7 +89,7 @@ Requirements 1. Python (3.5, 3.6, 3.7, 3.8) 2. 
Django (2.2, 3.0) -3. Django REST Framework (3.10, 3.11) +3. Django REST Framework (3.12) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. diff --git a/docs/getting-started.md b/docs/getting-started.md index 39ef6a88..d6c88a3d 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -53,7 +53,7 @@ like the following: 1. Python (3.5, 3.6, 3.7, 3.8) 2. Django (2.2, 3.0) -3. Django REST Framework (3.10, 3.11) +3. Django REST Framework (3.12) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. diff --git a/setup.py b/setup.py index 42d7d8c4..19c1fa74 100755 --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ def get_package_data(package): ], install_requires=[ 'inflection>=0.3.0', - 'djangorestframework>=3.10,<3.12', + 'djangorestframework>=3.12,<3.13', 'django>=2.2,<3.1', ], extras_require={ diff --git a/tox.ini b/tox.ini index 58956ee5..e4d1bb15 100644 --- a/tox.ini +++ b/tox.ini @@ -1,16 +1,14 @@ [tox] envlist = - py{35,36,37}-django22-drf{310,311,master}, - py38-django22-drf{311,master}, - py{36,37,38}-django30-drf{311,master}, + py{35,36,37,38}-django22-drf{312,master}, + py{36,37,38}-django30-drf{312,master}, lint,docs [testenv] deps = django22: Django>=2.2,<2.3 django30: Django>=3.0,<3.1 - drf310: djangorestframework>=3.10.2,<3.11 - drf311: djangorestframework>=3.11,<3.12 + drf312: djangorestframework>=3.12,<3.13 drfmaster: https://github.com/encode/django-rest-framework/archive/master.zip -rrequirements/requirements-testing.txt -rrequirements/requirements-optionals.txt
doccano__doccano-363
New user signup page question Hi i'm trying to understand the user structure. I see a few posts about only being able to assign users to specific projects through the django admin screen, but my question is about the 'sign up' page you get offered when you click login, is this totally non functional? That is, is the *only* way to make new users of any kind through the django admin page? Thanks, Z
[ { "content": "from django.shortcuts import render\nfrom .forms import SignupForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom .tokens imp...
[ { "content": "from django.shortcuts import render\nfrom .forms import SignupForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom .tokens imp...
diff --git a/app/authentification/tests/test_template.py b/app/authentification/tests/test_template.py index c20539543d..85a909fbf0 100644 --- a/app/authentification/tests/test_template.py +++ b/app/authentification/tests/test_template.py @@ -1,7 +1,7 @@ from django.test import SimpleTestCase, TestCase, RequestFactory, override_settings from django.http import HttpRequest from ..views import SignupView -from app import settings +from django.conf import settings from api.tests.test_config import setenv @override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage') diff --git a/app/authentification/views.py b/app/authentification/views.py index 718e44f3a0..d57df65078 100644 --- a/app/authentification/views.py +++ b/app/authentification/views.py @@ -9,7 +9,7 @@ from django.views.generic import TemplateView from django.shortcuts import redirect -from app import settings +from django.conf import settings class SignupView(TemplateView):
e2nIEE__pandapower-221
pp.runpp fails with "Generators with different voltage setpoints connected to the same bus", BUT all setpoints are equal in grid model. Hi, in build_gen.py (Line 463) an equality check is made. But due to some conversions made before, this check fails: ``` python values = [1.00999999 1.00999999 1.00999999 1.00999999 1. 1.01 1. ] values_equal = [1.00999999 1.00999999 1.00999999 1.00999999 1. 1.00999999 1. ] ``` Attached is the problematic grid in pickle, using pandapower 1.6.0 develop commit b7136d72ca66a1fcfdcf2460d40c35dac38f02a0 and python 3.7 ``` Traceback (most recent call last): File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\run.py", line 294, in runpp _powerflow(net, **kwargs) File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\powerflow.py", line 66, in _powerflow ppc, ppci = _pd2ppc(net) File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\pd2ppc.py", line 114, in _pd2ppc _check_voltage_setpoints_at_same_bus(ppc) File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\build_gen.py", line 437, in _check_voltage_setpoints_at_same_bus raise UserWarning("Generators with different voltage setpoints connected to the same bus") UserWarning: Generators with different voltage setpoints connected to the same bus ``` BR V3 pp.runpp fails with "Generators with different voltage setpoints connected to the same bus", BUT all setpoints are equal in grid model. Hi, in build_gen.py (Line 463) an equality check is made. But due to some conversions made before, this check fails: ``` python values = [1.00999999 1.00999999 1.00999999 1.00999999 1. 1.01 1. ] values_equal = [1.00999999 1.00999999 1.00999999 1.00999999 1. 1.00999999 1. 
] ``` Attached is the problematic grid in pickle, using pandapower 1.6.0 develop commit b7136d72ca66a1fcfdcf2460d40c35dac38f02a0 and python 3.7 ``` Traceback (most recent call last): File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\run.py", line 294, in runpp _powerflow(net, **kwargs) File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\powerflow.py", line 66, in _powerflow ppc, ppci = _pd2ppc(net) File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\pd2ppc.py", line 114, in _pd2ppc _check_voltage_setpoints_at_same_bus(ppc) File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\build_gen.py", line 437, in _check_voltage_setpoints_at_same_bus raise UserWarning("Generators with different voltage setpoints connected to the same bus") UserWarning: Generators with different voltage setpoints connected to the same bus ``` BR V3
[ { "content": "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport numpy as np\r\nimport numpy.core.numeric as ncn\r\nfrom numpy import array, zeros,...
[ { "content": "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport numpy as np\r\nimport numpy.core.numeric as ncn\r\nfrom numpy import array, zeros,...
diff --git a/pandapower/build_gen.py b/pandapower/build_gen.py index cc014f43b..8be4edbcc 100644 --- a/pandapower/build_gen.py +++ b/pandapower/build_gen.py @@ -460,4 +460,4 @@ def _different_values_at_one_bus(buses, values): # have the voltage of the first generator at that bus values_equal = first_values[buses] - return not np.array_equal(values, values_equal) + return not np.allclose(values, values_equal)
pypi__warehouse-3568
Set samesite=lax on session cookies This is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well.
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softw...
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softw...
diff --git a/tests/unit/test_sessions.py b/tests/unit/test_sessions.py index 0baee1c117b5..8bc57b3c27b0 100644 --- a/tests/unit/test_sessions.py +++ b/tests/unit/test_sessions.py @@ -497,7 +497,7 @@ def test_invalidated_deletes_save_non_secure(self, monkeypatch, ) response = pretend.stub( set_cookie=pretend.call_recorder( - lambda cookie, data, max_age, httponly, secure: None + lambda cookie, data, max_age, httponly, secure, samesite: None ) ) session_factory._process_response(pyramid_request, response) @@ -532,6 +532,7 @@ def test_invalidated_deletes_save_non_secure(self, monkeypatch, max_age=12 * 60 * 60, httponly=True, secure=False, + samesite=b"lax", ), ] diff --git a/warehouse/sessions.py b/warehouse/sessions.py index a52318f0eb7c..548f760c757a 100644 --- a/warehouse/sessions.py +++ b/warehouse/sessions.py @@ -263,6 +263,7 @@ def _process_response(self, request, response): max_age=self.max_age, httponly=True, secure=request.scheme == "https", + samesite=b"lax" )
microsoft__Qcodes-997
Bug: experiment id not properly attributed when calling the load_experiment_by_name method Steps to reproduce: ```python from qcodes.dataset.measurements import Measurement # Start with a clean data base db_location = qcodes.config["core"]["db_location"] db = DataSet(db_location) exp = new_experiment("test", "test1") exp_loaded = load_experiment_by_name("test", "test1") # The following will work meas = SweepMeasurement(exp=exp_loaded) with meas.run() as datasaver: pass # This time we will have an error with meas.run() as datasaver: pass ``` If the experiment was already there in the database, the first measurement will also fail.
[ { "content": "import json\nimport logging\nfrom time import monotonic\nfrom collections import OrderedDict\nfrom typing import (Callable, Union, Dict, Tuple, List, Sequence, cast,\n MutableMapping, MutableSequence, Optional)\nfrom inspect import signature\nfrom numbers import Number\n\nimport...
[ { "content": "import json\nimport logging\nfrom time import monotonic\nfrom collections import OrderedDict\nfrom typing import (Callable, Union, Dict, Tuple, List, Sequence, cast,\n MutableMapping, MutableSequence, Optional)\nfrom inspect import signature\nfrom numbers import Number\n\nimport...
diff --git a/qcodes/dataset/measurements.py b/qcodes/dataset/measurements.py index e44b2a7b19a..f475a461d79 100644 --- a/qcodes/dataset/measurements.py +++ b/qcodes/dataset/measurements.py @@ -219,7 +219,7 @@ def __enter__(self) -> DataSaver: # next set up the "datasaver" if self.experiment: - eid = self.experiment.id + eid = self.experiment.exp_id else: eid = None diff --git a/qcodes/tests/dataset/test_experiment_container.py b/qcodes/tests/dataset/test_experiment_container.py new file mode 100644 index 00000000000..b096b7f1175 --- /dev/null +++ b/qcodes/tests/dataset/test_experiment_container.py @@ -0,0 +1,41 @@ +import pytest +import tempfile +import os + +import qcodes as qc +from qcodes.dataset.experiment_container import load_experiment_by_name, \ + new_experiment +from qcodes.dataset.sqlite_base import connect, init_db +from qcodes.dataset.measurements import Measurement + + +@pytest.fixture(scope="function") +def empty_temp_db(): + # create a temp database for testing + with tempfile.TemporaryDirectory() as tmpdirname: + qc.config["core"]["db_location"] = os.path.join(tmpdirname, 'temp.db') + qc.config["core"]["db_debug"] = True + # this is somewhat annoying but these module scope variables + # are initialized at import time so they need to be overwritten + qc.dataset.experiment_container.DB = qc.config["core"]["db_location"] + qc.dataset.data_set.DB = qc.config["core"]["db_location"] + qc.dataset.experiment_container.debug_db = qc.config["core"]["db_debug"] + _c = connect(qc.config["core"]["db_location"], qc.config["core"]["db_debug"]) + init_db(_c) + _c.close() + yield + + +def test_run_loaded_experiment(empty_temp_db): + """ + Test that we can resume a measurement after loading by name + """ + new_experiment("test", "test1") + exp_loaded = load_experiment_by_name("test", "test1") + + meas = Measurement(exp=exp_loaded) + with meas.run(): + pass + + with meas.run(): + pass
opendatacube__datacube-core-348
Unnecessary dependency on `pathlib` when running in python3 ### Expected behaviour Datacube shouldn't depend on unnecessary packages when running in Python 3. ### Actual behaviour There's a dependency on `pathlib`, which is included in the Python 3 standard library, and so doesn't need to be installed. This causes trouble on the NCI deployment when trying to load `stats` modules which use the `setuptools` entry_points for their registration. And returns error messages to users trying to load them. ### Steps to reproduce the behaviour ``` module load agdc-py3-prod agdc_statistics dra547@raijin4:~ $ python Python 3.6.3 | packaged by conda-forge | (default, Nov 4 2017, 10:10:56) [GCC 4.8.2 20140120 (Red Hat 4.8.2-15)] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import datacube_stats.statistics Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/g/data/v10/public/modules/agdc_statistics/0.9a7/lib/python3.6/site-packages/datacube_stats/statistics.py", line 769, in <module> STATS[entry_point.name] = entry_point.load() File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2404, in load self.require(*args, **kwargs) File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2427, in require items = working_set.resolve(reqs, env, installer, extras=self.extras) File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 870, in resolve raise DistributionNotFound(req, requirers) pkg_resources.DistributionNotFound: The 'pathlib' distribution was not found and is required by datacube >>> ``` ### The Fix Modify `setup.py` to use [platform specific dependencies](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies) to only require `pathlib` when not running on python 
3.
[ { "content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interact...
[ { "content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interact...
diff --git a/setup.py b/setup.py index ea8870e48b..52cea8ff41 100755 --- a/setup.py +++ b/setup.py @@ -76,7 +76,7 @@ 'jsonschema', 'netcdf4', 'numpy', - 'pathlib', + 'pathlib;python_version<"3"', 'psycopg2', 'pypeg2', 'python-dateutil',
django__channels-1614
asgiref dependency should be updated; channels 3.0.3 requires min. 3.2.10, which doesn't work for background workers Channels 3.0.3 depends on `asgiref>=3.2.10`, however with that version, background workers will fail with `TypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'` when receiving a message, even if declared with `.as_asgi()`.
[ { "content": "from setuptools import find_packages, setup\nfrom channels import __version__\n\nsetup(\n name='channels',\n version=__version__,\n url='http://github.com/django/channels',\n author='Django Software Foundation',\n author_email='foundation@djangoproject.com',\n description=\"Bring...
[ { "content": "from setuptools import find_packages, setup\nfrom channels import __version__\n\nsetup(\n name='channels',\n version=__version__,\n url='http://github.com/django/channels',\n author='Django Software Foundation',\n author_email='foundation@djangoproject.com',\n description=\"Bring...
diff --git a/setup.py b/setup.py index c9bde0c7c..89f221fd2 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ python_requires='>=3.6', install_requires=[ 'Django>=2.2', - 'asgiref>=3.2.10,<4', + 'asgiref>=3.3.1,<4', 'daphne>=3.0,<4', ], extras_require={
angr__angr-1303
Cachetools broke their API There's a new major version of cachetools (providing LRUCache), 3.0.0. This has caused everything to break. I have pinned our version to `cachetools<3` for the time being, but we should migrate. My guess is that this is because we were using the `missing` argument to LRUCache (in claripy, specifically), and I am fairly sure the intended replacement is to [implement the `__missing__` method](https://cachetools.readthedocs.io/en/latest/#extending-cache-classes). Unsure if there are more implications, which is why this issue is open under angr instead of claripy.
[ { "content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr...
[ { "content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr...
diff --git a/setup.py b/setup.py index cc8a67d3758..1e2e8a1bba6 100644 --- a/setup.py +++ b/setup.py @@ -122,7 +122,7 @@ def run(self, *args): install_requires=[ 'ana', 'sortedcontainers', - 'cachetools<3', + 'cachetools', 'capstone>=3.0.5rc2', 'cooldict', 'dpkt',
Parsl__parsl-1046
ugly status message in local provider This log message should probably unwrap the dict_values and [list] - in commit b9ecc1342e1b6ce795d942c4b9df4c841f00193d ``` 2019-06-11 08:40:29.773 parsl.providers.local.local:92 [DEBUG] Checking status of: dict_values([50510]) ```
[ { "content": "\"\"\"HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution\n\"\"\"\n\nfrom concurrent.futures import Future\nimport typeguard\nimport logging\nimport threading\nimport queue\nimport pickle\nfrom multiprocessing import Process, Queue\nfrom typing imp...
[ { "content": "\"\"\"HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution\n\"\"\"\n\nfrom concurrent.futures import Future\nimport typeguard\nimport logging\nimport threading\nimport queue\nimport pickle\nfrom multiprocessing import Process, Queue\nfrom typing imp...
diff --git a/parsl/executors/high_throughput/executor.py b/parsl/executors/high_throughput/executor.py index 63706d8bda..cabd3ce501 100644 --- a/parsl/executors/high_throughput/executor.py +++ b/parsl/executors/high_throughput/executor.py @@ -597,7 +597,7 @@ def status(self): status = [] if self.provider: - status = self.provider.status(self.blocks.values()) + status = self.provider.status(list(self.blocks.values())) return status
bridgecrewio__checkov-1905
Bump boto3 to the latest version **Describe the bug** I am trying to installing checkov and the latest boto3 version within an environment. However, checkov depends on version 1.17.* Could you please bump boto3 to the latest version?
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")...
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")...
diff --git a/Pipfile b/Pipfile index 8de488fb19..9125af1a31 100644 --- a/Pipfile +++ b/Pipfile @@ -16,6 +16,7 @@ GitPython = "*" bandit = "*" urllib3-mock = "*" jsonschema = "*" +importlib-resources = ">=1.3" atomicwrites = "*" responses = "*" types-requests = "*" @@ -32,7 +33,7 @@ termcolor="*" junit-xml = ">=1.9" dpath = ">=1.5.0,<2" pyyaml = ">=5.4.1" -boto3 = "==1.17.*" +boto3 = ">=1.17" GitPython = "*" jmespath = "*" tqdm = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 4fa7da71cd..9ed91c7c18 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "7a65ce09459bcf33efcd37b8e228736309abf7eeb79d520181b3b2ca56eccc97" + "sha256": "701841bd573e6846533946dd963bb9ca8a01630961d75a98b5f1fc9b08065547" }, "pipfile-spec": 6, "requires": { @@ -34,19 +34,19 @@ }, "boto3": { "hashes": [ - "sha256:08b6dacbe7ebe57ae8acfb7106b2728d946ae1e0c3da270caee1deb79ccbd8af", - "sha256:8716465313c50ad9e5c2ac1767642ca0ddf7d1729c3d5c884d82880c1a15a310" + "sha256:57ee38d02772f44a52d2d836cee61d039d405f6eaefc68f92ae0d80e0260c097", + "sha256:79c982c5930f989292ca849b0caaa1ffeb9eb9d27c32992c3b2f6736b3b14ad2" ], "index": "pypi", - "version": "==1.17.112" + "version": "==1.19.10" }, "botocore": { "hashes": [ - "sha256:6d51de0981a3ef19da9e6a3c73b5ab427e3c0c8b92200ebd38d087299683dd2b", - "sha256:d0b9b70b6eb5b65bb7162da2aaf04b6b086b15cc7ea322ddc3ef2f5e07944dcf" + "sha256:543cd69e9b248be942d181a097a4715312939ec998602a7b4b07e9fda36d30e9", + "sha256:dffa1e7e7e3a8da73bbdead3aeff7d52fd5a159a1a93b2896ac67b2aa79a461c" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==1.20.112" + "markers": "python_version >= '3.6'", + "version": "==1.22.10" }, "cached-property": { "hashes": [ @@ -429,10 +429,11 @@ }, "s3transfer": { "hashes": [ - "sha256:9b3752887a2880690ce628bc263d6d13a3864083aeacff4890c1c9839a5eb0bc", - "sha256:cb022f4b16551edebbb31a377d3f09600dbada7363d8c5db7976e7f47732e1b2" + 
"sha256:50ed823e1dc5868ad40c8dc92072f757aa0e653a192845c94a3b676f4a62da4c", + "sha256:9c1dc369814391a6bda20ebbf4b70a0f34630592c9aa520856bf384916af2803" ], - "version": "==0.4.2" + "markers": "python_version >= '3.6'", + "version": "==0.5.0" }, "schema": { "hashes": [ @@ -467,11 +468,11 @@ }, "soupsieve": { "hashes": [ - "sha256:052774848f448cf19c7e959adf5566904d525f33a3f8b6ba6f6f8f26ec7de0cc", - "sha256:c2c1c2d44f158cdbddab7824a9af8c4f83c76b1e23e049479aa432feb6c4c23b" + "sha256:617ffc4d0dfd39c66f4d1413a6e165663a34eca86be9b54f97b91756300ff6df", + "sha256:e4860f889dfa88774c07da0b276b70c073b6470fa1a4a8350800bb7bce3dcc76" ], "markers": "python_version >= '3.6'", - "version": "==2.2.1" + "version": "==2.3" }, "tabulate": { "hashes": [ @@ -687,13 +688,13 @@ "markers": "python_version >= '3'", "version": "==3.3" }, - "importlib-metadata": { + "importlib-resources": { "hashes": [ - "sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15", - "sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1" + "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45", + "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b" ], "index": "pypi", - "version": "==4.8.1" + "version": "==5.4.0" }, "iniconfig": { "hashes": [ @@ -704,11 +705,11 @@ }, "jsonschema": { "hashes": [ - "sha256:166870c8ab27bd712a8627e0598de4685bd8d199c4d7bd7cacc3d941ba0c6ca0", - "sha256:5c1a282ee6b74235057421fd0f766ac5f2972f77440927f6471c9e8493632fac" + "sha256:2b563117f3659a7f433dffe1371c88f52115b79133493f376f15724b9caa7efa", + "sha256:e2d3601321ac74d38214e2853300ae740cd07e53d919a15862b8c71f9d840574" ], "index": "pypi", - "version": "==4.1.2" + "version": "==4.2.0" }, "packaging": { "hashes": [ @@ -720,11 +721,11 @@ }, "pbr": { "hashes": [ - "sha256:42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd", - "sha256:c68c661ac5cc81058ac94247278eeda6d2e6aecb3e227b0387c30d277e7ef8d4" + 
"sha256:4651ca1445e80f2781827305de3d76b3ce53195f2227762684eb08f17bc473b7", + "sha256:60002958e459b195e8dbe61bf22bcf344eedf1b4e03a321a5414feb15566100c" ], "markers": "python_version >= '2.6'", - "version": "==5.6.0" + "version": "==5.7.0" }, "pluggy": { "hashes": [ @@ -736,11 +737,11 @@ }, "py": { "hashes": [ - "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", - "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" + "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719", + "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.10.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==1.11.0" }, "pyparsing": { "hashes": [ diff --git a/setup.py b/setup.py index a6a91c4541..6eceae4022 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ "junit-xml>=1.9", "dpath>=1.5.0,<2", "pyyaml>=5.4.1", - "boto3==1.17.*", + "boto3>=1.17", "GitPython", "jmespath", "tqdm",
internetarchive__openlibrary-4557
/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template) Patron is reporting the following error: `/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template)` ### Evidence / Screenshot (if possible) ### Relevant url? https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan ### Steps to Reproduce <!-- What steps caused you to find the bug? --> 1. Go to ... https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan 2. Do ... view error. <!-- What actually happened after these steps? What did you expect to happen? --> * Actual: * Expected: ### Details - **Logged in (Y/N)?** - **Browser type/version?** - **Operating system?** - **Environment (prod/dev/local)?** prod <!-- If not sure, put prod --> ### Proposal & Constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> ### Related files <!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. --> ### Stakeholders <!-- @ tag stakeholders of this bug -->
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\...
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\...
diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py index de212412998..b4ec1476453 100644 --- a/openlibrary/core/helpers.py +++ b/openlibrary/core/helpers.py @@ -101,7 +101,7 @@ def get_nofollow(name, event): def json_encode(d, **kw): """Same as json.dumps. """ - return json.dumps(d, **kw) + return json.dumps(d or {}, **kw) def safesort(iterable, key=None, reverse=False):
elastic__apm-agent-python-1558
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 369: invalid start byte **Describe the bug**: Sending PUT request to FastAPI with binary file encoded, its returns a error. **To Reproduce** 1. Configure FastAPI with elasticapm.contrib.starlette 2. Send a PUT request with binary file **Environment (please complete the following information)** - OS: Linux - Python version: 3.8 - Framework and version: fastapi 0.61.2 - APM Server version: 8 - Agent version: 8 The same problema was resolved in the main code : [#344](https://github.com/elastic/apm-agent-python/issues/334)
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain...
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain...
diff --git a/elasticapm/contrib/starlette/utils.py b/elasticapm/contrib/starlette/utils.py index f06c19055..ec2eaef5f 100644 --- a/elasticapm/contrib/starlette/utils.py +++ b/elasticapm/contrib/starlette/utils.py @@ -129,7 +129,7 @@ async def get_body(request: Request) -> str: request._stream_consumed = False - return body.decode("utf-8") + return body.decode("utf-8", errors="replace") async def query_params_to_dict(query_params: str) -> dict:
benoitc__gunicorn-806
Fix utils.is_fileobj for streaming responses from requests Turns out that the utils.is_fileobj breaks for streamed responses from the requests library due to how we check for a fileno() function and the exception raised by urllib3. https://github.com/benoitc/gunicorn/blob/19.0/gunicorn/util.py#L511-L521 https://github.com/kennethreitz/requests/blob/v2.3.0/requests/packages/urllib3/response.py#L294-L301 Obvious fix is to add the IOError type to the exception list. PR to follow shortly.
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\n\nimport email.utils\nimport fcntl\nimport io\nimport os\nimport pkg_resources\nimport random\nimport resource\nimport socket\nimport sys\nimport textwrap\nimport ti...
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\n\nimport email.utils\nimport fcntl\nimport io\nimport os\nimport pkg_resources\nimport random\nimport resource\nimport socket\nimport sys\nimport textwrap\nimport ti...
diff --git a/gunicorn/util.py b/gunicorn/util.py index 9ef79a3a1..b7d3a67a9 100644 --- a/gunicorn/util.py +++ b/gunicorn/util.py @@ -515,7 +515,7 @@ def is_fileobject(obj): # check BytesIO case and maybe others try: obj.fileno() - except io.UnsupportedOperation: + except (IOError, io.UnsupportedOperation): return False return True
ivy-llc__ivy-15926
log
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_array...
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_array...
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py index 66f96ec019fa8..791d4468a085f 100644 --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -46,3 +46,9 @@ def acosh(x, name=None): @to_ivy_arrays_and_back def asin(x, name=None): return ivy.asin(x) + + +@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") +@to_ivy_arrays_and_back +def log(x, name=None): + return ivy.log(x) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py index 044f4d4ebea35..ff61aebfb7526 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py @@ -190,3 +190,29 @@ def test_paddle_asin( on_device=on_device, x=x[0], ) + + +# log +@handle_frontend_test( + fn_tree="paddle.log", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), +) +def test_paddle_log( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + )
internetarchive__openlibrary-4591
Adding to lists broken Adding an item to a list no longer works as of 12-02-2021. ### Evidence / Screenshot (if possible) ### Relevant url? <!-- `https://openlibrary.org/...` --> ### Steps to Reproduce <!-- What steps caused you to find the bug? --> 1. Go to ...an edition, etc. 2. Do ...add item to list. <!-- What actually happened after these steps? What did you expect to happen? --> * Actual: List link loads list page. * Expected: Item should be added to list. ### Details - **Logged in (Y/N)?** Y - **Browser type/version?** Chrome Version 88.0.4324.150 (Official Build) (x86_64) - **Operating system?** Mac Big Sur - **Environment (prod/dev/local)?** prod <!-- If not sure, put prod --> ### Proposal & Constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> ### Related files <!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. --> ### Stakeholders <!-- @ tag stakeholders of this bug --> @cclauss
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\...
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\...
diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py index b4ec1476453..de212412998 100644 --- a/openlibrary/core/helpers.py +++ b/openlibrary/core/helpers.py @@ -101,7 +101,7 @@ def get_nofollow(name, event): def json_encode(d, **kw): """Same as json.dumps. """ - return json.dumps(d or {}, **kw) + return json.dumps(d, **kw) def safesort(iterable, key=None, reverse=False):
numpy__numpy-4666
`column_stack()`: error in documentation in `numy/lib/shape_base.py`, around line 277: ``` This function is equivalent to ``np.vstack(tup).T``. ``` If I'm not mistaken, this was true for the old behaviour. Currently, inputs with >= 2 dimensions are not transposed (which is good!) and therefore it is not equivalent anymore. Here is an example, the commented line gives an error: ``` python a = array([[1, 2], [3, 4], [5, 6]]) b = array([11, 12, 13]) column_stack((a, b)) #vstack((a, b)).T ```
[ { "content": "from __future__ import division, absolute_import, print_function\n\n__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit',\n 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap']\n\nimport warn...
[ { "content": "from __future__ import division, absolute_import, print_function\n\n__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit',\n 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap']\n\nimport warn...
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index 38b928d57605..43e98ae3d338 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -287,10 +287,6 @@ def column_stack(tup): -------- hstack, vstack, concatenate - Notes - ----- - This function is equivalent to ``np.vstack(tup).T``. - Examples -------- >>> a = np.array((1,2,3))
certbot__certbot-5941
V2 order ready status not recognized, causes deserialization error ## I installed Certbot with (certbot-auto, OS package manager, pip, etc): Cloned from git: ``` $> git rev-parse HEAD 6b29d159a2f221c3437770bdb43924ee6f953c4b ``` ## I ran this command and it produced this output: `certbot_test --server http://localhost:4001/directory certonly --standalone -d one.wtf --preferred-challenges http-01 ` Note: This is against a Boulder instance configured with the `OrderReadyStatus` feature flag enabled (See https://github.com/letsencrypt/boulder/pull/3644). ## Certbot's behavior differed from what I expected because: Certbot POSTed `newOrder`. In response an order object with `"status": "ready"` was returned. This caused a `DeserializationError` indicating "Could not decode 'status' (u'ready'): Deserialization error: Status not recognized". The "ready" status was added to the ACME specification in draft-10 before Let's Encrypt launched its production ACMEv2 endpoint. Boulder does not use this new status in staging/production yet but we will in the near future (~next month). Draft-10 says: > Once all of the authorizations listed in the order object are in the "valid" state, the order transitions to the "ready" state. This state is used to indicate that an order is ready for finalization. Previously the order would remain in "processing" when all of its authorizations were in the "valid" state. 
## Here is a Certbot log showing the issue (if available): ``` http://localhost:4001 "POST /acme/new-order HTTP/1.1" 201 323 Received response: HTTP 201 Boulder-Requester: 2141 Cache-Control: public, max-age=0, no-cache Content-Type: application/json Location: http://localhost:4001/acme/order/2141/932 Replay-Nonce: Aeop9czyFGXSMBH0TfD4MwI5klCloEnml8AFsRzBPDU Date: Thu, 12 Apr 2018 17:06:51 GMT Content-Length: 323 { "status": "ready", "expires": "2018-04-19T17:06:51.98458014Z", "identifiers": [ { "type": "dns", "value": "one.wtf" } ], "authorizations": [ "http://localhost:4001/acme/authz/qklYRnxxHtf8PAaR8IpgK2ex7uPqWYzWgPEQrPiqEKc" ], "finalize": "http://localhost:4001/acme/finalize/2141/932" } Storing nonce: Aeop9czyFGXSMBH0TfD4MwI5klCloEnml8AFsRzBPDU Exiting abnormally: Traceback (most recent call last): File "/home/daniel/Code/certbot/venv/bin/certbot", line 11, in <module> load_entry_point('certbot', 'console_scripts', 'certbot')() File "/home/daniel/Code/certbot/certbot/main.py", line 1266, in main return config.func(config, plugins) File "/home/daniel/Code/certbot/certbot/main.py", line 1157, in certonly lineage = _get_and_save_cert(le_client, config, domains, certname, lineage) File "/home/daniel/Code/certbot/certbot/main.py", line 113, in _get_and_save_cert renewal.renew_cert(config, domains, le_client, lineage) File "/home/daniel/Code/certbot/certbot/renewal.py", line 297, in renew_cert new_cert, new_chain, new_key, _ = le_client.obtain_certificate(domains) File "/home/daniel/Code/certbot/certbot/client.py", line 294, in obtain_certificate orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names) File "/home/daniel/Code/certbot/certbot/client.py", line 326, in _get_order_and_authorizations orderr = self.acme.new_order(csr_pem) File "/home/daniel/Code/certbot/acme/acme/client.py", line 779, in new_order return self.client.new_order(csr_pem) File "/home/daniel/Code/certbot/acme/acme/client.py", line 606, in new_order body = 
messages.Order.from_json(response.json()) File "/home/daniel/Code/certbot/venv/local/lib/python2.7/site-packages/josepy/json_util.py", line 289, in from_json return cls(**cls.fields_from_json(jobj)) File "/home/daniel/Code/certbot/venv/local/lib/python2.7/site-packages/josepy/json_util.py", line 284, in fields_from_json slot, value, error)) DeserializationError: Deserialization error: Could not decode 'status' (u'ready'): Deserialization error: Status not recognized Please see the logfiles in /tmp/leitSN33/logs for more details. ```
[ { "content": "\"\"\"ACME protocol messages.\"\"\"\nimport collections\nimport six\n\nimport josepy as jose\n\nfrom acme import challenges\nfrom acme import errors\nfrom acme import fields\nfrom acme import util\n\nOLD_ERROR_PREFIX = \"urn:acme:error:\"\nERROR_PREFIX = \"urn:ietf:params:acme:error:\"\n\nERROR_CO...
[ { "content": "\"\"\"ACME protocol messages.\"\"\"\nimport collections\nimport six\n\nimport josepy as jose\n\nfrom acme import challenges\nfrom acme import errors\nfrom acme import fields\nfrom acme import util\n\nOLD_ERROR_PREFIX = \"urn:acme:error:\"\nERROR_PREFIX = \"urn:ietf:params:acme:error:\"\n\nERROR_CO...
diff --git a/acme/acme/messages.py b/acme/acme/messages.py index a69b3bbc4d0..03dbc325579 100644 --- a/acme/acme/messages.py +++ b/acme/acme/messages.py @@ -145,6 +145,7 @@ class Status(_Constant): STATUS_VALID = Status('valid') STATUS_INVALID = Status('invalid') STATUS_REVOKED = Status('revoked') +STATUS_READY = Status('ready') class IdentifierType(_Constant):
python-pillow__Pillow-6834
Endianness is inconsistent Image class methods: | Method | Parameter | Endianness | |---------------|----------------|-------------| | blend | alpha | native | | new | color | native | | frombytes | data | mode | | frombuffer | data | mode | | getcolors | | native | | getdata all | | mode | | getdata band | | native | | getextrema | | native | | getpalette | | mode | | getpixel | | native | | point | | unsupported | | putdata | data | native | | putpalette | data | mode | | putpixel | value | native | | remap_palette | source_palette | mode | | tobytes | | mode | Methods that use one of the above methods: `eval` calls `point` `fromarray` calls `frombuffer` `rotate` calls `transform` `transform` calls `new` Related: #2228
[ { "content": "#\n# The Python Imaging Library\n# Pillow fork\n#\n# Python implementation of the PixelAccess Object\n#\n# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.\n# Copyright (c) 1995-2009 by Fredrik Lundh.\n# Copyright (c) 2013 Eric Soroos\n#\n# See the README file for information on us...
[ { "content": "#\n# The Python Imaging Library\n# Pillow fork\n#\n# Python implementation of the PixelAccess Object\n#\n# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.\n# Copyright (c) 1995-2009 by Fredrik Lundh.\n# Copyright (c) 2013 Eric Soroos\n#\n# See the README file for information on us...
diff --git a/Tests/test_image_access.py b/Tests/test_image_access.py index 4079d935800..027af5d56fa 100644 --- a/Tests/test_image_access.py +++ b/Tests/test_image_access.py @@ -275,15 +275,10 @@ def test_get_vs_c(self): # self._test_get_access(hopper('PA')) # PA -- how do I make a PA image? self._test_get_access(hopper("F")) - im = Image.new("I;16", (10, 10), 40000) - self._test_get_access(im) - im = Image.new("I;16L", (10, 10), 40000) - self._test_get_access(im) - im = Image.new("I;16B", (10, 10), 40000) - self._test_get_access(im) - - im = Image.new("I", (10, 10), 40000) - self._test_get_access(im) + for mode in ("I;16", "I;16L", "I;16B", "I;16N", "I"): + im = Image.new(mode, (10, 10), 40000) + self._test_get_access(im) + # These don't actually appear to be modes that I can actually make, # as unpack sets them directly into the I mode. # im = Image.new('I;32L', (10, 10), -2**10) @@ -322,15 +317,10 @@ def test_set_vs_c(self): # self._test_set_access(i, (128, 128)) #PA -- undone how to make self._test_set_access(hopper("F"), 1024.0) - im = Image.new("I;16", (10, 10), 40000) - self._test_set_access(im, 45000) - im = Image.new("I;16L", (10, 10), 40000) - self._test_set_access(im, 45000) - im = Image.new("I;16B", (10, 10), 40000) - self._test_set_access(im, 45000) + for mode in ("I;16", "I;16L", "I;16B", "I;16N", "I"): + im = Image.new(mode, (10, 10), 40000) + self._test_set_access(im, 45000) - im = Image.new("I", (10, 10), 40000) - self._test_set_access(im, 45000) # im = Image.new('I;32L', (10, 10), -(2**10)) # self._test_set_access(im, -(2**13)+1) # im = Image.new('I;32B', (10, 10), 2**10) diff --git a/Tests/test_lib_pack.py b/Tests/test_lib_pack.py index 979806cae99..de3e7d1569b 100644 --- a/Tests/test_lib_pack.py +++ b/Tests/test_lib_pack.py @@ -207,6 +207,9 @@ def test_I(self): 0x01000083, ) + def test_I16(self): + self.assert_pack("I;16N", "I;16N", 2, 0x0201, 0x0403, 0x0605) + def test_F_float(self): self.assert_pack("F", "F;32F", 4, 1.539989614439558e-36, 
4.063216068939723e-34) @@ -761,10 +764,12 @@ def test_I16(self): self.assert_unpack("I;16", "I;16N", 2, 0x0201, 0x0403, 0x0605) self.assert_unpack("I;16B", "I;16N", 2, 0x0201, 0x0403, 0x0605) self.assert_unpack("I;16L", "I;16N", 2, 0x0201, 0x0403, 0x0605) + self.assert_unpack("I;16N", "I;16N", 2, 0x0201, 0x0403, 0x0605) else: self.assert_unpack("I;16", "I;16N", 2, 0x0102, 0x0304, 0x0506) self.assert_unpack("I;16B", "I;16N", 2, 0x0102, 0x0304, 0x0506) self.assert_unpack("I;16L", "I;16N", 2, 0x0102, 0x0304, 0x0506) + self.assert_unpack("I;16N", "I;16N", 2, 0x0102, 0x0304, 0x0506) def test_CMYK16(self): self.assert_unpack("CMYK", "CMYK;16L", 8, (2, 4, 6, 8), (10, 12, 14, 16)) diff --git a/Tests/test_mode_i16.py b/Tests/test_mode_i16.py index dcdee3d416d..1786dba3847 100644 --- a/Tests/test_mode_i16.py +++ b/Tests/test_mode_i16.py @@ -88,10 +88,7 @@ def tobytes(mode): def test_convert(): im = original.copy() - verify(im.convert("I;16")) - verify(im.convert("I;16").convert("L")) - verify(im.convert("I;16").convert("I")) - - verify(im.convert("I;16B")) - verify(im.convert("I;16B").convert("L")) - verify(im.convert("I;16B").convert("I")) + for mode in ("I;16", "I;16B", "I;16N"): + verify(im.convert(mode)) + verify(im.convert(mode).convert("L")) + verify(im.convert(mode).convert("I")) diff --git a/docs/releasenotes/9.5.0.rst b/docs/releasenotes/9.5.0.rst index bd6e586932a..13c99313a22 100644 --- a/docs/releasenotes/9.5.0.rst +++ b/docs/releasenotes/9.5.0.rst @@ -63,3 +63,10 @@ Added support for saving PDFs in RGBA mode ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using the JPXDecode filter, PDFs can now be saved in RGBA mode. + + +Improved I;16N support +^^^^^^^^^^^^^^^^^^^^^^ + +Support has been added for I;16N access, packing and unpacking. Conversion to +and from L mode has also been added. 
diff --git a/src/PIL/PyAccess.py b/src/PIL/PyAccess.py index e9cb34ceda1..39747b4f311 100644 --- a/src/PIL/PyAccess.py +++ b/src/PIL/PyAccess.py @@ -320,6 +320,7 @@ def set_pixel(self, x, y, color): "1": _PyAccess8, "L": _PyAccess8, "P": _PyAccess8, + "I;16N": _PyAccessI16_N, "LA": _PyAccess32_2, "La": _PyAccess32_2, "PA": _PyAccess32_2, diff --git a/src/libImaging/Access.c b/src/libImaging/Access.c index 83860c38a7e..f00939da0b3 100644 --- a/src/libImaging/Access.c +++ b/src/libImaging/Access.c @@ -13,7 +13,7 @@ /* use make_hash.py from the pillow-scripts repository to calculate these values */ #define ACCESS_TABLE_SIZE 27 -#define ACCESS_TABLE_HASH 3078 +#define ACCESS_TABLE_HASH 33051 static struct ImagingAccessInstance access_table[ACCESS_TABLE_SIZE]; @@ -92,6 +92,12 @@ get_pixel_16B(Imaging im, int x, int y, void *color) { #endif } +static void +get_pixel_16(Imaging im, int x, int y, void *color) { + UINT8 *in = (UINT8 *)&im->image[y][x + x]; + memcpy(color, in, sizeof(UINT16)); +} + static void get_pixel_32(Imaging im, int x, int y, void *color) { memcpy(color, &im->image32[y][x], sizeof(INT32)); @@ -186,6 +192,7 @@ ImagingAccessInit() { ADD("I;16", get_pixel_16L, put_pixel_16L); ADD("I;16L", get_pixel_16L, put_pixel_16L); ADD("I;16B", get_pixel_16B, put_pixel_16B); + ADD("I;16N", get_pixel_16, put_pixel_16L); ADD("I;32L", get_pixel_32L, put_pixel_32L); ADD("I;32B", get_pixel_32B, put_pixel_32B); ADD("F", get_pixel_32, put_pixel_32); diff --git a/src/libImaging/Convert.c b/src/libImaging/Convert.c index b03bd02af2b..7fe24a63939 100644 --- a/src/libImaging/Convert.c +++ b/src/libImaging/Convert.c @@ -990,6 +990,13 @@ static struct { {"I;16L", "L", I16L_L}, {"L", "I;16B", L_I16B}, {"I;16B", "L", I16B_L}, +#ifdef WORDS_BIGENDIAN + {"L", "I;16N", L_I16B}, + {"I;16N", "L", I16B_L}, +#else + {"L", "I;16N", L_I16L}, + {"I;16N", "L", I16L_L}, +#endif {"I;16", "F", I16L_F}, {"I;16L", "F", I16L_F}, diff --git a/src/libImaging/Pack.c b/src/libImaging/Pack.c index 
01760e742be..14c8f1461aa 100644 --- a/src/libImaging/Pack.c +++ b/src/libImaging/Pack.c @@ -664,6 +664,7 @@ static struct { #endif {"I;16B", "I;16B", 16, copy2}, {"I;16L", "I;16L", 16, copy2}, + {"I;16N", "I;16N", 16, copy2}, {"I;16", "I;16N", 16, packI16N_I16}, // LibTiff native->image endian. {"I;16L", "I;16N", 16, packI16N_I16}, {"I;16B", "I;16N", 16, packI16N_I16B}, diff --git a/src/libImaging/Unpack.c b/src/libImaging/Unpack.c index e426ed74fce..7eeadf944ea 100644 --- a/src/libImaging/Unpack.c +++ b/src/libImaging/Unpack.c @@ -1762,6 +1762,7 @@ static struct { {"I;16", "I;16", 16, copy2}, {"I;16B", "I;16B", 16, copy2}, {"I;16L", "I;16L", 16, copy2}, + {"I;16N", "I;16N", 16, copy2}, {"I;16", "I;16N", 16, unpackI16N_I16}, // LibTiff native->image endian. {"I;16L", "I;16N", 16, unpackI16N_I16}, // LibTiff native->image endian.
craiga__will-of-the-prophets-26
Clean up login form
[ { "content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangopro...
[ { "content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangopro...
diff --git a/Pipfile b/Pipfile index 9480b879..06aa4b45 100644 --- a/Pipfile +++ b/Pipfile @@ -11,6 +11,7 @@ pillow = "*" "psycopg2-binary" = "*" gunicorn = "*" raven = "*" +django-widget-tweaks = "*" [dev-packages] pycodestyle = "*" diff --git a/Pipfile.lock b/Pipfile.lock index e478903a..c8554e78 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "8657ca5d7cc006bba82b0f66005a8da9a167bb5cbafa1f6e9eeab9b88dd15868" + "sha256": "5f583f1faeb177c9474ddbc3be05cf923e6b0c86fb780d6ade8989509c18061c" }, "pipfile-spec": 6, "requires": { @@ -46,6 +46,14 @@ "index": "pypi", "version": "==0.7" }, + "django-widget-tweaks": { + "hashes": [ + "sha256:a31c8a2b88af98dba6471db4722a416d1c643c87efecf9a7f17f983a2a553632", + "sha256:f9961162c8ed272162e22e5877d29c7780476970441dce605118ef66da685e71" + ], + "index": "pypi", + "version": "==1.4.2" + }, "gunicorn": { "hashes": [ "sha256:7ef2b828b335ed58e3b64ffa84caceb0a7dd7c5ca12f217241350dec36a1d5dc", diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py index 7421ded6..4e000b87 100644 --- a/will_of_the_prophets/settings/__init__.py +++ b/will_of_the_prophets/settings/__init__.py @@ -42,6 +42,7 @@ 'django.contrib.messages', 'django.contrib.staticfiles', 'sass_processor', + 'widget_tweaks', 'bootstrap', 'will_of_the_prophets', ] diff --git a/will_of_the_prophets/templates/registration/login.html b/will_of_the_prophets/templates/registration/login.html index c8f2ff5c..235d8138 100644 --- a/will_of_the_prophets/templates/registration/login.html +++ b/will_of_the_prophets/templates/registration/login.html @@ -1,38 +1,58 @@ {% extends 'base.html' %} +{% load widget_tweaks %} {% block content %} - {% if form.errors %} - {% for field in form %} - {% for error in field.errors %} +<div class="row pt-3"> + <div class="col-12"> + <p class="h3 text-orange">The Greatest Generation Presents</p> + <h1 class="text-blue">Game of Buttholes:<br>The Will of the 
Prophets</h1> + + {% if form.errors %} + {% for field in form %} + {% for error in field.errors %} + <div class="alert alert-danger"> + <strong>{{ error|escape }}</strong> + </div> + {% endfor %} + {% endfor %} + {% for error in form.non_field_errors %} <div class="alert alert-danger"> <strong>{{ error|escape }}</strong> </div> {% endfor %} - {% endfor %} - {% for error in form.non_field_errors %} + {% endif %} + + {% if next and user.is_authenticated %} <div class="alert alert-danger"> - <strong>{{ error|escape }}</strong> + <strong>Your account doesn't have access to this page. To proceed, + please login with an account that has access.</strong> </div> - {% endfor %} - {% endif %} - - {% if next %} - {% if user.is_authenticated %} - <p>Your account doesn't have access to this page. To proceed, - please login with an account that has access.</p> - {% else %} - <p>Please login to see this page.</p> {% endif %} - {% endif %} - <form method="post" action="{% url 'login' %}"> - {% csrf_token %} - <div>{{ form.username.label_tag }} {{ form.username }}</div> - <div>{{ form.password.label_tag }} {{ form.password }}</div> - <input type="submit" value="login" /> - <input type="hidden" name="next" value="{{ next }}" /> - </form> + <form method="post" action="{% url 'login' %}"> + {% csrf_token %} + <input type="hidden" name="next" value="{{ next }}" /> + <div class="form-row"> + <div class="form-group col-sm-6"> + {{ form.username.label_tag }} + {{ form.username|add_class:'form-control'|attr:'placeholder:Username' }} + </div> + <div class="form-group col-sm-6"> + {{ form.password.label_tag }} + {{ form.password|add_class:'form-control'|attr:'placeholder:Username' }} + </div> + </div> + <div class="form-row"> + <div class="form-group col-12"> + <button type="submit" class="btn btn-primary">Log In</button> + </div> + </div> + </form> + + <p><a href="{% url 'password_reset' %}">Lost password?</a></p> + + </div> +</div> - <p><a href="{% url 'password_reset' %}">Lost 
password?</a></p> {% endblock %}
django-wiki__django-wiki-400
Django 1.7 migrations are incomplete, fail after makemigrations My project uses Django 1.7.5 and Python 3.4. I installed `django-wiki` from the current `master` branch, added the necessary settings, and ran `manage.py migrate`, and everything worked. But my project's model definitions are in constant flux right now, so I re-create my initial migrations fairly often. The most recent time I did this, it generated a second migration for `django-wiki`: ``` $ manage.py makemigrations Migrations for 'wiki': 0002_auto_20150308_0558.py: - Remove field article from articleplugin - Remove field articleplugin_ptr from reusableplugin - Remove field articles from reusableplugin - Delete model ReusablePlugin - Remove field articleplugin_ptr from revisionplugin - Remove field current_revision from revisionplugin - Remove field plugin from revisionpluginrevision - Delete model RevisionPlugin - Remove field previous_revision from revisionpluginrevision - Remove field user from revisionpluginrevision - Delete model RevisionPluginRevision - Remove field article_revision from simpleplugin - Remove field articleplugin_ptr from simpleplugin - Delete model ArticlePlugin - Delete model SimplePlugin ``` And this migration failed to run, with the following error: ``` Running migrations: Applying wiki.0002_auto_20150308_0558...Traceback (most recent call last): ... django.db.migrations.state.InvalidBasesError: Cannot resolve bases for [<ModelState: 'wiki.SimplePlugin'>] This can happen if you are inheriting models from an app with migrations (e.g. contrib.auth) in an app with no migrations; see https://docs.djangoproject.com/en/1.7/topics/migrations/#dependencies for more ``` Are those models really intended to be concrete? Or should they be abstract mixins instead?
[ { "content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\n\nfrom django import VERSION\nfrom django.conf import settings as django_settings\nfrom django.core.exceptions import ImproperlyConfigured\nimport warnings\nfrom six import string_types\n\n# T...
[ { "content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\n\nfrom django import VERSION\nfrom django.conf import settings as django_settings\nfrom django.core.exceptions import ImproperlyConfigured\nimport warnings\nfrom six import string_types\n\n# T...
diff --git a/wiki/models/__init__.py b/wiki/models/__init__.py index 266bef60b..e8c307490 100644 --- a/wiki/models/__init__.py +++ b/wiki/models/__init__.py @@ -10,6 +10,7 @@ # TODO: Don't use wildcards from .article import * +from .pluginbase import * from .urlpath import * # TODO: Should the below stuff be executed a more logical place?
scikit-hep__pyhf-336
bumpversion missing from setup.py[develop] # Description As titled, `bumpversion` is not in list of develop dependencies. # Expected Behavior Installing `pyhf` installs `bumpversion`. # Actual Behavior It does not install `bumpversion`. # Steps to Reproduce `pip install pyhf[develop]` # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptoo...
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptoo...
diff --git a/setup.py b/setup.py index 2b0ae3e678..c26875d832 100644 --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ 'uproot>=3.0.0', 'papermill', 'graphviz', + 'bumpversion', 'sphinx', 'sphinxcontrib-bibtex', 'sphinxcontrib-napoleon',
lutris__lutris-2472
Don't show Steam Linux Runtime when importing games Link to the tool on steamdb: https://steamdb.info/app/1070560/
[ { "content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_ga...
[ { "content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_ga...
diff --git a/lutris/services/steam.py b/lutris/services/steam.py index 53aba8daf8..2e672a4cd6 100644 --- a/lutris/services/steam.py +++ b/lutris/services/steam.py @@ -20,6 +20,7 @@ class SteamGame(ServiceGame): installer_slug = "steam" excluded_appids = [ "228980", # Steamworks Common Redistributables + "1070560", # Steam Linux Runtime ] @classmethod
optuna__optuna-1088
[RFC] CI for examples/visualization It'll be better if we can validate that all the visualization examples run without failures with CI. I guess https://github.com/computationalmodelling/nbval/ might be useful.
[ { "content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optu...
[ { "content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optu...
diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 389a100b8e..faecf271c6 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -53,6 +53,15 @@ jobs: done env: OMP_NUM_THREADS: 1 + - name: Run examples + run: | + for file in `find examples -name '*.ipynb'` + do + echo $file + pytest --nbval-lax $file > /dev/null + done + env: + OMP_NUM_THREADS: 1 - name: Run multi-node examples run: | STORAGE_URL=sqlite:///example.db diff --git a/examples/visualization/plot_study.ipynb b/examples/visualization/plot_study.ipynb index e60829cf3a..07bdc6f71d 100644 --- a/examples/visualization/plot_study.ipynb +++ b/examples/visualization/plot_study.ipynb @@ -136,7 +136,7 @@ "def objective(trial):\n", " \n", " clf = MLPClassifier(\n", - " hidden_layer_sizes=tuple([trial.suggest_int(f'n_units_l{i}', 32, 64) for i in range(3)]),\n", + " hidden_layer_sizes=tuple([trial.suggest_int('n_units_l{}'.format(i), 32, 64) for i in range(3)]),\n", " learning_rate_init=trial.suggest_loguniform('lr_init', 1e-5, 1e-1),\n", " )\n", "\n", diff --git a/setup.py b/setup.py index aacb468d71..7f4fdcf78f 100644 --- a/setup.py +++ b/setup.py @@ -61,6 +61,7 @@ def get_extras_require() -> Dict[str, List[str]]: "mlflow", "mpi4py", "mxnet", + "nbval", "pytorch-ignite", "scikit-image", "scikit-learn",
learningequality__kolibri-8048
Context not transferred from Crowdin <!-- Instructions: * Fill out the sections below, replace …'s with information about your issue * Use the 'preview' function above this text box to verify formatting before submitting --> ### Observed behavior <!-- Description of the behavior that was observed, including screenshots or other references when applicable --> Initially noted by @radinamatic This [Vue SFC](https://github.com/learningequality/kolibri/blob/release-v0.14.x/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue#L68) did not have the [context for its translation](https://crowdin.com/translate/kolibri/3798/en-es?filter=basic&value=0#275962) transferred into the code. This issue should be fixed in the context of 0.15. This work may ultimately be blocked by some necessary work in https://github.com/learningequality/kolibri/issues/7709 ### Expected behavior <!-- Description of what behavior was expected but did not occur --> `yarn transfercontext` should transfer all context from Crowdin into the components associated with the translation strings. ### User-facing consequences <!-- Implications and real-world consequences for learners, coaches, admins, and other users of the application --> Internal facing only - but context will not be available in the code, which can result in losing context when uploading the next round of translations. ### Context <!-- Tell us about your environment, including: * Kolibri version * Operating system * Browser --> Kolibri 0.15
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nFor usage instructions, see:\n https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport argparse\nimport base64\nimport io\nimport json\nimport logging\nimport mimetypes\nimport os\nimport re\n...
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nFor usage instructions, see:\n https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport argparse\nimport base64\nimport io\nimport json\nimport logging\nimport mimetypes\nimport os\nimport re\n...
diff --git a/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue b/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue index 2cd7cd7d47e..44b9381e6c1 100644 --- a/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue +++ b/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue @@ -71,7 +71,11 @@ streak: 'Get {count, number, integer} questions in a row correct', mOfN: 'Get {M, number, integer} of the last {N, number, integer} questions correct', doAll: 'Get every question correct', - unknown: 'Unknown mastery model', + unknown: { + message: 'Unknown mastery model', + context: + "Mastery model refers to the 'number of correct answers that need to be given by learners' for an exercise to be considered \"mastered\". This particular one (unknown) tries to cover for cases when the mastery is not clearly defined as 'answered X of Y questions'.", + }, }, }; diff --git a/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue b/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue index b239555dc89..5972639099b 100644 --- a/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue +++ b/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue @@ -32,7 +32,10 @@ $trs: { twoItems: '{item1}, {item2}', threeItems: '{item1}, {item2}, {item3}', - manyItems: '{item1}, {item2}, and {count, number, integer} others', + manyItems: { + message: '{item1}, {item2}, and {count, number, integer} others', + context: "'item' will be replaced by the name of the coach(es) in the list of classes.", + }, }, }; diff --git a/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js b/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js index bd9bcb7b516..b14adad4019 100644 --- a/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js +++ b/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js @@ -13,10 +13,16 @@ Strings variations below are defined based 
on the following construction: export const learnerProgressTranslators = { completed: createTranslator('LearnersCompleted', { label: '{count, plural, one {Completed by learner} other {Completed by learners}}', - labelShort: '{count, plural, other {Completed}}', + labelShort: { + message: '{count, plural, other {Completed}}', + context: 'Refers to learners:\n1 (learner) completed\n4 (learners) completed', + }, count: '{count, plural, other {Completed by}} {count, number, integer} {count, plural, one {learner} other {learners}}', - countShort: '{count, number, integer} {count, plural, other {completed}}', + countShort: { + message: '{count, number, integer} {count, plural, other {completed}}', + context: 'Refers to number of learners that completed a activity\n', + }, allOfMoreThanTwo: 'Completed by all {total, number, integer} {total, plural, one {learner} other {learners}}', allOfMoreThanTwoShort: 'Completed by all {total, number, integer}', diff --git a/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue b/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue index e9bf1a5f201..217acabb303 100644 --- a/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue +++ b/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue @@ -191,7 +191,11 @@ 'Refers to values in a column of the CSV (comma separated values) file used to import and export users.', }, listClassesAssignedL2: 'List of class names, separated by commas', - listClassesAssignedL3: 'If an existing class does not match by name, it will be created', + listClassesAssignedL3: { + message: 'If an existing class does not match by name, it will be created', + context: + 'Explanation that when a CSV file is used to import users and classes they are assigned to, and the CSV file contains a class name that is not already present in a facility, a new class with the name listed in the CSV file will be created. 
', + }, columnNameHeader: 'Column', columnIDHeader: 'Identifier', columnInfoHeader: 'Information', diff --git a/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue b/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue index e48e5a99977..e3986089cd7 100644 --- a/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue +++ b/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue @@ -44,8 +44,8 @@ adminLink: 'As an administrator, you can import channels', learnerText: 'Please ask your coach or administrator for assistance', documentTitle: { - message: 'Resource unavailable', - context: '\nSimilar to a 404 not-found error for resources', + message: 'Content Unavailable', + context: '\nThis string should actually say "Resource unavailable"', }, }, }; diff --git a/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue b/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue index dbec5362b6c..294cd1fdf9b 100644 --- a/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue +++ b/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue @@ -66,7 +66,11 @@ }, }, $trs: { - documentTitle: 'Class assignments', + documentTitle: { + message: 'Class assignments', + context: + 'Page/tab title displayed for the Learn page when the learner is enrolled in a class. 
This is where the learners can see the list of lessons and quizzes coaches have opened and made available for them.', + }, }, }; diff --git a/kolibri/plugins/user/assets/src/views/AuthBase.vue b/kolibri/plugins/user/assets/src/views/AuthBase.vue index 46ba48648b7..d66d9a39ed7 100644 --- a/kolibri/plugins/user/assets/src/views/AuthBase.vue +++ b/kolibri/plugins/user/assets/src/views/AuthBase.vue @@ -254,7 +254,8 @@ whatsThis: "What's this?", restrictedAccess: { message: 'Access to Kolibri has been restricted for external devices', - context: 'Error message description', + context: + 'This warning is displayed when somebody in the same network tries to connect to Kolibri running as a standalone app on a tablet or a smartphone. It indicates that Kolibri is accessible only to the person(s) physically using that tablet or a phone, and no other devices in the network can access and use Kolibri.', }, restrictedAccessDescription: { message: diff --git a/packages/kolibri-tools/lib/i18n/SyncContext.js b/packages/kolibri-tools/lib/i18n/SyncContext.js index 12d53b38ab6..898b3c380e2 100644 --- a/packages/kolibri-tools/lib/i18n/SyncContext.js +++ b/packages/kolibri-tools/lib/i18n/SyncContext.js @@ -1,5 +1,6 @@ // Import packages const fs = require('fs'); +const os = require('os'); const path = require('path'); const glob = require('glob'); const recast = require('recast'); @@ -17,15 +18,23 @@ const CONTEXT_LINE = require('./ExtractStrings').CONTEXT_LINE; const reScriptOpen = /^[ ]*<script[^>]*>/; const reScriptClose = /^[ ]*<\/script>/; +let CROWDIN_PROJECT = get(os.env, 'CROWDIN_PROJECT', null); + +if (!CROWDIN_PROJECT) { + logging.info( + 'No env var set for CROWDIN_PROJECT. Will default to `kolibri`. If you are working with Kolibri Studio, please set this environment variable to `contentcuration` - the name of the root folder for the Django app.' 
+ ); + CROWDIN_PROJECT = 'kolibri'; +} + // Glob path patterns // All JS files not in node_modules -const JS_GLOB = path.resolve('./kolibri') + '/**/*.js'; +const JS_GLOB = path.resolve(CROWDIN_PROJECT) + '/**/*.js'; // All Vue files not in node_modules -const VUE_GLOB = path.resolve('./kolibri') + '/**/*.vue'; -// We only need one set of languages - since we have the ACH -// which is a Crowdin placeholder language, we'll go there to -// get the Context. -const CSV_PATH = path.resolve('./kolibri/locale/CSV_FILES/ach/'); +const VUE_GLOB = path.resolve(CROWDIN_PROJECT) + '/**/*.vue'; +// We must select a language which will be fully translated - so we use fr-fr. +// Fully translated langauges are the only ones with full context in the CSV +const CSV_PATH = path.resolve(`./${CROWDIN_PROJECT}/locale/CSV_FILES/fr/`); // -------------------- // // Processing Functions // diff --git a/packages/kolibri-tools/lib/i18n/fonts.py b/packages/kolibri-tools/lib/i18n/fonts.py index 17ef640865d..3e4960d24e0 100644 --- a/packages/kolibri-tools/lib/i18n/fonts.py +++ b/packages/kolibri-tools/lib/i18n/fonts.py @@ -35,6 +35,8 @@ os.path.dirname(__file__), os.pardir, os.pardir, + os.pardir, + os.pardir, "kolibri", "core", "static", diff --git a/packages/kolibri-tools/lib/i18n/intl_code_gen.js b/packages/kolibri-tools/lib/i18n/intl_code_gen.js index e21c311f937..d2a4bb2fd8f 100644 --- a/packages/kolibri-tools/lib/i18n/intl_code_gen.js +++ b/packages/kolibri-tools/lib/i18n/intl_code_gen.js @@ -2,7 +2,7 @@ const path = require('path'); const fs = require('fs'); const { lint } = require('kolibri-tools/lib/lint'); -const languageInfo = require('../../kolibri/locale/language_info.json'); +const languageInfo = require('./language_info.json'); const commonHeader = ` /* @@ -41,11 +41,7 @@ const vueIntlFooter = ` const vueIntlModule = commonHeader + vueIntlHeader + languageInfo.map(generateVueIntlItems).join('') + vueIntlFooter; -const vueIntlModulePath = path.resolve( - __dirname, - 
'../../kolibri/core/assets/src/utils/vue-intl-locale-data.js' -); - +const vueIntlModulePath = path.resolve(__dirname, 'vue-intl-locale-data.js'); const intlHeader = `module.exports = function(locale) { switch (locale) {`; @@ -123,11 +119,7 @@ const intlFooter = ` const intlModule = commonHeader + intlHeader + languageInfo.map(generateIntlItems).join('') + intlFooter; -const intlModulePath = path.resolve( - __dirname, - '../../kolibri/core/assets/src/utils/intl-locale-data.js' -); - +const intlModulePath = path.resolve(__dirname, 'intl-locale-data.js'); fs.writeFileSync(vueIntlModulePath, vueIntlModule, { encoding: 'utf-8' }); fs.writeFileSync(intlModulePath, intlModule, { encoding: 'utf-8' }); diff --git a/vue-intl-locale-data.js b/vue-intl-locale-data.js new file mode 100644 index 00000000000..8f212e22cc5 --- /dev/null +++ b/vue-intl-locale-data.js @@ -0,0 +1,37 @@ +/* + * This is an auto-generated file, any manual edits will be overridden. + * + * To regenerate, see instructions here: + * https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html + * + * This file was generated by frontend_build/src/intl_code_gen.js + */ +module.exports = function() { + const data = []; + data.push(require('vue-intl/locale-data/ar.js')); + data.push(require('vue-intl/locale-data/bg.js')); + data.push(require('vue-intl/locale-data/bn.js')); + data.push(require('vue-intl/locale-data/de.js')); + data.push(require('vue-intl/locale-data/en.js')); + data.push(require('vue-intl/locale-data/es.js')); + data.push(require('vue-intl/locale-data/es.js')); + data.push(require('vue-intl/locale-data/fa.js')); + data.push(require('vue-intl/locale-data/fr.js')); + data.push(require('vue-intl/locale-data/ff.js')); + data.push(require('vue-intl/locale-data/gu.js')); + data.push(require('vue-intl/locale-data/hi.js')); + data.push(require('vue-intl/locale-data/it.js')); + data.push(require('vue-intl/locale-data/km.js')); + data.push(require('vue-intl/locale-data/ko.js')); + 
data.push(require('vue-intl/locale-data/mr.js')); + data.push(require('vue-intl/locale-data/my.js')); + data.push(require('vue-intl/locale-data/nyn.js')); + data.push(require('vue-intl/locale-data/pt.js')); + data.push(require('vue-intl/locale-data/sw.js')); + data.push(require('vue-intl/locale-data/te.js')); + data.push(require('vue-intl/locale-data/ur.js')); + data.push(require('vue-intl/locale-data/vi.js')); + data.push(require('vue-intl/locale-data/yo.js')); + data.push(require('vue-intl/locale-data/zh.js')); + return data; +};
twisted__twisted-11966
twisted.internet.cfreactor not importable on Python 3.8 **Describe the incorrect behavior you saw** fails with: ``` src/twisted/internet/cfreactor.py:474:24: error: X | Y syntax for unions requires Python 3.10 [syntax] _currentSimulator: object | None = None ``` ``` >>> import twisted.internet.cfreactor Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/graingert/projects/twisted/src/twisted/internet/cfreactor.py", line 92, in <module> class CFReactor(PosixReactorBase): File "/home/graingert/projects/twisted/src/twisted/internet/cfreactor.py", line 474, in CFReactor _currentSimulator: object | None = None TypeError: unsupported operand type(s) for |: 'type' and 'NoneType' ``` **Describe how to cause this behavior** import it on Python 3.8 Preferable a [Short, Self Contained, Correct (Compilable), Example](http://www.sscce.org/) on a branch or on [a gist](https://gist.github.com). Automated tests that are demonstrating the failure would be awesome. **Describe the correct behavior you'd like to see** no import error **Testing environment** N/A
[ { "content": "# -*- test-case-name: twisted.internet.test.test_core -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nA reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the\nCoreFoundation main loop used by macOS.\n\nThis is useful for integrating Twiste...
[ { "content": "# -*- test-case-name: twisted.internet.test.test_core -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nA reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the\nCoreFoundation main loop used by macOS.\n\nThis is useful for integrating Twiste...
diff --git a/src/twisted/internet/cfreactor.py b/src/twisted/internet/cfreactor.py index 333ab497604..142c0472ef4 100644 --- a/src/twisted/internet/cfreactor.py +++ b/src/twisted/internet/cfreactor.py @@ -9,6 +9,7 @@ This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>} applications. """ +from __future__ import annotations __all__ = ["install", "CFReactor"] diff --git a/src/twisted/newsfragments/11965.bugfix b/src/twisted/newsfragments/11965.bugfix new file mode 100644 index 00000000000..b8a16d2e9a8 --- /dev/null +++ b/src/twisted/newsfragments/11965.bugfix @@ -0,0 +1 @@ +Fix TypeError on t.i.cfreactor due to 3.10 type annotation syntax
nilearn__nilearn-507
Add test for compatibility of old version of six For the moment, we are compatible with the latest version of six. Recently, somebody pointed out that we did not support six 1.5.2. We should investigate, decide which version we should be compatible with and then add this to Travis.
[ { "content": "import sys\n\nDEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n\n\ndef print_package_version(package_name, indent=' '):\n try:\n package = __import__(package_name)\n version = getattr(package, '__version__', None)\n package_file = getattr(package, '__...
[ { "content": "import sys\n\nDEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n\n\ndef print_package_version(package_name, indent=' '):\n try:\n package = __import__(package_name)\n version = getattr(package, '__version__', None)\n package_file = getattr(packa...
diff --git a/.travis.yml b/.travis.yml index 184464c0e4..43e11f4db7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ env: - DISTRIB="conda" PYTHON_VERSION="2.6" NUMPY_VERSION="1.6.2" SCIPY_VERSION="0.11.0" SCIKIT_LEARN_VERSION="0.12.1" MATPLOTLIB_VERSION="1.1.1" - NIBABEL_VERSION="1.1.0" + NIBABEL_VERSION="1.1.0" SIX_VERSION="1.4.1" # Most recent versions - DISTRIB="conda" PYTHON_VERSION="3.4" NUMPY_VERSION="*" SCIPY_VERSION="*" diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index 04a3c8e198..4af1c80a98 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -35,7 +35,7 @@ print_conda_requirements() { # - for scikit-learn, SCIKIT_LEARN_VERSION is used TO_INSTALL_ALWAYS="pip nose" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn" + TO_INSTALL_MAYBE="python six numpy scipy matplotlib scikit-learn" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py index 1822dd172e..6134c7a47b 100644 --- a/continuous_integration/show-python-packages-versions.py +++ b/continuous_integration/show-python-packages-versions.py @@ -1,6 +1,6 @@ import sys -DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel'] +DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel'] def print_package_version(package_name, indent=' '):
pydantic__pydantic-3707
subclasses of bytes converted to bytes See https://github.com/duo-labs/py_webauthn/issues/113#issuecomment-1017816575 In short (I think) cython is converting subclasses of bytes to raw bytes in here: https://github.com/samuelcolvin/pydantic/blob/9d631a3429a66f30742c1a52c94ac18ec6ba848d/pydantic/validators.py#L79 Fix should be as simple as changing the type hint.
[ { "content": "import re\nfrom collections import OrderedDict, deque\nfrom collections.abc import Hashable as CollectionsHashable\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal, DecimalException\nfrom enum import Enum, IntEnum\nfrom ipaddress import IPv4Address, IPv4Interface,...
[ { "content": "import re\nfrom collections import OrderedDict, deque\nfrom collections.abc import Hashable as CollectionsHashable\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal, DecimalException\nfrom enum import Enum, IntEnum\nfrom ipaddress import IPv4Address, IPv4Interface,...
diff --git a/changes/3706-samuelcolvin.md b/changes/3706-samuelcolvin.md new file mode 100644 index 00000000000..3a22afee678 --- /dev/null +++ b/changes/3706-samuelcolvin.md @@ -0,0 +1 @@ +Prevent subclasses of bytes being converted to bytes diff --git a/pydantic/validators.py b/pydantic/validators.py index 63b7a59e080..d4783d97b12 100644 --- a/pydantic/validators.py +++ b/pydantic/validators.py @@ -76,7 +76,7 @@ def strict_str_validator(v: Any) -> Union[str]: raise errors.StrError() -def bytes_validator(v: Any) -> bytes: +def bytes_validator(v: Any) -> Union[bytes]: if isinstance(v, bytes): return v elif isinstance(v, bytearray): diff --git a/tests/test_edge_cases.py b/tests/test_edge_cases.py index dd07eb3d37b..5da62257040 100644 --- a/tests/test_edge_cases.py +++ b/tests/test_edge_cases.py @@ -1906,3 +1906,29 @@ class Config: arbitrary_types_allowed = True assert Model().x == Foo() + + +def test_bytes_subclass(): + class MyModel(BaseModel): + my_bytes: bytes + + class BytesSubclass(bytes): + def __new__(cls, data: bytes): + self = bytes.__new__(cls, data) + return self + + m = MyModel(my_bytes=BytesSubclass(b'foobar')) + assert m.my_bytes.__class__ == BytesSubclass + + +def test_int_subclass(): + class MyModel(BaseModel): + my_int: int + + class IntSubclass(int): + def __new__(cls, data: int): + self = int.__new__(cls, data) + return self + + m = MyModel(my_int=IntSubclass(123)) + assert m.my_int.__class__ == IntSubclass