id
int64
0
755k
file_name
stringlengths
3
109
file_path
stringlengths
13
185
content
stringlengths
31
9.38M
size
int64
31
9.38M
language
stringclasses
1 value
extension
stringclasses
11 values
total_lines
int64
1
340k
avg_line_length
float64
2.18
149k
max_line_length
int64
7
2.22M
alphanum_fraction
float64
0
1
repo_name
stringlengths
6
65
repo_stars
int64
100
47.3k
repo_forks
int64
0
12k
repo_open_issues
int64
0
3.4k
repo_license
stringclasses
9 values
repo_extraction_date
stringclasses
92 values
exact_duplicates_redpajama
bool
2 classes
near_duplicates_redpajama
bool
2 classes
exact_duplicates_githubcode
bool
2 classes
exact_duplicates_stackv2
bool
1 class
exact_duplicates_stackv1
bool
2 classes
near_duplicates_githubcode
bool
2 classes
near_duplicates_stackv1
bool
2 classes
near_duplicates_stackv2
bool
1 class
11,042
print.cc
NixOS_nix/src/libexpr-tests/value/print.cc
#include "tests/libexpr.hh" #include "value.hh" #include "print.hh" namespace nix { using namespace testing; struct ValuePrintingTests : LibExprTest { template<class... A> void test(Value v, std::string_view expected, A... args) { std::stringstream out; v.print(state, out, args...); ASSERT_EQ(out.str(), expected); } }; TEST_F(ValuePrintingTests, tInt) { Value vInt; vInt.mkInt(10); test(vInt, "10"); } TEST_F(ValuePrintingTests, tBool) { Value vBool; vBool.mkBool(true); test(vBool, "true"); } TEST_F(ValuePrintingTests, tString) { Value vString; vString.mkString("some-string"); test(vString, "\"some-string\""); } TEST_F(ValuePrintingTests, tPath) { Value vPath; vPath.mkString("/foo"); test(vPath, "\"/foo\""); } TEST_F(ValuePrintingTests, tNull) { Value vNull; vNull.mkNull(); test(vNull, "null"); } TEST_F(ValuePrintingTests, tAttrs) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("one"), &vOne); builder.insert(state.symbols.create("two"), &vTwo); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ one = 1; two = 2; }"); } TEST_F(ValuePrintingTests, tList) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); auto list = state.buildList(3); list.elems[0] = &vOne; list.elems[1] = &vTwo; Value vList; vList.mkList(list); test(vList, "[ 1 2 «nullptr» ]"); } TEST_F(ValuePrintingTests, vThunk) { Value vThunk; vThunk.mkThunk(nullptr, nullptr); test(vThunk, "«thunk»"); } TEST_F(ValuePrintingTests, vApp) { Value vApp; vApp.mkApp(nullptr, nullptr); test(vApp, "«thunk»"); } TEST_F(ValuePrintingTests, vLambda) { Env env { .up = nullptr, .values = { } }; PosTable::Origin origin = state.positions.addOrigin(std::monostate(), 1); auto posIdx = state.positions.add(origin, 0); auto body = ExprInt(0); auto formals = Formals {}; ExprLambda eLambda(posIdx, createSymbol("a"), &formals, &body); Value vLambda; vLambda.mkLambda(&env, &eLambda); test(vLambda, "«lambda @ 
«none»:1:1»"); eLambda.setName(createSymbol("puppy")); test(vLambda, "«lambda puppy @ «none»:1:1»"); } TEST_F(ValuePrintingTests, vPrimOp) { Value vPrimOp; PrimOp primOp{ .name = "puppy" }; vPrimOp.mkPrimOp(&primOp); test(vPrimOp, "«primop puppy»"); } TEST_F(ValuePrintingTests, vPrimOpApp) { PrimOp primOp{ .name = "puppy" }; Value vPrimOp; vPrimOp.mkPrimOp(&primOp); Value vPrimOpApp; vPrimOpApp.mkPrimOpApp(&vPrimOp, nullptr); test(vPrimOpApp, "«partially applied primop puppy»"); } TEST_F(ValuePrintingTests, vExternal) { struct MyExternal : ExternalValueBase { public: std::string showType() const override { return ""; } std::string typeOf() const override { return ""; } virtual std::ostream & print(std::ostream & str) const override { str << "testing-external!"; return str; } } myExternal; Value vExternal; vExternal.mkExternal(&myExternal); test(vExternal, "testing-external!"); } TEST_F(ValuePrintingTests, vFloat) { Value vFloat; vFloat.mkFloat(2.0); test(vFloat, "2"); } TEST_F(ValuePrintingTests, vBlackhole) { Value vBlackhole; vBlackhole.mkBlackhole(); test(vBlackhole, "«potential infinite recursion»"); } TEST_F(ValuePrintingTests, depthAttrs) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); BindingsBuilder builderEmpty(state, state.allocBindings(0)); Value vAttrsEmpty; vAttrsEmpty.mkAttrs(builderEmpty.finish()); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("one"), &vOne); builder.insert(state.symbols.create("two"), &vTwo); builder.insert(state.symbols.create("nested"), &vAttrsEmpty); Value vAttrs; vAttrs.mkAttrs(builder.finish()); BindingsBuilder builder2(state, state.allocBindings(10)); builder2.insert(state.symbols.create("one"), &vOne); builder2.insert(state.symbols.create("two"), &vTwo); builder2.insert(state.symbols.create("nested"), &vAttrs); Value vNested; vNested.mkAttrs(builder2.finish()); test(vNested, "{ nested = { ... 
}; one = 1; two = 2; }", PrintOptions { .maxDepth = 1 }); test(vNested, "{ nested = { nested = { ... }; one = 1; two = 2; }; one = 1; two = 2; }", PrintOptions { .maxDepth = 2 }); test(vNested, "{ nested = { nested = { }; one = 1; two = 2; }; one = 1; two = 2; }", PrintOptions { .maxDepth = 3 }); test(vNested, "{ nested = { nested = { }; one = 1; two = 2; }; one = 1; two = 2; }", PrintOptions { .maxDepth = 4 }); } TEST_F(ValuePrintingTests, depthList) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("one"), &vOne); builder.insert(state.symbols.create("two"), &vTwo); Value vAttrs; vAttrs.mkAttrs(builder.finish()); BindingsBuilder builder2(state, state.allocBindings(10)); builder2.insert(state.symbols.create("one"), &vOne); builder2.insert(state.symbols.create("two"), &vTwo); builder2.insert(state.symbols.create("nested"), &vAttrs); Value vNested; vNested.mkAttrs(builder2.finish()); auto list = state.buildList(3); list.elems[0] = &vOne; list.elems[1] = &vTwo; list.elems[2] = &vNested; Value vList; vList.mkList(list); test(vList, "[ 1 2 { ... } ]", PrintOptions { .maxDepth = 1 }); test(vList, "[ 1 2 { nested = { ... }; one = 1; two = 2; } ]", PrintOptions { .maxDepth = 2 }); test(vList, "[ 1 2 { nested = { one = 1; two = 2; }; one = 1; two = 2; } ]", PrintOptions { .maxDepth = 3 }); test(vList, "[ 1 2 { nested = { one = 1; two = 2; }; one = 1; two = 2; } ]", PrintOptions { .maxDepth = 4 }); test(vList, "[ 1 2 { nested = { one = 1; two = 2; }; one = 1; two = 2; } ]", PrintOptions { .maxDepth = 5 }); } struct StringPrintingTests : LibExprTest { template<class... A> void test(std::string_view literal, std::string_view expected, unsigned int maxLength, A... 
args) { Value v; v.mkString(literal); std::stringstream out; printValue(state, out, v, PrintOptions { .maxStringLength = maxLength }); ASSERT_EQ(out.str(), expected); } }; TEST_F(StringPrintingTests, maxLengthTruncation) { test("abcdefghi", "\"abcdefghi\"", 10); test("abcdefghij", "\"abcdefghij\"", 10); test("abcdefghijk", "\"abcdefghij\" «1 byte elided»", 10); test("abcdefghijkl", "\"abcdefghij\" «2 bytes elided»", 10); test("abcdefghijklm", "\"abcdefghij\" «3 bytes elided»", 10); } // Check that printing an attrset shows 'important' attributes like `type` // first, but only reorder the attrs when we have a maxAttrs budget. TEST_F(ValuePrintingTests, attrsTypeFirst) { Value vType; vType.mkString("puppy"); Value vApple; vApple.mkString("apple"); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("type"), &vType); builder.insert(state.symbols.create("apple"), &vApple); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ type = \"puppy\"; apple = \"apple\"; }", PrintOptions { .maxAttrs = 100 }); test(vAttrs, "{ apple = \"apple\"; type = \"puppy\"; }", PrintOptions { }); } TEST_F(ValuePrintingTests, ansiColorsInt) { Value v; v.mkInt(10); test(v, ANSI_CYAN "10" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsFloat) { Value v; v.mkFloat(1.6); test(v, ANSI_CYAN "1.6" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsBool) { Value v; v.mkBool(true); test(v, ANSI_CYAN "true" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsString) { Value v; v.mkString("puppy"); test(v, ANSI_MAGENTA "\"puppy\"" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsStringElided) { Value v; v.mkString("puppy"); test(v, ANSI_MAGENTA "\"pup\" " ANSI_FAINT "«2 bytes elided»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .maxStringLength = 3 }); } TEST_F(ValuePrintingTests, 
ansiColorsPath) { Value v; v.mkPath(state.rootPath(CanonPath("puppy"))); test(v, ANSI_GREEN "/puppy" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsNull) { Value v; v.mkNull(); test(v, ANSI_CYAN "null" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsAttrs) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("one"), &vOne); builder.insert(state.symbols.create("two"), &vTwo); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ one = " ANSI_CYAN "1" ANSI_NORMAL "; two = " ANSI_CYAN "2" ANSI_NORMAL "; }", PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsDerivation) { Value vDerivation; vDerivation.mkString("derivation"); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.sType, &vDerivation); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, ANSI_GREEN "«derivation»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .force = true, .derivationPaths = true }); test(vAttrs, "{ type = " ANSI_MAGENTA "\"derivation\"" ANSI_NORMAL "; }", PrintOptions { .ansiColors = true, .force = true }); } TEST_F(ValuePrintingTests, ansiColorsError) { Value throw_ = state.getBuiltin("throw"); Value message; message.mkString("uh oh!"); Value vError; vError.mkApp(&throw_, &message); test(vError, ANSI_RED "«error: uh oh!»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .force = true, }); } TEST_F(ValuePrintingTests, ansiColorsDerivationError) { Value throw_ = state.getBuiltin("throw"); Value message; message.mkString("uh oh!"); Value vError; vError.mkApp(&throw_, &message); Value vDerivation; vDerivation.mkString("derivation"); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.sType, &vDerivation); builder.insert(state.sDrvPath, &vError); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ drvPath = 
" ANSI_RED "«error: uh oh!»" ANSI_NORMAL "; type = " ANSI_MAGENTA "\"derivation\"" ANSI_NORMAL "; }", PrintOptions { .ansiColors = true, .force = true }); test(vAttrs, ANSI_RED "«error: uh oh!»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .force = true, .derivationPaths = true, }); } TEST_F(ValuePrintingTests, ansiColorsAssert) { ExprVar eFalse(state.symbols.create("false")); eFalse.bindVars(state, state.staticBaseEnv); ExprInt eInt(1); ExprAssert expr(noPos, &eFalse, &eInt); Value v; state.mkThunk_(v, &expr); test(v, ANSI_RED "«error: assertion 'false' failed»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .force = true }); } TEST_F(ValuePrintingTests, ansiColorsList) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); auto list = state.buildList(3); list.elems[0] = &vOne; list.elems[1] = &vTwo; Value vList; vList.mkList(list); test(vList, "[ " ANSI_CYAN "1" ANSI_NORMAL " " ANSI_CYAN "2" ANSI_NORMAL " " ANSI_MAGENTA "«nullptr»" ANSI_NORMAL " ]", PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsLambda) { Env env { .up = nullptr, .values = { } }; PosTable::Origin origin = state.positions.addOrigin(std::monostate(), 1); auto posIdx = state.positions.add(origin, 0); auto body = ExprInt(0); auto formals = Formals {}; ExprLambda eLambda(posIdx, createSymbol("a"), &formals, &body); Value vLambda; vLambda.mkLambda(&env, &eLambda); test(vLambda, ANSI_BLUE "«lambda @ «none»:1:1»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .force = true }); eLambda.setName(createSymbol("puppy")); test(vLambda, ANSI_BLUE "«lambda puppy @ «none»:1:1»" ANSI_NORMAL, PrintOptions { .ansiColors = true, .force = true }); } TEST_F(ValuePrintingTests, ansiColorsPrimOp) { PrimOp primOp{ .name = "puppy" }; Value v; v.mkPrimOp(&primOp); test(v, ANSI_BLUE "«primop puppy»" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsPrimOpApp) { PrimOp primOp{ .name = "puppy" }; Value vPrimOp; vPrimOp.mkPrimOp(&primOp); Value v; 
v.mkPrimOpApp(&vPrimOp, nullptr); test(v, ANSI_BLUE "«partially applied primop puppy»" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsThunk) { Value v; v.mkThunk(nullptr, nullptr); test(v, ANSI_MAGENTA "«thunk»" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsBlackhole) { Value v; v.mkBlackhole(); test(v, ANSI_RED "«potential infinite recursion»" ANSI_NORMAL, PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsAttrsRepeated) { BindingsBuilder emptyBuilder(state, state.allocBindings(1)); Value vEmpty; vEmpty.mkAttrs(emptyBuilder.finish()); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("a"), &vEmpty); builder.insert(state.symbols.create("b"), &vEmpty); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ a = { }; b = " ANSI_MAGENTA "«repeated»" ANSI_NORMAL "; }", PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, ansiColorsListRepeated) { BindingsBuilder emptyBuilder(state, state.allocBindings(1)); Value vEmpty; vEmpty.mkAttrs(emptyBuilder.finish()); auto list = state.buildList(2); list.elems[0] = &vEmpty; list.elems[1] = &vEmpty; Value vList; vList.mkList(list); test(vList, "[ { } " ANSI_MAGENTA "«repeated»" ANSI_NORMAL " ]", PrintOptions { .ansiColors = true }); } TEST_F(ValuePrintingTests, listRepeated) { BindingsBuilder emptyBuilder(state, state.allocBindings(1)); Value vEmpty; vEmpty.mkAttrs(emptyBuilder.finish()); auto list = state.buildList(2); list.elems[0] = &vEmpty; list.elems[1] = &vEmpty; Value vList; vList.mkList(list); test(vList, "[ { } «repeated» ]", PrintOptions { }); test(vList, "[ { } { } ]", PrintOptions { .trackRepeated = false }); } TEST_F(ValuePrintingTests, ansiColorsAttrsElided) { Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); BindingsBuilder builder(state, state.allocBindings(10)); builder.insert(state.symbols.create("one"), &vOne); 
builder.insert(state.symbols.create("two"), &vTwo); Value vAttrs; vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ one = " ANSI_CYAN "1" ANSI_NORMAL "; " ANSI_FAINT "«1 attribute elided»" ANSI_NORMAL " }", PrintOptions { .ansiColors = true, .maxAttrs = 1 }); Value vThree; vThree.mkInt(3); builder.insert(state.symbols.create("three"), &vThree); vAttrs.mkAttrs(builder.finish()); test(vAttrs, "{ one = " ANSI_CYAN "1" ANSI_NORMAL "; " ANSI_FAINT "«2 attributes elided»" ANSI_NORMAL " }", PrintOptions { .ansiColors = true, .maxAttrs = 1 }); } TEST_F(ValuePrintingTests, ansiColorsListElided) { BindingsBuilder emptyBuilder(state, state.allocBindings(1)); Value vOne; vOne.mkInt(1); Value vTwo; vTwo.mkInt(2); { auto list = state.buildList(2); list.elems[0] = &vOne; list.elems[1] = &vTwo; Value vList; vList.mkList(list); test(vList, "[ " ANSI_CYAN "1" ANSI_NORMAL " " ANSI_FAINT "«1 item elided»" ANSI_NORMAL " ]", PrintOptions { .ansiColors = true, .maxListItems = 1 }); } Value vThree; vThree.mkInt(3); { auto list = state.buildList(3); list.elems[0] = &vOne; list.elems[1] = &vTwo; list.elems[2] = &vThree; Value vList; vList.mkList(list); test(vList, "[ " ANSI_CYAN "1" ANSI_NORMAL " " ANSI_FAINT "«2 items elided»" ANSI_NORMAL " ]", PrintOptions { .ansiColors = true, .maxListItems = 1 }); } } } // namespace nix
18,226
C++
.cc
640
22.382813
125
0.607612
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,043
value.cc
NixOS_nix/src/libexpr-tests/value/value.cc
#include "value.hh" #include "tests/libstore.hh" namespace nix { class ValueTest : public LibStoreTest {}; TEST_F(ValueTest, unsetValue) { Value unsetValue; ASSERT_EQ(false, unsetValue.isValid()); ASSERT_EQ(nThunk, unsetValue.type(true)); ASSERT_DEATH(unsetValue.type(), ""); } TEST_F(ValueTest, vInt) { Value vInt; vInt.mkInt(42); ASSERT_EQ(true, vInt.isValid()); } } // namespace nix
420
C++
.cc
19
19.315789
45
0.706329
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,044
context.cc
NixOS_nix/src/libexpr-tests/value/context.cc
#include <nlohmann/json.hpp> #include <gtest/gtest.h> #include <rapidcheck/gtest.h> #include "tests/path.hh" #include "tests/libexpr.hh" #include "tests/value/context.hh" namespace nix { // Test a few cases of invalid string context elements. TEST(NixStringContextElemTest, empty_invalid) { EXPECT_THROW( NixStringContextElem::parse(""), BadNixStringContextElem); } TEST(NixStringContextElemTest, single_bang_invalid) { EXPECT_THROW( NixStringContextElem::parse("!"), BadNixStringContextElem); } TEST(NixStringContextElemTest, double_bang_invalid) { EXPECT_THROW( NixStringContextElem::parse("!!/"), BadStorePath); } TEST(NixStringContextElemTest, eq_slash_invalid) { EXPECT_THROW( NixStringContextElem::parse("=/"), BadStorePath); } TEST(NixStringContextElemTest, slash_invalid) { EXPECT_THROW( NixStringContextElem::parse("/"), BadStorePath); } /** * Round trip (string <-> data structure) test for * `NixStringContextElem::Opaque`. */ TEST(NixStringContextElemTest, opaque) { std::string_view opaque = "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x"; auto elem = NixStringContextElem::parse(opaque); auto * p = std::get_if<NixStringContextElem::Opaque>(&elem.raw); ASSERT_TRUE(p); ASSERT_EQ(p->path, StorePath { opaque }); ASSERT_EQ(elem.to_string(), opaque); } /** * Round trip (string <-> data structure) test for * `NixStringContextElem::DrvDeep`. */ TEST(NixStringContextElemTest, drvDeep) { std::string_view drvDeep = "=g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; auto elem = NixStringContextElem::parse(drvDeep); auto * p = std::get_if<NixStringContextElem::DrvDeep>(&elem.raw); ASSERT_TRUE(p); ASSERT_EQ(p->drvPath, StorePath { drvDeep.substr(1) }); ASSERT_EQ(elem.to_string(), drvDeep); } /** * Round trip (string <-> data structure) test for a simpler * `NixStringContextElem::Built`. 
*/ TEST(NixStringContextElemTest, built_opaque) { std::string_view built = "!foo!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; auto elem = NixStringContextElem::parse(built); auto * p = std::get_if<NixStringContextElem::Built>(&elem.raw); ASSERT_TRUE(p); ASSERT_EQ(p->output, "foo"); ASSERT_EQ(*p->drvPath, ((SingleDerivedPath) SingleDerivedPath::Opaque { .path = StorePath { built.substr(5) }, })); ASSERT_EQ(elem.to_string(), built); } /** * Round trip (string <-> data structure) test for a more complex, * inductive `NixStringContextElem::Built`. */ TEST(NixStringContextElemTest, built_built) { /** * We set these in tests rather than the regular globals so we don't have * to worry about race conditions if the tests run concurrently. */ ExperimentalFeatureSettings mockXpSettings; mockXpSettings.set("experimental-features", "dynamic-derivations ca-derivations"); std::string_view built = "!foo!bar!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; auto elem = NixStringContextElem::parse(built, mockXpSettings); auto * p = std::get_if<NixStringContextElem::Built>(&elem.raw); ASSERT_TRUE(p); ASSERT_EQ(p->output, "foo"); auto * drvPath = std::get_if<SingleDerivedPath::Built>(&*p->drvPath); ASSERT_TRUE(drvPath); ASSERT_EQ(drvPath->output, "bar"); ASSERT_EQ(*drvPath->drvPath, ((SingleDerivedPath) SingleDerivedPath::Opaque { .path = StorePath { built.substr(9) }, })); ASSERT_EQ(elem.to_string(), built); } /** * Without the right experimental features enabled, we cannot parse a * complex inductive string context element. */ TEST(NixStringContextElemTest, built_built_xp) { ASSERT_THROW( NixStringContextElem::parse("!foo!bar!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"), MissingExperimentalFeature); } #ifndef COVERAGE RC_GTEST_PROP( NixStringContextElemTest, prop_round_rip, (const NixStringContextElem & o)) { RC_ASSERT(o == NixStringContextElem::parse(o.to_string())); } #endif }
3,995
C++
.cc
114
31.166667
123
0.711105
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,045
nix_api_value.cc
NixOS_nix/src/libexpr-c/nix_api_value.cc
#include "attr-set.hh" #include "config.hh" #include "eval.hh" #include "globals.hh" #include "path.hh" #include "primops.hh" #include "value.hh" #include "nix_api_expr.h" #include "nix_api_expr_internal.h" #include "nix_api_util.h" #include "nix_api_util_internal.h" #include "nix_api_store_internal.h" #include "nix_api_value.h" #include "value/context.hh" // Internal helper functions to check [in] and [out] `Value *` parameters static const nix::Value & check_value_not_null(const nix_value * value) { if (!value) { throw std::runtime_error("nix_value is null"); } return *((const nix::Value *) value); } static nix::Value & check_value_not_null(nix_value * value) { if (!value) { throw std::runtime_error("nix_value is null"); } return value->value; } static const nix::Value & check_value_in(const nix_value * value) { auto & v = check_value_not_null(value); if (!v.isValid()) { throw std::runtime_error("Uninitialized nix_value"); } return v; } static nix::Value & check_value_in(nix_value * value) { auto & v = check_value_not_null(value); if (!v.isValid()) { throw std::runtime_error("Uninitialized nix_value"); } return v; } static nix::Value & check_value_out(nix_value * value) { auto & v = check_value_not_null(value); if (v.isValid()) { throw std::runtime_error("nix_value already initialized. Variables are immutable"); } return v; } static inline nix_value * as_nix_value_ptr(nix::Value * v) { return reinterpret_cast<nix_value *>(v); } /** * Helper function to convert calls from nix into C API. * * Deals with errors and converts arguments from C++ into C types. */ static void nix_c_primop_wrapper( PrimOpFun f, void * userdata, nix::EvalState & state, const nix::PosIdx pos, nix::Value ** args, nix::Value & v) { nix_c_context ctx; // v currently has a thunk, but the C API initializers require an uninitialized value. 
// // We can't destroy the thunk, because that makes it impossible to retry, // which is needed for tryEval and for evaluation drivers that evaluate more // than one value (e.g. an attrset with two derivations, both of which // reference v). // // Instead we create a temporary value, and then assign the result to v. // This does not give the primop definition access to the thunk, but that's // ok because we don't see a need for this yet (e.g. inspecting thunks, // or maybe something to make blackholes work better; we don't know). nix::Value vTmp; f(userdata, &ctx, (EvalState *) &state, (nix_value **) args, (nix_value *) &vTmp); if (ctx.last_err_code != NIX_OK) { /* TODO: Throw different errors depending on the error code */ state.error<nix::EvalError>("Error from custom function: %s", *ctx.last_err).atPos(pos).debugThrow(); } if (!vTmp.isValid()) { state.error<nix::EvalError>("Implementation error in custom function: return value was not initialized") .atPos(pos) .debugThrow(); } if (vTmp.type() == nix::nThunk) { // We might allow this in the future if it makes sense for the evaluator // e.g. implementing tail recursion by returning a thunk to the next // "iteration". Until then, this is most likely a mistake or misunderstanding. 
state.error<nix::EvalError>("Implementation error in custom function: return value must not be a thunk") .atPos(pos) .debugThrow(); } v = vTmp; } PrimOp * nix_alloc_primop( nix_c_context * context, PrimOpFun fun, int arity, const char * name, const char ** args, const char * doc, void * user_data) { if (context) context->last_err_code = NIX_OK; try { using namespace std::placeholders; auto p = new #if HAVE_BOEHMGC (GC) #endif nix::PrimOp{ .name = name, .args = {}, .arity = (size_t) arity, .doc = doc, .fun = std::bind(nix_c_primop_wrapper, fun, user_data, _1, _2, _3, _4)}; if (args) for (size_t i = 0; args[i]; i++) p->args.emplace_back(*args); nix_gc_incref(nullptr, p); return (PrimOp *) p; } NIXC_CATCH_ERRS_NULL } nix_err nix_register_primop(nix_c_context * context, PrimOp * primOp) { if (context) context->last_err_code = NIX_OK; try { nix::RegisterPrimOp r(std::move(*((nix::PrimOp *) primOp))); } NIXC_CATCH_ERRS } nix_value * nix_alloc_value(nix_c_context * context, EvalState * state) { if (context) context->last_err_code = NIX_OK; try { nix_value * res = as_nix_value_ptr(state->state.allocValue()); nix_gc_incref(nullptr, res); return res; } NIXC_CATCH_ERRS_NULL } ValueType nix_get_type(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); using namespace nix; switch (v.type()) { case nThunk: return NIX_TYPE_THUNK; case nInt: return NIX_TYPE_INT; case nFloat: return NIX_TYPE_FLOAT; case nBool: return NIX_TYPE_BOOL; case nString: return NIX_TYPE_STRING; case nPath: return NIX_TYPE_PATH; case nNull: return NIX_TYPE_NULL; case nAttrs: return NIX_TYPE_ATTRS; case nList: return NIX_TYPE_LIST; case nFunction: return NIX_TYPE_FUNCTION; case nExternal: return NIX_TYPE_EXTERNAL; } return NIX_TYPE_NULL; } NIXC_CATCH_ERRS_RES(NIX_TYPE_NULL); } const char * nix_get_typename(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = 
check_value_in(value); auto s = nix::showType(v); return strdup(s.c_str()); } NIXC_CATCH_ERRS_NULL } bool nix_get_bool(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nBool); return v.boolean(); } NIXC_CATCH_ERRS_RES(false); } nix_err nix_get_string(nix_c_context * context, const nix_value * value, nix_get_string_callback callback, void * user_data) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nString); call_nix_get_string_callback(v.c_str(), callback, user_data); } NIXC_CATCH_ERRS } const char * nix_get_path_string(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nPath); // NOTE (from @yorickvP) // v._path.path should work but may not be how Eelco intended it. // Long-term this function should be rewritten to copy some data into a // user-allocated string. // We could use v.path().to_string().c_str(), but I'm concerned this // crashes. Looks like .path() allocates a CanonPath with a copy of the // string, then it gets the underlying data from that. 
return v.payload.path.path; } NIXC_CATCH_ERRS_NULL } unsigned int nix_get_list_size(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nList); return v.listSize(); } NIXC_CATCH_ERRS_RES(0); } unsigned int nix_get_attrs_size(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nAttrs); return v.attrs()->size(); } NIXC_CATCH_ERRS_RES(0); } double nix_get_float(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nFloat); return v.fpoint(); } NIXC_CATCH_ERRS_RES(0.0); } int64_t nix_get_int(nix_c_context * context, const nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nInt); return v.integer().value; } NIXC_CATCH_ERRS_RES(0); } ExternalValue * nix_get_external(nix_c_context * context, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); assert(v.type() == nix::nExternal); return (ExternalValue *) v.external(); } NIXC_CATCH_ERRS_NULL; } nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nList); auto * p = v.listElems()[ix]; nix_gc_incref(nullptr, p); if (p != nullptr) state->state.forceValue(*p, nix::noPos); return as_nix_value_ptr(p); } NIXC_CATCH_ERRS_NULL } nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nAttrs); nix::Symbol s = 
state->state.symbols.create(name); auto attr = v.attrs()->get(s); if (attr) { nix_gc_incref(nullptr, attr->value); state->state.forceValue(*attr->value, nix::noPos); return as_nix_value_ptr(attr->value); } nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute"); return nullptr; } NIXC_CATCH_ERRS_NULL } bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); assert(v.type() == nix::nAttrs); nix::Symbol s = state->state.symbols.create(name); auto attr = v.attrs()->get(s); if (attr) return true; return false; } NIXC_CATCH_ERRS_RES(false); } nix_value * nix_get_attr_byidx( nix_c_context * context, const nix_value * value, EvalState * state, unsigned int i, const char ** name) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); const nix::Attr & a = (*v.attrs())[i]; *name = state->state.symbols[a.name].c_str(); nix_gc_incref(nullptr, a.value); state->state.forceValue(*a.value, nix::noPos); return as_nix_value_ptr(a.value); } NIXC_CATCH_ERRS_NULL } const char * nix_get_attr_name_byidx(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int i) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); const nix::Attr & a = (*v.attrs())[i]; return state->state.symbols[a.name].c_str(); } NIXC_CATCH_ERRS_NULL } nix_err nix_init_bool(nix_c_context * context, nix_value * value, bool b) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkBool(b); } NIXC_CATCH_ERRS } // todo string context nix_err nix_init_string(nix_c_context * context, nix_value * value, const char * str) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkString(std::string_view(str)); } NIXC_CATCH_ERRS } nix_err nix_init_path_string(nix_c_context * context, EvalState * s, nix_value * value, 
const char * str) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkPath(s->state.rootPath(nix::CanonPath(str))); } NIXC_CATCH_ERRS } nix_err nix_init_float(nix_c_context * context, nix_value * value, double d) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkFloat(d); } NIXC_CATCH_ERRS } nix_err nix_init_int(nix_c_context * context, nix_value * value, int64_t i) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkInt(i); } NIXC_CATCH_ERRS } nix_err nix_init_null(nix_c_context * context, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkNull(); } NIXC_CATCH_ERRS } nix_err nix_init_apply(nix_c_context * context, nix_value * value, nix_value * fn, nix_value * arg) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_not_null(value); auto & f = check_value_not_null(fn); auto & a = check_value_not_null(arg); v.mkApp(&f, &a); } NIXC_CATCH_ERRS } nix_err nix_init_external(nix_c_context * context, nix_value * value, ExternalValue * val) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); auto r = (nix::ExternalValueBase *) val; v.mkExternal(r); } NIXC_CATCH_ERRS } ListBuilder * nix_make_list_builder(nix_c_context * context, EvalState * state, size_t capacity) { if (context) context->last_err_code = NIX_OK; try { auto builder = state->state.buildList(capacity); return new #if HAVE_BOEHMGC (NoGC) #endif ListBuilder{std::move(builder)}; } NIXC_CATCH_ERRS_NULL } nix_err nix_list_builder_insert(nix_c_context * context, ListBuilder * list_builder, unsigned int index, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & e = check_value_not_null(value); list_builder->builder[index] = &e; } NIXC_CATCH_ERRS } void nix_list_builder_free(ListBuilder * list_builder) { #if HAVE_BOEHMGC GC_FREE(list_builder); 
#else delete list_builder; #endif } nix_err nix_make_list(nix_c_context * context, ListBuilder * list_builder, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkList(list_builder->builder); } NIXC_CATCH_ERRS } nix_err nix_init_primop(nix_c_context * context, nix_value * value, PrimOp * p) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkPrimOp((nix::PrimOp *) p); } NIXC_CATCH_ERRS } nix_err nix_copy_value(nix_c_context * context, nix_value * value, const nix_value * source) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); auto & s = check_value_in(source); v = s; } NIXC_CATCH_ERRS } nix_err nix_make_attrs(nix_c_context * context, nix_value * value, BindingsBuilder * b) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_out(value); v.mkAttrs(b->builder); } NIXC_CATCH_ERRS } BindingsBuilder * nix_make_bindings_builder(nix_c_context * context, EvalState * state, size_t capacity) { if (context) context->last_err_code = NIX_OK; try { auto bb = state->state.buildBindings(capacity); return new #if HAVE_BOEHMGC (NoGC) #endif BindingsBuilder{std::move(bb)}; } NIXC_CATCH_ERRS_NULL } nix_err nix_bindings_builder_insert(nix_c_context * context, BindingsBuilder * bb, const char * name, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_not_null(value); nix::Symbol s = bb->builder.state.symbols.create(name); bb->builder.insert(s, &v); } NIXC_CATCH_ERRS } void nix_bindings_builder_free(BindingsBuilder * bb) { #if HAVE_BOEHMGC GC_FREE((nix::BindingsBuilder *) bb); #else delete (nix::BindingsBuilder *) bb; #endif } nix_realised_string * nix_string_realise(nix_c_context * context, EvalState * state, nix_value * value, bool isIFD) { if (context) context->last_err_code = NIX_OK; try { auto & v = check_value_in(value); nix::NixStringContext stringContext; auto rawStr = 
state->state.coerceToString(nix::noPos, v, stringContext, "while realising a string").toOwned(); nix::StorePathSet storePaths; auto rewrites = state->state.realiseContext(stringContext, &storePaths); auto s = nix::rewriteStrings(rawStr, rewrites); // Convert to the C API StorePath type and convert to vector for index-based access std::vector<StorePath> vec; for (auto & sp : storePaths) { vec.push_back(StorePath{sp}); } return new nix_realised_string{.str = s, .storePaths = vec}; } NIXC_CATCH_ERRS_NULL } void nix_realised_string_free(nix_realised_string * s) { delete s; } size_t nix_realised_string_get_buffer_size(nix_realised_string * s) { return s->str.size(); } const char * nix_realised_string_get_buffer_start(nix_realised_string * s) { return s->str.data(); } size_t nix_realised_string_get_store_path_count(nix_realised_string * s) { return s->storePaths.size(); } const StorePath * nix_realised_string_get_store_path(nix_realised_string * s, size_t i) { return &s->storePaths[i]; }
18,083
C++
.cc
598
24.585284
120
0.613681
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,046
nix_api_expr.cc
NixOS_nix/src/libexpr-c/nix_api_expr.cc
#include <cstring> #include <stdexcept> #include <string> #include "eval.hh" #include "eval-gc.hh" #include "globals.hh" #include "eval-settings.hh" #include "nix_api_expr.h" #include "nix_api_expr_internal.h" #include "nix_api_store.h" #include "nix_api_store_internal.h" #include "nix_api_util.h" #include "nix_api_util_internal.h" #if HAVE_BOEHMGC # include <mutex> #endif nix_err nix_libexpr_init(nix_c_context * context) { if (context) context->last_err_code = NIX_OK; { auto ret = nix_libutil_init(context); if (ret != NIX_OK) return ret; } { auto ret = nix_libstore_init(context); if (ret != NIX_OK) return ret; } try { nix::initGC(); } NIXC_CATCH_ERRS } nix_err nix_expr_eval_from_string( nix_c_context * context, EvalState * state, const char * expr, const char * path, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { nix::Expr * parsedExpr = state->state.parseExprFromString(expr, state->state.rootPath(nix::CanonPath(path))); state->state.eval(parsedExpr, value->value); state->state.forceValue(value->value, nix::noPos); } NIXC_CATCH_ERRS } nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, nix_value * arg, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { state->state.callFunction(fn->value, arg->value, value->value, nix::noPos); state->state.forceValue(value->value, nix::noPos); } NIXC_CATCH_ERRS } nix_err nix_value_call_multi(nix_c_context * context, EvalState * state, nix_value * fn, size_t nargs, nix_value ** args, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { state->state.callFunction(fn->value, nargs, (nix::Value * *)args, value->value, nix::noPos); state->state.forceValue(value->value, nix::noPos); } NIXC_CATCH_ERRS } nix_err nix_value_force(nix_c_context * context, EvalState * state, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { state->state.forceValue(value->value, nix::noPos); } NIXC_CATCH_ERRS } nix_err 
nix_value_force_deep(nix_c_context * context, EvalState * state, nix_value * value) { if (context) context->last_err_code = NIX_OK; try { state->state.forceValueDeep(value->value); } NIXC_CATCH_ERRS } EvalState * nix_state_create(nix_c_context * context, const char ** lookupPath_c, Store * store) { if (context) context->last_err_code = NIX_OK; try { nix::Strings lookupPath; if (lookupPath_c != nullptr) for (size_t i = 0; lookupPath_c[i] != nullptr; i++) lookupPath.push_back(lookupPath_c[i]); void * p = ::operator new( sizeof(EvalState), static_cast<std::align_val_t>(alignof(EvalState))); auto * p2 = static_cast<EvalState *>(p); new (p) EvalState { .fetchSettings = nix::fetchers::Settings{}, .settings = nix::EvalSettings{ nix::settings.readOnlyMode, }, .state = nix::EvalState( nix::LookupPath::parse(lookupPath), store->ptr, p2->fetchSettings, p2->settings), }; loadConfFile(p2->settings); return p2; } NIXC_CATCH_ERRS_NULL } void nix_state_free(EvalState * state) { delete state; } #if HAVE_BOEHMGC std::unordered_map< const void *, unsigned int, std::hash<const void *>, std::equal_to<const void *>, traceable_allocator<std::pair<const void * const, unsigned int>>> nix_refcounts; std::mutex nix_refcount_lock; nix_err nix_gc_incref(nix_c_context * context, const void * p) { if (context) context->last_err_code = NIX_OK; try { std::scoped_lock lock(nix_refcount_lock); auto f = nix_refcounts.find(p); if (f != nix_refcounts.end()) { f->second++; } else { nix_refcounts[p] = 1; } } NIXC_CATCH_ERRS } nix_err nix_gc_decref(nix_c_context * context, const void * p) { if (context) context->last_err_code = NIX_OK; try { std::scoped_lock lock(nix_refcount_lock); auto f = nix_refcounts.find(p); if (f != nix_refcounts.end()) { if (--f->second == 0) nix_refcounts.erase(f); } else throw std::runtime_error("nix_gc_decref: object was not referenced"); } NIXC_CATCH_ERRS } void nix_gc_now() { GC_gcollect(); } #else nix_err nix_gc_incref(nix_c_context * context, const void *) { if (context) 
context->last_err_code = NIX_OK; return NIX_OK; } nix_err nix_gc_decref(nix_c_context * context, const void *) { if (context) context->last_err_code = NIX_OK; return NIX_OK; } void nix_gc_now() {} #endif nix_err nix_value_incref(nix_c_context * context, nix_value *x) { return nix_gc_incref(context, (const void *) x); } nix_err nix_value_decref(nix_c_context * context, nix_value *x) { return nix_gc_decref(context, (const void *) x); } void nix_gc_register_finalizer(void * obj, void * cd, void (*finalizer)(void * obj, void * cd)) { #if HAVE_BOEHMGC GC_REGISTER_FINALIZER(obj, finalizer, cd, 0, 0); #endif }
5,431
C++
.cc
190
23.115789
140
0.612835
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,047
nix_api_external.cc
NixOS_nix/src/libexpr-c/nix_api_external.cc
#include "attr-set.hh" #include "config.hh" #include "eval.hh" #include "globals.hh" #include "value.hh" #include "nix_api_expr.h" #include "nix_api_expr_internal.h" #include "nix_api_external.h" #include "nix_api_util.h" #include "nix_api_util_internal.h" #include "nix_api_value.h" #include "value/context.hh" #include <nlohmann/json.hpp> void nix_set_string_return(nix_string_return * str, const char * c) { str->str = c; } nix_err nix_external_print(nix_c_context * context, nix_printer * printer, const char * c) { if (context) context->last_err_code = NIX_OK; try { printer->s << c; } NIXC_CATCH_ERRS } nix_err nix_external_add_string_context(nix_c_context * context, nix_string_context * ctx, const char * c) { if (context) context->last_err_code = NIX_OK; try { auto r = nix::NixStringContextElem::parse(c); ctx->ctx.insert(r); } NIXC_CATCH_ERRS } class NixCExternalValue : public nix::ExternalValueBase { NixCExternalValueDesc & desc; void * v; public: NixCExternalValue(NixCExternalValueDesc & desc, void * v) : desc(desc) , v(v){}; void * get_ptr() { return v; } /** * Print out the value */ virtual std::ostream & print(std::ostream & str) const override { nix_printer p{str}; desc.print(v, &p); return str; } /** * Return a simple string describing the type */ virtual std::string showType() const override { nix_string_return res; desc.showType(v, &res); return std::move(res.str); } /** * Return a string to be used in builtins.typeOf */ virtual std::string typeOf() const override { nix_string_return res; desc.typeOf(v, &res); return std::move(res.str); } /** * Coerce the value to a string. 
*/ virtual std::string coerceToString( nix::EvalState & state, const nix::PosIdx & pos, nix::NixStringContext & context, bool copyMore, bool copyToStore) const override { if (!desc.coerceToString) { return nix::ExternalValueBase::coerceToString(state, pos, context, copyMore, copyToStore); } nix_string_context ctx{context}; nix_string_return res{""}; // todo: pos, errors desc.coerceToString(v, &ctx, copyMore, copyToStore, &res); if (res.str.empty()) { return nix::ExternalValueBase::coerceToString(state, pos, context, copyMore, copyToStore); } return std::move(res.str); } /** * Compare to another value of the same type. */ virtual bool operator==(const ExternalValueBase & b) const noexcept override { if (!desc.equal) { return false; } auto r = dynamic_cast<const NixCExternalValue *>(&b); if (!r) return false; return desc.equal(v, r->v); } /** * Print the value as JSON. */ virtual nlohmann::json printValueAsJSON( nix::EvalState & state, bool strict, nix::NixStringContext & context, bool copyToStore = true) const override { if (!desc.printValueAsJSON) { return nix::ExternalValueBase::printValueAsJSON(state, strict, context, copyToStore); } nix_string_context ctx{context}; nix_string_return res{""}; desc.printValueAsJSON(v, (EvalState *) &state, strict, &ctx, copyToStore, &res); if (res.str.empty()) { return nix::ExternalValueBase::printValueAsJSON(state, strict, context, copyToStore); } return nlohmann::json::parse(res.str); } /** * Print the value as XML. 
*/ virtual void printValueAsXML( nix::EvalState & state, bool strict, bool location, nix::XMLWriter & doc, nix::NixStringContext & context, nix::PathSet & drvsSeen, const nix::PosIdx pos) const override { if (!desc.printValueAsXML) { return nix::ExternalValueBase::printValueAsXML(state, strict, location, doc, context, drvsSeen, pos); } nix_string_context ctx{context}; desc.printValueAsXML( v, (EvalState *) &state, strict, location, &doc, &ctx, &drvsSeen, *reinterpret_cast<const uint32_t *>(&pos)); } virtual ~NixCExternalValue() override{}; }; ExternalValue * nix_create_external_value(nix_c_context * context, NixCExternalValueDesc * desc, void * v) { if (context) context->last_err_code = NIX_OK; try { auto ret = new #if HAVE_BOEHMGC (GC) #endif NixCExternalValue(*desc, v); nix_gc_incref(nullptr, ret); return (ExternalValue *) ret; } NIXC_CATCH_ERRS_NULL } void * nix_get_external_value_content(nix_c_context * context, ExternalValue * b) { if (context) context->last_err_code = NIX_OK; try { auto r = dynamic_cast<NixCExternalValue *>((nix::ExternalValueBase *) b); if (r) return r->get_ptr(); return nullptr; } NIXC_CATCH_ERRS_NULL }
5,186
C++
.cc
176
23.045455
117
0.610533
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,048
shared.cc
NixOS_nix/src/libmain/shared.cc
#include "globals.hh" #include "current-process.hh" #include "shared.hh" #include "store-api.hh" #include "gc-store.hh" #include "loggers.hh" #include "progress-bar.hh" #include "signals.hh" #include <algorithm> #include <exception> #include <iostream> #include <cstdlib> #include <sys/time.h> #include <sys/stat.h> #include <unistd.h> #include <signal.h> #ifdef __linux__ #include <features.h> #endif #include <openssl/crypto.h> #include "exit.hh" #include "strings.hh" namespace nix { char * * savedArgv; static bool gcWarning = true; void printGCWarning() { if (!gcWarning) return; static bool haveWarned = false; warnOnce(haveWarned, "you did not specify '--add-root'; " "the result might be removed by the garbage collector"); } void printMissing(ref<Store> store, const std::vector<DerivedPath> & paths, Verbosity lvl) { uint64_t downloadSize, narSize; StorePathSet willBuild, willSubstitute, unknown; store->queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize); printMissing(store, willBuild, willSubstitute, unknown, downloadSize, narSize, lvl); } void printMissing(ref<Store> store, const StorePathSet & willBuild, const StorePathSet & willSubstitute, const StorePathSet & unknown, uint64_t downloadSize, uint64_t narSize, Verbosity lvl) { if (!willBuild.empty()) { if (willBuild.size() == 1) printMsg(lvl, "this derivation will be built:"); else printMsg(lvl, "these %d derivations will be built:", willBuild.size()); auto sorted = store->topoSortPaths(willBuild); reverse(sorted.begin(), sorted.end()); for (auto & i : sorted) printMsg(lvl, " %s", store->printStorePath(i)); } if (!willSubstitute.empty()) { const float downloadSizeMiB = downloadSize / (1024.f * 1024.f); const float narSizeMiB = narSize / (1024.f * 1024.f); if (willSubstitute.size() == 1) { printMsg(lvl, "this path will be fetched (%.2f MiB download, %.2f MiB unpacked):", downloadSizeMiB, narSizeMiB); } else { printMsg(lvl, "these %d paths will be fetched (%.2f MiB download, %.2f MiB 
unpacked):", willSubstitute.size(), downloadSizeMiB, narSizeMiB); } std::vector<const StorePath *> willSubstituteSorted = {}; std::for_each(willSubstitute.begin(), willSubstitute.end(), [&](const StorePath &p) { willSubstituteSorted.push_back(&p); }); std::sort(willSubstituteSorted.begin(), willSubstituteSorted.end(), [](const StorePath *lhs, const StorePath *rhs) { if (lhs->name() == rhs->name()) return lhs->to_string() < rhs->to_string(); else return lhs->name() < rhs->name(); }); for (auto p : willSubstituteSorted) printMsg(lvl, " %s", store->printStorePath(*p)); } if (!unknown.empty()) { printMsg(lvl, "don't know how to build these paths%s:", (settings.readOnlyMode ? " (may be caused by read-only store access)" : "")); for (auto & i : unknown) printMsg(lvl, " %s", store->printStorePath(i)); } } std::string getArg(const std::string & opt, Strings::iterator & i, const Strings::iterator & end) { ++i; if (i == end) throw UsageError("'%1%' requires an argument", opt); return *i; } #ifndef _WIN32 static void sigHandler(int signo) { } #endif void initNix(bool loadConfig) { /* Turn on buffering for cerr. */ #if HAVE_PUBSETBUF static char buf[1024]; std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif initLibStore(loadConfig); #ifndef _WIN32 unix::startSignalHandlerThread(); /* Reset SIGCHLD to its default. */ struct sigaction act; sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_handler = SIG_DFL; if (sigaction(SIGCHLD, &act, 0)) throw SysError("resetting SIGCHLD"); /* Install a dummy SIGUSR1 handler for use with pthread_kill(). */ act.sa_handler = sigHandler; if (sigaction(SIGUSR1, &act, 0)) throw SysError("handling SIGUSR1"); #endif #if __APPLE__ /* HACK: on darwin, we need can’t use sigprocmask with SIGWINCH. * Instead, add a dummy sigaction handler, and signalHandlerThread * can handle the rest. 
*/ act.sa_handler = sigHandler; if (sigaction(SIGWINCH, &act, 0)) throw SysError("handling SIGWINCH"); /* Disable SA_RESTART for interrupts, so that system calls on this thread * error with EINTR like they do on Linux. * Most signals on BSD systems default to SA_RESTART on, but Nix * expects EINTR from syscalls to properly exit. */ act.sa_handler = SIG_DFL; if (sigaction(SIGINT, &act, 0)) throw SysError("handling SIGINT"); if (sigaction(SIGTERM, &act, 0)) throw SysError("handling SIGTERM"); if (sigaction(SIGHUP, &act, 0)) throw SysError("handling SIGHUP"); if (sigaction(SIGPIPE, &act, 0)) throw SysError("handling SIGPIPE"); if (sigaction(SIGQUIT, &act, 0)) throw SysError("handling SIGQUIT"); if (sigaction(SIGTRAP, &act, 0)) throw SysError("handling SIGTRAP"); #endif #ifndef _WIN32 /* Register a SIGSEGV handler to detect stack overflows. Why not initLibExpr()? initGC() is essentially that, but detectStackOverflow is not an instance of the init function concept, as it may have to be invoked more than once per process. */ detectStackOverflow(); #endif /* There is no privacy in the Nix system ;-) At least not for now. In particular, store objects should be readable by everybody. */ umask(0022); /* Initialise the PRNG. 
*/ struct timeval tv; gettimeofday(&tv, 0); #ifndef _WIN32 srandom(tv.tv_usec); #endif srand(tv.tv_usec); } LegacyArgs::LegacyArgs(const std::string & programName, std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg) : MixCommonArgs(programName), parseArg(parseArg) { addFlag({ .longName = "no-build-output", .shortName = 'Q', .description = "Do not show build output.", .handler = {[&]() {setLogFormat(LogFormat::raw); }}, }); addFlag({ .longName = "keep-failed", .shortName ='K', .description = "Keep temporary directories of failed builds.", .handler = {&(bool&) settings.keepFailed, true}, }); addFlag({ .longName = "keep-going", .shortName ='k', .description = "Keep going after a build fails.", .handler = {&(bool&) settings.keepGoing, true}, }); addFlag({ .longName = "fallback", .description = "Build from source if substitution fails.", .handler = {&(bool&) settings.tryFallback, true}, }); auto intSettingAlias = [&](char shortName, const std::string & longName, const std::string & description, const std::string & dest) { addFlag({ .longName = longName, .shortName = shortName, .description = description, .labels = {"n"}, .handler = {[=](std::string s) { auto n = string2IntWithUnitPrefix<uint64_t>(s); settings.set(dest, std::to_string(n)); }} }); }; intSettingAlias(0, "cores", "Maximum number of CPU cores to use inside a build.", "cores"); intSettingAlias(0, "max-silent-time", "Number of seconds of silence before a build is killed.", "max-silent-time"); intSettingAlias(0, "timeout", "Number of seconds before a build is killed.", "timeout"); addFlag({ .longName = "readonly-mode", .description = "Do not write to the Nix store.", .handler = {&settings.readOnlyMode, true}, }); addFlag({ .longName = "no-gc-warning", .description = "Disable warnings about not using `--add-root`.", .handler = {&gcWarning, false}, }); addFlag({ .longName = "store", .description = "The URL of the Nix store to use.", .labels = {"store-uri"}, .handler = 
{&(std::string&) settings.storeUri}, }); } bool LegacyArgs::processFlag(Strings::iterator & pos, Strings::iterator end) { if (MixCommonArgs::processFlag(pos, end)) return true; bool res = parseArg(pos, end); if (res) ++pos; return res; } bool LegacyArgs::processArgs(const Strings & args, bool finish) { if (args.empty()) return true; assert(args.size() == 1); Strings ss(args); auto pos = ss.begin(); if (!parseArg(pos, ss.end())) throw UsageError("unexpected argument '%1%'", args.front()); return true; } void parseCmdLine(int argc, char * * argv, std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg) { parseCmdLine(std::string(baseNameOf(argv[0])), argvToStrings(argc, argv), parseArg); } void parseCmdLine(const std::string & programName, const Strings & args, std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg) { LegacyArgs(programName, parseArg).parseCmdline(args); } void printVersion(const std::string & programName) { std::cout << fmt("%1% (Nix) %2%", programName, nixVersion) << std::endl; if (verbosity > lvlInfo) { Strings cfg; #if HAVE_BOEHMGC cfg.push_back("gc"); #endif cfg.push_back("signed-caches"); std::cout << "System type: " << settings.thisSystem << "\n"; std::cout << "Additional system types: " << concatStringsSep(", ", settings.extraPlatforms.get()) << "\n"; std::cout << "Features: " << concatStringsSep(", ", cfg) << "\n"; std::cout << "System configuration file: " << settings.nixConfDir + "/nix.conf" << "\n"; std::cout << "User configuration files: " << concatStringsSep(":", settings.nixUserConfFiles) << "\n"; std::cout << "Store directory: " << settings.nixStore << "\n"; std::cout << "State directory: " << settings.nixStateDir << "\n"; std::cout << "Data directory: " << settings.nixDataDir << "\n"; } throw Exit(); } void showManPage(const std::string & name) { restoreProcessContext(); setEnv("MANPATH", settings.nixManDir.c_str()); execlp("man", "man", name.c_str(), nullptr); if (errno == 
ENOENT) { // Not SysError because we don't want to suffix the errno, aka No such file or directory. throw Error("The '%1%' command was not found, but it is needed for '%2%' and some other '%3%' commands' help text. Perhaps you could install the '%1%' command?", "man", name.c_str(), "nix-*"); } throw SysError("command 'man %1%' failed", name.c_str()); } int handleExceptions(const std::string & programName, std::function<void()> fun) { ReceiveInterrupts receiveInterrupts; // FIXME: need better place for this ErrorInfo::programName = baseNameOf(programName); std::string error = ANSI_RED "error:" ANSI_NORMAL " "; try { try { fun(); } catch (...) { /* Subtle: we have to make sure that any `interrupted' condition is discharged before we reach printMsg() below, since otherwise it will throw an (uncaught) exception. */ setInterruptThrown(); throw; } } catch (Exit & e) { return e.status; } catch (UsageError & e) { logError(e.info()); printError("Try '%1% --help' for more information.", programName); return 1; } catch (BaseError & e) { logError(e.info()); return e.info().status; } catch (std::bad_alloc & e) { printError(error + "out of memory"); return 1; } catch (std::exception & e) { printError(error + e.what()); return 1; } return 0; } RunPager::RunPager() { if (!isatty(STDOUT_FILENO)) return; char * pager = getenv("NIX_PAGER"); if (!pager) pager = getenv("PAGER"); if (pager && ((std::string) pager == "" || (std::string) pager == "cat")) return; stopProgressBar(); Pipe toPager; toPager.create(); #ifdef _WIN32 // TODO re-enable on Windows, once we can start processes. 
throw Error("Commit signature verification not implemented on Windows yet"); #else pid = startProcess([&]() { if (dup2(toPager.readSide.get(), STDIN_FILENO) == -1) throw SysError("dupping stdin"); if (!getenv("LESS")) setEnv("LESS", "FRSXMK"); restoreProcessContext(); if (pager) execl("/bin/sh", "sh", "-c", pager, nullptr); execlp("pager", "pager", nullptr); execlp("less", "less", nullptr); execlp("more", "more", nullptr); throw SysError("executing '%1%'", pager); }); pid.setKillSignal(SIGINT); std_out = fcntl(STDOUT_FILENO, F_DUPFD_CLOEXEC, 0); if (dup2(toPager.writeSide.get(), STDOUT_FILENO) == -1) throw SysError("dupping standard output"); #endif } RunPager::~RunPager() { try { #ifndef _WIN32 // TODO re-enable on Windows, once we can start processes. if (pid != -1) { std::cout.flush(); dup2(std_out, STDOUT_FILENO); pid.wait(); } #endif } catch (...) { ignoreExceptionInDestructor(); } } PrintFreed::~PrintFreed() { if (show) std::cout << fmt("%d store paths deleted, %s freed\n", results.paths.size(), showBytes(results.bytesFreed)); } }
13,662
C++
.cc
363
31.212121
200
0.619803
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,049
progress-bar.cc
NixOS_nix/src/libmain/progress-bar.cc
#include "progress-bar.hh" #include "terminal.hh" #include "sync.hh" #include "store-api.hh" #include "names.hh" #include <atomic> #include <map> #include <thread> #include <sstream> #include <iostream> #include <chrono> namespace nix { static std::string_view getS(const std::vector<Logger::Field> & fields, size_t n) { assert(n < fields.size()); assert(fields[n].type == Logger::Field::tString); return fields[n].s; } static uint64_t getI(const std::vector<Logger::Field> & fields, size_t n) { assert(n < fields.size()); assert(fields[n].type == Logger::Field::tInt); return fields[n].i; } static std::string_view storePathToName(std::string_view path) { auto base = baseNameOf(path); auto i = base.find('-'); return i == std::string::npos ? base.substr(0, 0) : base.substr(i + 1); } class ProgressBar : public Logger { private: struct ActInfo { std::string s, lastLine, phase; ActivityType type = actUnknown; uint64_t done = 0; uint64_t expected = 0; uint64_t running = 0; uint64_t failed = 0; std::map<ActivityType, uint64_t> expectedByType; bool visible = true; ActivityId parent; std::optional<std::string> name; std::chrono::time_point<std::chrono::steady_clock> startTime; }; struct ActivitiesByType { std::map<ActivityId, std::list<ActInfo>::iterator> its; uint64_t done = 0; uint64_t expected = 0; uint64_t failed = 0; }; struct State { std::list<ActInfo> activities; std::map<ActivityId, std::list<ActInfo>::iterator> its; std::map<ActivityType, ActivitiesByType> activitiesByType; uint64_t filesLinked = 0, bytesLinked = 0; uint64_t corruptedPaths = 0, untrustedPaths = 0; bool active = true; bool paused = false; bool haveUpdate = true; }; /** Helps avoid unnecessary redraws, see `redraw()` */ Sync<std::string> lastOutput_; Sync<State> state_; std::thread updateThread; std::condition_variable quitCV, updateCV; bool printBuildLogs = false; bool isTTY; public: ProgressBar(bool isTTY) : isTTY(isTTY) { state_.lock()->active = isTTY; updateThread = std::thread([&]() { auto 
state(state_.lock()); auto nextWakeup = std::chrono::milliseconds::max(); while (state->active) { if (!state->haveUpdate) state.wait_for(updateCV, nextWakeup); nextWakeup = draw(*state); state.wait_for(quitCV, std::chrono::milliseconds(50)); } }); } ~ProgressBar() { stop(); } /* Called by destructor, can't be overridden */ void stop() override final { { auto state(state_.lock()); if (!state->active) return; state->active = false; writeToStderr("\r\e[K"); updateCV.notify_one(); quitCV.notify_one(); } updateThread.join(); } void pause() override { auto state (state_.lock()); state->paused = true; if (state->active) writeToStderr("\r\e[K"); } void resume() override { auto state (state_.lock()); state->paused = false; if (state->active) writeToStderr("\r\e[K"); state->haveUpdate = true; updateCV.notify_one(); } bool isVerbose() override { return printBuildLogs; } void log(Verbosity lvl, std::string_view s) override { if (lvl > verbosity) return; auto state(state_.lock()); log(*state, lvl, s); } void logEI(const ErrorInfo & ei) override { auto state(state_.lock()); std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); log(*state, ei.level, toView(oss)); } void log(State & state, Verbosity lvl, std::string_view s) { if (state.active) { writeToStderr("\r\e[K" + filterANSIEscapes(s, !isTTY) + ANSI_NORMAL "\n"); draw(state); } else { writeToStderr(filterANSIEscapes(s, !isTTY) + "\n"); } } void startActivity(ActivityId act, Verbosity lvl, ActivityType type, const std::string & s, const Fields & fields, ActivityId parent) override { auto state(state_.lock()); if (lvl <= verbosity && !s.empty() && type != actBuildWaiting) log(*state, lvl, s + "..."); state->activities.emplace_back(ActInfo { .s = s, .type = type, .parent = parent, .startTime = std::chrono::steady_clock::now() }); auto i = std::prev(state->activities.end()); state->its.emplace(act, i); state->activitiesByType[type].its.emplace(act, i); if (type == actBuild) { std::string 
name(storePathToName(getS(fields, 0))); if (hasSuffix(name, ".drv")) name = name.substr(0, name.size() - 4); i->s = fmt("building " ANSI_BOLD "%s" ANSI_NORMAL, name); auto machineName = getS(fields, 1); if (machineName != "") i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName); // Used to be curRound and nrRounds, but the // implementation was broken for a long time. if (getI(fields, 2) != 1 || getI(fields, 3) != 1) { throw Error("log message indicated repeating builds, but this is not currently implemented"); } i->name = DrvName(name).name; } if (type == actSubstitute) { auto name = storePathToName(getS(fields, 0)); auto sub = getS(fields, 1); i->s = fmt( hasPrefix(sub, "local") ? "copying " ANSI_BOLD "%s" ANSI_NORMAL " from %s" : "fetching " ANSI_BOLD "%s" ANSI_NORMAL " from %s", name, sub); } if (type == actPostBuildHook) { auto name = storePathToName(getS(fields, 0)); if (hasSuffix(name, ".drv")) name = name.substr(0, name.size() - 4); i->s = fmt("post-build " ANSI_BOLD "%s" ANSI_NORMAL, name); i->name = DrvName(name).name; } if (type == actQueryPathInfo) { auto name = storePathToName(getS(fields, 0)); i->s = fmt("querying " ANSI_BOLD "%s" ANSI_NORMAL " on %s", name, getS(fields, 1)); } if ((type == actFileTransfer && hasAncestor(*state, actCopyPath, parent)) || (type == actFileTransfer && hasAncestor(*state, actQueryPathInfo, parent)) || (type == actCopyPath && hasAncestor(*state, actSubstitute, parent))) i->visible = false; update(*state); } /* Check whether an activity has an ancestore with the specified type. 
*/ bool hasAncestor(State & state, ActivityType type, ActivityId act) { while (act != 0) { auto i = state.its.find(act); if (i == state.its.end()) break; if (i->second->type == type) return true; act = i->second->parent; } return false; } void stopActivity(ActivityId act) override { auto state(state_.lock()); auto i = state->its.find(act); if (i != state->its.end()) { auto & actByType = state->activitiesByType[i->second->type]; actByType.done += i->second->done; actByType.failed += i->second->failed; for (auto & j : i->second->expectedByType) state->activitiesByType[j.first].expected -= j.second; actByType.its.erase(act); state->activities.erase(i->second); state->its.erase(i); } update(*state); } void result(ActivityId act, ResultType type, const std::vector<Field> & fields) override { auto state(state_.lock()); if (type == resFileLinked) { state->filesLinked++; state->bytesLinked += getI(fields, 0); update(*state); } else if (type == resBuildLogLine || type == resPostBuildLogLine) { auto lastLine = chomp(getS(fields, 0)); if (!lastLine.empty()) { auto i = state->its.find(act); assert(i != state->its.end()); ActInfo info = *i->second; if (printBuildLogs) { auto suffix = "> "; if (type == resPostBuildLogLine) { suffix = " (post)> "; } log(*state, lvlInfo, ANSI_FAINT + info.name.value_or("unnamed") + suffix + ANSI_NORMAL + lastLine); } else { state->activities.erase(i->second); info.lastLine = lastLine; state->activities.emplace_back(info); i->second = std::prev(state->activities.end()); update(*state); } } } else if (type == resUntrustedPath) { state->untrustedPaths++; update(*state); } else if (type == resCorruptedPath) { state->corruptedPaths++; update(*state); } else if (type == resSetPhase) { auto i = state->its.find(act); assert(i != state->its.end()); i->second->phase = getS(fields, 0); update(*state); } else if (type == resProgress) { auto i = state->its.find(act); assert(i != state->its.end()); ActInfo & actInfo = *i->second; actInfo.done = getI(fields, 0); 
actInfo.expected = getI(fields, 1); actInfo.running = getI(fields, 2); actInfo.failed = getI(fields, 3); update(*state); } else if (type == resSetExpected) { auto i = state->its.find(act); assert(i != state->its.end()); ActInfo & actInfo = *i->second; auto type = (ActivityType) getI(fields, 0); auto & j = actInfo.expectedByType[type]; state->activitiesByType[type].expected -= j; j = getI(fields, 1); state->activitiesByType[type].expected += j; update(*state); } else if (type == resFetchStatus) { auto i = state->its.find(act); assert(i != state->its.end()); ActInfo & actInfo = *i->second; actInfo.lastLine = getS(fields, 0); update(*state); } } void update(State & state) { state.haveUpdate = true; updateCV.notify_one(); } /** * Redraw, if the output has changed. * * Excessive redrawing is noticable on slow terminals, and it interferes * with text selection in some terminals, including libvte-based terminal * emulators. */ void redraw(std::string newOutput) { auto lastOutput(lastOutput_.lock()); if (newOutput != *lastOutput) { writeToStderr(newOutput); *lastOutput = std::move(newOutput); } } std::chrono::milliseconds draw(State & state) { auto nextWakeup = std::chrono::milliseconds::max(); state.haveUpdate = false; if (state.paused || !state.active) return nextWakeup; std::string line; std::string status = getStatus(state); if (!status.empty()) { line += '['; line += status; line += "]"; } auto now = std::chrono::steady_clock::now(); if (!state.activities.empty()) { if (!status.empty()) line += " "; auto i = state.activities.rbegin(); while (i != state.activities.rend()) { if (i->visible && (!i->s.empty() || !i->lastLine.empty())) { /* Don't show activities until some time has passed, to avoid displaying very short activities. 
*/ auto delay = std::chrono::milliseconds(10); if (i->startTime + delay < now) break; else nextWakeup = std::min(nextWakeup, std::chrono::duration_cast<std::chrono::milliseconds>(delay - (now - i->startTime))); } ++i; } if (i != state.activities.rend()) { line += i->s; if (!i->phase.empty()) { line += " ("; line += i->phase; line += ")"; } if (!i->lastLine.empty()) { if (!i->s.empty()) line += ": "; line += i->lastLine; } } } auto width = getWindowSize().second; if (width <= 0) width = std::numeric_limits<decltype(width)>::max(); redraw("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"); return nextWakeup; } std::string getStatus(State & state) { auto MiB = 1024.0 * 1024.0; std::string res; auto renderActivity = [&](ActivityType type, const std::string & itemFmt, const std::string & numberFmt = "%d", double unit = 1) { auto & act = state.activitiesByType[type]; uint64_t done = act.done, expected = act.done, running = 0, failed = act.failed; for (auto & j : act.its) { done += j.second->done; expected += j.second->expected; running += j.second->running; failed += j.second->failed; } expected = std::max(expected, act.expected); std::string s; if (running || done || expected || failed) { if (running) if (expected != 0) s = fmt(ANSI_BLUE + numberFmt + ANSI_NORMAL "/" ANSI_GREEN + numberFmt + ANSI_NORMAL "/" + numberFmt, running / unit, done / unit, expected / unit); else s = fmt(ANSI_BLUE + numberFmt + ANSI_NORMAL "/" ANSI_GREEN + numberFmt + ANSI_NORMAL, running / unit, done / unit); else if (expected != done) if (expected != 0) s = fmt(ANSI_GREEN + numberFmt + ANSI_NORMAL "/" + numberFmt, done / unit, expected / unit); else s = fmt(ANSI_GREEN + numberFmt + ANSI_NORMAL, done / unit); else s = fmt(done ? 
ANSI_GREEN + numberFmt + ANSI_NORMAL : numberFmt, done / unit); s = fmt(itemFmt, s); if (failed) s += fmt(" (" ANSI_RED "%d failed" ANSI_NORMAL ")", failed / unit); } return s; }; auto showActivity = [&](ActivityType type, const std::string & itemFmt, const std::string & numberFmt = "%d", double unit = 1) { auto s = renderActivity(type, itemFmt, numberFmt, unit); if (s.empty()) return; if (!res.empty()) res += ", "; res += s; }; showActivity(actBuilds, "%s built"); auto s1 = renderActivity(actCopyPaths, "%s copied"); auto s2 = renderActivity(actCopyPath, "%s MiB", "%.1f", MiB); if (!s1.empty() || !s2.empty()) { if (!res.empty()) res += ", "; if (s1.empty()) res += "0 copied"; else res += s1; if (!s2.empty()) { res += " ("; res += s2; res += ')'; } } showActivity(actFileTransfer, "%s MiB DL", "%.1f", MiB); { auto s = renderActivity(actOptimiseStore, "%s paths optimised"); if (s != "") { s += fmt(", %.1f MiB / %d inodes freed", state.bytesLinked / MiB, state.filesLinked); if (!res.empty()) res += ", "; res += s; } } // FIXME: don't show "done" paths in green. 
showActivity(actVerifyPaths, "%s paths verified"); if (state.corruptedPaths) { if (!res.empty()) res += ", "; res += fmt(ANSI_RED "%d corrupted" ANSI_NORMAL, state.corruptedPaths); } if (state.untrustedPaths) { if (!res.empty()) res += ", "; res += fmt(ANSI_RED "%d untrusted" ANSI_NORMAL, state.untrustedPaths); } return res; } void writeToStdout(std::string_view s) override { auto state(state_.lock()); if (state->active) { std::cerr << "\r\e[K"; Logger::writeToStdout(s); draw(*state); } else { Logger::writeToStdout(s); } } std::optional<char> ask(std::string_view msg) override { auto state(state_.lock()); if (!state->active) return {}; std::cerr << fmt("\r\e[K%s ", msg); auto s = trim(readLine(getStandardInput(), true)); if (s.size() != 1) return {}; draw(*state); return s[0]; } void setPrintBuildLogs(bool printBuildLogs) override { this->printBuildLogs = printBuildLogs; } }; Logger * makeProgressBar() { return new ProgressBar(isTTY()); } void startProgressBar() { logger = makeProgressBar(); } void stopProgressBar() { auto progressBar = dynamic_cast<ProgressBar *>(logger); if (progressBar) progressBar->stop(); } }
17,812
C++
.cc
480
26.5625
143
0.519232
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,050
loggers.cc
NixOS_nix/src/libmain/loggers.cc
#include "loggers.hh" #include "environment-variables.hh" #include "progress-bar.hh" namespace nix { LogFormat defaultLogFormat = LogFormat::raw; LogFormat parseLogFormat(const std::string & logFormatStr) { if (logFormatStr == "raw" || getEnv("NIX_GET_COMPLETIONS")) return LogFormat::raw; else if (logFormatStr == "raw-with-logs") return LogFormat::rawWithLogs; else if (logFormatStr == "internal-json") return LogFormat::internalJSON; else if (logFormatStr == "bar") return LogFormat::bar; else if (logFormatStr == "bar-with-logs") return LogFormat::barWithLogs; throw Error("option 'log-format' has an invalid value '%s'", logFormatStr); } Logger * makeDefaultLogger() { switch (defaultLogFormat) { case LogFormat::raw: return makeSimpleLogger(false); case LogFormat::rawWithLogs: return makeSimpleLogger(true); case LogFormat::internalJSON: return makeJSONLogger(*makeSimpleLogger(true)); case LogFormat::bar: return makeProgressBar(); case LogFormat::barWithLogs: { auto logger = makeProgressBar(); logger->setPrintBuildLogs(true); return logger; } default: unreachable(); } } void setLogFormat(const std::string & logFormatStr) { setLogFormat(parseLogFormat(logFormatStr)); } void setLogFormat(const LogFormat & logFormat) { defaultLogFormat = logFormat; createDefaultLogger(); } void createDefaultLogger() { logger = makeDefaultLogger(); } }
1,537
C++
.cc
48
27.104167
79
0.700203
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,051
common-args.cc
NixOS_nix/src/libmain/common-args.cc
#include "common-args.hh" #include "args/root.hh" #include "config-global.hh" #include "globals.hh" #include "logging.hh" #include "loggers.hh" #include "util.hh" #include "plugin.hh" namespace nix { MixCommonArgs::MixCommonArgs(const std::string & programName) : programName(programName) { addFlag({ .longName = "verbose", .shortName = 'v', .description = "Increase the logging verbosity level.", .category = loggingCategory, .handler = {[]() { verbosity = (Verbosity) std::min<std::underlying_type_t<Verbosity>>(verbosity + 1, lvlVomit); }}, }); addFlag({ .longName = "quiet", .description = "Decrease the logging verbosity level.", .category = loggingCategory, .handler = {[]() { verbosity = verbosity > lvlError ? (Verbosity) (verbosity - 1) : lvlError; }}, }); addFlag({ .longName = "debug", .description = "Set the logging verbosity level to 'debug'.", .category = loggingCategory, .handler = {[]() { verbosity = lvlDebug; }}, }); addFlag({ .longName = "option", .description = "Set the Nix configuration setting *name* to *value* (overriding `nix.conf`).", .category = miscCategory, .labels = {"name", "value"}, .handler = {[this](std::string name, std::string value) { try { globalConfig.set(name, value); } catch (UsageError & e) { if (!getRoot().completions) warn(e.what()); } }}, .completer = [](AddCompletions & completions, size_t index, std::string_view prefix) { if (index == 0) { std::map<std::string, Config::SettingInfo> settings; globalConfig.getSettings(settings); for (auto & s : settings) if (hasPrefix(s.first, prefix)) completions.add(s.first, fmt("Set the `%s` setting.", s.first)); } } }); addFlag({ .longName = "log-format", .description = "Set the format of log output; one of `raw`, `internal-json`, `bar` or `bar-with-logs`.", .category = loggingCategory, .labels = {"format"}, .handler = {[](std::string format) { setLogFormat(format); }}, }); addFlag({ .longName = "max-jobs", .shortName = 'j', .description = "The maximum number of parallel builds.", .labels = 
Strings{"jobs"}, .handler = {[=](std::string s) { settings.set("max-jobs", s); }} }); std::string cat = "Options to override configuration settings"; globalConfig.convertToArgs(*this, cat); // Backward compatibility hack: nix-env already had a --system flag. if (programName == "nix-env") longFlags.erase("system"); hiddenCategories.insert(cat); } void MixCommonArgs::initialFlagsProcessed() { initPlugins(); pluginsInited(); } }
3,009
C++
.cc
84
27.857143
112
0.571772
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,052
plugin.cc
NixOS_nix/src/libmain/plugin.cc
#ifndef _WIN32 # include <dlfcn.h> #endif #include <filesystem> #include "config-global.hh" #include "signals.hh" namespace nix { struct PluginFilesSetting : public BaseSetting<Paths> { bool pluginsLoaded = false; PluginFilesSetting( Config * options, const Paths & def, const std::string & name, const std::string & description, const std::set<std::string> & aliases = {}) : BaseSetting<Paths>(def, true, name, description, aliases) { options->addSetting(this); } Paths parse(const std::string & str) const override; }; Paths PluginFilesSetting::parse(const std::string & str) const { if (pluginsLoaded) throw UsageError( "plugin-files set after plugins were loaded, you may need to move the flag before the subcommand"); return BaseSetting<Paths>::parse(str); } struct PluginSettings : Config { PluginFilesSetting pluginFiles{ this, {}, "plugin-files", R"( A list of plugin files to be loaded by Nix. Each of these files will be dlopened by Nix. If they contain the symbol `nix_plugin_entry()`, this symbol will be called. Alternatively, they can affect execution through static initialization. In particular, these plugins may construct static instances of RegisterPrimOp to add new primops or constants to the expression language, RegisterStoreImplementation to add new store implementations, RegisterCommand to add new subcommands to the `nix` command, and RegisterSetting to add new nix config settings. See the constructors for those types for more details. Warning! These APIs are inherently unstable and may change from release to release. Since these files are loaded into the same address space as Nix itself, they must be DSOs compatible with the instance of Nix running at the time (i.e. compiled against the same headers, not linked to any incompatible libraries). They should not be linked to any Nix libs directly, as those will be available already at load time. If an entry in the list is a directory, all files in the directory are loaded as plugins (non-recursively). 
)"}; }; static PluginSettings pluginSettings; static GlobalConfig::Register rPluginSettings(&pluginSettings); void initPlugins() { assert(!pluginSettings.pluginFiles.pluginsLoaded); for (const auto & pluginFile : pluginSettings.pluginFiles.get()) { std::vector<std::filesystem::path> pluginFiles; try { auto ents = std::filesystem::directory_iterator{pluginFile}; for (const auto & ent : ents) { checkInterrupt(); pluginFiles.emplace_back(ent.path()); } } catch (std::filesystem::filesystem_error & e) { if (e.code() != std::errc::not_a_directory) throw; pluginFiles.emplace_back(pluginFile); } for (const auto & file : pluginFiles) { checkInterrupt(); /* handle is purposefully leaked as there may be state in the DSO needed by the action of the plugin. */ #ifndef _WIN32 // TODO implement via DLL loading on Windows void * handle = dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL); if (!handle) throw Error("could not dynamically open plugin file '%s': %s", file, dlerror()); /* Older plugins use a statically initialized object to run their code. Newer plugins can also export nix_plugin_entry() */ void (*nix_plugin_entry)() = (void (*)()) dlsym(handle, "nix_plugin_entry"); if (nix_plugin_entry) nix_plugin_entry(); #else throw Error("could not dynamically open plugin file '%s'", file); #endif } } /* Since plugins can add settings, try to re-apply previously unknown settings. */ globalConfig.reapplyUnknownSettings(); globalConfig.warnUnknownSettings(); /* Tell the user if they try to set plugin-files after we've already loaded */ pluginSettings.pluginFiles.pluginsLoaded = true; } }
4,288
C++
.cc
101
34.435644
111
0.654833
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,053
stack.cc
NixOS_nix/src/libmain/unix/stack.cc
#include "error.hh" #include "shared.hh" #include <cstring> #include <cstddef> #include <cstdlib> #include <unistd.h> #include <signal.h> namespace nix { static void sigsegvHandler(int signo, siginfo_t * info, void * ctx) { /* Detect stack overflows by comparing the faulting address with the stack pointer. Unfortunately, getting the stack pointer is not portable. */ bool haveSP = true; char * sp = 0; #if defined(__x86_64__) && defined(REG_RSP) sp = (char *) ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_RSP]; #elif defined(REG_ESP) sp = (char *) ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_ESP]; #else haveSP = false; #endif if (haveSP) { ptrdiff_t diff = (char *) info->si_addr - sp; if (diff < 0) diff = -diff; if (diff < 4096) { nix::stackOverflowHandler(info, ctx); } } /* Restore default behaviour (i.e. segfault and dump core). */ struct sigaction act; sigfillset(&act.sa_mask); act.sa_handler = SIG_DFL; act.sa_flags = 0; if (sigaction(SIGSEGV, &act, 0)) abort(); } void detectStackOverflow() { #if defined(SA_SIGINFO) && defined (SA_ONSTACK) /* Install a SIGSEGV handler to detect stack overflows. This requires an alternative stack, otherwise the signal cannot be delivered when we're out of stack space. 
*/ stack_t stack; stack.ss_size = 4096 * 4 + MINSIGSTKSZ; static auto stackBuf = std::make_unique<std::vector<char>>(stack.ss_size); stack.ss_sp = stackBuf->data(); if (!stack.ss_sp) throw Error("cannot allocate alternative stack"); stack.ss_flags = 0; if (sigaltstack(&stack, 0) == -1) throw SysError("cannot set alternative stack"); struct sigaction act; sigfillset(&act.sa_mask); act.sa_sigaction = sigsegvHandler; act.sa_flags = SA_SIGINFO | SA_ONSTACK; if (sigaction(SIGSEGV, &act, 0)) throw SysError("resetting SIGSEGV"); #endif } std::function<void(siginfo_t * info, void * ctx)> stackOverflowHandler(defaultStackOverflowHandler); void defaultStackOverflowHandler(siginfo_t * info, void * ctx) { char msg[] = "error: stack overflow (possible infinite recursion)\n"; [[gnu::unused]] auto res = write(2, msg, strlen(msg)); _exit(1); // maybe abort instead? } }
2,298
C++
.cc
64
31.578125
100
0.666817
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,054
nix-channel.cc
NixOS_nix/src/nix-channel/nix-channel.cc
#include "profiles.hh" #include "shared.hh" #include "globals.hh" #include "filetransfer.hh" #include "store-api.hh" #include "legacy.hh" #include "eval-settings.hh" // for defexpr #include "users.hh" #include "tarball.hh" #include "self-exe.hh" #include <fcntl.h> #include <regex> #include <pwd.h> using namespace nix; typedef std::map<std::string, std::string> Channels; static Channels channels; static std::filesystem::path channelsList; // Reads the list of channels. static void readChannels() { if (!pathExists(channelsList)) return; auto channelsFile = readFile(channelsList); for (const auto & line : tokenizeString<std::vector<std::string>>(channelsFile, "\n")) { chomp(line); if (std::regex_search(line, std::regex("^\\s*\\#"))) continue; auto split = tokenizeString<std::vector<std::string>>(line, " "); auto url = std::regex_replace(split[0], std::regex("/*$"), ""); auto name = split.size() > 1 ? split[1] : std::string(baseNameOf(url)); channels[name] = url; } } // Writes the list of channels. static void writeChannels() { auto channelsFD = AutoCloseFD{open(channelsList.c_str(), O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0644)}; if (!channelsFD) throw SysError("opening '%1%' for writing", channelsList.string()); for (const auto & channel : channels) writeFull(channelsFD.get(), channel.second + " " + channel.first + "\n"); } // Adds a channel. static void addChannel(const std::string & url, const std::string & name) { if (!regex_search(url, std::regex("^(file|http|https)://"))) throw Error("invalid channel URL '%1%'", url); if (!regex_search(name, std::regex("^[a-zA-Z0-9_][a-zA-Z0-9_\\.-]*$"))) throw Error("invalid channel identifier '%1%'", name); readChannels(); channels[name] = url; writeChannels(); } static Path profile; // Remove a channel. 
static void removeChannel(const std::string & name) { readChannels(); channels.erase(name); writeChannels(); runProgram(getNixBin("nix-env").string(), true, { "--profile", profile, "--uninstall", name }); } static Path nixDefExpr; // Fetch Nix expressions and binary cache URLs from the subscribed channels. static void update(const StringSet & channelNames) { readChannels(); auto store = openStore(); auto [fd, unpackChannelPath] = createTempFile(); writeFull(fd.get(), #include "unpack-channel.nix.gen.hh" ); fd = -1; AutoDelete del(unpackChannelPath, false); // Download each channel. Strings exprs; for (const auto & channel : channels) { auto name = channel.first; auto url = channel.second; // If the URL contains a version number, append it to the name // attribute (so that "nix-env -q" on the channels profile // shows something useful). auto cname = name; std::smatch match; auto urlBase = std::string(baseNameOf(url)); if (std::regex_search(urlBase, match, std::regex("(-\\d.*)$"))) cname = cname + match.str(1); std::string extraAttrs; if (!(channelNames.empty() || channelNames.count(name))) { // no need to update this channel, reuse the existing store path Path symlink = profile + "/" + name; Path storepath = dirOf(readLink(symlink)); exprs.push_back("f: rec { name = \"" + cname + "\"; type = \"derivation\"; outputs = [\"out\"]; system = \"builtin\"; outPath = builtins.storePath \"" + storepath + "\"; out = { inherit outPath; };}"); } else { // We want to download the url to a file to see if it's a tarball while also checking if we // got redirected in the process, so that we can grab the various parts of a nix channel // definition from a consistent location if the redirect changes mid-download. 
auto result = fetchers::downloadFile(store, url, std::string(baseNameOf(url))); auto filename = store->toRealPath(result.storePath); url = result.effectiveUrl; bool unpacked = false; if (std::regex_search(filename, std::regex("\\.tar\\.(gz|bz2|xz)$"))) { runProgram(getNixBin("nix-build").string(), false, { "--no-out-link", "--expr", "import " + unpackChannelPath + "{ name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; }" }); unpacked = true; } if (!unpacked) { // Download the channel tarball. try { filename = store->toRealPath(fetchers::downloadFile(store, url + "/nixexprs.tar.xz", "nixexprs.tar.xz").storePath); } catch (FileTransferError & e) { filename = store->toRealPath(fetchers::downloadFile(store, url + "/nixexprs.tar.bz2", "nixexprs.tar.bz2").storePath); } } // Regardless of where it came from, add the expression representing this channel to accumulated expression exprs.push_back("f: f { name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; " + extraAttrs + " }"); } } // Unpack the channel tarballs into the Nix store and install them // into the channels profile. std::cerr << "unpacking " << exprs.size() << " channels...\n"; Strings envArgs{ "--profile", profile, "--file", unpackChannelPath, "--install", "--remove-all", "--from-expression" }; for (auto & expr : exprs) envArgs.push_back(std::move(expr)); envArgs.push_back("--quiet"); runProgram(getNixBin("nix-env").string(), false, envArgs); // Make the channels appear in nix-env. 
struct stat st; if (lstat(nixDefExpr.c_str(), &st) == 0) { if (S_ISLNK(st.st_mode)) // old-skool ~/.nix-defexpr if (unlink(nixDefExpr.c_str()) == -1) throw SysError("unlinking %1%", nixDefExpr); } else if (errno != ENOENT) { throw SysError("getting status of %1%", nixDefExpr); } createDirs(nixDefExpr); auto channelLink = nixDefExpr + "/channels"; replaceSymlink(profile, channelLink); } static int main_nix_channel(int argc, char ** argv) { { // Figure out the name of the `.nix-channels' file to use auto home = getHome(); channelsList = settings.useXDGBaseDirectories ? createNixStateDir() + "/channels" : home + "/.nix-channels"; nixDefExpr = getNixDefExpr(); // Figure out the name of the channels profile. profile = profilesDir() + "/channels"; createDirs(dirOf(profile)); enum { cNone, cAdd, cRemove, cList, cUpdate, cListGenerations, cRollback } cmd = cNone; std::vector<std::string> args; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--help") { showManPage("nix-channel"); } else if (*arg == "--version") { printVersion("nix-channel"); } else if (*arg == "--add") { cmd = cAdd; } else if (*arg == "--remove") { cmd = cRemove; } else if (*arg == "--list") { cmd = cList; } else if (*arg == "--update") { cmd = cUpdate; } else if (*arg == "--list-generations") { cmd = cListGenerations; } else if (*arg == "--rollback") { cmd = cRollback; } else { if (hasPrefix(*arg, "-")) throw UsageError("unsupported argument '%s'", *arg); args.push_back(std::move(*arg)); } return true; }); switch (cmd) { case cNone: throw UsageError("no command specified"); case cAdd: if (args.size() < 1 || args.size() > 2) throw UsageError("'--add' requires one or two arguments"); { auto url = args[0]; std::string name; if (args.size() == 2) { name = args[1]; } else { name = baseNameOf(url); name = std::regex_replace(name, std::regex("-unstable$"), ""); name = std::regex_replace(name, std::regex("-stable$"), ""); } addChannel(url, name); } break; case 
cRemove: if (args.size() != 1) throw UsageError("'--remove' requires one argument"); removeChannel(args[0]); break; case cList: if (!args.empty()) throw UsageError("'--list' expects no arguments"); readChannels(); for (const auto & channel : channels) std::cout << channel.first << ' ' << channel.second << '\n'; break; case cUpdate: update(StringSet(args.begin(), args.end())); break; case cListGenerations: if (!args.empty()) throw UsageError("'--list-generations' expects no arguments"); std::cout << runProgram(getNixBin("nix-env").string(), false, {"--profile", profile, "--list-generations"}) << std::flush; break; case cRollback: if (args.size() > 1) throw UsageError("'--rollback' has at most one argument"); Strings envArgs{"--profile", profile}; if (args.size() == 1) { envArgs.push_back("--switch-generation"); envArgs.push_back(args[0]); } else { envArgs.push_back("--rollback"); } runProgram(getNixBin("nix-env").string(), false, envArgs); break; } return 0; } } static RegisterLegacyCommand r_nix_channel("nix-channel", main_nix_channel);
10,312
C++
.cc
239
32.953975
213
0.549681
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,055
user-env.cc
NixOS_nix/src/nix-env/user-env.cc
#include "user-env.hh" #include "derivations.hh" #include "store-api.hh" #include "path-with-outputs.hh" #include "local-fs-store.hh" #include "globals.hh" #include "shared.hh" #include "eval.hh" #include "eval-inline.hh" #include "profiles.hh" #include "print-ambiguous.hh" #include <limits> #include <sstream> namespace nix { PackageInfos queryInstalled(EvalState & state, const Path & userEnv) { PackageInfos elems; if (pathExists(userEnv + "/manifest.json")) throw Error("profile '%s' is incompatible with 'nix-env'; please use 'nix profile' instead", userEnv); auto manifestFile = userEnv + "/manifest.nix"; if (pathExists(manifestFile)) { Value v; state.evalFile(state.rootPath(CanonPath(manifestFile)).resolveSymlinks(), v); Bindings & bindings(*state.allocBindings(0)); getDerivations(state, v, "", bindings, elems, false); } return elems; } bool createUserEnv(EvalState & state, PackageInfos & elems, const Path & profile, bool keepDerivations, const std::string & lockToken) { /* Build the components in the user environment, if they don't exist already. */ std::vector<StorePathWithOutputs> drvsToBuild; for (auto & i : elems) if (auto drvPath = i.queryDrvPath()) drvsToBuild.push_back({*drvPath}); debug("building user environment dependencies"); state.store->buildPaths( toDerivedPaths(drvsToBuild), state.repair ? bmRepair : bmNormal); /* Construct the whole top level derivation. */ StorePathSet references; auto list = state.buildList(elems.size()); for (const auto & [n, i] : enumerate(elems)) { /* Create a pseudo-derivation containing the name, system, output paths, and optionally the derivation path, as well as the meta attributes. */ std::optional<StorePath> drvPath = keepDerivations ? 
i.queryDrvPath() : std::nullopt; PackageInfo::Outputs outputs = i.queryOutputs(true, true); StringSet metaNames = i.queryMetaNames(); auto attrs = state.buildBindings(7 + outputs.size()); attrs.alloc(state.sType).mkString("derivation"); attrs.alloc(state.sName).mkString(i.queryName()); auto system = i.querySystem(); if (!system.empty()) attrs.alloc(state.sSystem).mkString(system); attrs.alloc(state.sOutPath).mkString(state.store->printStorePath(i.queryOutPath())); if (drvPath) attrs.alloc(state.sDrvPath).mkString(state.store->printStorePath(*drvPath)); // Copy each output meant for installation. auto outputsList = state.buildList(outputs.size()); for (const auto & [m, j] : enumerate(outputs)) { (outputsList[m] = state.allocValue())->mkString(j.first); auto outputAttrs = state.buildBindings(2); outputAttrs.alloc(state.sOutPath).mkString(state.store->printStorePath(*j.second)); attrs.alloc(j.first).mkAttrs(outputAttrs); /* This is only necessary when installing store paths, e.g., `nix-env -i /nix/store/abcd...-foo'. */ state.store->addTempRoot(*j.second); state.store->ensurePath(*j.second); references.insert(*j.second); } attrs.alloc(state.sOutputs).mkList(outputsList); // Copy the meta attributes. auto meta = state.buildBindings(metaNames.size()); for (auto & j : metaNames) { Value * v = i.queryMeta(j); if (!v) continue; meta.insert(state.symbols.create(j), v); } attrs.alloc(state.sMeta).mkAttrs(meta); (list[n] = state.allocValue())->mkAttrs(attrs); if (drvPath) references.insert(*drvPath); } Value manifest; manifest.mkList(list); /* Also write a copy of the list of user environment elements to the store; we need it for future modifications of the environment. 
*/ auto manifestFile = ({ std::ostringstream str; printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits<int>::max()); StringSource source { toView(str) }; state.store->addToStoreFromDump( source, "env-manifest.nix", FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, references); }); /* Get the environment builder expression. */ Value envBuilder; state.eval(state.parseExprFromString( #include "buildenv.nix.gen.hh" , state.rootPath(CanonPath::root)), envBuilder); /* Construct a Nix expression that calls the user environment builder with the manifest as argument. */ auto attrs = state.buildBindings(3); state.mkStorePathString(manifestFile, attrs.alloc("manifest")); attrs.insert(state.symbols.create("derivations"), &manifest); Value args; args.mkAttrs(attrs); Value topLevel; topLevel.mkApp(&envBuilder, &args); /* Evaluate it. */ debug("evaluating user environment builder"); state.forceValue(topLevel, topLevel.determinePos(noPos)); NixStringContext context; auto & aDrvPath(*topLevel.attrs()->find(state.sDrvPath)); auto topLevelDrv = state.coerceToStorePath(aDrvPath.pos, *aDrvPath.value, context, ""); topLevelDrv.requireDerivation(); auto & aOutPath(*topLevel.attrs()->find(state.sOutPath)); auto topLevelOut = state.coerceToStorePath(aOutPath.pos, *aOutPath.value, context, ""); /* Realise the resulting store expression. */ debug("building user environment"); std::vector<StorePathWithOutputs> topLevelDrvs; topLevelDrvs.push_back({topLevelDrv}); state.store->buildPaths( toDerivedPaths(topLevelDrvs), state.repair ? bmRepair : bmNormal); /* Switch the current user environment to the output path. 
*/ auto store2 = state.store.dynamic_pointer_cast<LocalFSStore>(); if (store2) { PathLocks lock; lockProfile(lock, profile); Path lockTokenCur = optimisticLockProfile(profile); if (lockToken != lockTokenCur) { printInfo("profile '%1%' changed while we were busy; restarting", profile); return false; } debug("switching to new user environment"); Path generation = createGeneration(*store2, profile, topLevelOut); switchLink(profile, generation); } return true; } }
6,423
C++
.cc
145
37.144828
139
0.670242
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,056
nix-env.cc
NixOS_nix/src/nix-env/nix-env.cc
#include "users.hh" #include "attr-path.hh" #include "common-eval-args.hh" #include "derivations.hh" #include "eval.hh" #include "get-drvs.hh" #include "globals.hh" #include "names.hh" #include "profiles.hh" #include "path-with-outputs.hh" #include "shared.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "user-env.hh" #include "value-to-json.hh" #include "xml-writer.hh" #include "legacy.hh" #include "eval-settings.hh" // for defexpr #include "terminal.hh" #include <cerrno> #include <ctime> #include <algorithm> #include <iostream> #include <sstream> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <nlohmann/json.hpp> using namespace nix; using std::cout; typedef enum { srcNixExprDrvs, srcNixExprs, srcStorePaths, srcProfile, srcAttrPath, srcUnknown } InstallSourceType; struct InstallSourceInfo { InstallSourceType type; std::shared_ptr<SourcePath> nixExprPath; /* for srcNixExprDrvs, srcNixExprs */ Path profile; /* for srcProfile */ std::string systemFilter; /* for srcNixExprDrvs */ Bindings * autoArgs; }; struct Globals { InstallSourceInfo instSource; Path profile; std::shared_ptr<EvalState> state; bool dryRun; bool preserveInstalled; bool removeAll; std::string forceName; bool prebuiltOnly; }; typedef void (* Operation) (Globals & globals, Strings opFlags, Strings opArgs); static std::string needArg(Strings::iterator & i, Strings & args, const std::string & arg) { if (i == args.end()) throw UsageError("'%1%' requires an argument", arg); return *i++; } static bool parseInstallSourceOptions(Globals & globals, Strings::iterator & i, Strings & args, const std::string & arg) { if (arg == "--from-expression" || arg == "-E") globals.instSource.type = srcNixExprs; else if (arg == "--from-profile") { globals.instSource.type = srcProfile; globals.instSource.profile = needArg(i, args, arg); } else if (arg == "--attr" || arg == "-A") globals.instSource.type = srcAttrPath; else return false; return true; } static bool isNixExpr(const 
SourcePath & path, struct SourceAccessor::Stat & st) { return st.type == SourceAccessor::tRegular || (st.type == SourceAccessor::tDirectory && (path / "default.nix").resolveSymlinks().pathExists()); } static constexpr size_t maxAttrs = 1024; static void getAllExprs(EvalState & state, const SourcePath & path, StringSet & seen, BindingsBuilder & attrs) { StringSet namesSorted; for (auto & [name, _] : path.resolveSymlinks().readDirectory()) namesSorted.insert(name); for (auto & i : namesSorted) { /* Ignore the manifest.nix used by profiles. This is necessary to prevent it from showing up in channels (which are implemented using profiles). */ if (i == "manifest.nix") continue; auto path2 = (path / i).resolveSymlinks(); SourceAccessor::Stat st; try { st = path2.lstat(); } catch (Error &) { continue; // ignore dangling symlinks in ~/.nix-defexpr } if (isNixExpr(path2, st) && (st.type != SourceAccessor::tRegular || hasSuffix(path2.baseName(), ".nix"))) { /* Strip off the `.nix' filename suffix (if applicable), otherwise the attribute cannot be selected with the `-A' option. Useful if you want to stick a Nix expression directly in ~/.nix-defexpr. */ std::string attrName = i; if (hasSuffix(attrName, ".nix")) attrName = std::string(attrName, 0, attrName.size() - 4); if (!seen.insert(attrName).second) { std::string suggestionMessage = ""; if (path2.path.abs().find("channels") != std::string::npos && path.path.abs().find("channels") != std::string::npos) suggestionMessage = fmt("\nsuggestion: remove '%s' from either the root channels or the user channels", attrName); printError("warning: name collision in input Nix expressions, skipping '%1%'" "%2%", path2, suggestionMessage); continue; } /* Load the expression on demand. 
*/ auto vArg = state.allocValue(); vArg->mkPath(path2); if (seen.size() == maxAttrs) throw Error("too many Nix expressions in directory '%1%'", path); attrs.alloc(attrName).mkApp(&state.getBuiltin("import"), vArg); } else if (st.type == SourceAccessor::tDirectory) /* `path2' is a directory (with no default.nix in it); recurse into it. */ getAllExprs(state, path2, seen, attrs); } } static void loadSourceExpr(EvalState & state, const SourcePath & path, Value & v) { auto st = path.resolveSymlinks().lstat(); if (isNixExpr(path, st)) state.evalFile(path, v); /* The path is a directory. Put the Nix expressions in the directory in a set, with the file name of each expression as the attribute name. Recurse into subdirectories (but keep the set flat, not nested, to make it easier for a user to have a ~/.nix-defexpr directory that includes some system-wide directory). */ else if (st.type == SourceAccessor::tDirectory) { auto attrs = state.buildBindings(maxAttrs); attrs.insert(state.symbols.create("_combineChannels"), &state.vEmptyList); StringSet seen; getAllExprs(state, path, seen, attrs); v.mkAttrs(attrs); } else throw Error("path '%s' is not a directory or a Nix expression", path); } static void loadDerivations(EvalState & state, const SourcePath & nixExprPath, std::string systemFilter, Bindings & autoArgs, const std::string & pathPrefix, PackageInfos & elems) { Value vRoot; loadSourceExpr(state, nixExprPath, vRoot); Value & v(*findAlongAttrPath(state, pathPrefix, autoArgs, vRoot).first); getDerivations(state, v, pathPrefix, autoArgs, elems, true); /* Filter out all derivations not applicable to the current system. 
*/ for (PackageInfos::iterator i = elems.begin(), j; i != elems.end(); i = j) { j = i; j++; if (systemFilter != "*" && i->querySystem() != systemFilter) elems.erase(i); } } static NixInt getPriority(EvalState & state, PackageInfo & drv) { return drv.queryMetaInt("priority", NixInt(0)); } static std::strong_ordering comparePriorities(EvalState & state, PackageInfo & drv1, PackageInfo & drv2) { return getPriority(state, drv2) <=> getPriority(state, drv1); } // FIXME: this function is rather slow since it checks a single path // at a time. static bool isPrebuilt(EvalState & state, PackageInfo & elem) { auto path = elem.queryOutPath(); if (state.store->isValidPath(path)) return true; return state.store->querySubstitutablePaths({path}).count(path); } static void checkSelectorUse(DrvNames & selectors) { /* Check that all selectors have been used. */ for (auto & i : selectors) if (i.hits == 0 && i.fullName != "*") throw Error("selector '%1%' matches no derivations", i.fullName); } namespace { std::set<std::string> searchByPrefix(const PackageInfos & allElems, std::string_view prefix) { constexpr std::size_t maxResults = 3; std::set<std::string> result; for (const auto & packageInfo : allElems) { const auto drvName = DrvName { packageInfo.queryName() }; if (hasPrefix(drvName.name, prefix)) { result.emplace(drvName.name); if (result.size() >= maxResults) { break; } } } return result; } struct Match { PackageInfo packageInfo; std::size_t index; Match(PackageInfo packageInfo_, std::size_t index_) : packageInfo{std::move(packageInfo_)} , index{index_} {} }; /* If a selector matches multiple derivations with the same name, pick the one matching the current system. If there are still multiple derivations, pick the one with the highest priority. If there are still multiple derivations, pick the one with the highest version. Finally, if there are still multiple derivations, arbitrarily pick the first one. 
*/ std::vector<Match> pickNewestOnly(EvalState & state, std::vector<Match> matches) { /* Map from package names to derivations. */ std::map<std::string, Match> newest; StringSet multiple; for (auto & match : matches) { auto & oneDrv = match.packageInfo; const auto drvName = DrvName { oneDrv.queryName() }; std::strong_ordering comparison = std::strong_ordering::greater; const auto itOther = newest.find(drvName.name); if (itOther != newest.end()) { auto & newestDrv = itOther->second.packageInfo; comparison = oneDrv.querySystem() == newestDrv.querySystem() ? std::strong_ordering::equal : oneDrv.querySystem() == settings.thisSystem ? std::strong_ordering::greater : newestDrv.querySystem() == settings.thisSystem ? std::strong_ordering::less : std::strong_ordering::equal; if (comparison == 0) comparison = comparePriorities(state, oneDrv, newestDrv); if (comparison == 0) comparison = compareVersions(drvName.version, DrvName { newestDrv.queryName() }.version); } if (comparison > 0) { newest.erase(drvName.name); newest.emplace(drvName.name, match); multiple.erase(drvName.fullName); } else if (comparison == 0) { multiple.insert(drvName.fullName); } } matches.clear(); for (auto & [name, match] : newest) { if (multiple.find(name) != multiple.end()) warn( "there are multiple derivations named '%1%'; using the first one", name); matches.push_back(match); } return matches; } } // end namespace static PackageInfos filterBySelector(EvalState & state, const PackageInfos & allElems, const Strings & args, bool newestOnly) { DrvNames selectors = drvNamesFromArgs(args); if (selectors.empty()) selectors.emplace_back("*"); PackageInfos elems; std::set<std::size_t> done; for (auto & selector : selectors) { std::vector<Match> matches; for (const auto & [index, packageInfo] : enumerate(allElems)) { const auto drvName = DrvName { packageInfo.queryName() }; if (selector.matches(drvName)) { ++selector.hits; matches.emplace_back(packageInfo, index); } } if (newestOnly) { matches = 
pickNewestOnly(state, std::move(matches)); } /* Insert only those elements in the final list that we haven't inserted before. */ for (auto & match : matches) if (done.insert(match.index).second) elems.push_back(match.packageInfo); if (selector.hits == 0 && selector.fullName != "*") { const auto prefixHits = searchByPrefix(allElems, selector.name); if (prefixHits.empty()) { throw Error("selector '%1%' matches no derivations", selector.fullName); } else { std::string suggestionMessage = ", maybe you meant:"; for (const auto & drvName : prefixHits) { suggestionMessage += fmt("\n%s", drvName); } throw Error("selector '%1%' matches no derivations" + suggestionMessage, selector.fullName); } } } return elems; } static bool isPath(std::string_view s) { return s.find('/') != std::string_view::npos; } static void queryInstSources(EvalState & state, InstallSourceInfo & instSource, const Strings & args, PackageInfos & elems, bool newestOnly) { InstallSourceType type = instSource.type; if (type == srcUnknown && args.size() > 0 && isPath(args.front())) type = srcStorePaths; switch (type) { /* Get the available user environment elements from the derivations specified in a Nix expression, including only those with names matching any of the names in `args'. */ case srcUnknown: case srcNixExprDrvs: { /* Load the derivations from the (default or specified) Nix expression. */ PackageInfos allElems; loadDerivations(state, *instSource.nixExprPath, instSource.systemFilter, *instSource.autoArgs, "", allElems); elems = filterBySelector(state, allElems, args, newestOnly); break; } /* Get the available user environment elements from the Nix expressions specified on the command line; these should be functions that take the default Nix expression file as argument, e.g., if the file is `./foo.nix', then the argument `x: x.bar' is equivalent to `(x: x.bar) (import ./foo.nix)' = `(import ./foo.nix).bar'. 
*/ case srcNixExprs: { Value vArg; loadSourceExpr(state, *instSource.nixExprPath, vArg); for (auto & i : args) { Expr * eFun = state.parseExprFromString(i, state.rootPath(".")); Value vFun, vTmp; state.eval(eFun, vFun); vTmp.mkApp(&vFun, &vArg); getDerivations(state, vTmp, "", *instSource.autoArgs, elems, true); } break; } /* The available user environment elements are specified as a list of store paths (which may or may not be derivations). */ case srcStorePaths: { for (auto & i : args) { auto path = state.store->followLinksToStorePath(i); std::string name(path.name()); PackageInfo elem(state, "", nullptr); elem.setName(name); if (path.isDerivation()) { elem.setDrvPath(path); auto outputs = state.store->queryDerivationOutputMap(path); elem.setOutPath(outputs.at("out")); if (name.size() >= drvExtension.size() && std::string(name, name.size() - drvExtension.size()) == drvExtension) name = name.substr(0, name.size() - drvExtension.size()); } else elem.setOutPath(path); elems.push_back(elem); } break; } /* Get the available user environment elements from another user environment. These are then filtered as in the `srcNixExprDrvs' case. 
*/ case srcProfile: { elems = filterBySelector(state, queryInstalled(state, instSource.profile), args, newestOnly); break; } case srcAttrPath: { Value vRoot; loadSourceExpr(state, *instSource.nixExprPath, vRoot); for (auto & i : args) { Value & v(*findAlongAttrPath(state, i, *instSource.autoArgs, vRoot).first); getDerivations(state, v, "", *instSource.autoArgs, elems, true); } break; } } } static void printMissing(EvalState & state, PackageInfos & elems) { std::vector<DerivedPath> targets; for (auto & i : elems) if (auto drvPath = i.queryDrvPath()) targets.emplace_back(DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), .outputs = OutputsSpec::All { }, }); else targets.emplace_back(DerivedPath::Opaque{ .path = i.queryOutPath(), }); printMissing(state.store, targets); } static bool keep(PackageInfo & drv) { return drv.queryMetaBool("keep", false); } static void installDerivations(Globals & globals, const Strings & args, const Path & profile) { debug("installing derivations"); /* Get the set of user environment elements to be installed. */ PackageInfos newElems, newElemsTmp; queryInstSources(*globals.state, globals.instSource, args, newElemsTmp, true); /* If --prebuilt-only is given, filter out source-only packages. */ for (auto & i : newElemsTmp) if (!globals.prebuiltOnly || isPrebuilt(*globals.state, i)) newElems.push_back(i); StringSet newNames; for (auto & i : newElems) { /* `forceName' is a hack to get package names right in some one-click installs, namely those where the name used in the path is not the one we want (e.g., `java-front' versus `java-front-0.9pre15899'). */ if (globals.forceName != "") i.setName(globals.forceName); newNames.insert(DrvName(i.queryName()).name); } while (true) { auto lockToken = optimisticLockProfile(profile); PackageInfos allElems(newElems); /* Add in the already installed derivations, unless they have the same name as a to-be-installed element. 
*/ if (!globals.removeAll) { PackageInfos installedElems = queryInstalled(*globals.state, profile); for (auto & i : installedElems) { DrvName drvName(i.queryName()); if (!globals.preserveInstalled && newNames.find(drvName.name) != newNames.end() && !keep(i)) printInfo("replacing old '%s'", i.queryName()); else allElems.push_back(i); } for (auto & i : newElems) printInfo("installing '%s'", i.queryName()); } printMissing(*globals.state, newElems); if (globals.dryRun) return; if (createUserEnv(*globals.state, allElems, profile, settings.envKeepDerivations, lockToken)) break; } } static void opInstall(Globals & globals, Strings opFlags, Strings opArgs) { for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) { auto arg = *i++; if (parseInstallSourceOptions(globals, i, opFlags, arg)) ; else if (arg == "--preserve-installed" || arg == "-P") globals.preserveInstalled = true; else if (arg == "--remove-all" || arg == "-r") globals.removeAll = true; else throw UsageError("unknown flag '%1%'", arg); } installDerivations(globals, opArgs, globals.profile); } typedef enum { utLt, utLeq, utEq, utAlways } UpgradeType; static void upgradeDerivations(Globals & globals, const Strings & args, UpgradeType upgradeType) { debug("upgrading derivations"); /* Upgrade works as follows: we take all currently installed derivations, and for any derivation matching any selector, look for a derivation in the input Nix expression that has the same name and a higher version number. */ while (true) { auto lockToken = optimisticLockProfile(globals.profile); PackageInfos installedElems = queryInstalled(*globals.state, globals.profile); /* Fetch all derivations from the input file. */ PackageInfos availElems; queryInstSources(*globals.state, globals.instSource, args, availElems, false); /* Go through all installed derivations. 
*/ PackageInfos newElems; for (auto & i : installedElems) { DrvName drvName(i.queryName()); try { if (keep(i)) { newElems.push_back(i); continue; } /* Find the derivation in the input Nix expression with the same name that satisfies the version constraints specified by upgradeType. If there are multiple matches, take the one with the highest priority. If there are still multiple matches, take the one with the highest version. Do not upgrade if it would decrease the priority. */ PackageInfos::iterator bestElem = availElems.end(); std::string bestVersion; for (auto j = availElems.begin(); j != availElems.end(); ++j) { if (comparePriorities(*globals.state, i, *j) > 0) continue; DrvName newName(j->queryName()); if (newName.name == drvName.name) { std::strong_ordering d = compareVersions(drvName.version, newName.version); if ((upgradeType == utLt && d < 0) || (upgradeType == utLeq && d <= 0) || (upgradeType == utEq && d == 0) || upgradeType == utAlways) { std::strong_ordering d2 = std::strong_ordering::less; if (bestElem != availElems.end()) { d2 = comparePriorities(*globals.state, *bestElem, *j); if (d2 == 0) d2 = compareVersions(bestVersion, newName.version); } if (d2 < 0 && (!globals.prebuiltOnly || isPrebuilt(*globals.state, *j))) { bestElem = j; bestVersion = newName.version; } } } } if (bestElem != availElems.end() && i.queryOutPath() != bestElem->queryOutPath()) { const char * action = compareVersions(drvName.version, bestVersion) <= 0 ? 
"upgrading" : "downgrading"; printInfo("%1% '%2%' to '%3%'", action, i.queryName(), bestElem->queryName()); newElems.push_back(*bestElem); } else newElems.push_back(i); } catch (Error & e) { e.addTrace(nullptr, "while trying to find an upgrade for '%s'", i.queryName()); throw; } } printMissing(*globals.state, newElems); if (globals.dryRun) return; if (createUserEnv(*globals.state, newElems, globals.profile, settings.envKeepDerivations, lockToken)) break; } } static void opUpgrade(Globals & globals, Strings opFlags, Strings opArgs) { UpgradeType upgradeType = utLt; for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) { std::string arg = *i++; if (parseInstallSourceOptions(globals, i, opFlags, arg)) ; else if (arg == "--lt") upgradeType = utLt; else if (arg == "--leq") upgradeType = utLeq; else if (arg == "--eq") upgradeType = utEq; else if (arg == "--always") upgradeType = utAlways; else throw UsageError("unknown flag '%1%'", arg); } upgradeDerivations(globals, opArgs, upgradeType); } static void setMetaFlag(EvalState & state, PackageInfo & drv, const std::string & name, const std::string & value) { auto v = state.allocValue(); v->mkString(value); drv.setMeta(name, v); } static void opSetFlag(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); if (opArgs.size() < 2) throw UsageError("not enough arguments to '--set-flag'"); Strings::iterator arg = opArgs.begin(); std::string flagName = *arg++; std::string flagValue = *arg++; DrvNames selectors = drvNamesFromArgs(Strings(arg, opArgs.end())); while (true) { std::string lockToken = optimisticLockProfile(globals.profile); PackageInfos installedElems = queryInstalled(*globals.state, globals.profile); /* Update all matching derivations. 
*/ for (auto & i : installedElems) { DrvName drvName(i.queryName()); for (auto & j : selectors) if (j.matches(drvName)) { printInfo("setting flag on '%1%'", i.queryName()); j.hits++; setMetaFlag(*globals.state, i, flagName, flagValue); break; } } checkSelectorUse(selectors); /* Write the new user environment. */ if (createUserEnv(*globals.state, installedElems, globals.profile, settings.envKeepDerivations, lockToken)) break; } } static void opSet(Globals & globals, Strings opFlags, Strings opArgs) { auto store2 = globals.state->store.dynamic_pointer_cast<LocalFSStore>(); if (!store2) throw Error("--set is not supported for this Nix store"); for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) { std::string arg = *i++; if (parseInstallSourceOptions(globals, i, opFlags, arg)) ; else throw UsageError("unknown flag '%1%'", arg); } PackageInfos elems; queryInstSources(*globals.state, globals.instSource, opArgs, elems, true); if (elems.size() != 1) throw Error("--set requires exactly one derivation"); PackageInfo & drv(elems.front()); if (globals.forceName != "") drv.setName(globals.forceName); auto drvPath = drv.queryDrvPath(); std::vector<DerivedPath> paths { drvPath ? (DerivedPath) (DerivedPath::Built { .drvPath = makeConstantStorePathRef(*drvPath), .outputs = OutputsSpec::All { }, }) : (DerivedPath) (DerivedPath::Opaque { .path = drv.queryOutPath(), }), }; printMissing(globals.state->store, paths); if (globals.dryRun) return; globals.state->store->buildPaths(paths, globals.state->repair ? 
bmRepair : bmNormal); debug("switching to new user environment"); Path generation = createGeneration( *store2, globals.profile, drv.queryOutPath()); switchLink(globals.profile, generation); } static void uninstallDerivations(Globals & globals, Strings & selectors, Path & profile) { while (true) { auto lockToken = optimisticLockProfile(profile); PackageInfos workingElems = queryInstalled(*globals.state, profile); for (auto & selector : selectors) { PackageInfos::iterator split = workingElems.begin(); if (isPath(selector)) { StorePath selectorStorePath = globals.state->store->followLinksToStorePath(selector); split = std::partition( workingElems.begin(), workingElems.end(), [&selectorStorePath, globals](auto &elem) { return selectorStorePath != elem.queryOutPath(); } ); } else { DrvName selectorName(selector); split = std::partition( workingElems.begin(), workingElems.end(), [&selectorName](auto &elem){ DrvName elemName(elem.queryName()); return !selectorName.matches(elemName); } ); } if (split == workingElems.end()) warn("selector '%s' matched no installed derivations", selector); for (auto removedElem = split; removedElem != workingElems.end(); removedElem++) { printInfo("uninstalling '%s'", removedElem->queryName()); } workingElems.erase(split, workingElems.end()); } if (globals.dryRun) return; if (createUserEnv(*globals.state, workingElems, profile, settings.envKeepDerivations, lockToken)) break; } } static void opUninstall(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); uninstallDerivations(globals, opArgs, globals.profile); } static bool cmpChars(char a, char b) { return toupper(a) < toupper(b); } static bool cmpElemByName(const PackageInfo & a, const PackageInfo & b) { auto a_name = a.queryName(); auto b_name = b.queryName(); return lexicographical_compare( a_name.begin(), a_name.end(), b_name.begin(), b_name.end(), cmpChars); } typedef std::list<Strings> Table; void 
printTable(Table & table) { auto nrColumns = table.size() > 0 ? table.front().size() : 0; std::vector<size_t> widths; widths.resize(nrColumns); for (auto & i : table) { assert(i.size() == nrColumns); Strings::iterator j; size_t column; for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) if (j->size() > widths[column]) widths[column] = j->size(); } for (auto & i : table) { Strings::iterator j; size_t column; for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) { std::string s = *j; replace(s.begin(), s.end(), '\n', ' '); cout << s; if (column < nrColumns - 1) cout << std::string(widths[column] - s.size() + 2, ' '); } cout << std::endl; } } /* This function compares the version of an element against the versions in the given set of elements. `cvLess' means that only lower versions are in the set, `cvEqual' means that at most an equal version is in the set, and `cvGreater' means that there is at least one element with a higher version in the set. `cvUnavail' means that there are no elements with the same name in the set. 
*/ typedef enum { cvLess, cvEqual, cvGreater, cvUnavail } VersionDiff; static VersionDiff compareVersionAgainstSet( const PackageInfo & elem, const PackageInfos & elems, std::string & version) { DrvName name(elem.queryName()); VersionDiff diff = cvUnavail; version = "?"; for (auto & i : elems) { DrvName name2(i.queryName()); if (name.name == name2.name) { std::strong_ordering d = compareVersions(name.version, name2.version); if (d < 0) { diff = cvGreater; version = name2.version; } else if (diff != cvGreater && d == 0) { diff = cvEqual; version = name2.version; } else if (diff != cvGreater && diff != cvEqual && d > 0) { diff = cvLess; if (version == "" || compareVersions(version, name2.version) < 0) version = name2.version; } } } return diff; } static void queryJSON(Globals & globals, std::vector<PackageInfo> & elems, bool printOutPath, bool printDrvPath, bool printMeta) { using nlohmann::json; json topObj = json::object(); for (auto & i : elems) { try { if (i.hasFailed()) continue; auto drvName = DrvName(i.queryName()); json &pkgObj = topObj[i.attrPath]; pkgObj = { {"name", drvName.fullName}, {"pname", drvName.name}, {"version", drvName.version}, {"system", i.querySystem()}, {"outputName", i.queryOutputName()}, }; { PackageInfo::Outputs outputs = i.queryOutputs(printOutPath); json &outputObj = pkgObj["outputs"]; outputObj = json::object(); for (auto & j : outputs) { if (j.second) outputObj[j.first] = globals.state->store->printStorePath(*j.second); else outputObj[j.first] = nullptr; } } if (printDrvPath) { auto drvPath = i.queryDrvPath(); if (drvPath) pkgObj["drvPath"] = globals.state->store->printStorePath(*drvPath); } if (printMeta) { json &metaObj = pkgObj["meta"]; metaObj = json::object(); StringSet metaNames = i.queryMetaNames(); for (auto & j : metaNames) { Value * v = i.queryMeta(j); if (!v) { printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j); metaObj[j] = nullptr; } else { NixStringContext context; metaObj[j] = 
printValueAsJSON(*globals.state, true, *v, noPos, context); } } } } catch (AssertionError & e) { printMsg(lvlTalkative, "skipping derivation named '%1%' which gives an assertion failure", i.queryName()); } catch (Error & e) { e.addTrace(nullptr, "while querying the derivation named '%1%'", i.queryName()); throw; } } std::cout << topObj.dump(2); } static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) { auto & store { *globals.state->store }; Strings remaining; std::string attrPath; bool printStatus = false; bool printName = true; bool printAttrPath = false; bool printSystem = false; bool printDrvPath = false; bool printOutPath = false; bool printDescription = false; bool printMeta = false; bool compareVersions = false; bool xmlOutput = false; bool jsonOutput = false; enum { sInstalled, sAvailable } source = sInstalled; settings.readOnlyMode = true; /* makes evaluation a bit faster */ for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) { auto arg = *i++; if (arg == "--status" || arg == "-s") printStatus = true; else if (arg == "--no-name") printName = false; else if (arg == "--system") printSystem = true; else if (arg == "--description") printDescription = true; else if (arg == "--compare-versions" || arg == "-c") compareVersions = true; else if (arg == "--drv-path") printDrvPath = true; else if (arg == "--out-path") printOutPath = true; else if (arg == "--meta") printMeta = true; else if (arg == "--installed") source = sInstalled; else if (arg == "--available" || arg == "-a") source = sAvailable; else if (arg == "--xml") xmlOutput = true; else if (arg == "--json") jsonOutput = true; else if (arg == "--attr-path" || arg == "-P") printAttrPath = true; else if (arg == "--attr" || arg == "-A") attrPath = needArg(i, opFlags, arg); else throw UsageError("unknown flag '%1%'", arg); } if (printAttrPath && source != sAvailable) throw UsageError("--attr-path(-P) only works with --available"); /* Obtain derivation information from the specified 
source. */ PackageInfos availElems, installedElems; if (source == sInstalled || compareVersions || printStatus) installedElems = queryInstalled(*globals.state, globals.profile); if (source == sAvailable || compareVersions) loadDerivations(*globals.state, *globals.instSource.nixExprPath, globals.instSource.systemFilter, *globals.instSource.autoArgs, attrPath, availElems); PackageInfos elems_ = filterBySelector(*globals.state, source == sInstalled ? installedElems : availElems, opArgs, false); PackageInfos & otherElems(source == sInstalled ? availElems : installedElems); /* Sort them by name. */ /* !!! */ std::vector<PackageInfo> elems; for (auto & i : elems_) elems.push_back(i); sort(elems.begin(), elems.end(), cmpElemByName); /* We only need to know the installed paths when we are querying the status of the derivation. */ StorePathSet installed; /* installed paths */ if (printStatus) for (auto & i : installedElems) installed.insert(i.queryOutPath()); /* Query which paths have substitutes. */ StorePathSet validPaths; StorePathSet substitutablePaths; if (printStatus || globals.prebuiltOnly) { StorePathSet paths; for (auto & i : elems) try { paths.insert(i.queryOutPath()); } catch (AssertionError & e) { printMsg(lvlTalkative, "skipping derivation named '%s' which gives an assertion failure", i.queryName()); i.setFailed(); } validPaths = store.queryValidPaths(paths); substitutablePaths = store.querySubstitutablePaths(paths); } /* Print the desired columns, or XML output. */ if (jsonOutput) { queryJSON(globals, elems, printOutPath, printDrvPath, printMeta); cout << '\n'; return; } bool tty = isTTY(); RunPager pager; Table table; std::ostringstream dummy; XMLWriter xml(true, *(xmlOutput ? 
&cout : &dummy)); XMLOpenElement xmlRoot(xml, "items"); for (auto & i : elems) { try { if (i.hasFailed()) continue; //Activity act(*logger, lvlDebug, "outputting query result '%1%'", i.attrPath); if (globals.prebuiltOnly && !validPaths.count(i.queryOutPath()) && !substitutablePaths.count(i.queryOutPath())) continue; /* For table output. */ Strings columns; /* For XML output. */ XMLAttrs attrs; if (printStatus) { auto outPath = i.queryOutPath(); bool hasSubs = substitutablePaths.count(outPath); bool isInstalled = installed.count(outPath); bool isValid = validPaths.count(outPath); if (xmlOutput) { attrs["installed"] = isInstalled ? "1" : "0"; attrs["valid"] = isValid ? "1" : "0"; attrs["substitutable"] = hasSubs ? "1" : "0"; } else columns.push_back( (std::string) (isInstalled ? "I" : "-") + (isValid ? "P" : "-") + (hasSubs ? "S" : "-")); } if (xmlOutput) attrs["attrPath"] = i.attrPath; else if (printAttrPath) columns.push_back(i.attrPath); if (xmlOutput) { auto drvName = DrvName(i.queryName()); attrs["name"] = drvName.fullName; attrs["pname"] = drvName.name; attrs["version"] = drvName.version; } else if (printName) { columns.push_back(i.queryName()); } if (compareVersions) { /* Compare this element against the versions of the same named packages in either the set of available elements, or the set of installed elements. !!! This is O(N * M), should be O(N * lg M). 
*/ std::string version; VersionDiff diff = compareVersionAgainstSet(i, otherElems, version); char ch; switch (diff) { case cvLess: ch = '>'; break; case cvEqual: ch = '='; break; case cvGreater: ch = '<'; break; case cvUnavail: ch = '-'; break; default: unreachable(); } if (xmlOutput) { if (diff != cvUnavail) { attrs["versionDiff"] = ch; attrs["maxComparedVersion"] = version; } } else { auto column = (std::string) "" + ch + " " + version; if (diff == cvGreater && tty) column = ANSI_RED + column + ANSI_NORMAL; columns.push_back(column); } } if (xmlOutput) { if (i.querySystem() != "") attrs["system"] = i.querySystem(); } else if (printSystem) columns.push_back(i.querySystem()); if (printDrvPath) { auto drvPath = i.queryDrvPath(); if (xmlOutput) { if (drvPath) attrs["drvPath"] = store.printStorePath(*drvPath); } else columns.push_back(drvPath ? store.printStorePath(*drvPath) : "-"); } if (xmlOutput) attrs["outputName"] = i.queryOutputName(); if (printOutPath && !xmlOutput) { PackageInfo::Outputs outputs = i.queryOutputs(); std::string s; for (auto & j : outputs) { if (!s.empty()) s += ';'; if (j.first != "out") { s += j.first; s += "="; } s += store.printStorePath(*j.second); } columns.push_back(s); } if (printDescription) { auto descr = i.queryMetaString("description"); if (xmlOutput) { if (descr != "") attrs["description"] = descr; } else columns.push_back(descr); } if (xmlOutput) { XMLOpenElement item(xml, "item", attrs); PackageInfo::Outputs outputs = i.queryOutputs(printOutPath); for (auto & j : outputs) { XMLAttrs attrs2; attrs2["name"] = j.first; if (j.second) attrs2["path"] = store.printStorePath(*j.second); xml.writeEmptyElement("output", attrs2); } if (printMeta) { StringSet metaNames = i.queryMetaNames(); for (auto & j : metaNames) { XMLAttrs attrs2; attrs2["name"] = j; Value * v = i.queryMeta(j); if (!v) printError( "derivation '%s' has invalid meta attribute '%s'", i.queryName(), j); else { if (v->type() == nString) { attrs2["type"] = "string"; 
attrs2["value"] = v->c_str(); xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nInt) { attrs2["type"] = "int"; attrs2["value"] = fmt("%1%", v->integer()); xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nFloat) { attrs2["type"] = "float"; attrs2["value"] = fmt("%1%", v->fpoint()); xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nBool) { attrs2["type"] = "bool"; attrs2["value"] = v->boolean() ? "true" : "false"; xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nList) { attrs2["type"] = "strings"; XMLOpenElement m(xml, "meta", attrs2); for (auto elem : v->listItems()) { if (elem->type() != nString) continue; XMLAttrs attrs3; attrs3["value"] = elem->c_str(); xml.writeEmptyElement("string", attrs3); } } else if (v->type() == nAttrs) { attrs2["type"] = "strings"; XMLOpenElement m(xml, "meta", attrs2); for (auto & i : *v->attrs()) { if (i.value->type() != nString) continue; XMLAttrs attrs3; attrs3["type"] = globals.state->symbols[i.name]; attrs3["value"] = i.value->c_str(); xml.writeEmptyElement("string", attrs3); } } } } } } else table.push_back(columns); cout.flush(); } catch (AssertionError & e) { printMsg(lvlTalkative, "skipping derivation named '%1%' which gives an assertion failure", i.queryName()); } catch (Error & e) { e.addTrace(nullptr, "while querying the derivation named '%1%'", i.queryName()); throw; } } if (!xmlOutput) printTable(table); } static void opSwitchProfile(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); if (opArgs.size() != 1) throw UsageError("exactly one argument expected"); Path profile = absPath(opArgs.front()); Path profileLink = settings.useXDGBaseDirectories ? 
createNixStateDir() + "/profile" : getHome() + "/.nix-profile"; switchLink(profileLink, profile); } static void opSwitchGeneration(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); if (opArgs.size() != 1) throw UsageError("exactly one argument expected"); if (auto dstGen = string2Int<GenerationNumber>(opArgs.front())) switchGeneration(globals.profile, *dstGen, globals.dryRun); else throw UsageError("expected a generation number"); } static void opRollback(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); if (opArgs.size() != 0) throw UsageError("no arguments expected"); switchGeneration(globals.profile, {}, globals.dryRun); } static void opListGenerations(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); if (opArgs.size() != 0) throw UsageError("no arguments expected"); PathLocks lock; lockProfile(lock, globals.profile); auto [gens, curGen] = findGenerations(globals.profile); RunPager pager; for (auto & i : gens) { #ifdef _WIN32 // TODO portable wrapper in libutil tm * tp = localtime(&i.creationTime); if (!tp) throw Error("cannot convert time"); auto & t = *tp; #else tm t; if (!localtime_r(&i.creationTime, &t)) throw Error("cannot convert time"); #endif logger->cout("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||", i.number, t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, i.number == curGen ? 
"(current)" : ""); } } static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opArgs) { if (opFlags.size() > 0) throw UsageError("unknown flag '%1%'", opFlags.front()); if (opArgs.size() == 1 && opArgs.front() == "old") { deleteOldGenerations(globals.profile, globals.dryRun); } else if (opArgs.size() == 1 && opArgs.front().find('d') != std::string::npos) { auto t = parseOlderThanTimeSpec(opArgs.front()); deleteGenerationsOlderThan(globals.profile, t, globals.dryRun); } else if (opArgs.size() == 1 && opArgs.front().find('+') != std::string::npos) { if (opArgs.front().size() < 2) throw Error("invalid number of generations '%1%'", opArgs.front()); auto str_max = opArgs.front().substr(1); auto max = string2Int<GenerationNumber>(str_max); if (!max) throw Error("invalid number of generations to keep '%1%'", opArgs.front()); deleteGenerationsGreaterThan(globals.profile, *max, globals.dryRun); } else { std::set<GenerationNumber> gens; for (auto & i : opArgs) { if (auto n = string2Int<GenerationNumber>(i)) gens.insert(*n); else throw UsageError("invalid generation number '%1%'", i); } deleteGenerations(globals.profile, gens, globals.dryRun); } } static void opVersion(Globals & globals, Strings opFlags, Strings opArgs) { printVersion("nix-env"); } static int main_nix_env(int argc, char * * argv) { { Strings opFlags, opArgs; Operation op = 0; std::string opName; bool showHelp = false; std::string file; Globals globals; globals.instSource.type = srcUnknown; globals.instSource.systemFilter = "*"; Path nixExprPath = getNixDefExpr(); if (!pathExists(nixExprPath)) { try { createDirs(nixExprPath); replaceSymlink( defaultChannelsDir(), nixExprPath + "/channels"); if (!isRootUser()) replaceSymlink( rootChannelsDir(), nixExprPath + "/channels_root"); } catch (Error &) { } } globals.dryRun = false; globals.preserveInstalled = false; globals.removeAll = false; globals.prebuiltOnly = false; struct MyArgs : LegacyArgs, MixEvalArgs { using LegacyArgs::LegacyArgs; }; 
MyArgs myArgs(std::string(baseNameOf(argv[0])), [&](Strings::iterator & arg, const Strings::iterator & end) { Operation oldOp = op; if (*arg == "--help") showHelp = true; else if (*arg == "--version") op = opVersion; else if (*arg == "--install" || *arg == "-i") { op = opInstall; opName = "-install"; } else if (*arg == "--force-name") // undocumented flag for nix-install-package globals.forceName = getArg(*arg, arg, end); else if (*arg == "--uninstall" || *arg == "-e") { op = opUninstall; opName = "-uninstall"; } else if (*arg == "--upgrade" || *arg == "-u") { op = opUpgrade; opName = "-upgrade"; } else if (*arg == "--set-flag") { op = opSetFlag; opName = arg->substr(1); } else if (*arg == "--set") { op = opSet; opName = arg->substr(1); } else if (*arg == "--query" || *arg == "-q") { op = opQuery; opName = "-query"; } else if (*arg == "--profile" || *arg == "-p") globals.profile = absPath(getArg(*arg, arg, end)); else if (*arg == "--file" || *arg == "-f") file = getArg(*arg, arg, end); else if (*arg == "--switch-profile" || *arg == "-S") { op = opSwitchProfile; opName = "-switch-profile"; } else if (*arg == "--switch-generation" || *arg == "-G") { op = opSwitchGeneration; opName = "-switch-generation"; } else if (*arg == "--rollback") { op = opRollback; opName = arg->substr(1); } else if (*arg == "--list-generations") { op = opListGenerations; opName = arg->substr(1); } else if (*arg == "--delete-generations") { op = opDeleteGenerations; opName = arg->substr(1); } else if (*arg == "--dry-run") { printInfo("(dry run; not doing anything)"); globals.dryRun = true; } else if (*arg == "--system-filter") globals.instSource.systemFilter = getArg(*arg, arg, end); else if (*arg == "--prebuilt-only" || *arg == "-b") globals.prebuiltOnly = true; else if (*arg != "" && arg->at(0) == '-') { opFlags.push_back(*arg); /* FIXME: hacky */ if (*arg == "--from-profile" || (op == opQuery && (*arg == "--attr" || *arg == "-A"))) opFlags.push_back(getArg(*arg, arg, end)); } else 
opArgs.push_back(*arg); if (oldOp && oldOp != op) throw UsageError("only one operation may be specified"); return true; }); myArgs.parseCmdline(argvToStrings(argc, argv)); if (showHelp) showManPage("nix-env" + opName); if (!op) throw UsageError("no operation specified"); auto store = openStore(); globals.state = std::shared_ptr<EvalState>(new EvalState(myArgs.lookupPath, store, fetchSettings, evalSettings)); globals.state->repair = myArgs.repair; globals.instSource.nixExprPath = std::make_shared<SourcePath>( file != "" ? lookupFileArg(*globals.state, file) : globals.state->rootPath(CanonPath(nixExprPath))); globals.instSource.autoArgs = myArgs.getAutoArgs(*globals.state); if (globals.profile == "") globals.profile = getEnv("NIX_PROFILE").value_or(""); if (globals.profile == "") globals.profile = getDefaultProfile(); op(globals, std::move(opFlags), std::move(opArgs)); globals.state->maybePrintStats(); return 0; } } static RegisterLegacyCommand r_nix_env("nix-env", main_nix_env);
54,502
C++
.cc
1,279
31.326818
134
0.553239
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,057
lockfile.cc
NixOS_nix/src/libflake/flake/lockfile.cc
#include <unordered_set>
#include "lockfile.hh"
#include "store-api.hh"

#include <algorithm>
#include <iomanip>
#include <iterator>
#include <nlohmann/json.hpp>

#include "strings.hh"

namespace nix::flake {

/* Parse the flake reference stored under JSON attribute 'attr'. If 'info'
   is non-null, attributes found under that key are merged in as well
   (legacy lock file layout). Throws if 'attr' is missing. */
static FlakeRef getFlakeRef(
    const fetchers::Settings & fetchSettings,
    const nlohmann::json & json,
    const char * attr,
    const char * info)
{
    auto i = json.find(attr);
    if (i != json.end()) {
        auto attrs = fetchers::jsonToAttrs(*i);
        // FIXME: remove when we drop support for version 5.
        if (info) {
            auto j = json.find(info);
            if (j != json.end()) {
                for (auto k : fetchers::jsonToAttrs(*j))
                    attrs.insert_or_assign(k.first, k.second);
            }
        }
        return FlakeRef::fromAttrs(fetchSettings, attrs);
    }
    throw Error("attribute '%s' missing in lock file", attr);
}

/* Construct a locked node from its JSON representation. The "locked" ref
   must actually be locked; "flake" defaults to true when absent. */
LockedNode::LockedNode(
    const fetchers::Settings & fetchSettings,
    const nlohmann::json & json)
    : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info"
    , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr))
    , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{
    if (!lockedRef.input.isLocked())
        throw Error("lock file contains unlocked input '%s'",
            fetchers::attrsToJSON(lockedRef.input.toAttrs()));

    // For backward compatibility, lock file entries are implicitly final.
    assert(!lockedRef.input.attrs.contains("__final"));
    lockedRef.input.attrs.insert_or_assign("__final", Explicit<bool>(true));
}

StorePath LockedNode::computeStorePath(Store & store) const
{
    return lockedRef.input.computeStorePath(store);
}

/* Walk 'path' from 'root', resolving 'follows' edges recursively.
   'visited' records the follows-paths seen on the current chain so that a
   cycle of follows edges is detected and reported instead of recursing
   forever. Returns null if the path does not resolve to a node. */
static std::shared_ptr<Node> doFind(const ref<Node> & root, const InputPath & path, std::vector<InputPath> & visited)
{
    auto pos = root;

    auto found = std::find(visited.cbegin(), visited.cend(), path);

    if (found != visited.end()) {
        std::vector<std::string> cycle;
        std::transform(found, visited.cend(), std::back_inserter(cycle), printInputPath);
        cycle.push_back(printInputPath(path));
        throw Error("follow cycle detected: [%s]", concatStringsSep(" -> ", cycle));
    }
    visited.push_back(path);

    for (auto & elem : path) {
        if (auto i = get(pos->inputs, elem)) {
            // An edge is either a direct child node (index 0) or a
            // 'follows' redirection (index 1).
            if (auto node = std::get_if<0>(&*i))
                pos = *node;
            else if (auto follows = std::get_if<1>(&*i)) {
                if (auto p = doFind(root, *follows, visited))
                    pos = ref(p);
                else
                    return {};
            }
        } else
            return {};
    }

    return pos;
}

/* Resolve an input path (e.g. "nixpkgs/home-manager") to its node, or
   null if it doesn't exist. */
std::shared_ptr<Node> LockFile::findInput(const InputPath & path)
{
    std::vector<InputPath> visited;
    return doFind(root, path, visited);
}

/* Parse a lock file from its serialized JSON 'contents'. 'path' is used
   only for error messages. Supports versions 5 through 7. */
LockFile::LockFile(
    const fetchers::Settings & fetchSettings,
    std::string_view contents, std::string_view path)
{
    auto json = nlohmann::json::parse(contents);

    auto version = json.value("version", 0);
    if (version < 5 || version > 7)
        throw Error("lock file '%s' has unsupported version %d", path, version);

    // Maps JSON node keys to already-constructed nodes so that shared
    // nodes are instantiated only once.
    std::map<std::string, ref<Node>> nodeMap;

    std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;

    getInputs = [&](Node & node, const nlohmann::json & jsonNode)
    {
        if (jsonNode.find("inputs") == jsonNode.end()) return;
        for (auto & i : jsonNode["inputs"].items()) {
            if (i.value().is_array()) { // FIXME: remove, obsolete
                // An array value denotes a 'follows' path.
                InputPath path;
                for (auto & j : i.value())
                    path.push_back(j);
                node.inputs.insert_or_assign(i.key(), path);
            } else {
                // A string value names another node in "nodes".
                std::string inputKey = i.value();
                auto k = nodeMap.find(inputKey);
                if (k == nodeMap.end()) {
                    auto & nodes = json["nodes"];
                    auto jsonNode2 = nodes.find(inputKey);
                    if (jsonNode2 == nodes.end())
                        throw Error("lock file references missing node '%s'", inputKey);
                    auto input = make_ref<LockedNode>(fetchSettings, *jsonNode2);
                    k = nodeMap.insert_or_assign(inputKey, input).first;
                    getInputs(*input, *jsonNode2);
                }
                if (auto child = k->second.dynamic_pointer_cast<LockedNode>())
                    node.inputs.insert_or_assign(i.key(), ref(child));
                else
                    // FIXME: replace by follows node
                    throw Error("lock file contains cycle to root node");
            }
        }
    };

    std::string rootKey = json["root"];
    nodeMap.insert_or_assign(rootKey, root);
    getInputs(*root, json["nodes"][rootKey]);

    // FIXME: check that there are no cycles in version >= 7. Cycles
    // between inputs are only possible using 'follows' indirections.
    // Once we drop support for version <= 6, we can simplify the code
    // a bit since we don't need to worry about cycles.
}

/* Serialize the lock file to JSON (always written as version 7).
   Also returns the mapping from nodes to the JSON keys assigned to
   them. */
std::pair<nlohmann::json, LockFile::KeyMap> LockFile::toJSON() const
{
    nlohmann::json nodes;
    KeyMap nodeKeys;
    std::unordered_set<std::string> keys;

    std::function<std::string(const std::string & key, ref<const Node> node)> dumpNode;

    dumpNode = [&](std::string key, ref<const Node> node) -> std::string
    {
        // Already dumped? Reuse the key assigned earlier.
        auto k = nodeKeys.find(node);
        if (k != nodeKeys.end())
            return k->second;

        // Deduplicate keys: if 'key' is taken, try "key_2", "key_3", ...
        if (!keys.insert(key).second) {
            for (int n = 2; ; ++n) {
                auto k = fmt("%s_%d", key, n);
                if (keys.insert(k).second) {
                    key = k;
                    break;
                }
            }
        }

        nodeKeys.insert_or_assign(node, key);

        auto n = nlohmann::json::object();

        if (!node->inputs.empty()) {
            auto inputs = nlohmann::json::object();
            for (auto & i : node->inputs) {
                if (auto child = std::get_if<0>(&i.second)) {
                    inputs[i.first] = dumpNode(i.first, *child);
                } else if (auto follows = std::get_if<1>(&i.second)) {
                    // 'follows' edges are serialized as arrays.
                    auto arr = nlohmann::json::array();
                    for (auto & x : *follows)
                        arr.push_back(x);
                    inputs[i.first] = std::move(arr);
                }
            }
            n["inputs"] = std::move(inputs);
        }

        if (auto lockedNode = node.dynamic_pointer_cast<const LockedNode>()) {
            n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs());
            n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs());
            /* For backward compatibility, omit the "__final" attribute.
               We never allow non-final inputs in lock files anyway. */
            assert(lockedNode->lockedRef.input.isFinal());
            n["locked"].erase("__final");
            if (!lockedNode->isFlake)
                n["flake"] = false;
        }

        nodes[key] = std::move(n);

        return key;
    };

    nlohmann::json json;
    json["version"] = 7;
    json["root"] = dumpNode("root", root);
    json["nodes"] = std::move(nodes);

    return {json, std::move(nodeKeys)};
}

/* Serialize to a 2-space-indented JSON string plus the node-to-key map. */
std::pair<std::string, LockFile::KeyMap> LockFile::to_string() const
{
    auto [json, nodeKeys] = toJSON();
    return {json.dump(2), std::move(nodeKeys)};
}

std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
{
    stream << lockFile.toJSON().first.dump(2);
    return stream;
}

/* Return the flake ref of the first non-root input that is not locked or
   not final, or nullopt if the lock file is fully locked. */
std::optional<FlakeRef> LockFile::isUnlocked() const
{
    std::set<ref<const Node>> nodes;

    std::function<void(ref<const Node> node)> visit;

    // Collect every reachable node exactly once.
    visit = [&](ref<const Node> node)
    {
        if (!nodes.insert(node).second) return;
        for (auto & i : node->inputs)
            if (auto child = std::get_if<0>(&i.second))
                visit(*child);
    };

    visit(root);

    for (auto & i : nodes) {
        if (i == ref<const Node>(root)) continue;
        auto node = i.dynamic_pointer_cast<const LockedNode>();
        if (node
            && (!node->lockedRef.input.isLocked()
                || !node->lockedRef.input.isFinal()))
            return node->lockedRef;
    }

    return {};
}

bool LockFile::operator ==(const LockFile & other) const
{
    // FIXME: slow
    return toJSON().first == other.toJSON().first;
}

/* Split a '/'-separated input path string into its elements, validating
   each against flakeIdRegex. */
InputPath parseInputPath(std::string_view s)
{
    InputPath path;

    for (auto & elem : tokenizeString<std::vector<std::string>>(s, "/")) {
        if (!std::regex_match(elem, flakeIdRegex))
            throw UsageError("invalid flake input path element '%s'", elem);
        path.push_back(elem);
    }

    return path;
}

/* Flatten the node graph into a map from full input paths to edges.
   Each node is visited at most once, so shared nodes contribute their
   inputs only under the first path that reaches them. */
std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
{
    std::set<ref<Node>> done;
    std::map<InputPath, Node::Edge> res;

    std::function<void(const InputPath & prefix, ref<Node> node)> recurse;

    recurse = [&](const InputPath & prefix, ref<Node> node)
    {
        if (!done.insert(node).second) return;

        for (auto &[id, input] : node->inputs) {
            auto inputPath(prefix);
            inputPath.push_back(id);
            res.emplace(inputPath, input);
            if (auto child = std::get_if<0>(&input))
                recurse(inputPath, *child);
        }
    };

    recurse({}, root);

    return res;
}

/* Human-readable description of a flake ref, with its last-modified date
   (UTC) appended when available. */
static std::string describe(const FlakeRef & flakeRef)
{
    auto s = fmt("'%s'", flakeRef.to_string());

    if (auto lastModified = flakeRef.input.getLastModified())
        s += fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%Y-%m-%d"));

    return s;
}

std::ostream & operator <<(std::ostream & stream, const Node::Edge & edge)
{
    if (auto node = std::get_if<0>(&edge))
        stream << describe((*node)->lockedRef);
    else if (auto follows = std::get_if<1>(&edge))
        stream << fmt("follows '%s'", printInputPath(*follows));
    return stream;
}

/* Two edges are equal if both are nodes with equal locked refs, or both
   are identical 'follows' paths. */
static bool equals(const Node::Edge & e1, const Node::Edge & e2)
{
    if (auto n1 = std::get_if<0>(&e1))
        if (auto n2 = std::get_if<0>(&e2))
            return (*n1)->lockedRef == (*n2)->lockedRef;
    if (auto f1 = std::get_if<1>(&e1))
        if (auto f2 = std::get_if<1>(&e2))
            return *f1 == *f2;
    return false;
}

/* Render a human-readable diff of two lock files by merging their sorted
   flattened input maps (classic two-pointer merge over std::map order). */
std::string LockFile::diff(const LockFile & oldLocks, const LockFile & newLocks)
{
    auto oldFlat = oldLocks.getAllInputs();
    auto newFlat = newLocks.getAllInputs();

    auto i = oldFlat.begin();
    auto j = newFlat.begin();
    std::string res;

    while (i != oldFlat.end() || j != newFlat.end()) {
        if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
            // Present only in the new lock file.
            res += fmt("• " ANSI_GREEN "Added input '%s':" ANSI_NORMAL "\n %s\n", printInputPath(j->first), j->second);
            ++j;
        } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
            // Present only in the old lock file.
            res += fmt("• " ANSI_RED "Removed input '%s'" ANSI_NORMAL "\n", printInputPath(i->first));
            ++i;
        } else {
            // Present in both: report only if the edge changed.
            if (!equals(i->second, j->second)) {
                res += fmt("• " ANSI_BOLD "Updated input '%s':" ANSI_NORMAL "\n %s\n → %s\n",
                    printInputPath(i->first),
                    i->second,
                    j->second);
            }
            ++i;
            ++j;
        }
    }

    return res;
}

/* Sanity check: every 'follows' edge must point at an existing input. */
void LockFile::check()
{
    auto inputs = getAllInputs();

    for (auto & [inputPath, input] : inputs) {
        if (auto follows = std::get_if<1>(&input)) {
            if (!follows->empty() && !findInput(*follows))
                throw Error("input '%s' follows a non-existent input '%s'",
                    printInputPath(inputPath),
                    printInputPath(*follows));
        }
    }
}

// NOTE(review): stray free-function declaration — appears unused here and
// shadows nothing; looks like leftover cruft. Verify against the header
// before removing.
void check();

std::string printInputPath(const InputPath & path)
{
    return concatStringsSep("/", path);
}

}
12,031
C++
.cc
317
29.542587
117
0.56431
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,058
config.cc
NixOS_nix/src/libflake/flake/config.cc
#include "users.hh"
#include "config-global.hh"
#include "flake/settings.hh"
#include "flake.hh"

#include <nlohmann/json.hpp>

namespace nix::flake {

// setting name -> setting value -> allow or ignore.
typedef std::map<std::string, std::map<std::string, bool>> TrustedList;

/* Location of the per-user trusted-settings store, under the Nix data
   directory. */
Path trustedListPath()
{
    return getDataDir() + "/trusted-settings.json";
}

/* Load the trusted-settings list; an absent file means an empty list. */
static TrustedList readTrustedList()
{
    auto path = trustedListPath();
    if (!pathExists(path)) return {};
    auto json = nlohmann::json::parse(readFile(path));
    return json;
}

/* Persist the trusted-settings list, creating parent directories as
   needed. */
static void writeTrustedList(const TrustedList & trustedList)
{
    auto path = trustedListPath();
    createDirs(dirOf(path));
    writeFile(path, nlohmann::json(trustedList).dump());
}

/* Apply the settings from a flake's nixConfig to the global Nix
   configuration. Settings outside the whitelist are applied only if
   acceptFlakeConfig is set, previously saved as trusted, or confirmed
   interactively by the user; otherwise they are ignored with a warning. */
void ConfigFile::apply(const Settings & flakeSettings)
{
    // Settings considered harmless enough to apply without asking.
    std::set<std::string> whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry", "commit-lock-file-summary", "commit-lockfile-summary"};

    for (auto & [name, value] : settings) {

        // Whitelisting is keyed on the base name, so "extra-foo" is
        // treated like "foo".
        auto baseName = hasPrefix(name, "extra-") ? std::string(name, 6) : name;

        // FIXME: Move into libutil/config.cc.
        // Render the variant value as the string form expected by
        // globalConfig.set().
        std::string valueS;
        if (auto* s = std::get_if<std::string>(&value))
            valueS = *s;
        else if (auto* n = std::get_if<int64_t>(&value))
            valueS = fmt("%d", *n);
        else if (auto* b = std::get_if<Explicit<bool>>(&value))
            valueS = b->t ? "true" : "false";
        else if (auto ss = std::get_if<std::vector<std::string>>(&value))
            valueS = dropEmptyInitThenConcatStringsSep(" ", *ss); // FIXME: evil
        else
            assert(false);

        if (!whitelist.count(baseName) && !flakeSettings.acceptFlakeConfig) {
            bool trusted = false;
            auto trustedList = readTrustedList();
            auto tlname = get(trustedList, name);
            if (auto saved = tlname ? get(*tlname, valueS) : nullptr) {
                // A previous decision (allow or deny) was saved for this
                // exact name/value pair; reuse it.
                trusted = *saved;
                // NOTE(review): this message hard-codes
                // ~/.local/share/nix/..., but trustedListPath() derives
                // from getDataDir() — the two can disagree when
                // XDG_DATA_HOME is set. Confirm intended wording.
                printInfo("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name, valueS);
            } else {
                // FIXME: filter ANSI escapes, newlines, \r, etc.
                // Ask interactively; anything but 'y' (default 'n')
                // means "do not trust".
                if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {
                    trusted = true;
                }
                // Optionally remember the decision for future runs.
                if (std::tolower(logger->ask(fmt("do you want to permanently mark this value as %s (y/N)?", trusted ? "trusted": "untrusted" )).value_or('n')) == 'y') {
                    trustedList[name][valueS] = trusted;
                    writeTrustedList(trustedList);
                }
            }
            if (!trusted) {
                warn("ignoring untrusted flake configuration setting '%s'.\nPass '%s' to trust it", name, "--accept-flake-config");
                continue;
            }
        }

        globalConfig.set(name, valueS);
    }
}

}
2,990
C++
.cc
68
35.544118
191
0.591268
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,059
url-name.cc
NixOS_nix/src/libflake/flake/url-name.cc
#include "url-name.hh"
#include <regex>
#include <iostream>

namespace nix {

/* Building blocks for the regexes below. Note: segments are limited to
   [a-zA-Z0-9_-], so names containing '.' are not matched. */
static const std::string attributeNamePattern("[a-zA-Z0-9_-]+");
/* Matches an attribute path like "a.b.c" (with an optional "^output"
   suffix): group 1 = leading "a.b." prefix (may be empty), group 2 =
   final attribute name. */
static const std::regex lastAttributeRegex("^((?:" + attributeNamePattern + "\\.)*)(" + attributeNamePattern +")(\\^.*)?$");
static const std::string pathSegmentPattern("[a-zA-Z0-9_-]+");
/* Group 1 = last path segment ("owner/repo" -> "repo"). */
static const std::regex lastPathSegmentRegex(".*/(" + pathSegmentPattern +")");
/* Group 1 = second path segment ("owner/repo[/ref]" -> "repo"). */
static const std::regex secondPathSegmentRegex("(?:" + pathSegmentPattern + ")/(" + pathSegmentPattern +")(?:/.*)?");
static const std::regex gitProviderRegex("github|gitlab|sourcehut");
static const std::regex gitSchemeRegex("git($|\\+.*)");

/**
 * Heuristically derive a package/flake name from a parsed URL.
 *
 * The candidates are tried in order: the `dir` query parameter, the
 * final attribute of the fragment (unless it is just "default"), the
 * repository name for github/gitlab/sourcehut and git-scheme URLs, and
 * finally the last path segment.
 *
 * @param url the parsed flake URL.
 * @return the derived name, or std::nullopt if no useful name could be
 *         extracted.
 */
std::optional<std::string> getNameFromURL(const ParsedURL & url)
{
    std::smatch match;

    /* If there is a dir= argument, use its value. (Single map lookup;
       the previous count()+at() pair looked the key up twice.) */
    if (auto dir = url.query.find("dir"); dir != url.query.end())
        return dir->second;

    /* If the fragment isn't a "default" and contains two attribute elements, use the last one */
    if (std::regex_match(url.fragment, match, lastAttributeRegex)
        && match.str(1) != "defaultPackage."
        && match.str(2) != "default") {
        return match.str(2);
    }

    /* If this is a github/gitlab/sourcehut flake, use the repo name */
    if (std::regex_match(url.scheme, gitProviderRegex) && std::regex_match(url.path, match, secondPathSegmentRegex))
        return match.str(1);

    /* If it is a regular git flake, use the directory name */
    if (std::regex_match(url.scheme, gitSchemeRegex) && std::regex_match(url.path, match, lastPathSegmentRegex))
        return match.str(1);

    /* If there is no fragment, take the last element of the path */
    if (std::regex_match(url.path, match, lastPathSegmentRegex))
        return match.str(1);

    /* If even that didn't work, the URL does not contain enough info to determine a useful name */
    return {};
}

}
1,866
C++
.cc
36
47.444444
124
0.675824
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,060
flake.cc
NixOS_nix/src/libflake/flake/flake.cc
#include "terminal.hh" #include "flake.hh" #include "eval.hh" #include "eval-settings.hh" #include "lockfile.hh" #include "primops.hh" #include "eval-inline.hh" #include "store-api.hh" #include "fetchers.hh" #include "finally.hh" #include "fetch-settings.hh" #include "flake/settings.hh" #include "value-to-json.hh" #include "local-fs-store.hh" #include <nlohmann/json.hpp> namespace nix { using namespace flake; namespace flake { typedef std::pair<StorePath, FlakeRef> FetchedFlake; typedef std::vector<std::pair<FlakeRef, FetchedFlake>> FlakeCache; static std::optional<FetchedFlake> lookupInFlakeCache( const FlakeCache & flakeCache, const FlakeRef & flakeRef) { // FIXME: inefficient. for (auto & i : flakeCache) { if (flakeRef == i.first) { debug("mapping '%s' to previously seen input '%s' -> '%s", flakeRef, i.first, i.second.second); return i.second; } } return std::nullopt; } static std::tuple<StorePath, FlakeRef, FlakeRef> fetchOrSubstituteTree( EvalState & state, const FlakeRef & originalRef, bool allowLookup, FlakeCache & flakeCache) { auto fetched = lookupInFlakeCache(flakeCache, originalRef); FlakeRef resolvedRef = originalRef; if (!fetched) { if (originalRef.input.isDirect()) { fetched.emplace(originalRef.fetchTree(state.store)); } else { if (allowLookup) { resolvedRef = originalRef.resolve(state.store); auto fetchedResolved = lookupInFlakeCache(flakeCache, originalRef); if (!fetchedResolved) fetchedResolved.emplace(resolvedRef.fetchTree(state.store)); flakeCache.push_back({resolvedRef, *fetchedResolved}); fetched.emplace(*fetchedResolved); } else { throw Error("'%s' is an indirect flake reference, but registry lookups are not allowed", originalRef); } } flakeCache.push_back({originalRef, *fetched}); } auto [storePath, lockedRef] = *fetched; debug("got tree '%s' from '%s'", state.store->printStorePath(storePath), lockedRef); state.allowPath(storePath); assert(!originalRef.input.getNarHash() || storePath == originalRef.input.computeStorePath(*state.store)); 
return {std::move(storePath), resolvedRef, lockedRef}; } static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { if (value.isThunk() && value.isTrivial()) state.forceValue(value, pos); } static void expectType(EvalState & state, ValueType type, Value & value, const PosIdx pos) { forceTrivialValue(state, value, pos); if (value.type() != type) throw Error("expected %s but got %s at %s", showType(type), showType(value.type()), state.positions[pos]); } static std::map<FlakeId, FlakeInput> parseFlakeInputs( EvalState & state, Value * value, const PosIdx pos, const std::optional<Path> & baseDir, InputPath lockRootPath); static FlakeInput parseFlakeInput(EvalState & state, std::string_view inputName, Value * value, const PosIdx pos, const std::optional<Path> & baseDir, InputPath lockRootPath) { expectType(state, nAttrs, *value, pos); FlakeInput input; auto sInputs = state.symbols.create("inputs"); auto sUrl = state.symbols.create("url"); auto sFlake = state.symbols.create("flake"); auto sFollows = state.symbols.create("follows"); fetchers::Attrs attrs; std::optional<std::string> url; for (auto & attr : *value->attrs()) { try { if (attr.name == sUrl) { expectType(state, nString, *attr.value, attr.pos); url = attr.value->string_view(); attrs.emplace("url", *url); } else if (attr.name == sFlake) { expectType(state, nBool, *attr.value, attr.pos); input.isFlake = attr.value->boolean(); } else if (attr.name == sInputs) { input.overrides = parseFlakeInputs(state, attr.value, attr.pos, baseDir, lockRootPath); } else if (attr.name == sFollows) { expectType(state, nString, *attr.value, attr.pos); auto follows(parseInputPath(attr.value->c_str())); follows.insert(follows.begin(), lockRootPath.begin(), lockRootPath.end()); input.follows = follows; } else { // Allow selecting a subset of enum values #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wswitch-enum" switch (attr.value->type()) { case nString: attrs.emplace(state.symbols[attr.name], 
attr.value->c_str()); break; case nBool: attrs.emplace(state.symbols[attr.name], Explicit<bool> { attr.value->boolean() }); break; case nInt: { auto intValue = attr.value->integer().value; if (intValue < 0) { state.error<EvalError>("negative value given for flake input attribute %1%: %2%", state.symbols[attr.name], intValue).debugThrow(); } attrs.emplace(state.symbols[attr.name], uint64_t(intValue)); break; } default: if (attr.name == state.symbols.create("publicKeys")) { experimentalFeatureSettings.require(Xp::VerifiedFetches); NixStringContext emptyContext = {}; attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, emptyContext).dump()); } else state.error<TypeError>("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", state.symbols[attr.name], showType(*attr.value)).debugThrow(); } #pragma GCC diagnostic pop } } catch (Error & e) { e.addTrace( state.positions[attr.pos], HintFmt("while evaluating flake attribute '%s'", state.symbols[attr.name])); throw; } } if (attrs.count("type")) try { input.ref = FlakeRef::fromAttrs(state.fetchSettings, attrs); } catch (Error & e) { e.addTrace(state.positions[pos], HintFmt("while evaluating flake input")); throw; } else { attrs.erase("url"); if (!attrs.empty()) throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, state.positions[pos]); if (url) input.ref = parseFlakeRef(state.fetchSettings, *url, baseDir, true, input.isFlake); } if (!input.follows && !input.ref) input.ref = FlakeRef::fromAttrs(state.fetchSettings, {{"type", "indirect"}, {"id", std::string(inputName)}}); return input; } static std::map<FlakeId, FlakeInput> parseFlakeInputs( EvalState & state, Value * value, const PosIdx pos, const std::optional<Path> & baseDir, InputPath lockRootPath) { std::map<FlakeId, FlakeInput> inputs; expectType(state, nAttrs, *value, pos); for (auto & inputAttr : *value->attrs()) { inputs.emplace(state.symbols[inputAttr.name], 
parseFlakeInput(state, state.symbols[inputAttr.name], inputAttr.value, inputAttr.pos, baseDir, lockRootPath)); } return inputs; } static Flake readFlake( EvalState & state, const FlakeRef & originalRef, const FlakeRef & resolvedRef, const FlakeRef & lockedRef, const SourcePath & rootDir, const InputPath & lockRootPath) { auto flakePath = rootDir / CanonPath(resolvedRef.subdir) / "flake.nix"; // NOTE evalFile forces vInfo to be an attrset because mustBeTrivial is true. Value vInfo; state.evalFile(flakePath, vInfo, true); Flake flake { .originalRef = originalRef, .resolvedRef = resolvedRef, .lockedRef = lockedRef, .path = flakePath, }; if (auto description = vInfo.attrs()->get(state.sDescription)) { expectType(state, nString, *description->value, description->pos); flake.description = description->value->c_str(); } auto sInputs = state.symbols.create("inputs"); if (auto inputs = vInfo.attrs()->get(sInputs)) flake.inputs = parseFlakeInputs(state, inputs->value, inputs->pos, flakePath.parent().path.abs(), lockRootPath); // FIXME auto sOutputs = state.symbols.create("outputs"); if (auto outputs = vInfo.attrs()->get(sOutputs)) { expectType(state, nFunction, *outputs->value, outputs->pos); if (outputs->value->isLambda() && outputs->value->payload.lambda.fun->hasFormals()) { for (auto & formal : outputs->value->payload.lambda.fun->formals->formals) { if (formal.name != state.sSelf) flake.inputs.emplace(state.symbols[formal.name], FlakeInput { .ref = parseFlakeRef(state.fetchSettings, std::string(state.symbols[formal.name])) }); } } } else throw Error("flake '%s' lacks attribute 'outputs'", resolvedRef); auto sNixConfig = state.symbols.create("nixConfig"); if (auto nixConfig = vInfo.attrs()->get(sNixConfig)) { expectType(state, nAttrs, *nixConfig->value, nixConfig->pos); for (auto & setting : *nixConfig->value->attrs()) { forceTrivialValue(state, *setting.value, setting.pos); if (setting.value->type() == nString) flake.config.settings.emplace( state.symbols[setting.name], 
std::string(state.forceStringNoCtx(*setting.value, setting.pos, ""))); else if (setting.value->type() == nPath) { NixStringContext emptyContext = {}; flake.config.settings.emplace( state.symbols[setting.name], state.coerceToString(setting.pos, *setting.value, emptyContext, "", false, true, true).toOwned()); } else if (setting.value->type() == nInt) flake.config.settings.emplace( state.symbols[setting.name], state.forceInt(*setting.value, setting.pos, "").value); else if (setting.value->type() == nBool) flake.config.settings.emplace( state.symbols[setting.name], Explicit<bool> { state.forceBool(*setting.value, setting.pos, "") }); else if (setting.value->type() == nList) { std::vector<std::string> ss; for (auto elem : setting.value->listItems()) { if (elem->type() != nString) state.error<TypeError>("list element in flake configuration setting '%s' is %s while a string is expected", state.symbols[setting.name], showType(*setting.value)).debugThrow(); ss.emplace_back(state.forceStringNoCtx(*elem, setting.pos, "")); } flake.config.settings.emplace(state.symbols[setting.name], ss); } else state.error<TypeError>("flake configuration setting '%s' is %s", state.symbols[setting.name], showType(*setting.value)).debugThrow(); } } for (auto & attr : *vInfo.attrs()) { if (attr.name != state.sDescription && attr.name != sInputs && attr.name != sOutputs && attr.name != sNixConfig) throw Error("flake '%s' has an unsupported attribute '%s', at %s", resolvedRef, state.symbols[attr.name], state.positions[attr.pos]); } return flake; } static Flake getFlake( EvalState & state, const FlakeRef & originalRef, bool allowLookup, FlakeCache & flakeCache, InputPath lockRootPath) { auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree( state, originalRef, allowLookup, flakeCache); return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootPath); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool 
allowLookup, FlakeCache & flakeCache) { return getFlake(state, originalRef, allowLookup, flakeCache, {}); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup) { FlakeCache flakeCache; return getFlake(state, originalRef, allowLookup, flakeCache); } static LockFile readLockFile( const fetchers::Settings & fetchSettings, const SourcePath & lockFilePath) { return lockFilePath.pathExists() ? LockFile(fetchSettings, lockFilePath.readFile(), fmt("%s", lockFilePath)) : LockFile(); } /* Compute an in-memory lock file for the specified top-level flake, and optionally write it to file, if the flake is writable. */ LockedFlake lockFlake( const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) { experimentalFeatureSettings.require(Xp::Flakes); FlakeCache flakeCache; auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries); auto flake = getFlake(state, topRef, useRegistries, flakeCache); if (lockFlags.applyNixConfig) { flake.config.apply(settings); state.store->setOptions(); } try { if (!state.fetchSettings.allowDirty && lockFlags.referenceLockFilePath) { throw Error("reference lock file was provided, but the `allow-dirty` setting is set to false"); } auto oldLockFile = readLockFile( state.fetchSettings, lockFlags.referenceLockFilePath.value_or( flake.lockFilePath())); debug("old lock file: %s", oldLockFile); std::map<InputPath, FlakeInput> overrides; std::set<InputPath> explicitCliOverrides; std::set<InputPath> overridesUsed, updatesUsed; std::map<ref<Node>, SourcePath> nodePaths; for (auto & i : lockFlags.inputOverrides) { overrides.insert_or_assign(i.first, FlakeInput { .ref = i.second }); explicitCliOverrides.insert(i.first); } LockFile newLockFile; std::vector<FlakeRef> parents; std::function<void( const FlakeInputs & flakeInputs, ref<Node> node, const InputPath & inputPathPrefix, std::shared_ptr<const Node> oldNode, const InputPath & lockRootPath, const Path & parentPath, bool 
trustLock)> computeLocks; computeLocks = [&]( /* The inputs of this node, either from flake.nix or flake.lock. */ const FlakeInputs & flakeInputs, /* The node whose locks are to be updated.*/ ref<Node> node, /* The path to this node in the lock file graph. */ const InputPath & inputPathPrefix, /* The old node, if any, from which locks can be copied. */ std::shared_ptr<const Node> oldNode, const InputPath & lockRootPath, const Path & parentPath, bool trustLock) { debug("computing lock file node '%s'", printInputPath(inputPathPrefix)); /* Get the overrides (i.e. attributes of the form 'inputs.nixops.inputs.nixpkgs.url = ...'). */ for (auto & [id, input] : flakeInputs) { for (auto & [idOverride, inputOverride] : input.overrides) { auto inputPath(inputPathPrefix); inputPath.push_back(id); inputPath.push_back(idOverride); overrides.insert_or_assign(inputPath, inputOverride); } } /* Check whether this input has overrides for a non-existent input. */ for (auto [inputPath, inputOverride] : overrides) { auto inputPath2(inputPath); auto follow = inputPath2.back(); inputPath2.pop_back(); if (inputPath2 == inputPathPrefix && !flakeInputs.count(follow)) warn( "input '%s' has an override for a non-existent input '%s'", printInputPath(inputPathPrefix), follow); } /* Go over the flake inputs, resolve/fetch them if necessary (i.e. if they're new or the flakeref changed from what's in the lock file). */ for (auto & [id, input2] : flakeInputs) { auto inputPath(inputPathPrefix); inputPath.push_back(id); auto inputPathS = printInputPath(inputPath); debug("computing input '%s'", inputPathS); try { /* Do we have an override for this input from one of the ancestors? */ auto i = overrides.find(inputPath); bool hasOverride = i != overrides.end(); bool hasCliOverride = explicitCliOverrides.contains(inputPath); if (hasOverride) { overridesUsed.insert(inputPath); // Respect the “flakeness” of the input even if we // override it i->second.isFlake = input2.isFlake; } auto & input = hasOverride ? 
i->second : input2; /* Resolve 'follows' later (since it may refer to an input path we haven't processed yet. */ if (input.follows) { InputPath target; target.insert(target.end(), input.follows->begin(), input.follows->end()); debug("input '%s' follows '%s'", inputPathS, printInputPath(target)); node->inputs.insert_or_assign(id, target); continue; } assert(input.ref); /* Do we have an entry in the existing lock file? And the input is not in updateInputs? */ std::shared_ptr<LockedNode> oldLock; updatesUsed.insert(inputPath); if (oldNode && !lockFlags.inputUpdates.count(inputPath)) if (auto oldLock2 = get(oldNode->inputs, id)) if (auto oldLock3 = std::get_if<0>(&*oldLock2)) oldLock = *oldLock3; if (oldLock && oldLock->originalRef == *input.ref && !hasCliOverride) { debug("keeping existing input '%s'", inputPathS); /* Copy the input from the old lock since its flakeref didn't change and there is no override from a higher level flake. */ auto childNode = make_ref<LockedNode>( oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake); node->inputs.insert_or_assign(id, childNode); /* If we have this input in updateInputs, then we must fetch the flake to update it. */ auto lb = lockFlags.inputUpdates.lower_bound(inputPath); auto mustRefetch = lb != lockFlags.inputUpdates.end() && lb->size() > inputPath.size() && std::equal(inputPath.begin(), inputPath.end(), lb->begin()); FlakeInputs fakeInputs; if (!mustRefetch) { /* No need to fetch this flake, we can be lazy. However there may be new overrides on the inputs of this flake, so we need to check those. */ for (auto & i : oldLock->inputs) { if (auto lockedNode = std::get_if<0>(&i.second)) { fakeInputs.emplace(i.first, FlakeInput { .ref = (*lockedNode)->originalRef, .isFlake = (*lockedNode)->isFlake, }); } else if (auto follows = std::get_if<1>(&i.second)) { if (!trustLock) { // It is possible that the flake has changed, // so we must confirm all the follows that are in the lock file are also in the flake. 
auto overridePath(inputPath); overridePath.push_back(i.first); auto o = overrides.find(overridePath); // If the override disappeared, we have to refetch the flake, // since some of the inputs may not be present in the lock file. if (o == overrides.end()) { mustRefetch = true; // There's no point populating the rest of the fake inputs, // since we'll refetch the flake anyways. break; } } auto absoluteFollows(lockRootPath); absoluteFollows.insert(absoluteFollows.end(), follows->begin(), follows->end()); fakeInputs.emplace(i.first, FlakeInput { .follows = absoluteFollows, }); } } } if (mustRefetch) { auto inputFlake = getFlake(state, oldLock->lockedRef, false, flakeCache, inputPath); nodePaths.emplace(childNode, inputFlake.path.parent()); computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, lockRootPath, parentPath, false); } else { computeLocks(fakeInputs, childNode, inputPath, oldLock, lockRootPath, parentPath, true); } } else { /* We need to create a new lock file entry. So fetch this input. */ debug("creating new input '%s'", inputPathS); if (!lockFlags.allowUnlocked && !input.ref->input.isLocked()) throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS); /* Note: in case of an --override-input, we use the *original* ref (input2.ref) for the "original" field, rather than the override. This ensures that the override isn't nuked the next time we update the lock file. That is, overrides are sticky unless you use --no-write-lock-file. */ auto ref = (input2.ref && explicitCliOverrides.contains(inputPath)) ? *input2.ref : *input.ref; if (input.isFlake) { Path localPath = parentPath; FlakeRef localRef = *input.ref; // If this input is a path, recurse it down. // This allows us to resolve path inputs relative to the current flake. 
if (localRef.input.getType() == "path") localPath = absPath(*input.ref->input.getSourcePath(), parentPath); auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath); auto childNode = make_ref<LockedNode>(inputFlake.lockedRef, ref); node->inputs.insert_or_assign(id, childNode); /* Guard against circular flake imports. */ for (auto & parent : parents) if (parent == *input.ref) throw Error("found circular import of flake '%s'", parent); parents.push_back(*input.ref); Finally cleanup([&]() { parents.pop_back(); }); /* Recursively process the inputs of this flake. Also, unless we already have this flake in the top-level lock file, use this flake's own lock file. */ nodePaths.emplace(childNode, inputFlake.path.parent()); computeLocks( inputFlake.inputs, childNode, inputPath, oldLock ? std::dynamic_pointer_cast<const Node>(oldLock) : readLockFile(state.fetchSettings, inputFlake.lockFilePath()).root.get_ptr(), oldLock ? lockRootPath : inputPath, localPath, false); } else { auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree( state, *input.ref, useRegistries, flakeCache); auto childNode = make_ref<LockedNode>(lockedRef, ref, false); nodePaths.emplace(childNode, state.rootPath(state.store->toRealPath(storePath))); node->inputs.insert_or_assign(id, childNode); } } } catch (Error & e) { e.addTrace({}, "while updating the flake input '%s'", inputPathS); throw; } } }; // Bring in the current ref for relative path resolution if we have it auto parentPath = flake.path.parent().path.abs(); nodePaths.emplace(newLockFile.root, flake.path.parent()); computeLocks( flake.inputs, newLockFile.root, {}, lockFlags.recreateLockFile ? 
nullptr : oldLockFile.root.get_ptr(), {}, parentPath, false); for (auto & i : lockFlags.inputOverrides) if (!overridesUsed.count(i.first)) warn("the flag '--override-input %s %s' does not match any input", printInputPath(i.first), i.second); for (auto & i : lockFlags.inputUpdates) if (!updatesUsed.count(i)) warn("'%s' does not match any input of this flake", printInputPath(i)); /* Check 'follows' inputs. */ newLockFile.check(); debug("new lock file: %s", newLockFile); auto sourcePath = topRef.input.getSourcePath(); /* Check whether we need to / can write the new lock file. */ if (newLockFile != oldLockFile || lockFlags.outputLockFilePath) { auto diff = LockFile::diff(oldLockFile, newLockFile); if (lockFlags.writeLockFile) { if (sourcePath || lockFlags.outputLockFilePath) { if (auto unlockedInput = newLockFile.isUnlocked()) { if (state.fetchSettings.warnDirty) warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput); } else { if (!lockFlags.updateLockFile) throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef); auto newLockFileS = fmt("%s\n", newLockFile); if (lockFlags.outputLockFilePath) { if (lockFlags.commitLockFile) throw Error("'--commit-lock-file' and '--output-lock-file' are incompatible"); writeFile(*lockFlags.outputLockFilePath, newLockFileS); } else { auto relPath = (topRef.subdir == "" ? 
"" : topRef.subdir + "/") + "flake.lock"; auto outputLockFilePath = *sourcePath + "/" + relPath; bool lockFileExists = pathExists(outputLockFilePath); auto s = chomp(diff); if (lockFileExists) { if (s.empty()) warn("updating lock file '%s'", outputLockFilePath); else warn("updating lock file '%s':\n%s", outputLockFilePath, s); } else warn("creating lock file '%s': \n%s", outputLockFilePath, s); std::optional<std::string> commitMessage = std::nullopt; if (lockFlags.commitLockFile) { std::string cm; cm = settings.commitLockFileSummary.get(); if (cm == "") { cm = fmt("%s: %s", relPath, lockFileExists ? "Update" : "Add"); } cm += "\n\nFlake lock file updates:\n\n"; cm += filterANSIEscapes(diff, true); commitMessage = cm; } topRef.input.putFile( CanonPath((topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock"), newLockFileS, commitMessage); } /* Rewriting the lockfile changed the top-level repo, so we should re-read it. FIXME: we could also just clear the 'rev' field... */ auto prevLockedRef = flake.lockedRef; FlakeCache dummyCache; flake = getFlake(state, topRef, useRegistries, dummyCache); if (lockFlags.commitLockFile && flake.lockedRef.input.getRev() && prevLockedRef.input.getRev() != flake.lockedRef.input.getRev()) warn("committed new revision '%s'", flake.lockedRef.input.getRev()->gitRev()); } } else throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef); } else { warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff)); flake.forceDirty = true; } } return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile), .nodePaths = std::move(nodePaths) }; } catch (Error & e) { e.addTrace({}, "while updating the lock file of flake '%s'", flake.lockedRef.to_string()); throw; } } std::pair<StorePath, Path> sourcePathToStorePath( ref<Store> store, const SourcePath & _path) { auto path = _path.path.abs(); if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>()) { auto 
realStoreDir = store2->getRealStoreDir(); if (isInDir(path, realStoreDir)) path = store2->storeDir + path.substr(realStoreDir.size()); } return store->toStorePath(path); } void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) { experimentalFeatureSettings.require(Xp::Flakes); auto [lockFileStr, keyMap] = lockedFlake.lockFile.to_string(); auto overrides = state.buildBindings(lockedFlake.nodePaths.size()); for (auto & [node, sourcePath] : lockedFlake.nodePaths) { auto override = state.buildBindings(2); auto & vSourceInfo = override.alloc(state.symbols.create("sourceInfo")); auto lockedNode = node.dynamic_pointer_cast<const LockedNode>(); auto [storePath, subdir] = sourcePathToStorePath(state.store, sourcePath); emitTreeAttrs( state, storePath, lockedNode ? lockedNode->lockedRef.input : lockedFlake.flake.lockedRef.input, vSourceInfo, false, !lockedNode && lockedFlake.flake.forceDirty); auto key = keyMap.find(node); assert(key != keyMap.end()); override .alloc(state.symbols.create("dir")) .mkString(CanonPath(subdir).rel()); overrides.alloc(state.symbols.create(key->second)).mkAttrs(override); } auto & vOverrides = state.allocValue()->mkAttrs(overrides); auto vCallFlake = state.allocValue(); state.evalFile(state.callFlakeInternal, *vCallFlake); auto vLocks = state.allocValue(); vLocks->mkString(lockFileStr); auto vFetchFinalTree = get(state.internalPrimOps, "fetchFinalTree"); assert(vFetchFinalTree); Value * args[] = {vLocks, &vOverrides, *vFetchFinalTree}; state.callFunction(*vCallFlake, 3, args, vRes, noPos); } void initLib(const Settings & settings) { auto prim_getFlake = [&settings](EvalState & state, const PosIdx pos, Value * * args, Value & v) { std::string flakeRefS(state.forceStringNoCtx(*args[0], pos, "while evaluating the argument passed to builtins.getFlake")); auto flakeRef = parseFlakeRef(state.fetchSettings, flakeRefS, {}, true); if (state.settings.pureEval && !flakeRef.input.isLocked()) throw Error("cannot call 'getFlake' on 
unlocked flake reference '%s', at %s (use --impure to override)", flakeRefS, state.positions[pos]); callFlake(state, lockFlake(settings, state, flakeRef, LockFlags { .updateLockFile = false, .writeLockFile = false, .useRegistries = !state.settings.pureEval && settings.useRegistries, .allowUnlocked = !state.settings.pureEval, }), v); }; RegisterPrimOp::primOps->push_back({ .name = "__getFlake", .args = {"args"}, .doc = R"( Fetch a flake from a flake reference, and return its output attributes and some metadata. For example: ```nix (builtins.getFlake "nix/55bc52401966fbffa525c574c14f67b00bc4fb3a").packages.x86_64-linux.nix ``` Unless impure evaluation is allowed (`--impure`), the flake reference must be "locked", e.g. contain a Git revision or content hash. An example of an unlocked usage is: ```nix (builtins.getFlake "github:edolstra/dwarffs").rev ``` )", .fun = prim_getFlake, .experimentalFeature = Xp::Flakes, }); } static void prim_parseFlakeRef( EvalState & state, const PosIdx pos, Value * * args, Value & v) { std::string flakeRefS(state.forceStringNoCtx(*args[0], pos, "while evaluating the argument passed to builtins.parseFlakeRef")); auto attrs = parseFlakeRef(state.fetchSettings, flakeRefS, {}, true).toAttrs(); auto binds = state.buildBindings(attrs.size()); for (const auto & [key, value] : attrs) { auto s = state.symbols.create(key); auto & vv = binds.alloc(s); std::visit(overloaded { [&vv](const std::string & value) { vv.mkString(value); }, [&vv](const uint64_t & value) { vv.mkInt(value); }, [&vv](const Explicit<bool> & value) { vv.mkBool(value.t); } }, value); } v.mkAttrs(binds); } static RegisterPrimOp r3({ .name = "__parseFlakeRef", .args = {"flake-ref"}, .doc = R"( Parse a flake reference, and return its exploded form. 
For example: ```nix builtins.parseFlakeRef "github:NixOS/nixpkgs/23.05?dir=lib" ``` evaluates to: ```nix { dir = "lib"; owner = "NixOS"; ref = "23.05"; repo = "nixpkgs"; type = "github"; } ``` )", .fun = prim_parseFlakeRef, .experimentalFeature = Xp::Flakes, }); static void prim_flakeRefToString( EvalState & state, const PosIdx pos, Value * * args, Value & v) { state.forceAttrs(*args[0], noPos, "while evaluating the argument passed to builtins.flakeRefToString"); fetchers::Attrs attrs; for (const auto & attr : *args[0]->attrs()) { auto t = attr.value->type(); if (t == nInt) { auto intValue = attr.value->integer().value; if (intValue < 0) { state.error<EvalError>("negative value given for flake ref attr %1%: %2%", state.symbols[attr.name], intValue).atPos(pos).debugThrow(); } attrs.emplace(state.symbols[attr.name], uint64_t(intValue)); } else if (t == nBool) { attrs.emplace(state.symbols[attr.name], Explicit<bool> { attr.value->boolean() }); } else if (t == nString) { attrs.emplace(state.symbols[attr.name], std::string(attr.value->string_view())); } else { state.error<EvalError>( "flake reference attribute sets may only contain integers, Booleans, " "and strings, but attribute '%s' is %s", state.symbols[attr.name], showType(*attr.value)).debugThrow(); } } auto flakeRef = FlakeRef::fromAttrs(state.fetchSettings, attrs); v.mkString(flakeRef.to_string()); } static RegisterPrimOp r4({ .name = "__flakeRefToString", .args = {"attrs"}, .doc = R"( Convert a flake reference from attribute set format to URL format. 
For example: ```nix builtins.flakeRefToString { dir = "lib"; owner = "NixOS"; ref = "23.05"; repo = "nixpkgs"; type = "github"; } ``` evaluates to ```nix "github:NixOS/nixpkgs/23.05?dir=lib" ``` )", .fun = prim_flakeRefToString, .experimentalFeature = Xp::Flakes, }); } std::optional<Fingerprint> LockedFlake::getFingerprint(ref<Store> store) const { if (lockFile.isUnlocked()) return std::nullopt; auto fingerprint = flake.lockedRef.input.getFingerprint(store); if (!fingerprint) return std::nullopt; *fingerprint += fmt(";%s;%s", flake.lockedRef.subdir, lockFile); /* Include revCount and lastModified because they're not necessarily implied by the content fingerprint (e.g. for tarball flakes) but can influence the evaluation result. */ if (auto revCount = flake.lockedRef.input.getRevCount()) *fingerprint += fmt(";revCount=%d", *revCount); if (auto lastModified = flake.lockedRef.input.getLastModified()) *fingerprint += fmt(";lastModified=%d", *lastModified); // FIXME: as an optimization, if the flake contains a lock file // and we haven't changed it, then it's sufficient to use // flake.sourceInfo.storePath for the fingerprint. return hashString(HashAlgorithm::SHA256, *fingerprint); } Flake::~Flake() { } }
40,462
C++
.cc
825
34.586667
159
0.547592
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,061
flakeref.cc
NixOS_nix/src/libflake/flake/flakeref.cc
#include "flakeref.hh" #include "store-api.hh" #include "url.hh" #include "url-parts.hh" #include "fetchers.hh" #include "registry.hh" namespace nix { #if 0 // 'dir' path elements cannot start with a '.'. We also reject // potentially dangerous characters like ';'. const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; #endif std::string FlakeRef::to_string() const { std::map<std::string, std::string> extraQuery; if (subdir != "") extraQuery.insert_or_assign("dir", subdir); return input.toURLString(extraQuery); } fetchers::Attrs FlakeRef::toAttrs() const { auto attrs = input.toAttrs(); if (subdir != "") attrs.emplace("dir", subdir); return attrs; } std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef) { str << flakeRef.to_string(); return str; } FlakeRef FlakeRef::resolve(ref<Store> store) const { auto [input2, extraAttrs] = lookupInRegistries(store, input); return FlakeRef(std::move(input2), fetchers::maybeGetStrAttr(extraAttrs, "dir").value_or(subdir)); } FlakeRef parseFlakeRef( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir, bool allowMissing, bool isFlake) { auto [flakeRef, fragment] = parseFlakeRefWithFragment(fetchSettings, url, baseDir, allowMissing, isFlake); if (fragment != "") throw Error("unexpected fragment '%s' in flake reference '%s'", fragment, url); return flakeRef; } std::optional<FlakeRef> maybeParseFlakeRef( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir) { try { return parseFlakeRef(fetchSettings, url, baseDir); } catch (Error &) { return {}; } } std::pair<FlakeRef, std::string> parsePathFlakeRefWithFragment( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir, bool allowMissing, bool isFlake) { std::string path = url; std::string fragment = ""; 
std::map<std::string, std::string> query; auto pathEnd = url.find_first_of("#?"); auto fragmentStart = pathEnd; if (pathEnd != std::string::npos && url[pathEnd] == '?') { fragmentStart = url.find("#"); } if (pathEnd != std::string::npos) { path = url.substr(0, pathEnd); } if (fragmentStart != std::string::npos) { fragment = percentDecode(url.substr(fragmentStart+1)); } if (pathEnd != std::string::npos && fragmentStart != std::string::npos && url[pathEnd] == '?') { query = decodeQuery(url.substr(pathEnd+1, fragmentStart-pathEnd-1)); } if (baseDir) { /* Check if 'url' is a path (either absolute or relative to 'baseDir'). If so, search upward to the root of the repo (i.e. the directory containing .git). */ path = absPath(path, baseDir); if (isFlake) { if (!S_ISDIR(lstat(path).st_mode)) { if (baseNameOf(path) == "flake.nix") { // Be gentle with people who accidentally write `/foo/bar/flake.nix` instead of `/foo/bar` warn( "Path '%s' should point at the directory containing the 'flake.nix' file, not the file itself. 
" "Pretending that you meant '%s'" , path, dirOf(path)); path = dirOf(path); } else { throw BadURL("path '%s' is not a flake (because it's not a directory)", path); } } if (!allowMissing && !pathExists(path + "/flake.nix")){ notice("path '%s' does not contain a 'flake.nix', searching up",path); // Save device to detect filesystem boundary dev_t device = lstat(path).st_dev; bool found = false; while (path != "/") { if (pathExists(path + "/flake.nix")) { found = true; break; } else if (pathExists(path + "/.git")) throw Error("path '%s' is not part of a flake (neither it nor its parent directories contain a 'flake.nix' file)", path); else { if (lstat(path).st_dev != device) throw Error("unable to find a flake before encountering filesystem boundary at '%s'", path); } path = dirOf(path); } if (!found) throw BadURL("could not find a flake.nix file"); } if (!allowMissing && !pathExists(path + "/flake.nix")) throw BadURL("path '%s' is not a flake (because it doesn't contain a 'flake.nix' file)", path); auto flakeRoot = path; std::string subdir; while (flakeRoot != "/") { if (pathExists(flakeRoot + "/.git")) { auto base = std::string("git+file://") + flakeRoot; auto parsedURL = ParsedURL{ .url = base, // FIXME .base = base, .scheme = "git+file", .authority = "", .path = flakeRoot, .query = query, }; if (subdir != "") { if (parsedURL.query.count("dir")) throw Error("flake URL '%s' has an inconsistent 'dir' parameter", url); parsedURL.query.insert_or_assign("dir", subdir); } if (pathExists(flakeRoot + "/.git/shallow")) parsedURL.query.insert_or_assign("shallow", "1"); return std::make_pair( FlakeRef(fetchers::Input::fromURL(fetchSettings, parsedURL), getOr(parsedURL.query, "dir", "")), fragment); } subdir = std::string(baseNameOf(flakeRoot)) + (subdir.empty() ? 
"" : "/" + subdir); flakeRoot = dirOf(flakeRoot); } } } else { if (!hasPrefix(path, "/")) throw BadURL("flake reference '%s' is not an absolute path", url); path = canonPath(path + "/" + getOr(query, "dir", "")); } fetchers::Attrs attrs; attrs.insert_or_assign("type", "path"); attrs.insert_or_assign("path", path); return std::make_pair(FlakeRef(fetchers::Input::fromAttrs(fetchSettings, std::move(attrs)), ""), fragment); }; /* Check if 'url' is a flake ID. This is an abbreviated syntax for 'flake:<flake-id>?ref=<ref>&rev=<rev>'. */ static std::optional<std::pair<FlakeRef, std::string>> parseFlakeIdRef( const fetchers::Settings & fetchSettings, const std::string & url, bool isFlake ) { std::smatch match; static std::regex flakeRegex( "((" + flakeIdRegexS + ")(?:/(?:" + refAndOrRevRegex + "))?)" + "(?:#(" + fragmentRegex + "))?", std::regex::ECMAScript); if (std::regex_match(url, match, flakeRegex)) { auto parsedURL = ParsedURL{ .url = url, .base = "flake:" + match.str(1), .scheme = "flake", .authority = "", .path = match[1], }; return std::make_pair( FlakeRef(fetchers::Input::fromURL(fetchSettings, parsedURL, isFlake), ""), percentDecode(match.str(6))); } return {}; } std::optional<std::pair<FlakeRef, std::string>> parseURLFlakeRef( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir, bool isFlake ) { ParsedURL parsedURL; try { parsedURL = parseURL(url); } catch (BadURL &) { return std::nullopt; } std::string fragment; std::swap(fragment, parsedURL.fragment); auto input = fetchers::Input::fromURL(fetchSettings, parsedURL, isFlake); input.parent = baseDir; return std::make_pair( FlakeRef(std::move(input), getOr(parsedURL.query, "dir", "")), fragment); } std::pair<FlakeRef, std::string> parseFlakeRefWithFragment( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir, bool allowMissing, bool isFlake) { using namespace fetchers; std::smatch match; if (auto res = 
parseFlakeIdRef(fetchSettings, url, isFlake)) { return *res; } else if (auto res = parseURLFlakeRef(fetchSettings, url, baseDir, isFlake)) { return *res; } else { return parsePathFlakeRefWithFragment(fetchSettings, url, baseDir, allowMissing, isFlake); } } std::optional<std::pair<FlakeRef, std::string>> maybeParseFlakeRefWithFragment( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir) { try { return parseFlakeRefWithFragment(fetchSettings, url, baseDir); } catch (Error & e) { return {}; } } FlakeRef FlakeRef::fromAttrs( const fetchers::Settings & fetchSettings, const fetchers::Attrs & attrs) { auto attrs2(attrs); attrs2.erase("dir"); return FlakeRef( fetchers::Input::fromAttrs(fetchSettings, std::move(attrs2)), fetchers::maybeGetStrAttr(attrs, "dir").value_or("")); } std::pair<StorePath, FlakeRef> FlakeRef::fetchTree(ref<Store> store) const { auto [storePath, lockedInput] = input.fetchToStore(store); return {std::move(storePath), FlakeRef(std::move(lockedInput), subdir)}; } std::tuple<FlakeRef, std::string, ExtendedOutputsSpec> parseFlakeRefWithFragmentAndExtendedOutputsSpec( const fetchers::Settings & fetchSettings, const std::string & url, const std::optional<Path> & baseDir, bool allowMissing, bool isFlake) { auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse(url); auto [flakeRef, fragment] = parseFlakeRefWithFragment( fetchSettings, std::string { prefix }, baseDir, allowMissing, isFlake); return {std::move(flakeRef), fragment, std::move(extendedOutputsSpec)}; } std::regex flakeIdRegex(flakeIdRegexS, std::regex::ECMAScript); }
10,434
C++
.cc
267
30.509363
145
0.587828
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,062
nix-copy-closure.cc
NixOS_nix/src/nix-copy-closure/nix-copy-closure.cc
#include "shared.hh" #include "realisation.hh" #include "store-api.hh" #include "legacy.hh" using namespace nix; static int main_nix_copy_closure(int argc, char ** argv) { { auto gzip = false; auto toMode = true; auto includeOutputs = false; auto dryRun = false; auto useSubstitutes = NoSubstitute; std::string sshHost; PathSet storePaths; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--help") showManPage("nix-copy-closure"); else if (*arg == "--version") printVersion("nix-copy-closure"); else if (*arg == "--gzip" || *arg == "--bzip2" || *arg == "--xz") { if (*arg != "--gzip") warn("'%1%' is not implemented, falling back to gzip", *arg); gzip = true; } else if (*arg == "--from") toMode = false; else if (*arg == "--to") toMode = true; else if (*arg == "--include-outputs") includeOutputs = true; else if (*arg == "--show-progress") printMsg(lvlError, "Warning: '--show-progress' is not implemented"); else if (*arg == "--dry-run") dryRun = true; else if (*arg == "--use-substitutes" || *arg == "-s") useSubstitutes = Substitute; else if (sshHost.empty()) sshHost = *arg; else storePaths.insert(*arg); return true; }); if (sshHost.empty()) throw UsageError("no host name specified"); auto remoteUri = "ssh://" + sshHost + (gzip ? "?compress=true" : ""); auto to = toMode ? openStore(remoteUri) : openStore(); auto from = toMode ? openStore() : openStore(remoteUri); RealisedPath::Set storePaths2; for (auto & path : storePaths) storePaths2.insert(from->followLinksToStorePath(path)); copyClosure(*from, *to, storePaths2, NoRepair, NoCheckSigs, useSubstitutes); return 0; } } static RegisterLegacyCommand r_nix_copy_closure("nix-copy-closure", main_nix_copy_closure);
2,260
C++
.cc
55
30.4
94
0.534153
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,063
nix_api_main.cc
NixOS_nix/src/libmain-c/nix_api_main.cc
#include "nix_api_store.h" #include "nix_api_store_internal.h" #include "nix_api_util.h" #include "nix_api_util_internal.h" #include "plugin.hh" nix_err nix_init_plugins(nix_c_context * context) { if (context) context->last_err_code = NIX_OK; try { nix::initPlugins(); } NIXC_CATCH_ERRS }
323
C++
.cc
14
19.642857
49
0.667752
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,064
url-name.cc
NixOS_nix/src/libflake-tests/url-name.cc
#include "flake/url-name.hh" #include <gtest/gtest.h> namespace nix { /* ----------- tests for url-name.hh --------------------------------------------------*/ TEST(getNameFromURL, getNameFromURL) { ASSERT_EQ(getNameFromURL(parseURL("path:/home/user/project")), "project"); ASSERT_EQ(getNameFromURL(parseURL("path:~/repos/nixpkgs#packages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("path:~/repos/nixpkgs#legacyPackages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("path:~/repos/nixpkgs#packages.x86_64-linux.Hello")), "Hello"); ASSERT_EQ(getNameFromURL(parseURL("path:.#nonStandardAttr.mylaptop")), "mylaptop"); ASSERT_EQ(getNameFromURL(parseURL("path:./repos/myflake#nonStandardAttr.mylaptop")), "mylaptop"); ASSERT_EQ(getNameFromURL(parseURL("path:./nixpkgs#packages.x86_64-linux.complex^bin,man")), "complex"); ASSERT_EQ(getNameFromURL(parseURL("path:./myproj#packages.x86_64-linux.default^*")), "myproj"); ASSERT_EQ(getNameFromURL(parseURL("path:./myproj#defaultPackage.x86_64-linux")), "myproj"); ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nixpkgs#packages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nixpkgs#hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nix#packages.x86_64-linux.default")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nix#")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nix")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("github:cachix/devenv/main#packages.x86_64-linux.default")), "devenv"); ASSERT_EQ(getNameFromURL(parseURL("github:edolstra/nix-warez?rev=1234&dir=blender&ref=master")), "blender"); ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nixpkgs#packages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nixpkgs#hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nix#packages.x86_64-linux.default")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nix#")), "nix"); 
ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nix")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("gitlab:cachix/devenv/main#packages.x86_64-linux.default")), "devenv"); ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nixpkgs#packages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nixpkgs#hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nix#packages.x86_64-linux.default")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nix#")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nix")), "nix"); ASSERT_EQ(getNameFromURL(parseURL("sourcehut:cachix/devenv/main#packages.x86_64-linux.default")), "devenv"); ASSERT_EQ(getNameFromURL(parseURL("git://github.com/edolstra/dwarffs")), "dwarffs"); ASSERT_EQ(getNameFromURL(parseURL("git://github.com/edolstra/nix-warez?dir=blender")), "blender"); ASSERT_EQ(getNameFromURL(parseURL("git+file:///home/user/project")), "project"); ASSERT_EQ(getNameFromURL(parseURL("git+file:///home/user/project?ref=fa1e2d23a22")), "project"); ASSERT_EQ(getNameFromURL(parseURL("git+ssh://git@github.com/someuser/my-repo#")), "my-repo"); ASSERT_EQ(getNameFromURL(parseURL("git+git://github.com/someuser/my-repo?rev=v1.2.3")), "my-repo"); ASSERT_EQ(getNameFromURL(parseURL("git+ssh:///home/user/project?dir=subproject&rev=v2.4")), "subproject"); ASSERT_EQ(getNameFromURL(parseURL("git+http://not-even-real#packages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("git+https://not-even-real#packages.aarch64-darwin.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("tarball+http://github.com/NixOS/nix/archive/refs/tags/2.18.1#packages.x86_64-linux.jq")), "jq"); ASSERT_EQ(getNameFromURL(parseURL("tarball+https://github.com/NixOS/nix/archive/refs/tags/2.18.1#packages.x86_64-linux.hg")), "hg"); ASSERT_EQ(getNameFromURL(parseURL("tarball+file:///home/user/Downloads/nixpkgs-2.18.1#packages.aarch64-darwin.ripgrep")), "ripgrep"); 
ASSERT_EQ(getNameFromURL(parseURL("https://github.com/NixOS/nix/archive/refs/tags/2.18.1.tar.gz#packages.x86_64-linux.pv")), "pv"); ASSERT_EQ(getNameFromURL(parseURL("http://github.com/NixOS/nix/archive/refs/tags/2.18.1.tar.gz#packages.x86_64-linux.pv")), "pv"); ASSERT_EQ(getNameFromURL(parseURL("file:///home/user/project?ref=fa1e2d23a22")), "project"); ASSERT_EQ(getNameFromURL(parseURL("file+file:///home/user/project?ref=fa1e2d23a22")), "project"); ASSERT_EQ(getNameFromURL(parseURL("file+http://not-even-real#packages.x86_64-linux.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("file+http://gitfantasy.com/org/user/notaflake")), "notaflake"); ASSERT_EQ(getNameFromURL(parseURL("file+https://not-even-real#packages.aarch64-darwin.hello")), "hello"); ASSERT_EQ(getNameFromURL(parseURL("https://www.github.com/")), std::nullopt); ASSERT_EQ(getNameFromURL(parseURL("path:.")), std::nullopt); ASSERT_EQ(getNameFromURL(parseURL("file:.#")), std::nullopt); ASSERT_EQ(getNameFromURL(parseURL("path:.#packages.x86_64-linux.default")), std::nullopt); ASSERT_EQ(getNameFromURL(parseURL("path:.#packages.x86_64-linux.default^*")), std::nullopt); } }
5,539
C++
.cc
59
85.508475
141
0.695557
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,065
flakeref.cc
NixOS_nix/src/libflake-tests/flakeref.cc
#include <gtest/gtest.h> #include "fetch-settings.hh" #include "flake/flakeref.hh" namespace nix { /* ----------- tests for flake/flakeref.hh --------------------------------------------------*/ /* ---------------------------------------------------------------------------- * to_string * --------------------------------------------------------------------------*/ TEST(to_string, doesntReencodeUrl) { fetchers::Settings fetchSettings; auto s = "http://localhost:8181/test/+3d.tar.gz"; auto flakeref = parseFlakeRef(fetchSettings, s); auto parsed = flakeref.to_string(); auto expected = "http://localhost:8181/test/%2B3d.tar.gz"; ASSERT_EQ(parsed, expected); } }
741
C++
.cc
17
38.058824
95
0.453278
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,066
nix_api_util.cc
NixOS_nix/src/libutil-c/nix_api_util.cc
#include "nix_api_util.h" #include "config-global.hh" #include "error.hh" #include "nix_api_util_internal.h" #include "util.hh" #include <cxxabi.h> #include <typeinfo> nix_c_context * nix_c_context_create() { return new nix_c_context(); } void nix_c_context_free(nix_c_context * context) { delete context; } nix_err nix_context_error(nix_c_context * context) { if (context == nullptr) { throw; } try { throw; } catch (nix::Error & e) { /* Storing this exception is annoying, take what we need here */ context->last_err = e.what(); context->info = e.info(); int status; const char * demangled = abi::__cxa_demangle(typeid(e).name(), 0, 0, &status); if (demangled) { context->name = demangled; // todo: free(demangled); } else { context->name = typeid(e).name(); } context->last_err_code = NIX_ERR_NIX_ERROR; return context->last_err_code; } catch (const std::exception & e) { context->last_err = e.what(); context->last_err_code = NIX_ERR_UNKNOWN; return context->last_err_code; } // unreachable } nix_err nix_set_err_msg(nix_c_context * context, nix_err err, const char * msg) { if (context == nullptr) { // todo last_err_code throw nix::Error("Nix C api error: %s", msg); } context->last_err_code = err; context->last_err = msg; return err; } void nix_clear_err(nix_c_context * context) { if (context) context->last_err_code = NIX_OK; } const char * nix_version_get() { return PACKAGE_VERSION; } // Implementations nix_err nix_setting_get(nix_c_context * context, const char * key, nix_get_string_callback callback, void * user_data) { if (context) context->last_err_code = NIX_OK; try { std::map<std::string, nix::AbstractConfig::SettingInfo> settings; nix::globalConfig.getSettings(settings); if (settings.contains(key)) { return call_nix_get_string_callback(settings[key].value, callback, user_data); } else { return nix_set_err_msg(context, NIX_ERR_KEY, "Setting not found"); } } NIXC_CATCH_ERRS } nix_err nix_setting_set(nix_c_context * context, const char * key, const char * value) { if 
(context) context->last_err_code = NIX_OK; if (nix::globalConfig.set(key, value)) return NIX_OK; else { return nix_set_err_msg(context, NIX_ERR_KEY, "Setting not found"); } } nix_err nix_libutil_init(nix_c_context * context) { if (context) context->last_err_code = NIX_OK; try { nix::initLibUtil(); return NIX_OK; } NIXC_CATCH_ERRS } const char * nix_err_msg(nix_c_context * context, const nix_c_context * read_context, unsigned int * n) { if (context) context->last_err_code = NIX_OK; if (read_context->last_err && read_context->last_err_code != NIX_OK) { if (n) *n = read_context->last_err->size(); return read_context->last_err->c_str(); } nix_set_err_msg(context, NIX_ERR_UNKNOWN, "No error message"); return nullptr; } nix_err nix_err_name( nix_c_context * context, const nix_c_context * read_context, nix_get_string_callback callback, void * user_data) { if (context) context->last_err_code = NIX_OK; if (read_context->last_err_code != NIX_ERR_NIX_ERROR) { return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Last error was not a nix error"); } return call_nix_get_string_callback(read_context->name, callback, user_data); } nix_err nix_err_info_msg( nix_c_context * context, const nix_c_context * read_context, nix_get_string_callback callback, void * user_data) { if (context) context->last_err_code = NIX_OK; if (read_context->last_err_code != NIX_ERR_NIX_ERROR) { return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Last error was not a nix error"); } return call_nix_get_string_callback(read_context->info->msg.str(), callback, user_data); } nix_err nix_err_code(const nix_c_context * read_context) { return read_context->last_err_code; } // internal nix_err call_nix_get_string_callback(const std::string str, nix_get_string_callback callback, void * user_data) { callback(str.c_str(), str.size(), user_data); return NIX_OK; }
4,398
C++
.cc
140
26.357143
118
0.635314
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,067
store-path-accessor.cc
NixOS_nix/src/libfetchers/store-path-accessor.cc
#include "store-path-accessor.hh" #include "store-api.hh" namespace nix { ref<SourceAccessor> makeStorePathAccessor(ref<Store> store, const StorePath & storePath) { // FIXME: should use `store->getFSAccessor()` auto root = std::filesystem::path{store->toRealPath(storePath)}; auto accessor = makeFSSourceAccessor(root); accessor->setPathDisplay(root.string()); return accessor; } }
405
C++
.cc
12
30.833333
88
0.748718
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,068
fetch-to-store.cc
NixOS_nix/src/libfetchers/fetch-to-store.cc
#include "fetch-to-store.hh" #include "fetchers.hh" #include "cache.hh" namespace nix { StorePath fetchToStore( Store & store, const SourcePath & path, FetchMode mode, std::string_view name, ContentAddressMethod method, PathFilter * filter, RepairFlag repair) { // FIXME: add an optimisation for the case where the accessor is // a `PosixSourceAccessor` pointing to a store path. std::optional<fetchers::Cache::Key> cacheKey; if (!filter && path.accessor->fingerprint) { cacheKey = fetchers::Cache::Key{"fetchToStore", { {"name", std::string{name}}, {"fingerprint", *path.accessor->fingerprint}, {"method", std::string{method.render()}}, {"path", path.path.abs()} }}; if (auto res = fetchers::getCache()->lookupStorePath(*cacheKey, store)) { debug("store path cache hit for '%s'", path); return res->storePath; } } else debug("source path '%s' is uncacheable", path); Activity act(*logger, lvlChatty, actUnknown, fmt(mode == FetchMode::DryRun ? "hashing '%s'" : "copying '%s' to the store", path)); auto filter2 = filter ? *filter : defaultPathFilter; auto storePath = mode == FetchMode::DryRun ? store.computeStorePath( name, path, method, HashAlgorithm::SHA256, {}, filter2).first : store.addToStore( name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); if (cacheKey && mode == FetchMode::Copy) fetchers::getCache()->upsert(*cacheKey, store, {}, storePath); return storePath; } }
1,648
C++
.cc
43
31.418605
93
0.625705
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,069
git-utils.cc
NixOS_nix/src/libfetchers/git-utils.cc
#include "git-utils.hh" #include "cache.hh" #include "finally.hh" #include "processes.hh" #include "signals.hh" #include "users.hh" #include "fs-sink.hh" #include <git2/attr.h> #include <git2/blob.h> #include <git2/commit.h> #include <git2/config.h> #include <git2/describe.h> #include <git2/errors.h> #include <git2/global.h> #include <git2/indexer.h> #include <git2/object.h> #include <git2/odb.h> #include <git2/refs.h> #include <git2/remote.h> #include <git2/repository.h> #include <git2/revparse.h> #include <git2/status.h> #include <git2/submodule.h> #include <git2/sys/odb_backend.h> #include <git2/sys/mempack.h> #include <git2/tree.h> #include <iostream> #include <unordered_set> #include <queue> #include <regex> #include <span> namespace std { template<> struct hash<git_oid> { size_t operator()(const git_oid & oid) const { return * (size_t *) oid.id; } }; } std::ostream & operator << (std::ostream & str, const git_oid & oid) { str << git_oid_tostr_s(&oid); return str; } bool operator == (const git_oid & oid1, const git_oid & oid2) { return git_oid_equal(&oid1, &oid2); } namespace nix { struct GitSourceAccessor; // Some wrapper types that ensure that the git_*_free functions get called. 
template<auto del> struct Deleter { template <typename T> void operator()(T * p) const { del(p); }; }; typedef std::unique_ptr<git_repository, Deleter<git_repository_free>> Repository; typedef std::unique_ptr<git_tree_entry, Deleter<git_tree_entry_free>> TreeEntry; typedef std::unique_ptr<git_tree, Deleter<git_tree_free>> Tree; typedef std::unique_ptr<git_treebuilder, Deleter<git_treebuilder_free>> TreeBuilder; typedef std::unique_ptr<git_blob, Deleter<git_blob_free>> Blob; typedef std::unique_ptr<git_object, Deleter<git_object_free>> Object; typedef std::unique_ptr<git_commit, Deleter<git_commit_free>> Commit; typedef std::unique_ptr<git_reference, Deleter<git_reference_free>> Reference; typedef std::unique_ptr<git_describe_result, Deleter<git_describe_result_free>> DescribeResult; typedef std::unique_ptr<git_status_list, Deleter<git_status_list_free>> StatusList; typedef std::unique_ptr<git_remote, Deleter<git_remote_free>> Remote; typedef std::unique_ptr<git_config, Deleter<git_config_free>> GitConfig; typedef std::unique_ptr<git_config_iterator, Deleter<git_config_iterator_free>> ConfigIterator; typedef std::unique_ptr<git_odb, Deleter<git_odb_free>> ObjectDb; typedef std::unique_ptr<git_packbuilder, Deleter<git_packbuilder_free>> PackBuilder; typedef std::unique_ptr<git_indexer, Deleter<git_indexer_free>> Indexer; // A helper to ensure that we don't leak objects returned by libgit2. 
template<typename T> struct Setter { T & t; typename T::pointer p = nullptr; Setter(T & t) : t(t) { } ~Setter() { if (p) t = T(p); } operator typename T::pointer * () { return &p; } }; Hash toHash(const git_oid & oid) { #ifdef GIT_EXPERIMENTAL_SHA256 assert(oid.type == GIT_OID_SHA1); #endif Hash hash(HashAlgorithm::SHA1); memcpy(hash.hash, oid.id, hash.hashSize); return hash; } static void initLibGit2() { if (git_libgit2_init() < 0) throw Error("initialising libgit2: %s", git_error_last()->message); } git_oid hashToOID(const Hash & hash) { git_oid oid; if (git_oid_fromstr(&oid, hash.gitRev().c_str())) throw Error("cannot convert '%s' to a Git OID", hash.gitRev()); return oid; } Object lookupObject(git_repository * repo, const git_oid & oid, git_object_t type = GIT_OBJECT_ANY) { Object obj; if (git_object_lookup(Setter(obj), repo, &oid, type)) { auto err = git_error_last(); throw Error("getting Git object '%s': %s", oid, err->message); } return obj; } template<typename T> T peelObject(git_object * obj, git_object_t type) { T obj2; if (git_object_peel((git_object * *) (typename T::pointer *) Setter(obj2), obj, type)) { auto err = git_error_last(); throw Error("peeling Git object '%s': %s", *git_object_id(obj), err->message); } return obj2; } template<typename T> T dupObject(typename T::pointer obj) { T obj2; if (git_object_dup((git_object * *) (typename T::pointer *) Setter(obj2), (git_object *) obj)) throw Error("duplicating object '%s': %s", *git_object_id((git_object *) obj), git_error_last()->message); return obj2; } /** * Peel the specified object (i.e. follow tag and commit objects) to * either a blob or a tree. */ static Object peelToTreeOrBlob(git_object * obj) { /* git_object_peel() doesn't handle blob objects, so handle those specially. 
*/ if (git_object_type(obj) == GIT_OBJECT_BLOB) return dupObject<Object>(obj); else return peelObject<Object>(obj, GIT_OBJECT_TREE); } struct PackBuilderContext { std::exception_ptr exception; void handleException(const char * activity, int errCode) { switch (errCode) { case GIT_OK: break; case GIT_EUSER: if (!exception) panic("PackBuilderContext::handleException: user error, but exception was not set"); std::rethrow_exception(exception); default: throw Error("%s: %i, %s", Uncolored(activity), errCode, git_error_last()->message); } } }; extern "C" { /** * A `git_packbuilder_progress` implementation that aborts the pack building if needed. */ static int packBuilderProgressCheckInterrupt(int stage, uint32_t current, uint32_t total, void *payload) { PackBuilderContext & args = * (PackBuilderContext *) payload; try { checkInterrupt(); return GIT_OK; } catch (const std::exception & e) { args.exception = std::current_exception(); return GIT_EUSER; } }; static git_packbuilder_progress PACKBUILDER_PROGRESS_CHECK_INTERRUPT = &packBuilderProgressCheckInterrupt; } // extern "C" static void initRepoAtomically(std::filesystem::path &path, bool bare) { if (pathExists(path.string())) return; Path tmpDir = createTempDir(os_string_to_string(PathViewNG { std::filesystem::path(path).parent_path() })); AutoDelete delTmpDir(tmpDir, true); Repository tmpRepo; if (git_repository_init(Setter(tmpRepo), tmpDir.c_str(), bare)) throw Error("creating Git repository %s: %s", path, git_error_last()->message); try { std::filesystem::rename(tmpDir, path); } catch (std::filesystem::filesystem_error & e) { // Someone may race us to create the repository. 
if (e.code() == std::errc::file_exists // `path` may be attempted to be deleted by s::f::rename, in which case the code is: || e.code() == std::errc::directory_not_empty) { return; } else throw SysError("moving temporary git repository from %s to %s", tmpDir, path); } // we successfully moved the repository, so the temporary directory no longer exists. delTmpDir.cancel(); } struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl> { /** Location of the repository on disk. */ std::filesystem::path path; /** * libgit2 repository. Note that new objects are not written to disk, * because we are using a mempack backend. For writing to disk, see * `flush()`, which is also called by `GitFileSystemObjectSink::sync()`. */ Repository repo; /** * In-memory object store for efficient batched writing to packfiles. * Owned by `repo`. */ git_odb_backend * mempack_backend; GitRepoImpl(std::filesystem::path _path, bool create, bool bare) : path(std::move(_path)) { initLibGit2(); initRepoAtomically(path, bare); if (git_repository_open(Setter(repo), path.string().c_str())) throw Error("opening Git repository %s: %s", path, git_error_last()->message); ObjectDb odb; if (git_repository_odb(Setter(odb), repo.get())) throw Error("getting Git object database: %s", git_error_last()->message); // mempack_backend will be owned by the repository, so we are not expected to free it ourselves. 
if (git_mempack_new(&mempack_backend)) throw Error("creating mempack backend: %s", git_error_last()->message); if (git_odb_add_backend(odb.get(), mempack_backend, 999)) throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); } operator git_repository * () { return repo.get(); } void flush() override { checkInterrupt(); git_buf buf = GIT_BUF_INIT; Finally _disposeBuf { [&] { git_buf_dispose(&buf); } }; PackBuilder packBuilder; PackBuilderContext packBuilderContext; git_packbuilder_new(Setter(packBuilder), *this); git_packbuilder_set_callbacks(packBuilder.get(), PACKBUILDER_PROGRESS_CHECK_INTERRUPT, &packBuilderContext); git_packbuilder_set_threads(packBuilder.get(), 0 /* autodetect */); packBuilderContext.handleException( "preparing packfile", git_mempack_write_thin_pack(mempack_backend, packBuilder.get()) ); checkInterrupt(); packBuilderContext.handleException( "writing packfile", git_packbuilder_write_buf(&buf, packBuilder.get()) ); checkInterrupt(); std::string repo_path = std::string(git_repository_path(repo.get())); while (!repo_path.empty() && repo_path.back() == '/') repo_path.pop_back(); std::string pack_dir_path = repo_path + "/objects/pack"; // TODO (performance): could the indexing be done in a separate thread? // we'd need a more streaming variation of // git_packbuilder_write_buf, or incur the cost of // copying parts of the buffer to a separate thread. // (synchronously on the git_packbuilder_write_buf thread) Indexer indexer; git_indexer_progress stats; if (git_indexer_new(Setter(indexer), pack_dir_path.c_str(), 0, nullptr, nullptr)) throw Error("creating git packfile indexer: %s", git_error_last()->message); // TODO: provide index callback for checkInterrupt() termination // though this is about an order of magnitude faster than the packbuilder // expect up to 1 sec latency due to uninterruptible git_indexer_append. 
constexpr size_t chunkSize = 128 * 1024; for (size_t offset = 0; offset < buf.size; offset += chunkSize) { if (git_indexer_append(indexer.get(), buf.ptr + offset, std::min(chunkSize, buf.size - offset), &stats)) throw Error("appending to git packfile index: %s", git_error_last()->message); checkInterrupt(); } if (git_indexer_commit(indexer.get(), &stats)) throw Error("committing git packfile index: %s", git_error_last()->message); if (git_mempack_reset(mempack_backend)) throw Error("resetting git mempack backend: %s", git_error_last()->message); checkInterrupt(); } uint64_t getRevCount(const Hash & rev) override { std::unordered_set<git_oid> done; std::queue<Commit> todo; todo.push(peelObject<Commit>(lookupObject(*this, hashToOID(rev)).get(), GIT_OBJECT_COMMIT)); while (auto commit = pop(todo)) { if (!done.insert(*git_commit_id(commit->get())).second) continue; for (size_t n = 0; n < git_commit_parentcount(commit->get()); ++n) { git_commit * parent; if (git_commit_parent(&parent, commit->get(), n)) throw Error("getting parent of Git commit '%s': %s", *git_commit_id(commit->get()), git_error_last()->message); todo.push(Commit(parent)); } } return done.size(); } uint64_t getLastModified(const Hash & rev) override { auto commit = peelObject<Commit>(lookupObject(*this, hashToOID(rev)).get(), GIT_OBJECT_COMMIT); return git_commit_time(commit.get()); } bool isShallow() override { return git_repository_is_shallow(*this); } void setRemote(const std::string & name, const std::string & url) override { if (git_remote_set_url(*this, name.c_str(), url.c_str())) throw Error("setting remote '%s' URL to '%s': %s", name, url, git_error_last()->message); } Hash resolveRef(std::string ref) override { Object object; if (git_revparse_single(Setter(object), *this, ref.c_str())) throw Error("resolving Git reference '%s': %s", ref, git_error_last()->message); auto oid = git_object_id(object.get()); return toHash(*oid); } std::vector<Submodule> parseSubmodules(const std::filesystem::path & 
configFile) { GitConfig config; if (git_config_open_ondisk(Setter(config), configFile.string().c_str())) throw Error("parsing .gitmodules file: %s", git_error_last()->message); ConfigIterator it; if (git_config_iterator_glob_new(Setter(it), config.get(), "^submodule\\..*\\.(path|url|branch)$")) throw Error("iterating over .gitmodules: %s", git_error_last()->message); std::map<std::string, std::string> entries; while (true) { git_config_entry * entry = nullptr; if (auto err = git_config_next(&entry, it.get())) { if (err == GIT_ITEROVER) break; throw Error("iterating over .gitmodules: %s", git_error_last()->message); } entries.emplace(entry->name + 10, entry->value); } std::vector<Submodule> result; for (auto & [key, value] : entries) { if (!hasSuffix(key, ".path")) continue; std::string key2(key, 0, key.size() - 5); auto path = CanonPath(value); result.push_back(Submodule { .path = path, .url = entries[key2 + ".url"], .branch = entries[key2 + ".branch"], }); } return result; } // Helper for statusCallback below. static int statusCallbackTrampoline(const char * path, unsigned int statusFlags, void * payload) { return (*((std::function<int(const char * path, unsigned int statusFlags)> *) payload))(path, statusFlags); } WorkdirInfo getWorkdirInfo() override { WorkdirInfo info; /* Get the head revision, if any. */ git_oid headRev; if (auto err = git_reference_name_to_id(&headRev, *this, "HEAD")) { if (err != GIT_ENOTFOUND) throw Error("resolving HEAD: %s", git_error_last()->message); } else info.headRev = toHash(headRev); /* Get all tracked files and determine whether the working directory is dirty. 
*/ std::function<int(const char * path, unsigned int statusFlags)> statusCallback = [&](const char * path, unsigned int statusFlags) { if (!(statusFlags & GIT_STATUS_INDEX_DELETED) && !(statusFlags & GIT_STATUS_WT_DELETED)) info.files.insert(CanonPath(path)); if (statusFlags != GIT_STATUS_CURRENT) info.isDirty = true; return 0; }; git_status_options options = GIT_STATUS_OPTIONS_INIT; options.flags |= GIT_STATUS_OPT_INCLUDE_UNMODIFIED; options.flags |= GIT_STATUS_OPT_EXCLUDE_SUBMODULES; if (git_status_foreach_ext(*this, &options, &statusCallbackTrampoline, &statusCallback)) throw Error("getting working directory status: %s", git_error_last()->message); /* Get submodule info. */ auto modulesFile = path / ".gitmodules"; if (pathExists(modulesFile.string())) info.submodules = parseSubmodules(modulesFile); return info; } std::optional<std::string> getWorkdirRef() override { Reference ref; if (git_reference_lookup(Setter(ref), *this, "HEAD")) throw Error("looking up HEAD: %s", git_error_last()->message); if (auto target = git_reference_symbolic_target(ref.get())) return target; return std::nullopt; } std::vector<std::tuple<Submodule, Hash>> getSubmodules(const Hash & rev, bool exportIgnore) override; std::string resolveSubmoduleUrl(const std::string & url) override { git_buf buf = GIT_BUF_INIT; if (git_submodule_resolve_url(&buf, *this, url.c_str())) throw Error("resolving Git submodule URL '%s'", url); Finally cleanup = [&]() { git_buf_dispose(&buf); }; std::string res(buf.ptr); return res; } bool hasObject(const Hash & oid_) override { auto oid = hashToOID(oid_); Object obj; if (auto errCode = git_object_lookup(Setter(obj), *this, &oid, GIT_OBJECT_ANY)) { if (errCode == GIT_ENOTFOUND) return false; auto err = git_error_last(); throw Error("getting Git object '%s': %s", oid, err->message); } return true; } /** * A 'GitSourceAccessor' with no regard for export-ignore or any other transformations. 
*/ ref<GitSourceAccessor> getRawAccessor(const Hash & rev); ref<SourceAccessor> getAccessor(const Hash & rev, bool exportIgnore) override; ref<SourceAccessor> getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError e) override; ref<GitFileSystemObjectSink> getFileSystemObjectSink() override; static int sidebandProgressCallback(const char * str, int len, void * payload) { auto act = (Activity *) payload; act->result(resFetchStatus, trim(std::string_view(str, len))); return getInterrupted() ? -1 : 0; } static int transferProgressCallback(const git_indexer_progress * stats, void * payload) { auto act = (Activity *) payload; act->result(resFetchStatus, fmt("%d/%d objects received, %d/%d deltas indexed, %.1f MiB", stats->received_objects, stats->total_objects, stats->indexed_deltas, stats->total_deltas, stats->received_bytes / (1024.0 * 1024.0))); return getInterrupted() ? -1 : 0; } void fetch( const std::string & url, const std::string & refspec, bool shallow) override { Activity act(*logger, lvlTalkative, actFetchTree, fmt("fetching Git repository '%s'", url)); // TODO: implement git-credential helper support (preferably via libgit2, which as of 2024-01 does not support that) // then use code that was removed in this commit (see blame) auto dir = this->path; Strings gitArgs; if (shallow) { gitArgs = { "-C", dir.string(), "fetch", "--quiet", "--force", "--depth", "1", "--", url, refspec }; } else { gitArgs = { "-C", dir.string(), "fetch", "--quiet", "--force", "--", url, refspec }; } runProgram(RunOptions { .program = "git", .lookupPath = true, // FIXME: git stderr messes up our progress indicator, so // we're using --quiet for now. Should process its stderr. 
.args = gitArgs, .input = {}, .isInteractive = true }); } void verifyCommit( const Hash & rev, const std::vector<fetchers::PublicKey> & publicKeys) override { // Create ad-hoc allowedSignersFile and populate it with publicKeys auto allowedSignersFile = createTempFile().second; std::string allowedSigners; for (const fetchers::PublicKey & k : publicKeys) { if (k.type != "ssh-dsa" && k.type != "ssh-ecdsa" && k.type != "ssh-ecdsa-sk" && k.type != "ssh-ed25519" && k.type != "ssh-ed25519-sk" && k.type != "ssh-rsa") throw Error("Unknown key type '%s'.\n" "Please use one of\n" "- ssh-dsa\n" " ssh-ecdsa\n" " ssh-ecdsa-sk\n" " ssh-ed25519\n" " ssh-ed25519-sk\n" " ssh-rsa", k.type); allowedSigners += "* " + k.type + " " + k.key + "\n"; } writeFile(allowedSignersFile, allowedSigners); // Run verification command auto [status, output] = runProgram(RunOptions { .program = "git", .args = { "-c", "gpg.ssh.allowedSignersFile=" + allowedSignersFile, "-C", path.string(), "verify-commit", rev.gitRev() }, .mergeStderrToStdout = true, }); /* Evaluate result through status code and checking if public key fingerprints appear on stderr. This is neccessary because the git command might also succeed due to the commit being signed by gpg keys that are present in the users key agent. 
*/ std::string re = R"(Good "git" signature for \* with .* key SHA256:[)"; for (const fetchers::PublicKey & k : publicKeys){ // Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally std::string keyDecoded; try { keyDecoded = base64Decode(k.key); } catch (Error & e) { e.addTrace({}, "while decoding public key '%s' used for git signature", k.key); } auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "="); auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" ); re += "(" + escaped_fingerprint + ")"; } re += "]"; if (status == 0 && std::regex_search(output, std::regex(re))) printTalkative("Signature verification on commit %s succeeded.", rev.gitRev()); else throw Error("Commit signature verification on commit %s failed: %s", rev.gitRev(), output); } Hash treeHashToNarHash(const Hash & treeHash) override { auto accessor = getAccessor(treeHash, false); fetchers::Cache::Key cacheKey{"treeHashToNarHash", {{"treeHash", treeHash.gitRev()}}}; if (auto res = fetchers::getCache()->lookup(cacheKey)) return Hash::parseAny(fetchers::getStrAttr(*res, "narHash"), HashAlgorithm::SHA256); auto narHash = accessor->hashPath(CanonPath::root); fetchers::getCache()->upsert(cacheKey, fetchers::Attrs({{"narHash", narHash.to_string(HashFormat::SRI, true)}})); return narHash; } Hash dereferenceSingletonDirectory(const Hash & oid_) override { auto oid = hashToOID(oid_); auto _tree = lookupObject(*this, oid, GIT_OBJECT_TREE); auto tree = (const git_tree *) &*_tree; if (git_tree_entrycount(tree) == 1) { auto entry = git_tree_entry_byindex(tree, 0); auto mode = git_tree_entry_filemode(entry); if (mode == GIT_FILEMODE_TREE) oid = *git_tree_entry_id(entry); } return toHash(oid); } }; ref<GitRepo> GitRepo::openRepo(const std::filesystem::path & path, bool create, bool bare) { return make_ref<GitRepoImpl>(path, create, bare); } /** * Raw git tree input 
accessor. */ struct GitSourceAccessor : SourceAccessor { ref<GitRepoImpl> repo; Object root; GitSourceAccessor(ref<GitRepoImpl> repo_, const Hash & rev) : repo(repo_) , root(peelToTreeOrBlob(lookupObject(*repo, hashToOID(rev)).get())) { } std::string readBlob(const CanonPath & path, bool symlink) { auto blob = getBlob(path, symlink); auto data = std::string_view((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); return std::string(data); } std::string readFile(const CanonPath & path) override { return readBlob(path, false); } bool pathExists(const CanonPath & path) override { return path.isRoot() ? true : (bool) lookup(path); } std::optional<Stat> maybeLstat(const CanonPath & path) override { if (path.isRoot()) return Stat { .type = git_object_type(root.get()) == GIT_OBJECT_TREE ? tDirectory : tRegular }; auto entry = lookup(path); if (!entry) return std::nullopt; auto mode = git_tree_entry_filemode(entry); if (mode == GIT_FILEMODE_TREE) return Stat { .type = tDirectory }; else if (mode == GIT_FILEMODE_BLOB) return Stat { .type = tRegular }; else if (mode == GIT_FILEMODE_BLOB_EXECUTABLE) return Stat { .type = tRegular, .isExecutable = true }; else if (mode == GIT_FILEMODE_LINK) return Stat { .type = tSymlink }; else if (mode == GIT_FILEMODE_COMMIT) // Treat submodules as an empty directory. 
return Stat { .type = tDirectory }; else throw Error("file '%s' has an unsupported Git file type"); } DirEntries readDirectory(const CanonPath & path) override { return std::visit(overloaded { [&](Tree tree) { DirEntries res; auto count = git_tree_entrycount(tree.get()); for (size_t n = 0; n < count; ++n) { auto entry = git_tree_entry_byindex(tree.get(), n); // FIXME: add to cache res.emplace(std::string(git_tree_entry_name(entry)), DirEntry{}); } return res; }, [&](Submodule) { return DirEntries(); } }, getTree(path)); } std::string readLink(const CanonPath & path) override { return readBlob(path, true); } /** * If `path` exists and is a submodule, return its * revision. Otherwise return nothing. */ std::optional<Hash> getSubmoduleRev(const CanonPath & path) { auto entry = lookup(path); if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_COMMIT) return std::nullopt; return toHash(*git_tree_entry_id(entry)); } std::unordered_map<CanonPath, TreeEntry> lookupCache; /* Recursively look up 'path' relative to the root. */ git_tree_entry * lookup(const CanonPath & path) { auto i = lookupCache.find(path); if (i != lookupCache.end()) return i->second.get(); auto parent = path.parent(); if (!parent) return nullptr; auto name = path.baseName().value(); auto parentTree = lookupTree(*parent); if (!parentTree) return nullptr; auto count = git_tree_entrycount(parentTree->get()); git_tree_entry * res = nullptr; /* Add all the tree entries to the cache to speed up subsequent lookups. 
*/ for (size_t n = 0; n < count; ++n) { auto entry = git_tree_entry_byindex(parentTree->get(), n); TreeEntry copy; if (git_tree_entry_dup(Setter(copy), entry)) throw Error("dupping tree entry: %s", git_error_last()->message); auto entryName = std::string_view(git_tree_entry_name(entry)); if (entryName == name) res = copy.get(); auto path2 = *parent; path2.push(entryName); lookupCache.emplace(path2, std::move(copy)).first->second.get(); } return res; } std::optional<Tree> lookupTree(const CanonPath & path) { if (path.isRoot()) { if (git_object_type(root.get()) == GIT_OBJECT_TREE) return dupObject<Tree>((git_tree *) &*root); else return std::nullopt; } auto entry = lookup(path); if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_TREE) return std::nullopt; Tree tree; if (git_tree_entry_to_object((git_object * *) (git_tree * *) Setter(tree), *repo, entry)) throw Error("looking up directory '%s': %s", showPath(path), git_error_last()->message); return tree; } git_tree_entry * need(const CanonPath & path) { auto entry = lookup(path); if (!entry) throw Error("'%s' does not exist", showPath(path)); return entry; } struct Submodule { }; std::variant<Tree, Submodule> getTree(const CanonPath & path) { if (path.isRoot()) { if (git_object_type(root.get()) == GIT_OBJECT_TREE) return dupObject<Tree>((git_tree *) &*root); else throw Error("Git root object '%s' is not a directory", *git_object_id(root.get())); } auto entry = need(path); if (git_tree_entry_type(entry) == GIT_OBJECT_COMMIT) return Submodule(); if (git_tree_entry_type(entry) != GIT_OBJECT_TREE) throw Error("'%s' is not a directory", showPath(path)); Tree tree; if (git_tree_entry_to_object((git_object * *) (git_tree * *) Setter(tree), *repo, entry)) throw Error("looking up directory '%s': %s", showPath(path), git_error_last()->message); return tree; } Blob getBlob(const CanonPath & path, bool expectSymlink) { if (!expectSymlink && git_object_type(root.get()) == GIT_OBJECT_BLOB) return dupObject<Blob>((git_blob *) 
&*root); auto notExpected = [&]() { throw Error( expectSymlink ? "'%s' is not a symlink" : "'%s' is not a regular file", showPath(path)); }; if (path.isRoot()) notExpected(); auto entry = need(path); if (git_tree_entry_type(entry) != GIT_OBJECT_BLOB) notExpected(); auto mode = git_tree_entry_filemode(entry); if (expectSymlink) { if (mode != GIT_FILEMODE_LINK) notExpected(); } else { if (mode != GIT_FILEMODE_BLOB && mode != GIT_FILEMODE_BLOB_EXECUTABLE) notExpected(); } Blob blob; if (git_tree_entry_to_object((git_object * *) (git_blob * *) Setter(blob), *repo, entry)) throw Error("looking up file '%s': %s", showPath(path), git_error_last()->message); return blob; } }; struct GitExportIgnoreSourceAccessor : CachingFilteringSourceAccessor { ref<GitRepoImpl> repo; std::optional<Hash> rev; GitExportIgnoreSourceAccessor(ref<GitRepoImpl> repo, ref<SourceAccessor> next, std::optional<Hash> rev) : CachingFilteringSourceAccessor(next, [&](const CanonPath & path) { return RestrictedPathError(fmt("'%s' does not exist because it was fetched with exportIgnore enabled", path)); }) , repo(repo) , rev(rev) { } bool gitAttrGet(const CanonPath & path, const char * attrName, const char * & valueOut) { const char * pathCStr = path.rel_c_str(); if (rev) { git_attr_options opts = GIT_ATTR_OPTIONS_INIT; opts.attr_commit_id = hashToOID(*rev); // TODO: test that gitattributes from global and system are not used // (ie more or less: home and etc - both of them!) opts.flags = GIT_ATTR_CHECK_INCLUDE_COMMIT | GIT_ATTR_CHECK_NO_SYSTEM; return git_attr_get_ext( &valueOut, *repo, &opts, pathCStr, attrName ); } else { return git_attr_get( &valueOut, *repo, GIT_ATTR_CHECK_INDEX_ONLY | GIT_ATTR_CHECK_NO_SYSTEM, pathCStr, attrName); } } bool isExportIgnored(const CanonPath & path) { const char *exportIgnoreEntry = nullptr; // GIT_ATTR_CHECK_INDEX_ONLY: // > It will use index only for creating archives or for a bare repo // > (if an index has been specified for the bare repo). 
// -- https://github.com/libgit2/libgit2/blob/HEAD/include/git2/attr.h#L113C62-L115C48 if (gitAttrGet(path, "export-ignore", exportIgnoreEntry)) { if (git_error_last()->klass == GIT_ENOTFOUND) return false; else throw Error("looking up '%s': %s", showPath(path), git_error_last()->message); } else { // Official git will silently reject export-ignore lines that have // values. We do the same. return GIT_ATTR_IS_TRUE(exportIgnoreEntry); } } bool isAllowedUncached(const CanonPath & path) override { return !isExportIgnored(path); } }; struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink { ref<GitRepoImpl> repo; struct PendingDir { std::string name; TreeBuilder builder; }; std::vector<PendingDir> pendingDirs; void pushBuilder(std::string name) { const git_tree_entry * entry; Tree prevTree = nullptr; if (!pendingDirs.empty() && (entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str()))) { /* Clone a tree that we've already finished. This happens if a tarball has directory entries that are not contiguous. 
*/ if (git_tree_entry_type(entry) != GIT_OBJECT_TREE) throw Error("parent of '%s' is not a directory", name); if (git_tree_entry_to_object((git_object * *) (git_tree * *) Setter(prevTree), *repo, entry)) throw Error("looking up parent of '%s': %s", name, git_error_last()->message); } git_treebuilder * b; if (git_treebuilder_new(&b, *repo, prevTree.get())) throw Error("creating a tree builder: %s", git_error_last()->message); pendingDirs.push_back({ .name = std::move(name), .builder = TreeBuilder(b) }); }; GitFileSystemObjectSinkImpl(ref<GitRepoImpl> repo) : repo(repo) { pushBuilder(""); } std::pair<git_oid, std::string> popBuilder() { assert(!pendingDirs.empty()); auto pending = std::move(pendingDirs.back()); git_oid oid; if (git_treebuilder_write(&oid, pending.builder.get())) throw Error("creating a tree object: %s", git_error_last()->message); pendingDirs.pop_back(); return {oid, pending.name}; }; void addToTree(const std::string & name, const git_oid & oid, git_filemode_t mode) { assert(!pendingDirs.empty()); auto & pending = pendingDirs.back(); if (git_treebuilder_insert(nullptr, pending.builder.get(), name.c_str(), &oid, mode)) throw Error("adding a file to a tree builder: %s", git_error_last()->message); }; void updateBuilders(std::span<const std::string> names) { // Find the common prefix of pendingDirs and names. size_t prefixLen = 0; for (; prefixLen < names.size() && prefixLen + 1 < pendingDirs.size(); ++prefixLen) if (names[prefixLen] != pendingDirs[prefixLen + 1].name) break; // Finish the builders that are not part of the common prefix. for (auto n = pendingDirs.size(); n > prefixLen + 1; --n) { auto [oid, name] = popBuilder(); addToTree(name, oid, GIT_FILEMODE_TREE); } // Create builders for the new directories. for (auto n = prefixLen; n < names.size(); ++n) pushBuilder(names[n]); }; bool prepareDirs(const std::vector<std::string> & pathComponents, bool isDir) { std::span<const std::string> pathComponents2{pathComponents}; updateBuilders( isDir ? 
pathComponents2 : pathComponents2.first(pathComponents2.size() - 1)); return true; } void createRegularFile( const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override { auto pathComponents = tokenizeString<std::vector<std::string>>(path.rel(), "/"); if (!prepareDirs(pathComponents, false)) return; git_writestream * stream = nullptr; if (git_blob_create_from_stream(&stream, *repo, nullptr)) throw Error("creating a blob stream object: %s", git_error_last()->message); struct CRF : CreateRegularFileSink { const CanonPath & path; GitFileSystemObjectSinkImpl & back; git_writestream * stream; bool executable = false; CRF(const CanonPath & path, GitFileSystemObjectSinkImpl & back, git_writestream * stream) : path(path), back(back), stream(stream) {} void operator () (std::string_view data) override { if (stream->write(stream, data.data(), data.size())) throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); } void isExecutable() override { executable = true; } } crf { path, *this, stream }; func(crf); git_oid oid; if (git_blob_create_from_stream_commit(&oid, stream)) throw Error("creating a blob object for tarball member '%s': %s", path, git_error_last()->message); addToTree(*pathComponents.rbegin(), oid, crf.executable ? 
GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB); } void createDirectory(const CanonPath & path) override { auto pathComponents = tokenizeString<std::vector<std::string>>(path.rel(), "/"); (void) prepareDirs(pathComponents, true); } void createSymlink(const CanonPath & path, const std::string & target) override { auto pathComponents = tokenizeString<std::vector<std::string>>(path.rel(), "/"); if (!prepareDirs(pathComponents, false)) return; git_oid oid; if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) throw Error("creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); addToTree(*pathComponents.rbegin(), oid, GIT_FILEMODE_LINK); } void createHardlink(const CanonPath & path, const CanonPath & target) override { std::vector<std::string> pathComponents; for (auto & c : path) pathComponents.emplace_back(c); if (!prepareDirs(pathComponents, false)) return; // We can't just look up the path from the start of the root, since // some parent directories may not have finished yet, so we compute // a relative path that helps us find the right git_tree_builder or object. auto relTarget = CanonPath(path).parent()->makeRelative(target); auto dir = pendingDirs.rbegin(); // For each ../ component at the start, go up one directory. // CanonPath::makeRelative() always puts all .. elements at the start, // so they're all handled by this loop: std::string_view relTargetLeft(relTarget); while (hasPrefix(relTargetLeft, "../")) { if (dir == pendingDirs.rend()) throw Error("invalid hard link target '%s' for path '%s'", target, path); ++dir; relTargetLeft = relTargetLeft.substr(3); } if (dir == pendingDirs.rend()) throw Error("invalid hard link target '%s' for path '%s'", target, path); // Look up the remainder of the target, starting at the // top-most `git_treebuilder`. 
std::variant<git_treebuilder *, git_oid> curDir{dir->builder.get()}; Object tree; // needed to keep `entry` alive const git_tree_entry * entry = nullptr; for (auto & c : CanonPath(relTargetLeft)) { if (auto builder = std::get_if<git_treebuilder *>(&curDir)) { assert(*builder); if (!(entry = git_treebuilder_get(*builder, std::string(c).c_str()))) throw Error("cannot find hard link target '%s' for path '%s'", target, path); curDir = *git_tree_entry_id(entry); } else if (auto oid = std::get_if<git_oid>(&curDir)) { tree = lookupObject(*repo, *oid, GIT_OBJECT_TREE); if (!(entry = git_tree_entry_byname((const git_tree *) &*tree, std::string(c).c_str()))) throw Error("cannot find hard link target '%s' for path '%s'", target, path); curDir = *git_tree_entry_id(entry); } } assert(entry); addToTree(*pathComponents.rbegin(), *git_tree_entry_id(entry), git_tree_entry_filemode(entry)); } Hash flush() override { updateBuilders({}); auto [oid, _name] = popBuilder(); repo->flush(); return toHash(oid); } }; ref<GitSourceAccessor> GitRepoImpl::getRawAccessor(const Hash & rev) { auto self = ref<GitRepoImpl>(shared_from_this()); return make_ref<GitSourceAccessor>(self, rev); } ref<SourceAccessor> GitRepoImpl::getAccessor(const Hash & rev, bool exportIgnore) { auto self = ref<GitRepoImpl>(shared_from_this()); ref<GitSourceAccessor> rawGitAccessor = getRawAccessor(rev); if (exportIgnore) { return make_ref<GitExportIgnoreSourceAccessor>(self, rawGitAccessor, rev); } else { return rawGitAccessor; } } ref<SourceAccessor> GitRepoImpl::getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError makeNotAllowedError) { auto self = ref<GitRepoImpl>(shared_from_this()); /* In case of an empty workdir, return an empty in-memory tree. We cannot use AllowListSourceAccessor because it would return an error for the root (and we can't add the root to the allow-list since that would allow access to all its children). */ ref<SourceAccessor> fileAccessor = wd.files.empty() ? 
makeEmptySourceAccessor() : AllowListSourceAccessor::create( makeFSSourceAccessor(path), std::set<CanonPath> { wd.files }, std::move(makeNotAllowedError)).cast<SourceAccessor>(); if (exportIgnore) return make_ref<GitExportIgnoreSourceAccessor>(self, fileAccessor, std::nullopt); else return fileAccessor; } ref<GitFileSystemObjectSink> GitRepoImpl::getFileSystemObjectSink() { return make_ref<GitFileSystemObjectSinkImpl>(ref<GitRepoImpl>(shared_from_this())); } std::vector<std::tuple<GitRepoImpl::Submodule, Hash>> GitRepoImpl::getSubmodules(const Hash & rev, bool exportIgnore) { /* Read the .gitmodules files from this revision. */ CanonPath modulesFile(".gitmodules"); auto accessor = getAccessor(rev, exportIgnore); if (!accessor->pathExists(modulesFile)) return {}; /* Parse it and get the revision of each submodule. */ auto configS = accessor->readFile(modulesFile); auto [fdTemp, pathTemp] = createTempFile("nix-git-submodules"); writeFull(fdTemp.get(), configS); std::vector<std::tuple<Submodule, Hash>> result; auto rawAccessor = getRawAccessor(rev); for (auto & submodule : parseSubmodules(pathTemp)) { /* Filter out .gitmodules entries that don't exist or are not submodules. */ if (auto rev = rawAccessor->getSubmoduleRev(submodule.path)) result.push_back({std::move(submodule), *rev}); } return result; } ref<GitRepo> getTarballCache() { static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache"; return GitRepo::openRepo(repoDir, true, true); } }
44,371
C++
.cc
1,043
33.917546
137
0.605925
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,070
filtering-source-accessor.cc
NixOS_nix/src/libfetchers/filtering-source-accessor.cc
#include "filtering-source-accessor.hh" namespace nix { std::optional<std::filesystem::path> FilteringSourceAccessor::getPhysicalPath(const CanonPath & path) { checkAccess(path); return next->getPhysicalPath(prefix / path); } std::string FilteringSourceAccessor::readFile(const CanonPath & path) { checkAccess(path); return next->readFile(prefix / path); } bool FilteringSourceAccessor::pathExists(const CanonPath & path) { return isAllowed(path) && next->pathExists(prefix / path); } std::optional<SourceAccessor::Stat> FilteringSourceAccessor::maybeLstat(const CanonPath & path) { checkAccess(path); return next->maybeLstat(prefix / path); } SourceAccessor::DirEntries FilteringSourceAccessor::readDirectory(const CanonPath & path) { checkAccess(path); DirEntries entries; for (auto & entry : next->readDirectory(prefix / path)) { if (isAllowed(path / entry.first)) entries.insert(std::move(entry)); } return entries; } std::string FilteringSourceAccessor::readLink(const CanonPath & path) { checkAccess(path); return next->readLink(prefix / path); } std::string FilteringSourceAccessor::showPath(const CanonPath & path) { return displayPrefix + next->showPath(prefix / path) + displaySuffix; } void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) throw makeNotAllowedError ? 
makeNotAllowedError(path) : RestrictedPathError("access to path '%s' is forbidden", showPath(path)); } struct AllowListSourceAccessorImpl : AllowListSourceAccessor { std::set<CanonPath> allowedPrefixes; AllowListSourceAccessorImpl( ref<SourceAccessor> next, std::set<CanonPath> && allowedPrefixes, MakeNotAllowedError && makeNotAllowedError) : AllowListSourceAccessor(SourcePath(next), std::move(makeNotAllowedError)) , allowedPrefixes(std::move(allowedPrefixes)) { } bool isAllowed(const CanonPath & path) override { return path.isAllowed(allowedPrefixes); } void allowPrefix(CanonPath prefix) override { allowedPrefixes.insert(std::move(prefix)); } }; ref<AllowListSourceAccessor> AllowListSourceAccessor::create( ref<SourceAccessor> next, std::set<CanonPath> && allowedPrefixes, MakeNotAllowedError && makeNotAllowedError) { return make_ref<AllowListSourceAccessorImpl>(next, std::move(allowedPrefixes), std::move(makeNotAllowedError)); } bool CachingFilteringSourceAccessor::isAllowed(const CanonPath & path) { auto i = cache.find(path); if (i != cache.end()) return i->second; auto res = isAllowedUncached(path); cache.emplace(path, res); return res; } }
2,735
C++
.cc
82
29.182927
115
0.736443
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,071
path.cc
NixOS_nix/src/libfetchers/path.cc
#include "fetchers.hh" #include "store-api.hh" #include "archive.hh" #include "store-path-accessor.hh" namespace nix::fetchers { struct PathInputScheme : InputScheme { std::optional<Input> inputFromURL( const Settings & settings, const ParsedURL & url, bool requireTree) const override { if (url.scheme != "path") return {}; if (url.authority && *url.authority != "") throw Error("path URL '%s' should not have an authority ('%s')", url.url, *url.authority); Input input{settings}; input.attrs.insert_or_assign("type", "path"); input.attrs.insert_or_assign("path", url.path); for (auto & [name, value] : url.query) if (name == "rev" || name == "narHash") input.attrs.insert_or_assign(name, value); else if (name == "revCount" || name == "lastModified") { if (auto n = string2Int<uint64_t>(value)) input.attrs.insert_or_assign(name, *n); else throw Error("path URL '%s' has invalid parameter '%s'", url.to_string(), name); } else throw Error("path URL '%s' has unsupported parameter '%s'", url.to_string(), name); return input; } std::string_view schemeName() const override { return "path"; } StringSet allowedAttrs() const override { return { "path", /* Allow the user to pass in "fake" tree info attributes. This is useful for making a pinned tree work the same as the repository from which is exported (e.g. path:/nix/store/...-source?lastModified=1585388205&rev=b0c285...). 
*/ "rev", "revCount", "lastModified", "narHash", }; } std::optional<Input> inputFromAttrs( const Settings & settings, const Attrs & attrs) const override { getStrAttr(attrs, "path"); Input input{settings}; input.attrs = attrs; return input; } ParsedURL toURL(const Input & input) const override { auto query = attrsToQuery(input.attrs); query.erase("path"); query.erase("type"); query.erase("__final"); return ParsedURL { .scheme = "path", .path = getStrAttr(input.attrs, "path"), .query = query, }; } std::optional<Path> getSourcePath(const Input & input) const override { return getStrAttr(input.attrs, "path"); } void putFile( const Input & input, const CanonPath & path, std::string_view contents, std::optional<std::string> commitMsg) const override { writeFile((CanonPath(getAbsPath(input)) / path).abs(), contents); } std::optional<std::string> isRelative(const Input & input) const { auto path = getStrAttr(input.attrs, "path"); if (hasPrefix(path, "/")) return std::nullopt; else return path; } bool isLocked(const Input & input) const override { return (bool) input.getNarHash(); } CanonPath getAbsPath(const Input & input) const { auto path = getStrAttr(input.attrs, "path"); if (path[0] == '/') return CanonPath(path); throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string()); } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override { Input input(_input); std::string absPath; auto path = getStrAttr(input.attrs, "path"); if (path[0] != '/') { if (!input.parent) throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string()); auto parent = canonPath(*input.parent); // the path isn't relative, prefix it absPath = nix::absPath(path, parent); // for security, ensure that if the parent is a store path, it's inside it if (store->isInStore(parent)) { auto storePath = store->printStorePath(store->toStorePath(parent).first); if (!isDirOrInDir(absPath, storePath)) throw 
BadStorePath("relative path '%s' points outside of its parent's store path '%s'", path, storePath); } } else absPath = path; Activity act(*logger, lvlTalkative, actUnknown, fmt("copying '%s'", absPath)); // FIXME: check whether access to 'path' is allowed. auto storePath = store->maybeParseStorePath(absPath); if (storePath) store->addTempRoot(*storePath); time_t mtime = 0; if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) { // FIXME: try to substitute storePath. auto src = sinkToSource([&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath, sink, defaultPathFilter); }); storePath = store->addToStoreFromDump(*src, "source"); } /* Trust the lastModified value supplied by the user, if any. It's not a "secure" attribute so we don't care. */ if (!input.getLastModified()) input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); return {makeStorePathAccessor(store, *storePath), std::move(input)}; } std::optional<std::string> getFingerprint(ref<Store> store, const Input & input) const override { if (isRelative(input)) return std::nullopt; /* If this path is in the Nix store, use the hash of the store object and the subpath. */ auto path = getAbsPath(input); try { auto [storePath, subPath] = store->toStorePath(path.abs()); auto info = store->queryPathInfo(storePath); return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath); } catch (Error &) { return std::nullopt; } } std::optional<ExperimentalFeature> experimentalFeature() const override { return Xp::Flakes; } }; static auto rPathInputScheme = OnStartup([] { registerInputScheme(std::make_unique<PathInputScheme>()); }); }
6,391
C++
.cc
161
30.236025
125
0.582473
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,072
git.cc
NixOS_nix/src/libfetchers/git.cc
#include "error.hh" #include "fetchers.hh" #include "users.hh" #include "cache.hh" #include "globals.hh" #include "tarfile.hh" #include "store-api.hh" #include "url-parts.hh" #include "pathlocks.hh" #include "processes.hh" #include "git.hh" #include "mounted-source-accessor.hh" #include "git-utils.hh" #include "logging.hh" #include "finally.hh" #include "fetch-settings.hh" #include "json-utils.hh" #include <regex> #include <string.h> #include <sys/time.h> #ifndef _WIN32 # include <sys/wait.h> #endif using namespace std::string_literals; namespace nix::fetchers { namespace { // Explicit initial branch of our bare repo to suppress warnings from new version of git. // The value itself does not matter, since we always fetch a specific revision or branch. // It is set with `-c init.defaultBranch=` instead of `--initial-branch=` to stay compatible with // old version of git, which will ignore unrecognized `-c` options. const std::string gitInitialBranch = "__nix_dummy_branch"; bool isCacheFileWithinTtl(time_t now, const struct stat & st) { return st.st_mtime + settings.tarballTtl > now; } Path getCachePath(std::string_view key, bool shallow) { return getCacheDir() + "/gitv3/" + hashString(HashAlgorithm::SHA256, key).to_string(HashFormat::Nix32, false) + (shallow ? "-shallow" : ""); } // Returns the name of the HEAD branch. // // Returns the head branch name as reported by git ls-remote --symref, e.g., if // ls-remote returns the output below, "main" is returned based on the ref line. // // ref: refs/heads/main HEAD // ... 
std::optional<std::string> readHead(const Path & path) { auto [status, output] = runProgram(RunOptions { .program = "git", // FIXME: use 'HEAD' to avoid returning all refs .args = {"ls-remote", "--symref", path}, .isInteractive = true, }); if (status != 0) return std::nullopt; std::string_view line = output; line = line.substr(0, line.find("\n")); if (const auto parseResult = git::parseLsRemoteLine(line)) { switch (parseResult->kind) { case git::LsRemoteRefLine::Kind::Symbolic: debug("resolved HEAD ref '%s' for repo '%s'", parseResult->target, path); break; case git::LsRemoteRefLine::Kind::Object: debug("resolved HEAD rev '%s' for repo '%s'", parseResult->target, path); break; } return parseResult->target; } return std::nullopt; } // Persist the HEAD ref from the remote repo in the local cached repo. bool storeCachedHead(const std::string & actualUrl, const std::string & headRef) { // set shallow=false as HEAD will never be queried for a shallow repo Path cacheDir = getCachePath(actualUrl, false); try { runProgram("git", true, { "-C", cacheDir, "--git-dir", ".", "symbolic-ref", "--", "HEAD", headRef }); } catch (ExecError &e) { if ( #ifndef WIN32 // TODO abstract over exit status handling on Windows !WIFEXITED(e.status) #else e.status != 0 #endif ) throw; return false; } /* No need to touch refs/HEAD, because `git symbolic-ref` updates the mtime. */ return true; } std::optional<std::string> readHeadCached(const std::string & actualUrl) { // Create a cache path to store the branch of the HEAD ref. Append something // in front of the URL to prevent collision with the repository itself. 
// set shallow=false as HEAD will never be queried for a shallow repo Path cacheDir = getCachePath(actualUrl, false); Path headRefFile = cacheDir + "/HEAD"; time_t now = time(0); struct stat st; std::optional<std::string> cachedRef; if (stat(headRefFile.c_str(), &st) == 0) { cachedRef = readHead(cacheDir); if (cachedRef != std::nullopt && *cachedRef != gitInitialBranch && isCacheFileWithinTtl(now, st)) { debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl); return cachedRef; } } auto ref = readHead(actualUrl); if (ref) return ref; if (cachedRef) { // If the cached git ref is expired in fetch() below, and the 'git fetch' // fails, it falls back to continuing with the most recent version. // This function must behave the same way, so we return the expired // cached ref here. warn("could not get HEAD ref for repository '%s'; using expired cached ref '%s'", actualUrl, *cachedRef); return *cachedRef; } return std::nullopt; } std::vector<PublicKey> getPublicKeys(const Attrs & attrs) { std::vector<PublicKey> publicKeys; if (attrs.contains("publicKeys")) { auto pubKeysJson = nlohmann::json::parse(getStrAttr(attrs, "publicKeys")); auto & pubKeys = getArray(pubKeysJson); for (auto & key : pubKeys) { publicKeys.push_back(key); } } if (attrs.contains("publicKey")) publicKeys.push_back(PublicKey{maybeGetStrAttr(attrs, "keytype").value_or("ssh-ed25519"),getStrAttr(attrs, "publicKey")}); return publicKeys; } } // end namespace static const Hash nullRev{HashAlgorithm::SHA1}; struct GitInputScheme : InputScheme { std::optional<Input> inputFromURL( const Settings & settings, const ParsedURL & url, bool requireTree) const override { if (url.scheme != "git" && url.scheme != "git+http" && url.scheme != "git+https" && url.scheme != "git+ssh" && url.scheme != "git+file") return {}; auto url2(url); if (hasPrefix(url2.scheme, "git+")) url2.scheme = std::string(url2.scheme, 4); url2.query.clear(); Attrs attrs; attrs.emplace("type", "git"); for (auto & [name, value] : 
url.query) { if (name == "rev" || name == "ref" || name == "keytype" || name == "publicKey" || name == "publicKeys") attrs.emplace(name, value); else if (name == "shallow" || name == "submodules" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit") attrs.emplace(name, Explicit<bool> { value == "1" }); else url2.query.emplace(name, value); } attrs.emplace("url", url2.to_string()); return inputFromAttrs(settings, attrs); } std::string_view schemeName() const override { return "git"; } StringSet allowedAttrs() const override { return { "url", "ref", "rev", "shallow", "submodules", "exportIgnore", "lastModified", "revCount", "narHash", "allRefs", "name", "dirtyRev", "dirtyShortRev", "verifyCommit", "keytype", "publicKey", "publicKeys", }; } std::optional<Input> inputFromAttrs( const Settings & settings, const Attrs & attrs) const override { for (auto & [name, _] : attrs) if (name == "verifyCommit" || name == "keytype" || name == "publicKey" || name == "publicKeys") experimentalFeatureSettings.require(Xp::VerifiedFetches); maybeGetBoolAttr(attrs, "verifyCommit"); if (auto ref = maybeGetStrAttr(attrs, "ref")) { if (std::regex_search(*ref, badGitRefRegex)) throw BadURL("invalid Git branch/tag name '%s'", *ref); } Input input{settings}; input.attrs = attrs; auto url = fixGitURL(getStrAttr(attrs, "url")); parseURL(url); input.attrs["url"] = url; getShallowAttr(input); getSubmodulesAttr(input); getAllRefsAttr(input); return input; } ParsedURL toURL(const Input & input) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); if (url.scheme != "git") url.scheme = "git+" + url.scheme; if (auto rev = input.getRev()) url.query.insert_or_assign("rev", rev->gitRev()); if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref); if (getShallowAttr(input)) url.query.insert_or_assign("shallow", "1"); if (getSubmodulesAttr(input)) url.query.insert_or_assign("submodules", "1"); if (maybeGetBoolAttr(input.attrs, 
"exportIgnore").value_or(false)) url.query.insert_or_assign("exportIgnore", "1"); if (maybeGetBoolAttr(input.attrs, "verifyCommit").value_or(false)) url.query.insert_or_assign("verifyCommit", "1"); auto publicKeys = getPublicKeys(input.attrs); if (publicKeys.size() == 1) { url.query.insert_or_assign("keytype", publicKeys.at(0).type); url.query.insert_or_assign("publicKey", publicKeys.at(0).key); } else if (publicKeys.size() > 1) url.query.insert_or_assign("publicKeys", publicKeys_to_string(publicKeys)); return url; } Input applyOverrides( const Input & input, std::optional<std::string> ref, std::optional<Hash> rev) const override { auto res(input); if (rev) res.attrs.insert_or_assign("rev", rev->gitRev()); if (ref) res.attrs.insert_or_assign("ref", *ref); if (!res.getRef() && res.getRev()) throw Error("Git input '%s' has a commit hash but no branch/tag name", res.to_string()); return res; } void clone(const Input & input, const Path & destDir) const override { auto repoInfo = getRepoInfo(input); Strings args = {"clone"}; args.push_back(repoInfo.url); if (auto ref = input.getRef()) { args.push_back("--branch"); args.push_back(*ref); } if (input.getRev()) throw UnimplementedError("cloning a specific revision is not implemented"); args.push_back(destDir); runProgram("git", true, args, {}, true); } std::optional<Path> getSourcePath(const Input & input) const override { auto repoInfo = getRepoInfo(input); if (repoInfo.isLocal) return repoInfo.url; return std::nullopt; } void putFile( const Input & input, const CanonPath & path, std::string_view contents, std::optional<std::string> commitMsg) const override { auto repoInfo = getRepoInfo(input); if (!repoInfo.isLocal) throw Error("cannot commit '%s' to Git repository '%s' because it's not a working tree", path, input.to_string()); writeFile((CanonPath(repoInfo.url) / path).abs(), contents); auto result = runProgram(RunOptions { .program = "git", .args = {"-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "check-ignore", 
"--quiet", std::string(path.rel())}, }); auto exitCode = #ifndef WIN32 // TODO abstract over exit status handling on Windows WEXITSTATUS(result.first) #else result.first #endif ; if (exitCode != 0) { // The path is not `.gitignore`d, we can add the file. runProgram("git", true, { "-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "add", "--intent-to-add", "--", std::string(path.rel()) }); if (commitMsg) { // Pause the logger to allow for user input (such as a gpg passphrase) in `git commit` logger->pause(); Finally restoreLogger([]() { logger->resume(); }); runProgram("git", true, { "-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "commit", std::string(path.rel()), "-F", "-" }, *commitMsg); } } } struct RepoInfo { /* Whether this is a local, non-bare repository. */ bool isLocal = false; /* Working directory info: the complete list of files, and whether the working directory is dirty compared to HEAD. */ GitRepo::WorkdirInfo workdirInfo; /* URL of the repo, or its path if isLocal. Never a `file` URL. 
*/ std::string url; void warnDirty(const Settings & settings) const { if (workdirInfo.isDirty) { if (!settings.allowDirty) throw Error("Git tree '%s' is dirty", url); if (settings.warnDirty) warn("Git tree '%s' is dirty", url); } } std::string gitDir = ".git"; }; bool getShallowAttr(const Input & input) const { return maybeGetBoolAttr(input.attrs, "shallow").value_or(false); } bool getSubmodulesAttr(const Input & input) const { return maybeGetBoolAttr(input.attrs, "submodules").value_or(false); } bool getExportIgnoreAttr(const Input & input) const { return maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false); } bool getAllRefsAttr(const Input & input) const { return maybeGetBoolAttr(input.attrs, "allRefs").value_or(false); } RepoInfo getRepoInfo(const Input & input) const { auto checkHashAlgorithm = [&](const std::optional<Hash> & hash) { if (hash.has_value() && !(hash->algo == HashAlgorithm::SHA1 || hash->algo == HashAlgorithm::SHA256)) throw Error("Hash '%s' is not supported by Git. Supported types are sha1 and sha256.", hash->to_string(HashFormat::Base16, true)); }; if (auto rev = input.getRev()) checkHashAlgorithm(rev); RepoInfo repoInfo; // file:// URIs are normally not cloned (but otherwise treated the // same as remote URIs, i.e. we don't use the working tree or // HEAD). Exception: If _NIX_FORCE_HTTP is set, or the repo is a bare git // repo, treat as a remote URI to force a clone. static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing auto url = parseURL(getStrAttr(input.attrs, "url")); bool isBareRepository = url.scheme == "file" && !pathExists(url.path + "/.git"); repoInfo.isLocal = url.scheme == "file" && !forceHttp && !isBareRepository; repoInfo.url = repoInfo.isLocal ? url.path : url.base; // If this is a local directory and no ref or revision is // given, then allow the use of an unclean working tree. 
if (!input.getRef() && !input.getRev() && repoInfo.isLocal) repoInfo.workdirInfo = GitRepo::openRepo(repoInfo.url)->getWorkdirInfo(); return repoInfo; } uint64_t getLastModified(const RepoInfo & repoInfo, const std::string & repoDir, const Hash & rev) const { Cache::Key key{"gitLastModified", {{"rev", rev.gitRev()}}}; auto cache = getCache(); if (auto res = cache->lookup(key)) return getIntAttr(*res, "lastModified"); auto lastModified = GitRepo::openRepo(repoDir)->getLastModified(rev); cache->upsert(key, {{"lastModified", lastModified}}); return lastModified; } uint64_t getRevCount(const RepoInfo & repoInfo, const std::string & repoDir, const Hash & rev) const { Cache::Key key{"gitRevCount", {{"rev", rev.gitRev()}}}; auto cache = getCache(); if (auto revCountAttrs = cache->lookup(key)) return getIntAttr(*revCountAttrs, "revCount"); Activity act(*logger, lvlChatty, actUnknown, fmt("getting Git revision count of '%s'", repoInfo.url)); auto revCount = GitRepo::openRepo(repoDir)->getRevCount(rev); cache->upsert(key, Attrs{{"revCount", revCount}}); return revCount; } std::string getDefaultRef(const RepoInfo & repoInfo) const { auto head = repoInfo.isLocal ? 
GitRepo::openRepo(repoInfo.url)->getWorkdirRef() : readHeadCached(repoInfo.url); if (!head) { warn("could not read HEAD ref from repo at '%s', using 'master'", repoInfo.url); return "master"; } return *head; } static MakeNotAllowedError makeNotAllowedError(std::string url) { return [url{std::move(url)}](const CanonPath & path) -> RestrictedPathError { if (nix::pathExists(path.abs())) return RestrictedPathError("access to path '%s' is forbidden because it is not under Git control; maybe you should 'git add' it to the repository '%s'?", path, url); else return RestrictedPathError("path '%s' does not exist in Git repository '%s'", path, url); }; } void verifyCommit(const Input & input, std::shared_ptr<GitRepo> repo) const { auto publicKeys = getPublicKeys(input.attrs); auto verifyCommit = maybeGetBoolAttr(input.attrs, "verifyCommit").value_or(!publicKeys.empty()); if (verifyCommit) { if (input.getRev() && repo) repo->verifyCommit(*input.getRev(), publicKeys); else throw Error("commit verification is required for Git repository '%s', but it's dirty", input.to_string()); } } std::pair<ref<SourceAccessor>, Input> getAccessorFromCommit( ref<Store> store, RepoInfo & repoInfo, Input && input) const { assert(!repoInfo.workdirInfo.isDirty); auto origRev = input.getRev(); std::string name = input.getName(); auto originalRef = input.getRef(); auto ref = originalRef ? 
*originalRef : getDefaultRef(repoInfo); input.attrs.insert_or_assign("ref", ref); Path repoDir; if (repoInfo.isLocal) { repoDir = repoInfo.url; if (!input.getRev()) input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir)->resolveRef(ref).gitRev()); } else { Path cacheDir = getCachePath(repoInfo.url, getShallowAttr(input)); repoDir = cacheDir; repoInfo.gitDir = "."; createDirs(dirOf(cacheDir)); PathLocks cacheDirLock({cacheDir}); auto repo = GitRepo::openRepo(cacheDir, true, true); // We need to set the origin so resolving submodule URLs works repo->setRemote("origin", repoInfo.url); Path localRefFile = ref.compare(0, 5, "refs/") == 0 ? cacheDir + "/" + ref : cacheDir + "/refs/heads/" + ref; bool doFetch; time_t now = time(0); /* If a rev was specified, we need to fetch if it's not in the repo. */ if (auto rev = input.getRev()) { doFetch = !repo->hasObject(*rev); } else { if (getAllRefsAttr(input)) { doFetch = true; } else { /* If the local ref is older than ‘tarball-ttl’ seconds, do a git fetch to update the local ref to the remote ref. */ struct stat st; doFetch = stat(localRefFile.c_str(), &st) != 0 || !isCacheFileWithinTtl(now, st); } } if (doFetch) { try { auto fetchRef = getAllRefsAttr(input) ? "refs/*" : input.getRev() ? input.getRev()->gitRev() : ref.compare(0, 5, "refs/") == 0 ? ref : ref == "HEAD" ? 
ref : "refs/heads/" + ref; repo->fetch(repoInfo.url, fmt("%s:%s", fetchRef, fetchRef), getShallowAttr(input)); } catch (Error & e) { if (!pathExists(localRefFile)) throw; logError(e.info()); warn("could not update local clone of Git repository '%s'; continuing with the most recent version", repoInfo.url); } try { if (!input.getRev()) setWriteTime(localRefFile, now, now); } catch (Error & e) { warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg); } if (!originalRef && !storeCachedHead(repoInfo.url, ref)) warn("could not update cached head '%s' for '%s'", ref, repoInfo.url); } if (auto rev = input.getRev()) { if (!repo->hasObject(*rev)) throw Error( "Cannot find Git revision '%s' in ref '%s' of repository '%s'! " "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the " ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".", rev->gitRev(), ref, repoInfo.url ); } else input.attrs.insert_or_assign("rev", repo->resolveRef(ref).gitRev()); // cache dir lock is removed at scope end; we will only use read-only operations on specific revisions in the remainder } auto repo = GitRepo::openRepo(repoDir); auto isShallow = repo->isShallow(); if (isShallow && !getShallowAttr(input)) throw Error("'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", repoInfo.url); // FIXME: check whether rev is an ancestor of ref? 
auto rev = *input.getRev(); Attrs infoAttrs({ {"rev", rev.gitRev()}, {"lastModified", getLastModified(repoInfo, repoDir, rev)}, }); if (!getShallowAttr(input)) infoAttrs.insert_or_assign("revCount", getRevCount(repoInfo, repoDir, rev)); printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.url); verifyCommit(input, repo); bool exportIgnore = getExportIgnoreAttr(input); auto accessor = repo->getAccessor(rev, exportIgnore); accessor->setPathDisplay("«" + input.to_string() + "»"); /* If the repo has submodules, fetch them and return a mounted input accessor consisting of the accessor for the top-level repo and the accessors for the submodules. */ if (getSubmodulesAttr(input)) { std::map<CanonPath, nix::ref<SourceAccessor>> mounts; for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, exportIgnore)) { auto resolved = repo->resolveSubmoduleUrl(submodule.url); debug("Git submodule %s: %s %s %s -> %s", submodule.path, submodule.url, submodule.branch, submoduleRev.gitRev(), resolved); fetchers::Attrs attrs; attrs.insert_or_assign("type", "git"); attrs.insert_or_assign("url", resolved); if (submodule.branch != "") attrs.insert_or_assign("ref", submodule.branch); attrs.insert_or_assign("rev", submoduleRev.gitRev()); attrs.insert_or_assign("exportIgnore", Explicit<bool>{ exportIgnore }); attrs.insert_or_assign("submodules", Explicit<bool>{ true }); attrs.insert_or_assign("allRefs", Explicit<bool>{ true }); auto submoduleInput = fetchers::Input::fromAttrs(*input.settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(store); submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); mounts.insert_or_assign(submodule.path, submoduleAccessor); } if (!mounts.empty()) { mounts.insert_or_assign(CanonPath::root, accessor); accessor = makeMountedSourceAccessor(std::move(mounts)); } } assert(!origRev || origRev == rev); if (!getShallowAttr(input)) input.attrs.insert_or_assign("revCount", 
getIntAttr(infoAttrs, "revCount")); input.attrs.insert_or_assign("lastModified", getIntAttr(infoAttrs, "lastModified")); return {accessor, std::move(input)}; } std::pair<ref<SourceAccessor>, Input> getAccessorFromWorkdir( ref<Store> store, RepoInfo & repoInfo, Input && input) const { if (getSubmodulesAttr(input)) /* Create mountpoints for the submodules. */ for (auto & submodule : repoInfo.workdirInfo.submodules) repoInfo.workdirInfo.files.insert(submodule.path); auto repo = GitRepo::openRepo(repoInfo.url, false, false); auto exportIgnore = getExportIgnoreAttr(input); ref<SourceAccessor> accessor = repo->getAccessor(repoInfo.workdirInfo, exportIgnore, makeNotAllowedError(repoInfo.url)); accessor->setPathDisplay(repoInfo.url); /* If the repo has submodules, return a mounted input accessor consisting of the accessor for the top-level repo and the accessors for the submodule workdirs. */ if (getSubmodulesAttr(input) && !repoInfo.workdirInfo.submodules.empty()) { std::map<CanonPath, nix::ref<SourceAccessor>> mounts; for (auto & submodule : repoInfo.workdirInfo.submodules) { auto submodulePath = CanonPath(repoInfo.url) / submodule.path; fetchers::Attrs attrs; attrs.insert_or_assign("type", "git"); attrs.insert_or_assign("url", submodulePath.abs()); attrs.insert_or_assign("exportIgnore", Explicit<bool>{ exportIgnore }); attrs.insert_or_assign("submodules", Explicit<bool>{ true }); // TODO: fall back to getAccessorFromCommit-like fetch when submodules aren't checked out // attrs.insert_or_assign("allRefs", Explicit<bool>{ true }); auto submoduleInput = fetchers::Input::fromAttrs(*input.settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(store); submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); /* If the submodule is dirty, mark this repo dirty as well. 
*/ if (!submoduleInput2.getRev()) repoInfo.workdirInfo.isDirty = true; mounts.insert_or_assign(submodule.path, submoduleAccessor); } mounts.insert_or_assign(CanonPath::root, accessor); accessor = makeMountedSourceAccessor(std::move(mounts)); } if (!repoInfo.workdirInfo.isDirty) { auto repo = GitRepo::openRepo(repoInfo.url); if (auto ref = repo->getWorkdirRef()) input.attrs.insert_or_assign("ref", *ref); /* Return a rev of 000... if there are no commits yet. */ auto rev = repoInfo.workdirInfo.headRev.value_or(nullRev); input.attrs.insert_or_assign("rev", rev.gitRev()); input.attrs.insert_or_assign("revCount", rev == nullRev ? 0 : getRevCount(repoInfo, repoInfo.url, rev)); verifyCommit(input, repo); } else { repoInfo.warnDirty(*input.settings); if (repoInfo.workdirInfo.headRev) { input.attrs.insert_or_assign("dirtyRev", repoInfo.workdirInfo.headRev->gitRev() + "-dirty"); input.attrs.insert_or_assign("dirtyShortRev", repoInfo.workdirInfo.headRev->gitShortRev() + "-dirty"); } verifyCommit(input, nullptr); } input.attrs.insert_or_assign( "lastModified", repoInfo.workdirInfo.headRev ? getLastModified(repoInfo, repoInfo.url, *repoInfo.workdirInfo.headRev) : 0); return {accessor, std::move(input)}; } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override { Input input(_input); auto repoInfo = getRepoInfo(input); if (getExportIgnoreAttr(input) && getSubmodulesAttr(input)) { /* In this situation, we don't have a git CLI behavior that we can copy. `git archive` does not support submodules, so it is unclear whether rules from the parent should affect the submodule or not. When git may eventually implement this, we need Nix to match its behavior. */ throw UnimplementedError("exportIgnore and submodules are not supported together yet"); } auto [accessor, final] = input.getRef() || input.getRev() || !repoInfo.isLocal ? 
getAccessorFromCommit(store, repoInfo, std::move(input)) : getAccessorFromWorkdir(store, repoInfo, std::move(input)); return {accessor, std::move(final)}; } std::optional<std::string> getFingerprint(ref<Store> store, const Input & input) const override { if (auto rev = input.getRev()) return rev->gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : ""); else return std::nullopt; } bool isLocked(const Input & input) const override { return (bool) input.getRev(); } }; static auto rGitInputScheme = OnStartup([] { registerInputScheme(std::make_unique<GitInputScheme>()); }); }
30,058
C++
.cc
667
34.602699
181
0.585545
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,073
attrs.cc
NixOS_nix/src/libfetchers/attrs.cc
#include "attrs.hh" #include "fetchers.hh" #include <nlohmann/json.hpp> namespace nix::fetchers { Attrs jsonToAttrs(const nlohmann::json & json) { Attrs attrs; for (auto & i : json.items()) { if (i.value().is_number()) attrs.emplace(i.key(), i.value().get<uint64_t>()); else if (i.value().is_string()) attrs.emplace(i.key(), i.value().get<std::string>()); else if (i.value().is_boolean()) attrs.emplace(i.key(), Explicit<bool> { i.value().get<bool>() }); else throw Error("unsupported input attribute type in lock file"); } return attrs; } nlohmann::json attrsToJSON(const Attrs & attrs) { nlohmann::json json; for (auto & attr : attrs) { if (auto v = std::get_if<uint64_t>(&attr.second)) { json[attr.first] = *v; } else if (auto v = std::get_if<std::string>(&attr.second)) { json[attr.first] = *v; } else if (auto v = std::get_if<Explicit<bool>>(&attr.second)) { json[attr.first] = v->t; } else unreachable(); } return json; } std::optional<std::string> maybeGetStrAttr(const Attrs & attrs, const std::string & name) { auto i = attrs.find(name); if (i == attrs.end()) return {}; if (auto v = std::get_if<std::string>(&i->second)) return *v; throw Error("input attribute '%s' is not a string %s", name, attrsToJSON(attrs).dump()); } std::string getStrAttr(const Attrs & attrs, const std::string & name) { auto s = maybeGetStrAttr(attrs, name); if (!s) throw Error("input attribute '%s' is missing", name); return *s; } std::optional<uint64_t> maybeGetIntAttr(const Attrs & attrs, const std::string & name) { auto i = attrs.find(name); if (i == attrs.end()) return {}; if (auto v = std::get_if<uint64_t>(&i->second)) return *v; throw Error("input attribute '%s' is not an integer", name); } uint64_t getIntAttr(const Attrs & attrs, const std::string & name) { auto s = maybeGetIntAttr(attrs, name); if (!s) throw Error("input attribute '%s' is missing", name); return *s; } std::optional<bool> maybeGetBoolAttr(const Attrs & attrs, const std::string & name) { auto i = attrs.find(name); if (i == 
attrs.end()) return {}; if (auto v = std::get_if<Explicit<bool>>(&i->second)) return v->t; throw Error("input attribute '%s' is not a Boolean", name); } bool getBoolAttr(const Attrs & attrs, const std::string & name) { auto s = maybeGetBoolAttr(attrs, name); if (!s) throw Error("input attribute '%s' is missing", name); return *s; } std::map<std::string, std::string> attrsToQuery(const Attrs & attrs) { std::map<std::string, std::string> query; for (auto & attr : attrs) { if (auto v = std::get_if<uint64_t>(&attr.second)) { query.insert_or_assign(attr.first, fmt("%d", *v)); } else if (auto v = std::get_if<std::string>(&attr.second)) { query.insert_or_assign(attr.first, *v); } else if (auto v = std::get_if<Explicit<bool>>(&attr.second)) { query.insert_or_assign(attr.first, v->t ? "1" : "0"); } else unreachable(); } return query; } Hash getRevAttr(const Attrs & attrs, const std::string & name) { return Hash::parseAny(getStrAttr(attrs, name), HashAlgorithm::SHA1); } }
3,387
C++
.cc
97
29.639175
92
0.607634
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,074
indirect.cc
NixOS_nix/src/libfetchers/indirect.cc
#include "fetchers.hh" #include "url-parts.hh" #include "path.hh" namespace nix::fetchers { std::regex flakeRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript); struct IndirectInputScheme : InputScheme { std::optional<Input> inputFromURL( const Settings & settings, const ParsedURL & url, bool requireTree) const override { if (url.scheme != "flake") return {}; auto path = tokenizeString<std::vector<std::string>>(url.path, "/"); std::optional<Hash> rev; std::optional<std::string> ref; if (path.size() == 1) { } else if (path.size() == 2) { if (std::regex_match(path[1], revRegex)) rev = Hash::parseAny(path[1], HashAlgorithm::SHA1); else if (std::regex_match(path[1], refRegex)) ref = path[1]; else throw BadURL("in flake URL '%s', '%s' is not a commit hash or branch/tag name", url.url, path[1]); } else if (path.size() == 3) { if (!std::regex_match(path[1], refRegex)) throw BadURL("in flake URL '%s', '%s' is not a branch/tag name", url.url, path[1]); ref = path[1]; if (!std::regex_match(path[2], revRegex)) throw BadURL("in flake URL '%s', '%s' is not a commit hash", url.url, path[2]); rev = Hash::parseAny(path[2], HashAlgorithm::SHA1); } else throw BadURL("GitHub URL '%s' is invalid", url.url); std::string id = path[0]; if (!std::regex_match(id, flakeRegex)) throw BadURL("'%s' is not a valid flake ID", id); // FIXME: forbid query params? 
Input input{settings}; input.attrs.insert_or_assign("type", "indirect"); input.attrs.insert_or_assign("id", id); if (rev) input.attrs.insert_or_assign("rev", rev->gitRev()); if (ref) input.attrs.insert_or_assign("ref", *ref); return input; } std::string_view schemeName() const override { return "indirect"; } StringSet allowedAttrs() const override { return { "id", "ref", "rev", "narHash", }; } std::optional<Input> inputFromAttrs( const Settings & settings, const Attrs & attrs) const override { auto id = getStrAttr(attrs, "id"); if (!std::regex_match(id, flakeRegex)) throw BadURL("'%s' is not a valid flake ID", id); Input input{settings}; input.attrs = attrs; return input; } ParsedURL toURL(const Input & input) const override { ParsedURL url; url.scheme = "flake"; url.path = getStrAttr(input.attrs, "id"); if (auto ref = input.getRef()) { url.path += '/'; url.path += *ref; }; if (auto rev = input.getRev()) { url.path += '/'; url.path += rev->gitRev(); }; return url; } Input applyOverrides( const Input & _input, std::optional<std::string> ref, std::optional<Hash> rev) const override { auto input(_input); if (rev) input.attrs.insert_or_assign("rev", rev->gitRev()); if (ref) input.attrs.insert_or_assign("ref", *ref); return input; } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & input) const override { throw Error("indirect input '%s' cannot be fetched directly", input.to_string()); } std::optional<ExperimentalFeature> experimentalFeature() const override { return Xp::Flakes; } bool isDirect(const Input & input) const override { return false; } }; static auto rIndirectInputScheme = OnStartup([] { registerInputScheme(std::make_unique<IndirectInputScheme>()); }); }
3,804
C++
.cc
99
30.10101
115
0.575733
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,075
github.cc
NixOS_nix/src/libfetchers/github.cc
#include "filetransfer.hh" #include "cache.hh" #include "globals.hh" #include "store-api.hh" #include "types.hh" #include "url-parts.hh" #include "git.hh" #include "fetchers.hh" #include "fetch-settings.hh" #include "tarball.hh" #include "tarfile.hh" #include "git-utils.hh" #include <optional> #include <nlohmann/json.hpp> #include <fstream> namespace nix::fetchers { struct DownloadUrl { std::string url; Headers headers; }; // A github, gitlab, or sourcehut host const static std::string hostRegexS = "[a-zA-Z0-9.-]*"; // FIXME: check std::regex hostRegex(hostRegexS, std::regex::ECMAScript); struct GitArchiveInputScheme : InputScheme { virtual std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const = 0; std::optional<Input> inputFromURL( const fetchers::Settings & settings, const ParsedURL & url, bool requireTree) const override { if (url.scheme != schemeName()) return {}; auto path = tokenizeString<std::vector<std::string>>(url.path, "/"); std::optional<Hash> rev; std::optional<std::string> ref; std::optional<std::string> host_url; auto size = path.size(); if (size == 3) { if (std::regex_match(path[2], revRegex)) rev = Hash::parseAny(path[2], HashAlgorithm::SHA1); else if (std::regex_match(path[2], refRegex)) ref = path[2]; else throw BadURL("in URL '%s', '%s' is not a commit hash or branch/tag name", url.url, path[2]); } else if (size > 3) { std::string rs; for (auto i = std::next(path.begin(), 2); i != path.end(); i++) { rs += *i; if (std::next(i) != path.end()) { rs += "/"; } } if (std::regex_match(rs, refRegex)) { ref = rs; } else { throw BadURL("in URL '%s', '%s' is not a branch/tag name", url.url, rs); } } else if (size < 2) throw BadURL("URL '%s' is invalid", url.url); for (auto &[name, value] : url.query) { if (name == "rev") { if (rev) throw BadURL("URL '%s' contains multiple commit hashes", url.url); rev = Hash::parseAny(value, HashAlgorithm::SHA1); } else if (name == "ref") { if (!std::regex_match(value, 
refRegex)) throw BadURL("URL '%s' contains an invalid branch/tag name", url.url); if (ref) throw BadURL("URL '%s' contains multiple branch/tag names", url.url); ref = value; } else if (name == "host") { if (!std::regex_match(value, hostRegex)) throw BadURL("URL '%s' contains an invalid instance host", url.url); host_url = value; } // FIXME: barf on unsupported attributes } if (ref && rev) throw BadURL("URL '%s' contains both a commit hash and a branch/tag name %s %s", url.url, *ref, rev->gitRev()); Input input{settings}; input.attrs.insert_or_assign("type", std::string { schemeName() }); input.attrs.insert_or_assign("owner", path[0]); input.attrs.insert_or_assign("repo", path[1]); if (rev) input.attrs.insert_or_assign("rev", rev->gitRev()); if (ref) input.attrs.insert_or_assign("ref", *ref); if (host_url) input.attrs.insert_or_assign("host", *host_url); auto narHash = url.query.find("narHash"); if (narHash != url.query.end()) input.attrs.insert_or_assign("narHash", narHash->second); return input; } StringSet allowedAttrs() const override { return { "owner", "repo", "ref", "rev", "narHash", "lastModified", "host", "treeHash", }; } std::optional<Input> inputFromAttrs( const fetchers::Settings & settings, const Attrs & attrs) const override { getStrAttr(attrs, "owner"); getStrAttr(attrs, "repo"); Input input{settings}; input.attrs = attrs; return input; } ParsedURL toURL(const Input & input) const override { auto owner = getStrAttr(input.attrs, "owner"); auto repo = getStrAttr(input.attrs, "repo"); auto ref = input.getRef(); auto rev = input.getRev(); auto path = owner + "/" + repo; assert(!(ref && rev)); if (ref) path += "/" + *ref; if (rev) path += "/" + rev->to_string(HashFormat::Base16, false); auto url = ParsedURL { .scheme = std::string { schemeName() }, .path = path, }; if (auto narHash = input.getNarHash()) url.query.insert_or_assign("narHash", narHash->to_string(HashFormat::SRI, true)); return url; } Input applyOverrides( const Input & _input, 
std::optional<std::string> ref, std::optional<Hash> rev) const override { auto input(_input); if (rev && ref) throw BadURL("cannot apply both a commit hash (%s) and a branch/tag name ('%s') to input '%s'", rev->gitRev(), *ref, input.to_string()); if (rev) { input.attrs.insert_or_assign("rev", rev->gitRev()); input.attrs.erase("ref"); } if (ref) { input.attrs.insert_or_assign("ref", *ref); input.attrs.erase("rev"); } return input; } std::optional<std::string> getAccessToken(const fetchers::Settings & settings, const std::string & host) const { auto tokens = settings.accessTokens.get(); if (auto token = get(tokens, host)) return *token; return {}; } Headers makeHeadersWithAuthTokens( const fetchers::Settings & settings, const std::string & host) const { Headers headers; auto accessToken = getAccessToken(settings, host); if (accessToken) { auto hdr = accessHeaderFromToken(*accessToken); if (hdr) headers.push_back(*hdr); else warn("Unrecognized access token for host '%s'", host); } return headers; } struct RefInfo { Hash rev; std::optional<Hash> treeHash; }; virtual RefInfo getRevFromRef(nix::ref<Store> store, const Input & input) const = 0; virtual DownloadUrl getDownloadUrl(const Input & input) const = 0; struct TarballInfo { Hash treeHash; time_t lastModified; }; std::pair<Input, TarballInfo> downloadArchive(ref<Store> store, Input input) const { if (!maybeGetStrAttr(input.attrs, "ref")) input.attrs.insert_or_assign("ref", "HEAD"); std::optional<Hash> upstreamTreeHash; auto rev = input.getRev(); if (!rev) { auto refInfo = getRevFromRef(store, input); rev = refInfo.rev; upstreamTreeHash = refInfo.treeHash; debug("HEAD revision for '%s' is %s", input.to_string(), refInfo.rev.gitRev()); } input.attrs.erase("ref"); input.attrs.insert_or_assign("rev", rev->gitRev()); auto cache = getCache(); Cache::Key treeHashKey{"gitRevToTreeHash", {{"rev", rev->gitRev()}}}; Cache::Key lastModifiedKey{"gitRevToLastModified", {{"rev", rev->gitRev()}}}; if (auto treeHashAttrs = 
cache->lookup(treeHashKey)) { if (auto lastModifiedAttrs = cache->lookup(lastModifiedKey)) { auto treeHash = getRevAttr(*treeHashAttrs, "treeHash"); auto lastModified = getIntAttr(*lastModifiedAttrs, "lastModified"); if (getTarballCache()->hasObject(treeHash)) return {std::move(input), TarballInfo { .treeHash = treeHash, .lastModified = (time_t) lastModified }}; else debug("Git tree with hash '%s' has disappeared from the cache, refetching...", treeHash.gitRev()); } } /* Stream the tarball into the tarball cache. */ auto url = getDownloadUrl(input); auto source = sinkToSource([&](Sink & sink) { FileTransferRequest req(url.url); req.headers = url.headers; getFileTransfer()->download(std::move(req), sink); }); auto act = std::make_unique<Activity>(*logger, lvlInfo, actUnknown, fmt("unpacking '%s' into the Git cache", input.to_string())); TarArchive archive { *source }; auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); auto tree = parseSink->flush(); act.reset(); TarballInfo tarballInfo { .treeHash = tarballCache->dereferenceSingletonDirectory(tree), .lastModified = lastModified }; cache->upsert(treeHashKey, Attrs{{"treeHash", tarballInfo.treeHash.gitRev()}}); cache->upsert(lastModifiedKey, Attrs{{"lastModified", (uint64_t) tarballInfo.lastModified}}); #if 0 if (upstreamTreeHash != tarballInfo.treeHash) warn( "Git tree hash mismatch for revision '%s' of '%s': " "expected '%s', got '%s'. 
" "This can happen if the Git repository uses submodules.", rev->gitRev(), input.to_string(), upstreamTreeHash->gitRev(), tarballInfo.treeHash.gitRev()); #endif return {std::move(input), tarballInfo}; } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override { auto [input, tarballInfo] = downloadArchive(store, _input); #if 0 input.attrs.insert_or_assign("treeHash", tarballInfo.treeHash.gitRev()); #endif input.attrs.insert_or_assign("lastModified", uint64_t(tarballInfo.lastModified)); auto accessor = getTarballCache()->getAccessor(tarballInfo.treeHash, false); accessor->setPathDisplay("«" + input.to_string() + "»"); return {accessor, input}; } bool isLocked(const Input & input) const override { /* Since we can't verify the integrity of the tarball from the Git revision alone, we also require a NAR hash for locking. FIXME: in the future, we may want to require a Git tree hash instead of a NAR hash. */ return input.getRev().has_value() && (input.settings->trustTarballsFromGitForges || input.getNarHash().has_value()); } std::optional<ExperimentalFeature> experimentalFeature() const override { return Xp::Flakes; } std::optional<std::string> getFingerprint(ref<Store> store, const Input & input) const override { if (auto rev = input.getRev()) return rev->gitRev(); else return std::nullopt; } }; struct GitHubInputScheme : GitArchiveInputScheme { std::string_view schemeName() const override { return "github"; } std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override { // Github supports PAT/OAuth2 tokens and HTTP Basic // Authentication. The former simply specifies the token, the // latter can use the token as the password. Only the first // is used here. 
See // https://developer.github.com/v3/#authentication and // https://docs.github.com/en/developers/apps/authorizing-oath-apps return std::pair<std::string, std::string>("Authorization", fmt("token %s", token)); } std::string getHost(const Input & input) const { return maybeGetStrAttr(input.attrs, "host").value_or("github.com"); } std::string getOwner(const Input & input) const { return getStrAttr(input.attrs, "owner"); } std::string getRepo(const Input & input) const { return getStrAttr(input.attrs, "repo"); } RefInfo getRevFromRef(nix::ref<Store> store, const Input & input) const override { auto host = getHost(input); auto url = fmt( host == "github.com" ? "https://api.%s/repos/%s/%s/commits/%s" : "https://%s/api/v3/repos/%s/%s/commits/%s", host, getOwner(input), getRepo(input), *input.getRef()); Headers headers = makeHeadersWithAuthTokens(*input.settings, host); auto json = nlohmann::json::parse( readFile( store->toRealPath( downloadFile(store, url, "source", headers).storePath))); return RefInfo { .rev = Hash::parseAny(std::string { json["sha"] }, HashAlgorithm::SHA1), .treeHash = Hash::parseAny(std::string { json["commit"]["tree"]["sha"] }, HashAlgorithm::SHA1) }; } DownloadUrl getDownloadUrl(const Input & input) const override { auto host = getHost(input); Headers headers = makeHeadersWithAuthTokens(*input.settings, host); // If we have no auth headers then we default to the public archive // urls so we do not run into rate limits. const auto urlFmt = host != "github.com" ? "https://%s/api/v3/repos/%s/%s/tarball/%s" : headers.empty() ? 
"https://%s/%s/%s/archive/%s.tar.gz" : "https://api.%s/repos/%s/%s/tarball/%s"; const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->to_string(HashFormat::Base16, false)); return DownloadUrl { url, headers }; } void clone(const Input & input, const Path & destDir) const override { auto host = getHost(input); Input::fromURL(*input.settings, fmt("git+https://%s/%s/%s.git", host, getOwner(input), getRepo(input))) .applyOverrides(input.getRef(), input.getRev()) .clone(destDir); } }; struct GitLabInputScheme : GitArchiveInputScheme { std::string_view schemeName() const override { return "gitlab"; } std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override { // Gitlab supports 4 kinds of authorization, two of which are // relevant here: OAuth2 and PAT (Private Access Token). The // user can indicate which token is used by specifying the // token as <TYPE>:<VALUE>, where type is "OAuth2" or "PAT". // If the <TYPE> is unrecognized, this will fall back to // treating this simply has <HDRNAME>:<HDRVAL>. See // https://docs.gitlab.com/12.10/ee/api/README.html#authentication auto fldsplit = token.find_first_of(':'); // n.b. C++20 would allow: if (token.starts_with("OAuth2:")) ... 
if ("OAuth2" == token.substr(0, fldsplit)) return std::make_pair("Authorization", fmt("Bearer %s", token.substr(fldsplit+1))); if ("PAT" == token.substr(0, fldsplit)) return std::make_pair("Private-token", token.substr(fldsplit+1)); warn("Unrecognized GitLab token type %s", token.substr(0, fldsplit)); return std::make_pair(token.substr(0,fldsplit), token.substr(fldsplit+1)); } RefInfo getRevFromRef(nix::ref<Store> store, const Input & input) const override { auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); // See rate limiting note below auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/commits?ref_name=%s", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); Headers headers = makeHeadersWithAuthTokens(*input.settings, host); auto json = nlohmann::json::parse( readFile( store->toRealPath( downloadFile(store, url, "source", headers).storePath))); if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) { return RefInfo { .rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1) }; } if (json.is_array() && json.size() == 0) { throw Error("No commits returned by GitLab API -- does the git ref really exist?"); } else { throw Error("Unexpected response received from GitLab: %s", json); } } DownloadUrl getDownloadUrl(const Input & input) const override { // This endpoint has a rate limit threshold that may be // server-specific and vary based whether the user is // authenticated via an accessToken or not, but the usual rate // is 10 reqs/sec/ip-addr. 
See // https://docs.gitlab.com/ee/user/gitlab_com/index.html#gitlabcom-specific-rate-limits auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/archive.tar.gz?sha=%s", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), input.getRev()->to_string(HashFormat::Base16, false)); Headers headers = makeHeadersWithAuthTokens(*input.settings, host); return DownloadUrl { url, headers }; } void clone(const Input & input, const Path & destDir) const override { auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); // FIXME: get username somewhere Input::fromURL(*input.settings, fmt("git+https://%s/%s/%s.git", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) .applyOverrides(input.getRef(), input.getRev()) .clone(destDir); } }; struct SourceHutInputScheme : GitArchiveInputScheme { std::string_view schemeName() const override { return "sourcehut"; } std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override { // SourceHut supports both PAT and OAuth2. See // https://man.sr.ht/meta.sr.ht/oauth.md return std::pair<std::string, std::string>("Authorization", fmt("Bearer %s", token)); // Note: This currently serves no purpose, as this kind of authorization // does not allow for downloading tarballs on sourcehut private repos. // Once it is implemented, however, should work as expected. } RefInfo getRevFromRef(nix::ref<Store> store, const Input & input) const override { // TODO: In the future, when the sourcehut graphql API is implemented for mercurial // and with anonymous access, this method should use it instead. 
auto ref = *input.getRef(); auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht"); auto base_url = fmt("https://%s/%s/%s", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")); Headers headers = makeHeadersWithAuthTokens(*input.settings, host); std::string refUri; if (ref == "HEAD") { auto file = store->toRealPath( downloadFile(store, fmt("%s/HEAD", base_url), "source", headers).storePath); std::ifstream is(file); std::string line; getline(is, line); auto remoteLine = git::parseLsRemoteLine(line); if (!remoteLine) { throw BadURL("in '%d', couldn't resolve HEAD ref '%d'", input.to_string(), ref); } refUri = remoteLine->target; } else { refUri = fmt("refs/(heads|tags)/%s", ref); } std::regex refRegex(refUri); auto file = store->toRealPath( downloadFile(store, fmt("%s/info/refs", base_url), "source", headers).storePath); std::ifstream is(file); std::string line; std::optional<std::string> id; while(!id && getline(is, line)) { auto parsedLine = git::parseLsRemoteLine(line); if (parsedLine && parsedLine->reference && std::regex_match(*parsedLine->reference, refRegex)) id = parsedLine->target; } if (!id) throw BadURL("in '%d', couldn't find ref '%d'", input.to_string(), ref); return RefInfo { .rev = Hash::parseAny(*id, HashAlgorithm::SHA1) }; } DownloadUrl getDownloadUrl(const Input & input) const override { auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht"); auto url = fmt("https://%s/%s/%s/archive/%s.tar.gz", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), input.getRev()->to_string(HashFormat::Base16, false)); Headers headers = makeHeadersWithAuthTokens(*input.settings, host); return DownloadUrl { url, headers }; } void clone(const Input & input, const Path & destDir) const override { auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht"); Input::fromURL(*input.settings, fmt("git+https://%s/%s/%s", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) 
.applyOverrides(input.getRef(), input.getRev()) .clone(destDir); } }; static auto rGitHubInputScheme = OnStartup([] { registerInputScheme(std::make_unique<GitHubInputScheme>()); }); static auto rGitLabInputScheme = OnStartup([] { registerInputScheme(std::make_unique<GitLabInputScheme>()); }); static auto rSourceHutInputScheme = OnStartup([] { registerInputScheme(std::make_unique<SourceHutInputScheme>()); }); }
21,926
C++
.cc
486
35.679012
123
0.59732
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,076
fetchers.cc
NixOS_nix/src/libfetchers/fetchers.cc
#include "fetchers.hh" #include "store-api.hh" #include "source-path.hh" #include "fetch-to-store.hh" #include "json-utils.hh" #include "store-path-accessor.hh" #include <nlohmann/json.hpp> namespace nix::fetchers { using InputSchemeMap = std::map<std::string_view, std::shared_ptr<InputScheme>>; std::unique_ptr<InputSchemeMap> inputSchemes = nullptr; void registerInputScheme(std::shared_ptr<InputScheme> && inputScheme) { if (!inputSchemes) inputSchemes = std::make_unique<InputSchemeMap>(); auto schemeName = inputScheme->schemeName(); if (inputSchemes->count(schemeName) > 0) throw Error("Input scheme with name %s already registered", schemeName); inputSchemes->insert_or_assign(schemeName, std::move(inputScheme)); } nlohmann::json dumpRegisterInputSchemeInfo() { using nlohmann::json; auto res = json::object(); for (auto & [name, scheme] : *inputSchemes) { auto & r = res[name] = json::object(); r["allowedAttrs"] = scheme->allowedAttrs(); } return res; } Input Input::fromURL( const Settings & settings, const std::string & url, bool requireTree) { return fromURL(settings, parseURL(url), requireTree); } static void fixupInput(Input & input) { // Check common attributes. 
input.getType(); input.getRef(); input.getRevCount(); input.getLastModified(); } Input Input::fromURL( const Settings & settings, const ParsedURL & url, bool requireTree) { for (auto & [_, inputScheme] : *inputSchemes) { auto res = inputScheme->inputFromURL(settings, url, requireTree); if (res) { experimentalFeatureSettings.require(inputScheme->experimentalFeature()); res->scheme = inputScheme; fixupInput(*res); return std::move(*res); } } throw Error("input '%s' is unsupported", url.url); } Input Input::fromAttrs(const Settings & settings, Attrs && attrs) { auto schemeName = ({ auto schemeNameOpt = maybeGetStrAttr(attrs, "type"); if (!schemeNameOpt) throw Error("'type' attribute to specify input scheme is required but not provided"); *std::move(schemeNameOpt); }); auto raw = [&]() { // Return an input without a scheme; most operations will fail, // but not all of them. Doing this is to support those other // operations which are supposed to be robust on // unknown/uninterpretable inputs. Input input { settings }; input.attrs = attrs; fixupInput(input); return input; }; std::shared_ptr<InputScheme> inputScheme = ({ auto i = inputSchemes->find(schemeName); i == inputSchemes->end() ? nullptr : i->second; }); if (!inputScheme) return raw(); experimentalFeatureSettings.require(inputScheme->experimentalFeature()); auto allowedAttrs = inputScheme->allowedAttrs(); for (auto & [name, _] : attrs) if (name != "type" && name != "__final" && allowedAttrs.count(name) == 0) throw Error("input attribute '%s' not supported by scheme '%s'", name, schemeName); auto res = inputScheme->inputFromAttrs(settings, attrs); if (!res) return raw(); res->scheme = inputScheme; fixupInput(*res); return std::move(*res); } std::optional<std::string> Input::getFingerprint(ref<Store> store) const { return scheme ? 
scheme->getFingerprint(store, *this) : std::nullopt; } ParsedURL Input::toURL() const { if (!scheme) throw Error("cannot show unsupported input '%s'", attrsToJSON(attrs)); return scheme->toURL(*this); } std::string Input::toURLString(const std::map<std::string, std::string> & extraQuery) const { auto url = toURL(); for (auto & attr : extraQuery) url.query.insert(attr); return url.to_string(); } std::string Input::to_string() const { return toURL().to_string(); } bool Input::isDirect() const { return !scheme || scheme->isDirect(*this); } bool Input::isLocked() const { return scheme && scheme->isLocked(*this); } bool Input::isFinal() const { return maybeGetBoolAttr(attrs, "__final").value_or(false); } Attrs Input::toAttrs() const { return attrs; } bool Input::operator ==(const Input & other) const noexcept { return attrs == other.attrs; } bool Input::contains(const Input & other) const { if (*this == other) return true; auto other2(other); other2.attrs.erase("ref"); other2.attrs.erase("rev"); if (*this == other2) return true; return false; } std::pair<StorePath, Input> Input::fetchToStore(ref<Store> store) const { if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); auto [storePath, input] = [&]() -> std::pair<StorePath, Input> { try { auto [accessor, result] = getAccessorUnchecked(store); auto storePath = nix::fetchToStore(*store, SourcePath(accessor), FetchMode::Copy, result.getName()); auto narHash = store->queryPathInfo(storePath)->narHash; result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); // FIXME: we would like to mark inputs as final in // getAccessorUnchecked(), but then we can't add // narHash. Or maybe narHash should be excluded from the // concept of "final" inputs? 
result.attrs.insert_or_assign("__final", Explicit<bool>(true)); assert(result.isFinal()); checkLocks(*this, result); return {storePath, result}; } catch (Error & e) { e.addTrace({}, "while fetching the input '%s'", to_string()); throw; } }(); return {std::move(storePath), input}; } void Input::checkLocks(Input specified, Input & result) { /* If the original input is final, then we just return the original attributes, dropping any new fields returned by the fetcher. However, any fields that are in both the specified and result input must be identical. */ if (specified.isFinal()) { /* Backwards compatibility hack: we had some lock files in the past that 'narHash' fields with incorrect base-64 formatting (lacking the trailing '=', e.g. 'sha256-ri...Mw' instead of ''sha256-ri...Mw='). So fix that. */ if (auto prevNarHash = specified.getNarHash()) specified.attrs.insert_or_assign("narHash", prevNarHash->to_string(HashFormat::SRI, true)); for (auto & field : specified.attrs) { auto field2 = result.attrs.find(field.first); if (field2 != result.attrs.end() && field.second != field2->second) throw Error("mismatch in field '%s' of input '%s', got '%s'", field.first, attrsToJSON(specified.attrs), attrsToJSON(result.attrs)); } result.attrs = specified.attrs; return; } if (auto prevNarHash = specified.getNarHash()) { if (result.getNarHash() != prevNarHash) { if (result.getNarHash()) throw Error((unsigned int) 102, "NAR hash mismatch in input '%s', expected '%s' but got '%s'", specified.to_string(), prevNarHash->to_string(HashFormat::SRI, true), result.getNarHash()->to_string(HashFormat::SRI, true)); else throw Error((unsigned int) 102, "NAR hash mismatch in input '%s', expected '%s' but got none", specified.to_string(), prevNarHash->to_string(HashFormat::SRI, true)); } } if (auto prevLastModified = specified.getLastModified()) { if (result.getLastModified() != prevLastModified) throw Error("'lastModified' attribute mismatch in input '%s', expected %d, got %d", 
result.to_string(), *prevLastModified, result.getLastModified().value_or(-1)); } if (auto prevRev = specified.getRev()) { if (result.getRev() != prevRev) throw Error("'rev' attribute mismatch in input '%s', expected %s", result.to_string(), prevRev->gitRev()); } if (auto prevRevCount = specified.getRevCount()) { if (result.getRevCount() != prevRevCount) throw Error("'revCount' attribute mismatch in input '%s', expected %d", result.to_string(), *prevRevCount); } } std::pair<ref<SourceAccessor>, Input> Input::getAccessor(ref<Store> store) const { try { auto [accessor, result] = getAccessorUnchecked(store); checkLocks(*this, result); return {accessor, std::move(result)}; } catch (Error & e) { e.addTrace({}, "while fetching the input '%s'", to_string()); throw; } } std::pair<ref<SourceAccessor>, Input> Input::getAccessorUnchecked(ref<Store> store) const { // FIXME: cache the accessor if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); /* The tree may already be in the Nix store, or it could be substituted (which is often faster than fetching from the original source). So check that. We only do this for final inputs, otherwise there is a risk that we don't return the same attributes (like `lastModified`) that the "real" fetcher would return. FIXME: add a setting to disable this. FIXME: substituting may be slower than fetching normally, e.g. for fetchers like Git that are incremental! 
*/ if (isFinal() && getNarHash()) { try { auto storePath = computeStorePath(*store); store->ensurePath(storePath); debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath)); auto accessor = makeStorePathAccessor(store, storePath); accessor->fingerprint = scheme->getFingerprint(store, *this); return {accessor, *this}; } catch (Error & e) { debug("substitution of input '%s' failed: %s", to_string(), e.what()); } } auto [accessor, result] = scheme->getAccessor(store, *this); assert(!accessor->fingerprint); accessor->fingerprint = scheme->getFingerprint(store, result); return {accessor, std::move(result)}; } Input Input::applyOverrides( std::optional<std::string> ref, std::optional<Hash> rev) const { if (!scheme) return *this; return scheme->applyOverrides(*this, ref, rev); } void Input::clone(const Path & destDir) const { assert(scheme); scheme->clone(*this, destDir); } std::optional<Path> Input::getSourcePath() const { assert(scheme); return scheme->getSourcePath(*this); } void Input::putFile( const CanonPath & path, std::string_view contents, std::optional<std::string> commitMsg) const { assert(scheme); return scheme->putFile(*this, path, contents, commitMsg); } std::string Input::getName() const { return maybeGetStrAttr(attrs, "name").value_or("source"); } StorePath Input::computeStorePath(Store & store) const { auto narHash = getNarHash(); if (!narHash) throw Error("cannot compute store path for unlocked input '%s'", to_string()); return store.makeFixedOutputPath(getName(), FixedOutputInfo { .method = FileIngestionMethod::NixArchive, .hash = *narHash, .references = {}, }); } std::string Input::getType() const { return getStrAttr(attrs, "type"); } std::optional<Hash> Input::getNarHash() const { if (auto s = maybeGetStrAttr(attrs, "narHash")) { auto hash = s->empty() ? 
Hash(HashAlgorithm::SHA256) : Hash::parseSRI(*s); if (hash.algo != HashAlgorithm::SHA256) throw UsageError("narHash must use SHA-256"); return hash; } return {}; } std::optional<std::string> Input::getRef() const { if (auto s = maybeGetStrAttr(attrs, "ref")) return *s; return {}; } std::optional<Hash> Input::getRev() const { std::optional<Hash> hash = {}; if (auto s = maybeGetStrAttr(attrs, "rev")) { try { hash = Hash::parseAnyPrefixed(*s); } catch (BadHash &e) { // Default to sha1 for backwards compatibility with existing // usages (e.g. `builtins.fetchTree` calls or flake inputs). hash = Hash::parseAny(*s, HashAlgorithm::SHA1); } } return hash; } std::optional<uint64_t> Input::getRevCount() const { if (auto n = maybeGetIntAttr(attrs, "revCount")) return *n; return {}; } std::optional<time_t> Input::getLastModified() const { if (auto n = maybeGetIntAttr(attrs, "lastModified")) return *n; return {}; } ParsedURL InputScheme::toURL(const Input & input) const { throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs)); } Input InputScheme::applyOverrides( const Input & input, std::optional<std::string> ref, std::optional<Hash> rev) const { if (ref) throw Error("don't know how to set branch/tag name of input '%s' to '%s'", input.to_string(), *ref); if (rev) throw Error("don't know how to set revision of input '%s' to '%s'", input.to_string(), rev->gitRev()); return input; } std::optional<Path> InputScheme::getSourcePath(const Input & input) const { return {}; } void InputScheme::putFile( const Input & input, const CanonPath & path, std::string_view contents, std::optional<std::string> commitMsg) const { throw Error("input '%s' does not support modifying file '%s'", input.to_string(), path); } void InputScheme::clone(const Input & input, const Path & destDir) const { throw Error("do not know how to clone input '%s'", input.to_string()); } std::optional<ExperimentalFeature> InputScheme::experimentalFeature() const { return {}; } std::string 
publicKeys_to_string(const std::vector<PublicKey>& publicKeys) { return ((nlohmann::json) publicKeys).dump(); } } namespace nlohmann { using namespace nix; #ifndef DOXYGEN_SKIP fetchers::PublicKey adl_serializer<fetchers::PublicKey>::from_json(const json & json) { fetchers::PublicKey res = { }; if (auto type = optionalValueAt(json, "type")) res.type = getString(*type); res.key = getString(valueAt(json, "key")); return res; } void adl_serializer<fetchers::PublicKey>::to_json(json & json, fetchers::PublicKey p) { json["type"] = p.type; json["key"] = p.key; } #endif }
14,550
C++
.cc
404
30.190594
145
0.646146
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,077
mercurial.cc
NixOS_nix/src/libfetchers/mercurial.cc
#include "fetchers.hh" #include "processes.hh" #include "users.hh" #include "cache.hh" #include "globals.hh" #include "tarfile.hh" #include "store-api.hh" #include "url-parts.hh" #include "store-path-accessor.hh" #include "fetch-settings.hh" #include <sys/time.h> using namespace std::string_literals; namespace nix::fetchers { static RunOptions hgOptions(const Strings & args) { auto env = getEnv(); // Set HGPLAIN: this means we get consistent output from hg and avoids leakage from a user or system .hgrc. env["HGPLAIN"] = ""; return { .program = "hg", .lookupPath = true, .args = args, .environment = env }; } // runProgram wrapper that uses hgOptions instead of stock RunOptions. static std::string runHg(const Strings & args, const std::optional<std::string> & input = {}) { RunOptions opts = hgOptions(args); opts.input = input; auto res = runProgram(std::move(opts)); if (!statusOk(res.first)) throw ExecError(res.first, "hg %1%", statusToString(res.first)); return res.second; } struct MercurialInputScheme : InputScheme { std::optional<Input> inputFromURL( const Settings & settings, const ParsedURL & url, bool requireTree) const override { if (url.scheme != "hg+http" && url.scheme != "hg+https" && url.scheme != "hg+ssh" && url.scheme != "hg+file") return {}; auto url2(url); url2.scheme = std::string(url2.scheme, 3); url2.query.clear(); Attrs attrs; attrs.emplace("type", "hg"); for (auto &[name, value] : url.query) { if (name == "rev" || name == "ref") attrs.emplace(name, value); else url2.query.emplace(name, value); } attrs.emplace("url", url2.to_string()); return inputFromAttrs(settings, attrs); } std::string_view schemeName() const override { return "hg"; } StringSet allowedAttrs() const override { return { "url", "ref", "rev", "revCount", "narHash", "name", }; } std::optional<Input> inputFromAttrs( const Settings & settings, const Attrs & attrs) const override { parseURL(getStrAttr(attrs, "url")); if (auto ref = maybeGetStrAttr(attrs, "ref")) { if 
(!std::regex_match(*ref, refRegex)) throw BadURL("invalid Mercurial branch/tag name '%s'", *ref); } Input input{settings}; input.attrs = attrs; return input; } ParsedURL toURL(const Input & input) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); url.scheme = "hg+" + url.scheme; if (auto rev = input.getRev()) url.query.insert_or_assign("rev", rev->gitRev()); if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref); return url; } Input applyOverrides( const Input & input, std::optional<std::string> ref, std::optional<Hash> rev) const override { auto res(input); if (rev) res.attrs.insert_or_assign("rev", rev->gitRev()); if (ref) res.attrs.insert_or_assign("ref", *ref); return res; } std::optional<Path> getSourcePath(const Input & input) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); if (url.scheme == "file" && !input.getRef() && !input.getRev()) return url.path; return {}; } void putFile( const Input & input, const CanonPath & path, std::string_view contents, std::optional<std::string> commitMsg) const override { auto [isLocal, repoPath] = getActualUrl(input); if (!isLocal) throw Error("cannot commit '%s' to Mercurial repository '%s' because it's not a working tree", path, input.to_string()); auto absPath = CanonPath(repoPath) / path; writeFile(absPath.abs(), contents); // FIXME: shut up if file is already tracked. runHg( { "add", absPath.abs() }); if (commitMsg) runHg( { "commit", absPath.abs(), "-m", *commitMsg }); } std::pair<bool, std::string> getActualUrl(const Input & input) const { auto url = parseURL(getStrAttr(input.attrs, "url")); bool isLocal = url.scheme == "file"; return {isLocal, isLocal ? url.path : url.base}; } StorePath fetchToStore(ref<Store> store, Input & input) const { auto origRev = input.getRev(); auto name = input.getName(); auto [isLocal, actualUrl_] = getActualUrl(input); auto actualUrl = actualUrl_; // work around clang bug // FIXME: return lastModified. 
// FIXME: don't clone local repositories. if (!input.getRef() && !input.getRev() && isLocal && pathExists(actualUrl + "/.hg")) { bool clean = runHg({ "status", "-R", actualUrl, "--modified", "--added", "--removed" }) == ""; if (!clean) { /* This is an unclean working tree. So copy all tracked files. */ if (!input.settings->allowDirty) throw Error("Mercurial tree '%s' is unclean", actualUrl); if (input.settings->warnDirty) warn("Mercurial tree '%s' is unclean", actualUrl); input.attrs.insert_or_assign("ref", chomp(runHg({ "branch", "-R", actualUrl }))); auto files = tokenizeString<std::set<std::string>>( runHg({ "status", "-R", actualUrl, "--clean", "--modified", "--added", "--no-status", "--print0" }), "\0"s); Path actualPath(absPath(actualUrl)); PathFilter filter = [&](const Path & p) -> bool { assert(hasPrefix(p, actualPath)); std::string file(p, actualPath.size() + 1); auto st = lstat(p); if (S_ISDIR(st.st_mode)) { auto prefix = file + "/"; auto i = files.lower_bound(prefix); return i != files.end() && hasPrefix(*i, prefix); } return files.count(file); }; auto storePath = store->addToStore( input.getName(), {getFSSourceAccessor(), CanonPath(actualPath)}, ContentAddressMethod::Raw::NixArchive, HashAlgorithm::SHA256, {}, filter); return storePath; } } if (!input.getRef()) input.attrs.insert_or_assign("ref", "default"); auto revInfoKey = [&](const Hash & rev) { if (rev.algo != HashAlgorithm::SHA1) throw Error("Hash '%s' is not supported by Mercurial. Only sha1 is supported.", rev.to_string(HashFormat::Base16, true)); return Cache::Key{"hgRev", { {"store", store->storeDir}, {"name", name}, {"rev", input.getRev()->gitRev()} }}; }; auto makeResult = [&](const Attrs & infoAttrs, const StorePath & storePath) -> StorePath { assert(input.getRev()); assert(!origRev || origRev == input.getRev()); input.attrs.insert_or_assign("revCount", getIntAttr(infoAttrs, "revCount")); return storePath; }; /* Check the cache for the most recent rev for this URL/ref. 
*/ Cache::Key refToRevKey{"hgRefToRev", { {"url", actualUrl}, {"ref", *input.getRef()} }}; if (!input.getRev()) { if (auto res = getCache()->lookupWithTTL(refToRevKey)) input.attrs.insert_or_assign("rev", getRevAttr(*res, "rev").gitRev()); } /* If we have a rev, check if we have a cached store path. */ if (auto rev = input.getRev()) { if (auto res = getCache()->lookupStorePath(revInfoKey(*rev), *store)) return makeResult(res->value, res->storePath); } Path cacheDir = fmt("%s/hg/%s", getCacheDir(), hashString(HashAlgorithm::SHA256, actualUrl).to_string(HashFormat::Nix32, false)); /* If this is a commit hash that we already have, we don't have to pull again. */ if (!(input.getRev() && pathExists(cacheDir) && runProgram(hgOptions({ "log", "-R", cacheDir, "-r", input.getRev()->gitRev(), "--template", "1" })).second == "1")) { Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", actualUrl)); if (pathExists(cacheDir)) { try { runHg({ "pull", "-R", cacheDir, "--", actualUrl }); } catch (ExecError & e) { auto transJournal = cacheDir + "/.hg/store/journal"; /* hg throws "abandoned transaction" error only if this file exists */ if (pathExists(transJournal)) { runHg({ "recover", "-R", cacheDir }); runHg({ "pull", "-R", cacheDir, "--", actualUrl }); } else { throw ExecError(e.status, "'hg pull' %s", statusToString(e.status)); } } } else { createDirs(dirOf(cacheDir)); runHg({ "clone", "--noupdate", "--", actualUrl, cacheDir }); } } /* Fetch the remote rev or ref. */ auto tokens = tokenizeString<std::vector<std::string>>( runHg({ "log", "-R", cacheDir, "-r", input.getRev() ? 
input.getRev()->gitRev() : *input.getRef(), "--template", "{node} {rev} {branch}" })); assert(tokens.size() == 3); auto rev = Hash::parseAny(tokens[0], HashAlgorithm::SHA1); input.attrs.insert_or_assign("rev", rev.gitRev()); auto revCount = std::stoull(tokens[1]); input.attrs.insert_or_assign("ref", tokens[2]); /* Now that we have the rev, check the cache again for a cached store path. */ if (auto res = getCache()->lookupStorePath(revInfoKey(rev), *store)) return makeResult(res->value, res->storePath); Path tmpDir = createTempDir(); AutoDelete delTmpDir(tmpDir, true); runHg({ "archive", "-R", cacheDir, "-r", rev.gitRev(), tmpDir }); deletePath(tmpDir + "/.hg_archival.txt"); auto storePath = store->addToStore(name, {getFSSourceAccessor(), CanonPath(tmpDir)}); Attrs infoAttrs({ {"revCount", (uint64_t) revCount}, }); if (!origRev) getCache()->upsert(refToRevKey, {{"rev", rev.gitRev()}}); getCache()->upsert(revInfoKey(rev), *store, infoAttrs, storePath); return makeResult(infoAttrs, std::move(storePath)); } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override { Input input(_input); auto storePath = fetchToStore(store, input); auto accessor = makeStorePathAccessor(store, storePath); accessor->setPathDisplay("«" + input.to_string() + "»"); return {accessor, input}; } bool isLocked(const Input & input) const override { return (bool) input.getRev(); } std::optional<std::string> getFingerprint(ref<Store> store, const Input & input) const override { if (auto rev = input.getRev()) return rev->gitRev(); else return std::nullopt; } }; static auto rMercurialInputScheme = OnStartup([] { registerInputScheme(std::make_unique<MercurialInputScheme>()); }); }
11,997
C++
.cc
286
31.416084
137
0.550559
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,078
tarball.cc
NixOS_nix/src/libfetchers/tarball.cc
#include "tarball.hh" #include "fetchers.hh" #include "cache.hh" #include "filetransfer.hh" #include "store-api.hh" #include "archive.hh" #include "tarfile.hh" #include "types.hh" #include "store-path-accessor.hh" #include "store-api.hh" #include "git-utils.hh" namespace nix::fetchers { DownloadFileResult downloadFile( ref<Store> store, const std::string & url, const std::string & name, const Headers & headers) { // FIXME: check store Cache::Key key{"file", {{ {"url", url}, {"name", name}, }}}; auto cached = getCache()->lookupStorePath(key, *store); auto useCached = [&]() -> DownloadFileResult { return { .storePath = std::move(cached->storePath), .etag = getStrAttr(cached->value, "etag"), .effectiveUrl = getStrAttr(cached->value, "url"), .immutableUrl = maybeGetStrAttr(cached->value, "immutableUrl"), }; }; if (cached && !cached->expired) return useCached(); FileTransferRequest request(url); request.headers = headers; if (cached) request.expectedETag = getStrAttr(cached->value, "etag"); FileTransferResult res; try { res = getFileTransfer()->download(request); } catch (FileTransferError & e) { if (cached) { warn("%s; using cached version", e.msg()); return useCached(); } else throw; } Attrs infoAttrs({ {"etag", res.etag}, }); if (res.immutableUrl) infoAttrs.emplace("immutableUrl", *res.immutableUrl); std::optional<StorePath> storePath; if (res.cached) { assert(cached); storePath = std::move(cached->storePath); } else { StringSink sink; dumpString(res.data, sink); auto hash = hashString(HashAlgorithm::SHA256, res.data); ValidPathInfo info { *store, name, FixedOutputInfo { .method = FileIngestionMethod::Flat, .hash = hash, .references = {}, }, hashString(HashAlgorithm::SHA256, sink.s), }; info.narSize = sink.s.size(); auto source = StringSource { sink.s }; store->addToStore(info, source, NoRepair, NoCheckSigs); storePath = std::move(info.path); } /* Cache metadata for all URLs in the redirect chain. 
*/ for (auto & url : res.urls) { key.second.insert_or_assign("url", url); assert(!res.urls.empty()); infoAttrs.insert_or_assign("url", *res.urls.rbegin()); getCache()->upsert(key, *store, infoAttrs, *storePath); } return { .storePath = std::move(*storePath), .etag = res.etag, .effectiveUrl = *res.urls.rbegin(), .immutableUrl = res.immutableUrl, }; } static DownloadTarballResult downloadTarball_( const std::string & url, const Headers & headers) { Cache::Key cacheKey{"tarball", {{"url", url}}}; auto cached = getCache()->lookupExpired(cacheKey); auto attrsToResult = [&](const Attrs & infoAttrs) { auto treeHash = getRevAttr(infoAttrs, "treeHash"); return DownloadTarballResult { .treeHash = treeHash, .lastModified = (time_t) getIntAttr(infoAttrs, "lastModified"), .immutableUrl = maybeGetStrAttr(infoAttrs, "immutableUrl"), .accessor = getTarballCache()->getAccessor(treeHash, false), }; }; if (cached && !getTarballCache()->hasObject(getRevAttr(cached->value, "treeHash"))) cached.reset(); if (cached && !cached->expired) /* We previously downloaded this tarball and it's younger than `tarballTtl`, so no need to check the server. */ return attrsToResult(cached->value); auto _res = std::make_shared<Sync<FileTransferResult>>(); auto source = sinkToSource([&](Sink & sink) { FileTransferRequest req(url); req.expectedETag = cached ? getStrAttr(cached->value, "etag") : ""; getFileTransfer()->download(std::move(req), sink, [_res](FileTransferResult r) { *_res->lock() = r; }); }); // TODO: fall back to cached value if download fails. auto act = std::make_unique<Activity>(*logger, lvlInfo, actUnknown, fmt("unpacking '%s' into the Git cache", url)); AutoDelete cleanupTemp; /* Note: if the download is cached, `importTarball()` will receive no data, which causes it to import an empty tarball. */ auto archive = hasSuffix(toLower(parseURL(url).path), ".zip") ? ({ /* In streaming mode, libarchive doesn't handle symlinks in zip files correctly (#10649). 
So write the entire file to disk so libarchive can access it in random-access mode. */ auto [fdTemp, path] = createTempFile("nix-zipfile"); cleanupTemp.reset(path); debug("downloading '%s' into '%s'...", url, path); { FdSink sink(fdTemp.get()); source->drainInto(sink); } TarArchive{path}; }) : TarArchive{*source}; auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); auto tree = parseSink->flush(); act.reset(); auto res(_res->lock()); Attrs infoAttrs; if (res->cached) { /* The server says that the previously downloaded version is still current. */ infoAttrs = cached->value; } else { infoAttrs.insert_or_assign("etag", res->etag); infoAttrs.insert_or_assign("treeHash", tarballCache->dereferenceSingletonDirectory(tree).gitRev()); infoAttrs.insert_or_assign("lastModified", uint64_t(lastModified)); if (res->immutableUrl) infoAttrs.insert_or_assign("immutableUrl", *res->immutableUrl); } /* Insert a cache entry for every URL in the redirect chain. */ for (auto & url : res->urls) { cacheKey.second.insert_or_assign("url", url); getCache()->upsert(cacheKey, infoAttrs); } // FIXME: add a cache entry for immutableUrl? That could allow // cache poisoning. return attrsToResult(infoAttrs); } ref<SourceAccessor> downloadTarball( ref<Store> store, const Settings & settings, const std::string & url) { /* Go through Input::getAccessor() to ensure that the resulting accessor has a fingerprint. */ fetchers::Attrs attrs; attrs.insert_or_assign("type", "tarball"); attrs.insert_or_assign("url", url); auto input = Input::fromAttrs(settings, std::move(attrs)); return input.getAccessor(store).first; } // An input scheme corresponding to a curl-downloadable resource. 
struct CurlInputScheme : InputScheme { const std::set<std::string> transportUrlSchemes = {"file", "http", "https"}; bool hasTarballExtension(std::string_view path) const { return hasSuffix(path, ".zip") || hasSuffix(path, ".tar") || hasSuffix(path, ".tgz") || hasSuffix(path, ".tar.gz") || hasSuffix(path, ".tar.xz") || hasSuffix(path, ".tar.bz2") || hasSuffix(path, ".tar.zst"); } virtual bool isValidURL(const ParsedURL & url, bool requireTree) const = 0; static const std::set<std::string> specialParams; std::optional<Input> inputFromURL( const Settings & settings, const ParsedURL & _url, bool requireTree) const override { if (!isValidURL(_url, requireTree)) return std::nullopt; Input input{settings}; auto url = _url; url.scheme = parseUrlScheme(url.scheme).transport; auto narHash = url.query.find("narHash"); if (narHash != url.query.end()) input.attrs.insert_or_assign("narHash", narHash->second); if (auto i = get(url.query, "rev")) input.attrs.insert_or_assign("rev", *i); if (auto i = get(url.query, "revCount")) if (auto n = string2Int<uint64_t>(*i)) input.attrs.insert_or_assign("revCount", *n); if (auto i = get(url.query, "lastModified")) if (auto n = string2Int<uint64_t>(*i)) input.attrs.insert_or_assign("lastModified", *n); /* The URL query parameters serve two roles: specifying fetch settings for Nix itself, and arbitrary data as part of the HTTP request. Now that we've processed the Nix-specific attributes above, remove them so we don't also send them as part of the HTTP request. 
*/ for (auto & param : allowedAttrs()) url.query.erase(param); input.attrs.insert_or_assign("type", std::string { schemeName() }); input.attrs.insert_or_assign("url", url.to_string()); return input; } StringSet allowedAttrs() const override { return { "type", "url", "narHash", "name", "unpack", "rev", "revCount", "lastModified", }; } std::optional<Input> inputFromAttrs( const Settings & settings, const Attrs & attrs) const override { Input input{settings}; input.attrs = attrs; //input.locked = (bool) maybeGetStrAttr(input.attrs, "hash"); return input; } ParsedURL toURL(const Input & input) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); // NAR hashes are preferred over file hashes since tar/zip // files don't have a canonical representation. if (auto narHash = input.getNarHash()) url.query.insert_or_assign("narHash", narHash->to_string(HashFormat::SRI, true)); return url; } bool isLocked(const Input & input) const override { return (bool) input.getNarHash(); } }; struct FileInputScheme : CurlInputScheme { std::string_view schemeName() const override { return "file"; } bool isValidURL(const ParsedURL & url, bool requireTree) const override { auto parsedUrlScheme = parseUrlScheme(url.scheme); return transportUrlSchemes.count(std::string(parsedUrlScheme.transport)) && (parsedUrlScheme.application ? parsedUrlScheme.application.value() == schemeName() : (!requireTree && !hasTarballExtension(url.path))); } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override { auto input(_input); /* Unlike TarballInputScheme, this stores downloaded files in the Nix store directly, since there is little deduplication benefit in using the Git cache for single big files like tarballs. 
*/ auto file = downloadFile(store, getStrAttr(input.attrs, "url"), input.getName()); auto narHash = store->queryPathInfo(file.storePath)->narHash; input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); auto accessor = makeStorePathAccessor(store, file.storePath); accessor->setPathDisplay("«" + input.to_string() + "»"); return {accessor, input}; } }; struct TarballInputScheme : CurlInputScheme { std::string_view schemeName() const override { return "tarball"; } bool isValidURL(const ParsedURL & url, bool requireTree) const override { auto parsedUrlScheme = parseUrlScheme(url.scheme); return transportUrlSchemes.count(std::string(parsedUrlScheme.transport)) && (parsedUrlScheme.application ? parsedUrlScheme.application.value() == schemeName() : (requireTree || hasTarballExtension(url.path))); } std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override { auto input(_input); auto result = downloadTarball_(getStrAttr(input.attrs, "url"), {}); result.accessor->setPathDisplay("«" + input.to_string() + "»"); if (result.immutableUrl) { auto immutableInput = Input::fromURL(*input.settings, *result.immutableUrl); // FIXME: would be nice to support arbitrary flakerefs // here, e.g. git flakes. 
if (immutableInput.getType() != "tarball") throw Error("tarball 'Link' headers that redirect to non-tarball URLs are not supported"); input = immutableInput; } if (result.lastModified && !input.attrs.contains("lastModified")) input.attrs.insert_or_assign("lastModified", uint64_t(result.lastModified)); input.attrs.insert_or_assign("narHash", getTarballCache()->treeHashToNarHash(result.treeHash).to_string(HashFormat::SRI, true)); return {result.accessor, input}; } std::optional<std::string> getFingerprint(ref<Store> store, const Input & input) const override { if (auto narHash = input.getNarHash()) return narHash->to_string(HashFormat::SRI, true); else if (auto rev = input.getRev()) return rev->gitRev(); else return std::nullopt; } }; static auto rTarballInputScheme = OnStartup([] { registerInputScheme(std::make_unique<TarballInputScheme>()); }); static auto rFileInputScheme = OnStartup([] { registerInputScheme(std::make_unique<FileInputScheme>()); }); }
13,658
C++
.cc
336
32.261905
113
0.618846
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,079
cache.cc
NixOS_nix/src/libfetchers/cache.cc
#include "cache.hh" #include "users.hh" #include "sqlite.hh" #include "sync.hh" #include "store-api.hh" #include <nlohmann/json.hpp> namespace nix::fetchers { static const char * schema = R"sql( create table if not exists Cache ( domain text not null, key text not null, value text not null, timestamp integer not null, primary key (domain, key) ); )sql"; // FIXME: we should periodically purge/nuke this cache to prevent it // from growing too big. struct CacheImpl : Cache { struct State { SQLite db; SQLiteStmt upsert, lookup; }; Sync<State> _state; CacheImpl() { auto state(_state.lock()); auto dbPath = getCacheDir() + "/fetcher-cache-v2.sqlite"; createDirs(dirOf(dbPath)); state->db = SQLite(dbPath); state->db.isCache(); state->db.exec(schema); state->upsert.create(state->db, "insert or replace into Cache(domain, key, value, timestamp) values (?, ?, ?, ?)"); state->lookup.create(state->db, "select value, timestamp from Cache where domain = ? and key = ?"); } void upsert( const Key & key, const Attrs & value) override { _state.lock()->upsert.use() (key.first) (attrsToJSON(key.second).dump()) (attrsToJSON(value).dump()) (time(0)).exec(); } std::optional<Attrs> lookup( const Key & key) override { if (auto res = lookupExpired(key)) return std::move(res->value); return {}; } std::optional<Attrs> lookupWithTTL( const Key & key) override { if (auto res = lookupExpired(key)) { if (!res->expired) return std::move(res->value); debug("ignoring expired cache entry '%s:%s'", key.first, attrsToJSON(key.second).dump()); } return {}; } std::optional<Result> lookupExpired( const Key & key) override { auto state(_state.lock()); auto keyJSON = attrsToJSON(key.second).dump(); auto stmt(state->lookup.use()(key.first)(keyJSON)); if (!stmt.next()) { debug("did not find cache entry for '%s:%s'", key.first, keyJSON); return {}; } auto valueJSON = stmt.getStr(0); auto timestamp = stmt.getInt(1); debug("using cache entry '%s:%s' -> '%s'", key.first, keyJSON, valueJSON); return Result { .expired 
= settings.tarballTtl.get() == 0 || timestamp + settings.tarballTtl < time(0), .value = jsonToAttrs(nlohmann::json::parse(valueJSON)), }; } void upsert( Key key, Store & store, Attrs value, const StorePath & storePath) override { /* Add the store prefix to the cache key to handle multiple store prefixes. */ key.second.insert_or_assign("store", store.storeDir); value.insert_or_assign("storePath", (std::string) storePath.to_string()); upsert(key, value); } std::optional<ResultWithStorePath> lookupStorePath( Key key, Store & store) override { key.second.insert_or_assign("store", store.storeDir); auto res = lookupExpired(key); if (!res) return std::nullopt; auto storePathS = getStrAttr(res->value, "storePath"); res->value.erase("storePath"); ResultWithStorePath res2(*res, StorePath(storePathS)); store.addTempRoot(res2.storePath); if (!store.isValidPath(res2.storePath)) { // FIXME: we could try to substitute 'storePath'. debug("ignoring disappeared cache entry '%s:%s' -> '%s'", key.first, attrsToJSON(key.second).dump(), store.printStorePath(res2.storePath)); return std::nullopt; } debug("using cache entry '%s:%s' -> '%s', '%s'", key.first, attrsToJSON(key.second).dump(), attrsToJSON(res2.value).dump(), store.printStorePath(res2.storePath)); return res2; } std::optional<ResultWithStorePath> lookupStorePathWithTTL( Key key, Store & store) override { auto res = lookupStorePath(std::move(key), store); return res && !res->expired ? res : std::nullopt; } }; ref<Cache> getCache() { static auto cache = std::make_shared<CacheImpl>(); return ref<Cache>(cache); } }
4,542
C++
.cc
137
25.19708
99
0.58156
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,080
registry.cc
NixOS_nix/src/libfetchers/registry.cc
#include "fetch-settings.hh" #include "registry.hh" #include "tarball.hh" #include "users.hh" #include "globals.hh" #include "store-api.hh" #include "local-fs-store.hh" #include <nlohmann/json.hpp> namespace nix::fetchers { std::shared_ptr<Registry> Registry::read( const Settings & settings, const Path & path, RegistryType type) { auto registry = std::make_shared<Registry>(settings, type); if (!pathExists(path)) return std::make_shared<Registry>(settings, type); try { auto json = nlohmann::json::parse(readFile(path)); auto version = json.value("version", 0); if (version == 2) { for (auto & i : json["flakes"]) { auto toAttrs = jsonToAttrs(i["to"]); Attrs extraAttrs; auto j = toAttrs.find("dir"); if (j != toAttrs.end()) { extraAttrs.insert(*j); toAttrs.erase(j); } auto exact = i.find("exact"); registry->entries.push_back( Entry { .from = Input::fromAttrs(settings, jsonToAttrs(i["from"])), .to = Input::fromAttrs(settings, std::move(toAttrs)), .extraAttrs = extraAttrs, .exact = exact != i.end() && exact.value() }); } } else throw Error("flake registry '%s' has unsupported version %d", path, version); } catch (nlohmann::json::exception & e) { warn("cannot parse flake registry '%s': %s", path, e.what()); } catch (Error & e) { warn("cannot read flake registry '%s': %s", path, e.what()); } return registry; } void Registry::write(const Path & path) { nlohmann::json arr; for (auto & entry : entries) { nlohmann::json obj; obj["from"] = attrsToJSON(entry.from.toAttrs()); obj["to"] = attrsToJSON(entry.to.toAttrs()); if (!entry.extraAttrs.empty()) obj["to"].update(attrsToJSON(entry.extraAttrs)); if (entry.exact) obj["exact"] = true; arr.emplace_back(std::move(obj)); } nlohmann::json json; json["version"] = 2; json["flakes"] = std::move(arr); createDirs(dirOf(path)); writeFile(path, json.dump(2)); } void Registry::add( const Input & from, const Input & to, const Attrs & extraAttrs) { entries.emplace_back( Entry { .from = from, .to = to, .extraAttrs = extraAttrs }); } void 
Registry::remove(const Input & input) { // FIXME: use C++20 std::erase. for (auto i = entries.begin(); i != entries.end(); ) if (i->from == input) i = entries.erase(i); else ++i; } static Path getSystemRegistryPath() { return settings.nixConfDir + "/registry.json"; } static std::shared_ptr<Registry> getSystemRegistry(const Settings & settings) { static auto systemRegistry = Registry::read(settings, getSystemRegistryPath(), Registry::System); return systemRegistry; } Path getUserRegistryPath() { return getConfigDir() + "/registry.json"; } std::shared_ptr<Registry> getUserRegistry(const Settings & settings) { static auto userRegistry = Registry::read(settings, getUserRegistryPath(), Registry::User); return userRegistry; } std::shared_ptr<Registry> getCustomRegistry(const Settings & settings, const Path & p) { static auto customRegistry = Registry::read(settings, p, Registry::Custom); return customRegistry; } std::shared_ptr<Registry> getFlagRegistry(const Settings & settings) { static auto flagRegistry = std::make_shared<Registry>(settings, Registry::Flag); return flagRegistry; } void overrideRegistry( const Input & from, const Input & to, const Attrs & extraAttrs) { getFlagRegistry(*from.settings)->add(from, to, extraAttrs); } static std::shared_ptr<Registry> getGlobalRegistry(const Settings & settings, ref<Store> store) { static auto reg = [&]() { auto path = settings.flakeRegistry.get(); if (path == "") { return std::make_shared<Registry>(settings, Registry::Global); // empty registry } if (!hasPrefix(path, "/")) { auto storePath = downloadFile(store, path, "flake-registry.json").storePath; if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>()) store2->addPermRoot(storePath, getCacheDir() + "/flake-registry.json"); path = store->toRealPath(storePath); } return Registry::read(settings, path, Registry::Global); }(); return reg; } Registries getRegistries(const Settings & settings, ref<Store> store) { Registries registries; 
registries.push_back(getFlagRegistry(settings)); registries.push_back(getUserRegistry(settings)); registries.push_back(getSystemRegistry(settings)); registries.push_back(getGlobalRegistry(settings, store)); return registries; } std::pair<Input, Attrs> lookupInRegistries( ref<Store> store, const Input & _input) { Attrs extraAttrs; int n = 0; Input input(_input); restart: n++; if (n > 100) throw Error("cycle detected in flake registry for '%s'", input.to_string()); for (auto & registry : getRegistries(*input.settings, store)) { // FIXME: O(n) for (auto & entry : registry->entries) { if (entry.exact) { if (entry.from == input) { input = entry.to; extraAttrs = entry.extraAttrs; goto restart; } } else { if (entry.from.contains(input)) { input = entry.to.applyOverrides( !entry.from.getRef() && input.getRef() ? input.getRef() : std::optional<std::string>(), !entry.from.getRev() && input.getRev() ? input.getRev() : std::optional<Hash>()); extraAttrs = entry.extraAttrs; goto restart; } } } } if (!input.isDirect()) throw Error("cannot find flake '%s' in the flake registries", input.to_string()); debug("looked up '%s' -> '%s'", _input.to_string(), input.to_string()); return {input, extraAttrs}; } }
6,427
C++
.cc
188
26.457447
111
0.591972
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,081
mounted-source-accessor.cc
NixOS_nix/src/libfetchers/mounted-source-accessor.cc
#include "mounted-source-accessor.hh" namespace nix { struct MountedSourceAccessor : SourceAccessor { std::map<CanonPath, ref<SourceAccessor>> mounts; MountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> _mounts) : mounts(std::move(_mounts)) { displayPrefix.clear(); // Currently we require a root filesystem. This could be relaxed. assert(mounts.contains(CanonPath::root)); // FIXME: return dummy parent directories automatically? } std::string readFile(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); return accessor->readFile(subpath); } bool pathExists(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); return accessor->pathExists(subpath); } std::optional<Stat> maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); return accessor->maybeLstat(subpath); } DirEntries readDirectory(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); return accessor->readDirectory(subpath); } std::string readLink(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); return accessor->readLink(subpath); } std::string showPath(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); return displayPrefix + accessor->showPath(subpath) + displaySuffix; } std::pair<ref<SourceAccessor>, CanonPath> resolve(CanonPath path) { // Find the nearest parent of `path` that is a mount point. std::vector<std::string> subpath; while (true) { auto i = mounts.find(path); if (i != mounts.end()) { std::reverse(subpath.begin(), subpath.end()); return {i->second, CanonPath(subpath)}; } assert(!path.isRoot()); subpath.push_back(std::string(*path.baseName())); path.pop(); } } }; ref<SourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts) { return make_ref<MountedSourceAccessor>(std::move(mounts)); } }
2,272
C++
.cc
64
28.390625
94
0.644323
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,083
run.hh
NixOS_nix/src/nix/run.hh
#pragma once ///@file #include "store-api.hh" namespace nix { enum struct UseLookupPath { Use, DontUse }; void execProgramInStore(ref<Store> store, UseLookupPath useLookupPath, const std::string & program, const Strings & args, std::optional<std::string_view> system = std::nullopt); }
315
C++
.h
14
19.428571
59
0.722973
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,084
self-exe.hh
NixOS_nix/src/nix/self-exe.hh
#pragma once ///@file #include <filesystem> namespace nix { /** * Get a path to the given Nix binary. * * Normally, nix is installed according to `NIX_BIN_DIR`, which is set * at compile time, but can be overridden. * * However, it may not have been installed at all. For example, if it's * a static build, there's a good chance that it has been moved out of * its installation directory. That makes `NIX_BIN_DIR` useless. * Instead, we'll query the OS for the path to the current executable, * using `getSelfExe()`. * * As a last resort, we rely on `PATH`. Hopefully we find a `nix` there * that's compatible. If you're porting Nix to a new platform, that * might be good enough for a while, but you'll want to improve * `getSelfExe()` to work on your platform. * * @param binary_name the exact binary name we're looking up. Might be * `nix-*` instead of `nix` for the legacy CLI commands. Optional to use * current binary name. */ std::filesystem::path getNixBin(std::optional<std::string_view> binary_name = {}); }
1,042
C++
.h
27
36.703704
82
0.723046
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,085
filetransfer.hh
NixOS_nix/src/libstore/filetransfer.hh
#pragma once ///@file #include <string> #include <future> #include "logging.hh" #include "types.hh" #include "ref.hh" #include "config.hh" #include "serialise.hh" namespace nix { struct FileTransferSettings : Config { Setting<bool> enableHttp2{this, true, "http2", "Whether to enable HTTP/2 support."}; Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix", "String appended to the user agent in HTTP requests."}; Setting<size_t> httpConnections{ this, 25, "http-connections", R"( The maximum number of parallel TCP connections used to fetch files from binary caches and by other downloads. It defaults to 25. 0 means no limit. )", {"binary-caches-parallel-connections"}}; Setting<unsigned long> connectTimeout{ this, 0, "connect-timeout", R"( The timeout (in seconds) for establishing connections in the binary cache substituter. It corresponds to `curl`’s `--connect-timeout` option. A value of 0 means no limit. )"}; Setting<unsigned long> stalledDownloadTimeout{ this, 300, "stalled-download-timeout", R"( The timeout (in seconds) for receiving data from servers during download. Nix cancels idle downloads after this timeout's duration. )"}; Setting<unsigned int> tries{this, 5, "download-attempts", "How often Nix will attempt to download a file before giving up."}; Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size", R"( The size of Nix's internal download buffer during `curl` transfers. If data is not processed quickly enough to exceed the size of this buffer, downloads may stall. 
)"}; }; extern FileTransferSettings fileTransferSettings; struct FileTransferRequest { std::string uri; Headers headers; std::string expectedETag; bool verifyTLS = true; bool head = false; size_t tries = fileTransferSettings.tries; unsigned int baseRetryTimeMs = 250; ActivityId parentAct; bool decompress = true; std::optional<std::string> data; std::string mimeType; std::function<void(std::string_view data)> dataCallback; FileTransferRequest(std::string_view uri) : uri(uri), parentAct(getCurActivity()) { } std::string verb() { return data ? "upload" : "download"; } }; struct FileTransferResult { /** * Whether this is a cache hit (i.e. the ETag supplied in the * request is still valid). If so, `data` is empty. */ bool cached = false; /** * The ETag of the object. */ std::string etag; /** * All URLs visited in the redirect chain. */ std::vector<std::string> urls; /** * The response body. */ std::string data; uint64_t bodySize = 0; /** * An "immutable" URL for this resource (i.e. one whose contents * will never change), as returned by the `Link: <url>; * rel="immutable"` header. */ std::optional<std::string> immutableUrl; }; class Store; struct FileTransfer { virtual ~FileTransfer() { } /** * Enqueue a data transfer request, returning a future to the result of * the download. The future may throw a FileTransferError * exception. */ virtual void enqueueFileTransfer(const FileTransferRequest & request, Callback<FileTransferResult> callback) = 0; std::future<FileTransferResult> enqueueFileTransfer(const FileTransferRequest & request); /** * Synchronously download a file. */ FileTransferResult download(const FileTransferRequest & request); /** * Synchronously upload a file. */ FileTransferResult upload(const FileTransferRequest & request); /** * Download a file, writing its data to a sink. The sink will be * invoked on the thread of the caller. 
*/ void download( FileTransferRequest && request, Sink & sink, std::function<void(FileTransferResult)> resultCallback = {}); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; /** * @return a shared FileTransfer object. * * Using this object is preferred because it enables connection reuse * and HTTP/2 multiplexing. */ ref<FileTransfer> getFileTransfer(); /** * @return a new FileTransfer object * * Prefer getFileTransfer() to this; see its docs for why. */ ref<FileTransfer> makeFileTransfer(); class FileTransferError : public Error { public: FileTransfer::Error error; /// intentionally optional std::optional<std::string> response; template<typename... Args> FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args); }; }
4,844
C++
.h
148
27.405405
109
0.67403
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,086
common-ssh-store-config.hh
NixOS_nix/src/libstore/common-ssh-store-config.hh
#pragma once ///@file #include "store-api.hh" namespace nix { class SSHMaster; struct CommonSSHStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params); const Setting<Path> sshKey{this, "", "ssh-key", "Path to the SSH private key used to authenticate to the remote machine."}; const Setting<std::string> sshPublicHostKey{this, "", "base64-ssh-public-host-key", "The public host key of the remote machine."}; const Setting<bool> compress{this, false, "compress", "Whether to enable SSH compression."}; const Setting<std::string> remoteStore{this, "", "remote-store", R"( [Store URL](@docroot@/store/types/index.md#store-url-format) to be used on the remote machine. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly). )"}; /** * The `parseURL` function supports both IPv6 URIs as defined in * RFC2732, but also pure addresses. The latter one is needed here to * connect to a remote store via SSH (it's possible to do e.g. `ssh root@::1`). * * When initialized, the following adjustments are made: * * - If the URL looks like `root@[::1]` (which is allowed by the URL parser and probably * needed to pass further flags), it * will be transformed into `root@::1` for SSH (same for `[::1]` -> `::1`). * * - If the URL looks like `root@::1` it will be left as-is. * * - In any other case, the string will be left as-is. * * Will throw an error if `connStr` is empty too. */ std::string host; /** * Small wrapper around `SSHMaster::SSHMaster` that gets most * arguments from this configuration. * * See that constructor for details on the remaining two arguments. */ SSHMaster createSSHMaster( bool useMaster, Descriptor logFD = INVALID_DESCRIPTOR); }; }
2,021
C++
.h
50
34.56
96
0.653905
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,087
indirect-root-store.hh
NixOS_nix/src/libstore/indirect-root-store.hh
#pragma once ///@file #include "local-fs-store.hh" namespace nix { /** * Mix-in class for implementing permanent roots as a pair of a direct * (strong) reference and indirect weak reference to the first * reference. * * See methods for details on the operations it represents. * * @note * To understand the purpose of this class, it might help to do some * "closed-world" rather than "open-world" reasoning, and consider the * problem it solved for us. This class was factored out from * `LocalFSStore` in order to support the following table, which * contains 4 concrete store types (non-abstract classes, exposed to the * user), and how they implemented the two GC root methods: * * @note * | | `addPermRoot()` | `addIndirectRoot()` | * |-------------------|-----------------|---------------------| * | `LocalStore` | local | local | * | `UDSRemoteStore` | local | remote | * | `SSHStore` | doesn't have | doesn't have | * | `MountedSSHStore` | remote | doesn't have | * * @note * Note how only the local implementations of `addPermRoot()` need * `addIndirectRoot()`; that is what this class enforces. Without it, * and with `addPermRoot()` and `addIndirectRoot()` both `virtual`, we * would accidentally be allowing for a combinatorial explosion of * possible implementations many of which make no sense. Having this and * that invariant enforced cuts down that space. */ struct IndirectRootStore : public virtual LocalFSStore { inline static std::string operationName = "Indirect GC roots registration"; /** * Implementation of `LocalFSStore::addPermRoot` where the permanent * root is a pair of * * - The user-facing symlink which all implementations must create * * - An additional weak reference known as the "indirect root" that * points to that symlink. * * The garbage collector will automatically remove the indirect root * when it finds that the symlink has disappeared. * * The implementation of this method is concrete, but it delegates * to `addIndirectRoot()` which is abstract. 
*/ Path addPermRoot(const StorePath & storePath, const Path & gcRoot) override final; /** * Add an indirect root, which is a weak reference to the * user-facing symlink created by `addPermRoot()`. * * @param path user-facing and user-controlled symlink to a store * path. * * The form this weak-reference takes is implementation-specific. */ virtual void addIndirectRoot(const Path & path) = 0; protected: void makeSymlink(const Path & link, const Path & target); }; }
2,741
C++
.h
68
36.794118
86
0.668417
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,088
nar-info-disk-cache.hh
NixOS_nix/src/libstore/nar-info-disk-cache.hh
#pragma once ///@file #include "ref.hh" #include "nar-info.hh" #include "realisation.hh" namespace nix { class NarInfoDiskCache { public: typedef enum { oValid, oInvalid, oUnknown } Outcome; virtual ~NarInfoDiskCache() { } virtual int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) = 0; struct CacheInfo { int id; bool wantMassQuery; int priority; }; virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0; virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo( const std::string & uri, const std::string & hashPart) = 0; virtual void upsertNarInfo( const std::string & uri, const std::string & hashPart, std::shared_ptr<const ValidPathInfo> info) = 0; virtual void upsertRealisation( const std::string & uri, const Realisation & realisation) = 0; virtual void upsertAbsentRealisation( const std::string & uri, const DrvOutput & id) = 0; virtual std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation( const std::string & uri, const DrvOutput & id) = 0; }; /** * Return a singleton cache object that can be used concurrently by * multiple threads. */ ref<NarInfoDiskCache> getNarInfoDiskCache(); ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath); }
1,417
C++
.h
41
29.658537
86
0.694057
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,089
legacy-ssh-store.hh
NixOS_nix/src/libstore/legacy-ssh-store.hh
#pragma once ///@file #include "common-ssh-store-config.hh" #include "store-api.hh" #include "ssh.hh" #include "callback.hh" #include "pool.hh" namespace nix { struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig { using CommonSSHStoreConfig::CommonSSHStoreConfig; LegacySSHStoreConfig( std::string_view scheme, std::string_view authority, const Params & params); const Setting<Strings> remoteProgram{this, {"nix-store"}, "remote-program", "Path to the `nix-store` executable on the remote machine."}; const Setting<int> maxConnections{this, 1, "max-connections", "Maximum number of concurrent SSH connections."}; const std::string name() override { return "SSH Store"; } static std::set<std::string> uriSchemes() { return {"ssh"}; } std::string doc() override; }; struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store { #ifndef _WIN32 // Hack for getting remote build log output. // Intentionally not in `LegacySSHStoreConfig` so that it doesn't appear in // the documentation const Setting<int> logFD{this, INVALID_DESCRIPTOR, "log-fd", "file descriptor to which SSH's stderr is connected"}; #else Descriptor logFD = INVALID_DESCRIPTOR; #endif struct Connection; ref<Pool<Connection>> connections; SSHMaster master; LegacySSHStore( std::string_view scheme, std::string_view host, const Params & params); ref<Connection> openConnection(); std::string getUri() override; void queryPathInfoUncached(const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override; void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override; void narFromPath(const StorePath & path, Sink & sink) override; std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override { unsupported("queryPathFromHashPart"); } StorePath addToStore( std::string_view name, const SourcePath & path, ContentAddressMethod method, HashAlgorithm hashAlgo, const StorePathSet & references, 
PathFilter & filter, RepairFlag repair) override { unsupported("addToStore"); } virtual StorePath addToStoreFromDump( Source & dump, std::string_view name, FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, HashAlgorithm hashAlgo = HashAlgorithm::SHA256, const StorePathSet & references = StorePathSet(), RepairFlag repair = NoRepair) override { unsupported("addToStore"); } public: BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override; void ensurePath(const StorePath & path) override { unsupported("ensurePath"); } virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override { unsupported("getFSAccessor"); } /** * The default instance would schedule the work on the client side, but * for consistency with `buildPaths` and `buildDerivation` it should happen * on the remote side. * * We make this fail for now so we can add implement this properly later * without it being a breaking change. */ void repairPath(const StorePath & path) override { unsupported("repairPath"); } void computeFSClosure(const StorePathSet & paths, StorePathSet & out, bool flipDirection = false, bool includeOutputs = false, bool includeDerivers = false) override; StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute) override; void connect() override; unsigned int getProtocol() override; /** * The legacy ssh protocol doesn't support checking for trusted-user. * Try using ssh-ng:// instead if you want to know. */ std::optional<TrustedFlag> isTrustedClient() override { return std::nullopt; } void queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); } }; }
4,507
C++
.h
106
36.679245
127
0.718507
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,090
log-store.hh
NixOS_nix/src/libstore/log-store.hh
#pragma once ///@file #include "store-api.hh" namespace nix { struct LogStore : public virtual Store { inline static std::string operationName = "Build log storage and retrieval"; /** * Return the build log of the specified store path, if available, * or null otherwise. */ std::optional<std::string> getBuildLog(const StorePath & path); virtual std::optional<std::string> getBuildLogExact(const StorePath & path) = 0; virtual void addBuildLog(const StorePath & path, std::string_view log) = 0; static LogStore & require(Store & store); }; }
590
C++
.h
17
30.882353
84
0.710993
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,091
names.hh
NixOS_nix/src/libstore/names.hh
#pragma once ///@file #include <memory> #include "types.hh" namespace nix { struct Regex; struct DrvName { std::string fullName; std::string name; std::string version; unsigned int hits; DrvName(); DrvName(std::string_view s); ~DrvName(); bool matches(const DrvName & n); private: std::unique_ptr<Regex> regex; }; typedef std::list<DrvName> DrvNames; std::string_view nextComponent(std::string_view::const_iterator & p, const std::string_view::const_iterator end); std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2); DrvNames drvNamesFromArgs(const Strings & opArgs); }
661
C++
.h
25
23.4
91
0.7296
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,092
worker-protocol.hh
NixOS_nix/src/libstore/worker-protocol.hh
#pragma once ///@file #include <chrono> #include "common-protocol.hh" namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f /* Note: you generally shouldn't change the protocol version. Define a new `WorkerProto::Feature` instead. */ #define PROTOCOL_VERSION (1 << 8 | 38) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) #define STDERR_NEXT 0x6f6c6d67 #define STDERR_READ 0x64617461 // data needed from source #define STDERR_WRITE 0x64617416 // data for sink #define STDERR_LAST 0x616c7473 #define STDERR_ERROR 0x63787470 #define STDERR_START_ACTIVITY 0x53545254 #define STDERR_STOP_ACTIVITY 0x53544f50 #define STDERR_RESULT 0x52534c54 struct StoreDirConfig; struct Source; // items being serialised struct DerivedPath; struct BuildResult; struct KeyedBuildResult; struct ValidPathInfo; struct UnkeyedValidPathInfo; enum BuildMode : uint8_t; enum TrustedFlag : bool; /** * The "worker protocol", used by unix:// and ssh-ng:// stores. * * This `struct` is basically just a `namespace`; We use a type rather * than a namespace just so we can use it as a template argument. */ struct WorkerProto { /** * Enumeration of all the request types for the protocol. */ enum struct Op : uint64_t; /** * Version type for the protocol. * * @todo Convert to struct with separate major vs minor fields. */ using Version = unsigned int; /** * A unidirectional read connection, to be used by the read half of the * canonical serializers below. */ struct ReadConn { Source & from; Version version; }; /** * A unidirectional write connection, to be used by the write half of the * canonical serializers below. */ struct WriteConn { Sink & to; Version version; }; /** * Stripped down serialization logic suitable for sharing with Hydra. * * @todo remove once Hydra uses Store abstraction consistently. 
*/ struct BasicConnection; struct BasicClientConnection; struct BasicServerConnection; /** * Extra information provided as part of protocol negotation. */ struct ClientHandshakeInfo; /** * Data type for canonical pairs of serialisers for the worker protocol. * * See https://en.cppreference.com/w/cpp/language/adl for the broader * concept of what is going on here. */ template<typename T> struct Serialise; // This is the definition of `Serialise` we *want* to put here, but // do not do so. // // The problem is that if we do so, C++ will think we have // seralisers for *all* types. We don't, of course, but that won't // cause an error until link time. That makes for long debug cycles // when there is a missing serialiser. // // By not defining it globally, and instead letting individual // serialisers specialise the type, we get back the compile-time // errors we would like. When no serialiser exists, C++ sees an // abstract "incomplete" type with no definition, and any attempt to // use `to` or `from` static methods is a compile-time error because // they don't exist on an incomplete type. // // This makes for a quicker debug cycle, as desired. #if 0 { static T read(const StoreDirConfig & store, ReadConn conn); static void write(const StoreDirConfig & store, WriteConn conn, const T & t); }; #endif /** * Wrapper function around `WorkerProto::Serialise<T>::write` that allows us to * infer the type instead of having to write it down explicitly. */ template<typename T> static void write(const StoreDirConfig & store, WriteConn conn, const T & t) { WorkerProto::Serialise<T>::write(store, conn, t); } using Feature = std::string; static const std::set<Feature> allFeatures; }; enum struct WorkerProto::Op : uint64_t { IsValidPath = 1, HasSubstitutes = 3, QueryPathHash = 4, // obsolete QueryReferences = 5, // obsolete QueryReferrers = 6, AddToStore = 7, AddTextToStore = 8, // obsolete since 1.25, Nix 3.0. 
Use WorkerProto::Op::AddToStore BuildPaths = 9, EnsurePath = 10, AddTempRoot = 11, AddIndirectRoot = 12, SyncWithGC = 13, FindRoots = 14, ExportPath = 16, // obsolete QueryDeriver = 18, // obsolete SetOptions = 19, CollectGarbage = 20, QuerySubstitutablePathInfo = 21, QueryDerivationOutputs = 22, // obsolete QueryAllValidPaths = 23, QueryFailedPaths = 24, ClearFailedPaths = 25, QueryPathInfo = 26, ImportPaths = 27, // obsolete QueryDerivationOutputNames = 28, // obsolete QueryPathFromHashPart = 29, QuerySubstitutablePathInfos = 30, QueryValidPaths = 31, QuerySubstitutablePaths = 32, QueryValidDerivers = 33, OptimiseStore = 34, VerifyStore = 35, BuildDerivation = 36, AddSignatures = 37, NarFromPath = 38, AddToStoreNar = 39, QueryMissing = 40, QueryDerivationOutputMap = 41, RegisterDrvOutput = 42, QueryRealisation = 43, AddMultipleToStore = 44, AddBuildLog = 45, BuildPathsWithResults = 46, AddPermRoot = 47, }; struct WorkerProto::ClientHandshakeInfo { /** * The version of the Nix daemon that is processing our requests. * * Do note, it may or may not communicating with another daemon, * rather than being an "end" `LocalStore` or similar. */ std::optional<std::string> daemonNixVersion; /** * Whether the remote side trusts us or not. * * 3 values: "yes", "no", or `std::nullopt` for "unknown". * * Note that the "remote side" might not be just the end daemon, but * also an intermediary forwarder that can make its own trusting * decisions. This would be the intersection of all their trust * decisions, since it takes only one link in the chain to start * denying operations. */ std::optional<TrustedFlag> remoteTrustsUs; bool operator == (const ClientHandshakeInfo &) const = default; }; /** * Convenience for sending operation codes. * * @todo Switch to using `WorkerProto::Serialise` instead probably. But * this was not done at this time so there would be less churn. 
*/ inline Sink & operator << (Sink & sink, WorkerProto::Op op) { return sink << static_cast<uint64_t>(op); } /** * Convenience for debugging. * * @todo Perhaps render known opcodes more nicely. */ inline std::ostream & operator << (std::ostream & s, WorkerProto::Op op) { return s << static_cast<uint64_t>(op); } /** * Declare a canonical serialiser pair for the worker protocol. * * We specialise the struct merely to indicate that we are implementing * the function for the given type. * * Some sort of `template<...>` must be used with the caller for this to * be legal specialization syntax. See below for what that looks like in * practice. */ #define DECLARE_WORKER_SERIALISER(T) \ struct WorkerProto::Serialise< T > \ { \ static T read(const StoreDirConfig & store, WorkerProto::ReadConn conn); \ static void write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t); \ }; template<> DECLARE_WORKER_SERIALISER(DerivedPath); template<> DECLARE_WORKER_SERIALISER(BuildResult); template<> DECLARE_WORKER_SERIALISER(KeyedBuildResult); template<> DECLARE_WORKER_SERIALISER(ValidPathInfo); template<> DECLARE_WORKER_SERIALISER(UnkeyedValidPathInfo); template<> DECLARE_WORKER_SERIALISER(BuildMode); template<> DECLARE_WORKER_SERIALISER(std::optional<TrustedFlag>); template<> DECLARE_WORKER_SERIALISER(std::optional<std::chrono::microseconds>); template<> DECLARE_WORKER_SERIALISER(WorkerProto::ClientHandshakeInfo); template<typename T> DECLARE_WORKER_SERIALISER(std::vector<T>); template<typename T> DECLARE_WORKER_SERIALISER(std::set<T>); template<typename... Ts> DECLARE_WORKER_SERIALISER(std::tuple<Ts...>); #define COMMA_ , template<typename K, typename V> DECLARE_WORKER_SERIALISER(std::map<K COMMA_ V>); #undef COMMA_ }
8,157
C++
.h
252
28.480159
100
0.706555
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,093
path-references.hh
NixOS_nix/src/libstore/path-references.hh
#pragma once ///@file #include "references.hh" #include "path.hh" namespace nix { std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs); StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs); class PathRefScanSink : public RefScanSink { std::map<std::string, StorePath> backMap; PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap); public: static PathRefScanSink fromPaths(const StorePathSet & refs); StorePathSet getResultPaths(); }; }
576
C++
.h
16
33.375
100
0.767273
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,094
derivations.hh
NixOS_nix/src/libstore/derivations.hh
#pragma once ///@file #include "path.hh" #include "types.hh" #include "hash.hh" #include "content-address.hh" #include "repair-flag.hh" #include "derived-path-map.hh" #include "sync.hh" #include "variant-wrapper.hh" #include <map> #include <variant> namespace nix { struct StoreDirConfig; /* Abstract syntax of derivations. */ /** * A single output of a BasicDerivation (and Derivation). */ struct DerivationOutput { /** * The traditional non-fixed-output derivation type. */ struct InputAddressed { StorePath path; bool operator == (const InputAddressed &) const = default; auto operator <=> (const InputAddressed &) const = default; }; /** * Fixed-output derivations, whose output paths are content * addressed according to that fixed output. */ struct CAFixed { /** * Method and hash used for expected hash computation. * * References are not allowed by fiat. */ ContentAddress ca; /** * Return the \ref StorePath "store path" corresponding to this output * * @param drvName The name of the derivation this is an output of, without the `.drv`. * @param outputName The name of this output. */ StorePath path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const; bool operator == (const CAFixed &) const = default; auto operator <=> (const CAFixed &) const = default; }; /** * Floating-output derivations, whose output paths are content * addressed, but not fixed, and so are dynamically calculated from * whatever the output ends up being. * */ struct CAFloating { /** * How the file system objects will be serialized for hashing */ ContentAddressMethod method; /** * How the serialization will be hashed */ HashAlgorithm hashAlgo; bool operator == (const CAFloating &) const = default; auto operator <=> (const CAFloating &) const = default; }; /** * Input-addressed output which depends on a (CA) derivation whose hash * isn't known yet. 
*/ struct Deferred { bool operator == (const Deferred &) const = default; auto operator <=> (const Deferred &) const = default; }; /** * Impure output which is moved to a content-addressed location (like * CAFloating) but isn't registered as a realization. */ struct Impure { /** * How the file system objects will be serialized for hashing */ ContentAddressMethod method; /** * How the serialization will be hashed */ HashAlgorithm hashAlgo; bool operator == (const Impure &) const = default; auto operator <=> (const Impure &) const = default; }; typedef std::variant< InputAddressed, CAFixed, CAFloating, Deferred, Impure > Raw; Raw raw; bool operator == (const DerivationOutput &) const = default; auto operator <=> (const DerivationOutput &) const = default; MAKE_WRAPPER_CONSTRUCTOR(DerivationOutput); /** * Force choosing a variant */ DerivationOutput() = delete; /** * \note when you use this function you should make sure that you're * passing the right derivation name. When in doubt, you should use * the safer interface provided by * BasicDerivation::outputsAndOptPaths */ std::optional<StorePath> path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const; nlohmann::json toJSON( const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const; /** * @param xpSettings Stop-gap to avoid globals during unit tests. */ static DerivationOutput fromJSON( const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName, const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }; typedef std::map<std::string, DerivationOutput> DerivationOutputs; /** * These are analogues to the previous DerivationOutputs data type, * but they also contains, for each output, the (optional) store * path in which it would be written. To calculate values of these * types, see the corresponding functions in BasicDerivation. 
*/ typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePath>>> DerivationOutputsAndOptPaths; /** * For inputs that are sub-derivations, we specify exactly which * output IDs we are interested in. */ typedef std::map<StorePath, StringSet> DerivationInputs; struct DerivationType { /** * Input-addressed derivation types */ struct InputAddressed { /** * True iff the derivation type can't be determined statically, * for instance because it (transitively) depends on a content-addressed * derivation. */ bool deferred; bool operator == (const InputAddressed &) const = default; auto operator <=> (const InputAddressed &) const = default; }; /** * Content-addressed derivation types */ struct ContentAddressed { /** * Whether the derivation should be built safely inside a sandbox. */ bool sandboxed; /** * Whether the derivation's outputs' content-addresses are "fixed" * or "floating". * * - Fixed: content-addresses are written down as part of the * derivation itself. If the outputs don't end up matching the * build fails. * * - Floating: content-addresses are not written down, we do not * know them until we perform the build. */ bool fixed; bool operator == (const ContentAddressed &) const = default; auto operator <=> (const ContentAddressed &) const = default; }; /** * Impure derivation type * * This is similar at buil-time to the content addressed, not standboxed, not fixed * type, but has some restrictions on its usage. */ struct Impure { bool operator == (const Impure &) const = default; auto operator <=> (const Impure &) const = default; }; typedef std::variant< InputAddressed, ContentAddressed, Impure > Raw; Raw raw; bool operator == (const DerivationType &) const = default; auto operator <=> (const DerivationType &) const = default; MAKE_WRAPPER_CONSTRUCTOR(DerivationType); /** * Force choosing a variant */ DerivationType() = delete; /** * Do the outputs of the derivation have paths calculated from their * content, or from the derivation itself? 
*/ bool isCA() const; /** * Is the content of the outputs fixed <em>a priori</em> via a hash? * Never true for non-CA derivations. */ bool isFixed() const; /** * Whether the derivation is fully sandboxed. If false, the sandbox * is opened up, e.g. the derivation has access to the network. Note * that whether or not we actually sandbox the derivation is * controlled separately. Always true for non-CA derivations. */ bool isSandboxed() const; /** * Whether the derivation is expected to produce a different result * every time, and therefore it needs to be rebuilt every time. This is * only true for derivations that have the attribute '__impure = * true'. * * Non-impure derivations can still behave impurely, to the degree permitted * by the sandbox. Hence why this method isn't `isPure`: impure derivations * are not the negation of pure derivations. Purity can not be ascertained * except by rather heavy tools. */ bool isImpure() const; /** * Does the derivation knows its own output paths? * Only true when there's no floating-ca derivation involved in the * closure, or if fixed output. */ bool hasKnownOutputPaths() const; }; struct BasicDerivation { /** * keyed on symbolic IDs */ DerivationOutputs outputs; /** * inputs that are sources */ StorePathSet inputSrcs; std::string platform; Path builder; Strings args; StringPairs env; std::string name; BasicDerivation() = default; BasicDerivation(BasicDerivation &&) = default; BasicDerivation(const BasicDerivation &) = default; BasicDerivation& operator=(BasicDerivation &&) = default; BasicDerivation& operator=(const BasicDerivation &) = default; virtual ~BasicDerivation() { }; bool isBuiltin() const; /** * Return true iff this is a fixed-output derivation. */ DerivationType type() const; /** * Return the output names of a derivation. */ StringSet outputNames() const; /** * Calculates the maps that contains all the DerivationOutputs, but * augmented with knowledge of the Store paths they would be written * into. 
*/ DerivationOutputsAndOptPaths outputsAndOptPaths(const StoreDirConfig & store) const; static std::string_view nameFromPath(const StorePath & storePath); bool operator == (const BasicDerivation &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. //auto operator <=> (const BasicDerivation &) const = default; }; class Store; struct Derivation : BasicDerivation { /** * inputs that are sub-derivations */ DerivedPathMap<std::set<OutputName>> inputDrvs; /** * Print a derivation. */ std::string unparse(const StoreDirConfig & store, bool maskOutputs, DerivedPathMap<StringSet>::ChildNode::Map * actualInputs = nullptr) const; /** * Return the underlying basic derivation but with these changes: * * 1. Input drvs are emptied, but the outputs of them that were used * are added directly to input sources. * * 2. Input placeholders are replaced with realized input store * paths. */ std::optional<BasicDerivation> tryResolve(Store & store, Store * evalStore = nullptr) const; /** * Like the above, but instead of querying the Nix database for * realisations, uses a given mapping from input derivation paths + * output names to actual output store paths. */ std::optional<BasicDerivation> tryResolve( Store & store, const std::map<std::pair<StorePath, std::string>, StorePath> & inputDrvOutputs) const; /** * Check that the derivation is valid and does not present any * illegal states. * * This is mainly a matter of checking the outputs, where our C++ * representation supports all sorts of combinations we do not yet * allow. 
*/ void checkInvariants(Store & store, const StorePath & drvPath) const; Derivation() = default; Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { } Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { } nlohmann::json toJSON(const StoreDirConfig & store) const; static Derivation fromJSON( const StoreDirConfig & store, const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); bool operator == (const Derivation &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. //auto operator <=> (const Derivation &) const = default; }; class Store; /** * Write a derivation to the Nix store, and return its path. */ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair = NoRepair, bool readOnly = false); /** * Read a derivation from a file. */ Derivation parseDerivation( const StoreDirConfig & store, std::string && s, std::string_view name, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * \todo Remove. * * Use Path::isDerivation instead. */ bool isDerivation(std::string_view fileName); /** * Calculate the name that will be used for the store path for this * output. * * This is usually <drv-name>-<output-name>, but is just <drv-name> when * the output name is "out". */ std::string outputPathName(std::string_view drvName, OutputNameView outputName); /** * The hashes modulo of a derivation. * * Each output is given a hash, although in practice only the content-addressed * derivations (fixed-output or not) will have a different hash for each * output. */ struct DrvHash { /** * Map from output names to hashes */ std::map<std::string, Hash> hashes; enum struct Kind : bool { /** * Statically determined derivations. * This hash will be directly used to compute the output paths */ Regular, /** * Floating-output derivations (and their reverse dependencies). 
*/ Deferred, }; /** * The kind of derivation this is, simplified for just "derivation hash * modulo" purposes. */ Kind kind; }; void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept; /** * Returns hashes with the details of fixed-output subderivations * expunged. * * A fixed-output derivation is a derivation whose outputs have a * specified content hash and hash algorithm. (Currently they must have * exactly one output (`out`), which is specified using the `outputHash` * and `outputHashAlgo` attributes, but the algorithm doesn't assume * this.) We don't want changes to such derivations to propagate upwards * through the dependency graph, changing output paths everywhere. * * For instance, if we change the url in a call to the `fetchurl` * function, we do not want to rebuild everything depending on it---after * all, (the hash of) the file being downloaded is unchanged. So the * *output paths* should not change. On the other hand, the *derivation * paths* should change to reflect the new dependency graph. * * For fixed-output derivations, this returns a map from the name of * each output to its hash, unique up to the output's contents. * * For regular derivations, it returns a single hash of the derivation * ATerm, after subderivations have been likewise expunged from that * derivation. */ DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs); /** * Return a map associating each output to a hash that uniquely identifies its * derivation (modulo the self-references). * * \todo What is the Hash in this map? */ std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv); /** * Memoisation of hashDerivationModulo(). */ typedef std::map<StorePath, DrvHash> DrvHashes; // FIXME: global, though at least thread-safe. 
extern Sync<DrvHashes> drvHashes; struct Source; struct Sink; Source & readDerivation(Source & in, const StoreDirConfig & store, BasicDerivation & drv, std::string_view name); void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDerivation & drv); /** * This creates an opaque and almost certainly unique string * deterministically from the output name. * * It is used as a placeholder to allow derivations to refer to their * own outputs without needing to use the hash of a derivation in * itself, making the hash near-impossible to calculate. */ std::string hashPlaceholder(const OutputNameView outputName); extern const Hash impureOutputHash; }
15,789
C++
.h
445
30.116854
123
0.678394
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,095
pathlocks.hh
NixOS_nix/src/libstore/pathlocks.hh
#pragma once ///@file #include "file-descriptor.hh" namespace nix { /** * Open (possibly create) a lock file and return the file descriptor. * -1 is returned if create is false and the lock could not be opened * because it doesn't exist. Any other error throws an exception. */ AutoCloseFD openLockFile(const Path & path, bool create); /** * Delete an open lock file. */ void deleteLockFile(const Path & path, Descriptor desc); enum LockType { ltRead, ltWrite, ltNone }; bool lockFile(Descriptor desc, LockType lockType, bool wait); class PathLocks { private: typedef std::pair<Descriptor, Path> FDPair; std::list<FDPair> fds; bool deletePaths; public: PathLocks(); PathLocks(const PathSet & paths, const std::string & waitMsg = ""); bool lockPaths(const PathSet & _paths, const std::string & waitMsg = "", bool wait = true); ~PathLocks(); void unlock(); void setDeletion(bool deletePaths); }; struct FdLock { Descriptor desc; bool acquired = false; FdLock(Descriptor desc, LockType lockType, bool wait, std::string_view waitMsg); ~FdLock() { if (acquired) lockFile(desc, ltNone, false); } }; }
1,218
C++
.h
45
23.355556
84
0.690784
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,096
make-content-addressed.hh
NixOS_nix/src/libstore/make-content-addressed.hh
#pragma once ///@file #include "store-api.hh" namespace nix { /** Rewrite a closure of store paths to be completely content addressed. */ std::map<StorePath, StorePath> makeContentAddressed( Store & srcStore, Store & dstStore, const StorePathSet & rootPaths); /** Rewrite a closure of a store path to be completely content addressed. * * This is a convenience function for the case where you only have one root path. */ StorePath makeContentAddressed( Store & srcStore, Store & dstStore, const StorePath & rootPath); }
552
C++
.h
19
26.315789
81
0.744318
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,097
gc-store.hh
NixOS_nix/src/libstore/gc-store.hh
#pragma once ///@file #include <unordered_set> #include "store-api.hh" namespace nix { typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots; struct GCOptions { /** * Garbage collector operation: * * - `gcReturnLive`: return the set of paths reachable from * (i.e. in the closure of) the roots. * * - `gcReturnDead`: return the set of paths not reachable from * the roots. * * - `gcDeleteDead`: actually delete the latter set. * * - `gcDeleteSpecific`: delete the paths listed in * `pathsToDelete`, insofar as they are not reachable. */ typedef enum { gcReturnLive, gcReturnDead, gcDeleteDead, gcDeleteSpecific, } GCAction; GCAction action{gcDeleteDead}; /** * If `ignoreLiveness` is set, then reachability from the roots is * ignored (dangerous!). However, the paths must still be * unreferenced *within* the store (i.e., there can be no other * store paths that depend on them). */ bool ignoreLiveness{false}; /** * For `gcDeleteSpecific`, the paths to delete. */ StorePathSet pathsToDelete; /** * Stop after at least `maxFreed` bytes have been freed. */ uint64_t maxFreed{std::numeric_limits<uint64_t>::max()}; }; struct GCResults { /** * Depending on the action, the GC roots, or the paths that would * be or have been deleted. */ PathSet paths; /** * For `gcReturnDead`, `gcDeleteDead` and `gcDeleteSpecific`, the * number of bytes that would be or was freed. */ uint64_t bytesFreed = 0; }; /** * Mix-in class for \ref Store "stores" which expose a notion of garbage * collection. * * Garbage collection will allow deleting paths which are not * transitively "rooted". * * The notion of GC roots actually not part of this class. * * - The base `Store` class has `Store::addTempRoot()` because for a store * that doesn't support garbage collection at all, a temporary GC root is * safely implementable as no-op. * * @todo actually this is not so good because stores are *views*. 
* Some views have only a no-op temp roots even though others to the * same store allow triggering GC. For instance one can't add a root * over ssh, but that doesn't prevent someone from gc-ing that store * accesed via SSH locally). * * - The derived `LocalFSStore` class has `LocalFSStore::addPermRoot`, * which is not part of this class because it relies on the notion of * an ambient file system. There are stores (`ssh-ng://`, for one), * that *do* support garbage collection but *don't* expose any file * system, and `LocalFSStore::addPermRoot` thus does not make sense * for them. */ struct GcStore : public virtual Store { inline static std::string operationName = "Garbage collection"; /** * Find the roots of the garbage collector. Each root is a pair * `(link, storepath)` where `link` is the path of the symlink * outside of the Nix store that point to `storePath`. If * `censor` is true, privacy-sensitive information about roots * found in `/proc` is censored. */ virtual Roots findRoots(bool censor) = 0; /** * Perform a garbage collection. */ virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0; }; }
3,428
C++
.h
101
29.653465
84
0.672007
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,098
local-fs-store.hh
NixOS_nix/src/libstore/local-fs-store.hh
#pragma once ///@file #include "store-api.hh" #include "gc-store.hh" #include "log-store.hh" namespace nix { struct LocalFSStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; /** * Used to override the `root` settings. Can't be done via modifying * `params` reliably because this parameter is unused except for * passing to base class constructors. * * @todo Make this less error-prone with new store settings system. */ LocalFSStoreConfig(PathView path, const Params & params); const OptionalPathSetting rootDir{this, std::nullopt, "root", "Directory prefixed to all other paths."}; const PathSetting stateDir{this, rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir, "state", "Directory where Nix will store state."}; const PathSetting logDir{this, rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir, "log", "directory where Nix will store log files."}; const PathSetting realStoreDir{this, rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real", "Physical path of the Nix store."}; }; class LocalFSStore : public virtual LocalFSStoreConfig, public virtual Store, public virtual GcStore, public virtual LogStore { public: inline static std::string operationName = "Local Filesystem Store"; const static std::string drvsLogDir; LocalFSStore(const Params & params); void narFromPath(const StorePath & path, Sink & sink) override; ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override; /** * Creates symlink from the `gcRoot` to the `storePath` and * registers the `gcRoot` as a permanent GC root. The `gcRoot` * symlink lives outside the store and is created and owned by the * user. * * @param gcRoot The location of the symlink. * * @param storePath The store object being rooted. The symlink will * point to `toRealPath(store.printStorePath(storePath))`. * * How the permanent GC root corresponding to this symlink is * managed is implementation-specific. 
*/ virtual Path addPermRoot(const StorePath & storePath, const Path & gcRoot) = 0; virtual Path getRealStoreDir() { return realStoreDir; } Path toRealPath(const Path & storePath) override { assert(isInStore(storePath)); return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1); } std::optional<std::string> getBuildLogExact(const StorePath & path) override; }; }
2,626
C++
.h
67
33.761194
85
0.690673
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,099
local-binary-cache-store.hh
NixOS_nix/src/libstore/local-binary-cache-store.hh
#include "binary-cache-store.hh" namespace nix { struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig { using BinaryCacheStoreConfig::BinaryCacheStoreConfig; LocalBinaryCacheStoreConfig(std::string_view scheme, PathView binaryCacheDir, const Params & params); Path binaryCacheDir; const std::string name() override { return "Local Binary Cache Store"; } static std::set<std::string> uriSchemes(); std::string doc() override; }; }
493
C++
.h
15
28.666667
105
0.755319
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,100
machines.hh
NixOS_nix/src/libstore/machines.hh
#pragma once ///@file #include "ref.hh" #include "store-reference.hh" namespace nix { class Store; struct Machine; typedef std::vector<Machine> Machines; struct Machine { const StoreReference storeUri; const std::set<std::string> systemTypes; const std::string sshKey; const unsigned int maxJobs; const float speedFactor; const std::set<std::string> supportedFeatures; const std::set<std::string> mandatoryFeatures; const std::string sshPublicHostKey; bool enabled = true; /** * @return Whether `system` is either `"builtin"` or in * `systemTypes`. */ bool systemSupported(const std::string & system) const; /** * @return Whether `features` is a subset of the union of `supportedFeatures` and * `mandatoryFeatures`. */ bool allSupported(const std::set<std::string> & features) const; /** * @return Whether `mandatoryFeatures` is a subset of `features`. */ bool mandatoryMet(const std::set<std::string> & features) const; Machine( const std::string & storeUri, decltype(systemTypes) systemTypes, decltype(sshKey) sshKey, decltype(maxJobs) maxJobs, decltype(speedFactor) speedFactor, decltype(supportedFeatures) supportedFeatures, decltype(mandatoryFeatures) mandatoryFeatures, decltype(sshPublicHostKey) sshPublicHostKey); /** * Elaborate `storeUri` into a complete store reference, * incorporating information from the other fields of the `Machine` * as applicable. */ StoreReference completeStoreReference() const; /** * Open a `Store` for this machine. * * Just a simple function composition: * ```c++ * nix::openStore(completeStoreReference()) * ``` */ ref<Store> openStore() const; /** * Parse a machine configuration. * * Every machine is specified on its own line, and lines beginning * with `@` are interpreted as paths to other configuration files in * the same format. */ static Machines parseConfig(const std::set<std::string> & defaultSystems, const std::string & config); }; /** * Parse machines from the global config * * @todo Remove, globals are bad. */ Machines getMachines(); }
2,292
C++
.h
72
26.708333
106
0.681034
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,101
common-protocol.hh
NixOS_nix/src/libstore/common-protocol.hh
#pragma once ///@file #include "serialise.hh" namespace nix { struct StoreDirConfig; struct Source; // items being serialized class StorePath; struct ContentAddress; struct DrvOutput; struct Realisation; /** * Shared serializers between the worker protocol, serve protocol, and a * few others. * * This `struct` is basically just a `namespace`; We use a type rather * than a namespace just so we can use it as a template argument. */ struct CommonProto { /** * A unidirectional read connection, to be used by the read half of the * canonical serializers below. */ struct ReadConn { Source & from; }; /** * A unidirectional write connection, to be used by the write half of the * canonical serializers below. */ struct WriteConn { Sink & to; }; template<typename T> struct Serialise; /** * Wrapper function around `CommonProto::Serialise<T>::write` that allows us to * infer the type instead of having to write it down explicitly. */ template<typename T> static void write(const StoreDirConfig & store, WriteConn conn, const T & t) { CommonProto::Serialise<T>::write(store, conn, t); } }; #define DECLARE_COMMON_SERIALISER(T) \ struct CommonProto::Serialise< T > \ { \ static T read(const StoreDirConfig & store, CommonProto::ReadConn conn); \ static void write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & str); \ } template<> DECLARE_COMMON_SERIALISER(std::string); template<> DECLARE_COMMON_SERIALISER(StorePath); template<> DECLARE_COMMON_SERIALISER(ContentAddress); template<> DECLARE_COMMON_SERIALISER(DrvOutput); template<> DECLARE_COMMON_SERIALISER(Realisation); template<typename T> DECLARE_COMMON_SERIALISER(std::vector<T>); template<typename T> DECLARE_COMMON_SERIALISER(std::set<T>); template<typename... 
Ts> DECLARE_COMMON_SERIALISER(std::tuple<Ts...>); #define COMMA_ , template<typename K, typename V> DECLARE_COMMON_SERIALISER(std::map<K COMMA_ V>); #undef COMMA_ /** * These use the empty string for the null case, relying on the fact * that the underlying types never serialize to the empty string. * * We do this instead of a generic std::optional<T> instance because * ordinal tags (0 or 1, here) are a bit of a compatability hazard. For * the same reason, we don't have a std::variant<T..> instances (ordinal * tags 0...n). * * We could the generic instances and then these as specializations for * compatability, but that's proven a bit finnicky, and also makes the * worker protocol harder to implement in other languages where such * specializations may not be allowed. */ template<> DECLARE_COMMON_SERIALISER(std::optional<StorePath>); template<> DECLARE_COMMON_SERIALISER(std::optional<ContentAddress>); }
2,838
C++
.h
91
28.175824
102
0.732796
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,102
worker-protocol-connection.hh
NixOS_nix/src/libstore/worker-protocol-connection.hh
#pragma once ///@file #include "worker-protocol.hh" #include "store-api.hh" namespace nix { struct WorkerProto::BasicConnection { /** * Send with this. */ FdSink to; /** * Receive with this. */ FdSource from; /** * The protocol version agreed by both sides. */ WorkerProto::Version protoVersion; /** * The set of features that both sides support. */ std::set<Feature> features; /** * Coercion to `WorkerProto::ReadConn`. This makes it easy to use the * factored out serve protocol serializers with a * `LegacySSHStore::Connection`. * * The serve protocol connection types are unidirectional, unlike * this type. */ operator WorkerProto::ReadConn() { return WorkerProto::ReadConn{ .from = from, .version = protoVersion, }; } /** * Coercion to `WorkerProto::WriteConn`. This makes it easy to use the * factored out serve protocol serializers with a * `LegacySSHStore::Connection`. * * The serve protocol connection types are unidirectional, unlike * this type. */ operator WorkerProto::WriteConn() { return WorkerProto::WriteConn{ .to = to, .version = protoVersion, }; } }; struct WorkerProto::BasicClientConnection : WorkerProto::BasicConnection { /** * Flush to direction */ virtual ~BasicClientConnection(); virtual void closeWrite() = 0; std::exception_ptr processStderrReturn(Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true); void processStderr(bool * daemonException, Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true); /** * Establishes connection, negotiating version. * * @return The minimum version supported by both sides and the set * of protocol features supported by both sides. * * @param to Taken by reference to allow for various error handling * mechanisms. * * @param from Taken by reference to allow for various error * handling mechanisms. * * @param localVersion Our version which is sent over. * * @param supportedFeatures The protocol features that we support. */ // FIXME: this should probably be a constructor. 
static std::tuple<Version, std::set<Feature>> handshake( BufferedSink & to, Source & from, WorkerProto::Version localVersion, const std::set<Feature> & supportedFeatures); /** * After calling handshake, must call this to exchange some basic * information abou the connection. */ ClientHandshakeInfo postHandshake(const StoreDirConfig & store); void addTempRoot(const StoreDirConfig & remoteStore, bool * daemonException, const StorePath & path); StorePathSet queryValidPaths( const StoreDirConfig & remoteStore, bool * daemonException, const StorePathSet & paths, SubstituteFlag maybeSubstitute); UnkeyedValidPathInfo queryPathInfo(const StoreDirConfig & store, bool * daemonException, const StorePath & path); void putBuildDerivationRequest( const StoreDirConfig & store, bool * daemonException, const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode); /** * Get the response, must be paired with * `putBuildDerivationRequest`. */ BuildResult getBuildDerivationResponse(const StoreDirConfig & store, bool * daemonException); void narFromPath( const StoreDirConfig & store, bool * daemonException, const StorePath & path, std::function<void(Source &)> fun); void importPaths(const StoreDirConfig & store, bool * daemonException, Source & source); }; struct WorkerProto::BasicServerConnection : WorkerProto::BasicConnection { /** * Establishes connection, negotiating version. * * @return The version provided by the other side of the * connection. * * @param to Taken by reference to allow for various error handling * mechanisms. * * @param from Taken by reference to allow for various error * handling mechanisms. * * @param localVersion Our version which is sent over. * * @param supportedFeatures The protocol features that we support. */ // FIXME: this should probably be a constructor. 
static std::tuple<Version, std::set<Feature>> handshake( BufferedSink & to, Source & from, WorkerProto::Version localVersion, const std::set<Feature> & supportedFeatures); /** * After calling handshake, must call this to exchange some basic * information abou the connection. */ void postHandshake(const StoreDirConfig & store, const ClientHandshakeInfo & info); }; }
4,915
C++
.h
147
27.353741
119
0.669056
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,103
remote-store.hh
NixOS_nix/src/libstore/remote-store.hh
#pragma once ///@file #include <limits> #include <string> #include "store-api.hh" #include "gc-store.hh" #include "log-store.hh" namespace nix { class Pipe; class Pid; struct FdSink; struct FdSource; template<typename T> class Pool; struct RemoteStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; const Setting<int> maxConnections{this, 1, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; const Setting<unsigned int> maxConnectionAge{this, std::numeric_limits<unsigned int>::max(), "max-connection-age", "Maximum age of a connection before it is closed."}; }; /** * \todo RemoteStore is a misnomer - should be something like * DaemonStore. */ class RemoteStore : public virtual RemoteStoreConfig, public virtual Store, public virtual GcStore, public virtual LogStore { public: RemoteStore(const Params & params); /* Implementations of abstract store API methods. */ bool isValidPathUncached(const StorePath & path) override; StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute) override; StorePathSet queryAllValidPaths() override; void queryPathInfoUncached(const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override; void queryReferrers(const StorePath & path, StorePathSet & referrers) override; StorePathSet queryValidDerivers(const StorePath & path) override; StorePathSet queryDerivationOutputs(const StorePath & path) override; std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path, Store * evalStore = nullptr) override; std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override; StorePathSet querySubstitutablePaths(const StorePathSet & paths) override; void querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos) override; /** * Add a content-addressable store path. `dump` will be drained. 
*/ ref<const ValidPathInfo> addCAToStore( Source & dump, std::string_view name, ContentAddressMethod caMethod, HashAlgorithm hashAlgo, const StorePathSet & references, RepairFlag repair); /** * Add a content-addressable store path. `dump` will be drained. */ StorePath addToStoreFromDump( Source & dump, std::string_view name, FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, HashAlgorithm hashAlgo = HashAlgorithm::SHA256, const StorePathSet & references = StorePathSet(), RepairFlag repair = NoRepair) override; void addToStore(const ValidPathInfo & info, Source & nar, RepairFlag repair, CheckSigsFlag checkSigs) override; void addMultipleToStore( Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override; void addMultipleToStore( PathsSource & pathsToCopy, Activity & act, RepairFlag repair, CheckSigsFlag checkSigs) override; void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override; void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override; std::vector<KeyedBuildResult> buildPathsWithResults( const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override; BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; void ensurePath(const StorePath & path) override; void addTempRoot(const StorePath & path) override; Roots findRoots(bool censor) override; void collectGarbage(const GCOptions & options, GCResults & results) override; void optimiseStore() override; bool verifyStore(bool checkContents, RepairFlag repair) override; /** * The default instance would schedule the work on the client side, but * for consistency with `buildPaths` and `buildDerivation` it should happen * on the remote side. 
* * We make this fail for now so we can add implement this properly later * without it being a breaking change. */ void repairPath(const StorePath & path) override { unsupported("repairPath"); } void addSignatures(const StorePath & storePath, const StringSet & sigs) override; void queryMissing(const std::vector<DerivedPath> & targets, StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown, uint64_t & downloadSize, uint64_t & narSize) override; void addBuildLog(const StorePath & drvPath, std::string_view log) override; std::optional<std::string> getVersion() override; void connect() override; unsigned int getProtocol() override; std::optional<TrustedFlag> isTrustedClient() override; void flushBadConnections(); struct Connection; ref<Connection> openConnectionWrapper(); protected: virtual ref<Connection> openConnection() = 0; void initConnection(Connection & conn); ref<Pool<Connection>> connections; virtual void setOptions(Connection & conn); void setOptions() override; struct ConnectionHandle; ConnectionHandle getConnection(); friend struct ConnectionHandle; virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override; virtual void narFromPath(const StorePath & path, Sink & sink) override; private: std::atomic_bool failed{false}; void copyDrvsFromEvalStore( const std::vector<DerivedPath> & paths, std::shared_ptr<Store> evalStore); }; }
6,046
C++
.h
137
38.094891
146
0.733961
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,104
globals.hh
NixOS_nix/src/libstore/globals.hh
#pragma once ///@file #include "types.hh" #include "config.hh" #include "environment-variables.hh" #include "experimental-features.hh" #include "users.hh" #include <map> #include <limits> #include <sys/types.h> namespace nix { typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode; struct MaxBuildJobsSetting : public BaseSetting<unsigned int> { MaxBuildJobsSetting(Config * options, unsigned int def, const std::string & name, const std::string & description, const std::set<std::string> & aliases = {}) : BaseSetting<unsigned int>(def, true, name, description, aliases) { options->addSetting(this); } unsigned int parse(const std::string & str) const override; }; const uint32_t maxIdsPerBuild = #if __linux__ 1 << 16 #else 1 #endif ; class Settings : public Config { unsigned int getDefaultCores(); StringSet getDefaultSystemFeatures(); StringSet getDefaultExtraPlatforms(); bool isWSL1(); Path getDefaultSSLCertFile(); public: Settings(); Path nixPrefix; /** * The directory where we store sources and derived files. */ Path nixStore; Path nixDataDir; /* !!! fix */ /** * The directory where we log various operations. */ Path nixLogDir; /** * The directory where state is stored. */ Path nixStateDir; /** * The directory where system configuration files are stored. */ Path nixConfDir; /** * A list of user configuration files to load. */ std::vector<Path> nixUserConfFiles; /** * The directory where the man pages are stored. */ Path nixManDir; /** * File name of the socket the daemon listens to. */ Path nixDaemonSocketFile; Setting<std::string> storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), "store", R"( The [URL of the Nix store](@docroot@/store/types/index.md#store-url-format) to use for most operations. See the [Store Types](@docroot@/store/types/index.md) section of the manual for supported store types and settings. 
)"}; Setting<bool> keepFailed{this, false, "keep-failed", "Whether to keep temporary directories of failed builds."}; Setting<bool> keepGoing{this, false, "keep-going", "Whether to keep building derivations when another build fails."}; Setting<bool> tryFallback{ this, false, "fallback", R"( If set to `true`, Nix will fall back to building from source if a binary substitute fails. This is equivalent to the `--fallback` flag. The default is `false`. )", {"build-fallback"}}; /** * Whether to show build log output in real time. */ bool verboseBuild = true; Setting<size_t> logLines{this, 25, "log-lines", "The number of lines of the tail of " "the log to show if a build fails."}; MaxBuildJobsSetting maxBuildJobs{ this, 1, "max-jobs", R"( Maximum number of jobs that Nix will try to build locally in parallel. The special value `auto` causes Nix to use the number of CPUs in your system. Use `0` to disable local builds and directly use the remote machines specified in [`builders`](#conf-builders). This will not affect derivations that have [`preferLocalBuild = true`](@docroot@/language/advanced-attributes.md#adv-attr-preferLocalBuild), which are always built locally. > **Note** > > The number of CPU cores to use for each build job is independently determined by the [`cores`](#conf-cores) setting. <!-- TODO(@fricklerhandwerk): would be good to have those shorthands for common options as part of the specification --> The setting can be overridden using the `--max-jobs` (`-j`) command line switch. )", {"build-max-jobs"}}; Setting<unsigned int> maxSubstitutionJobs{ this, 16, "max-substitution-jobs", R"( This option defines the maximum number of substitution jobs that Nix will try to run in parallel. The default is `16`. The minimum value one can choose is `1` and lower values will be interpreted as `1`. 
)", {"substitution-max-jobs"}}; Setting<unsigned int> buildCores{ this, getDefaultCores(), "cores", R"( Sets the value of the `NIX_BUILD_CORES` environment variable in the [invocation of the `builder` executable](@docroot@/language/derivations.md#builder-execution) of a derivation. The `builder` executable can use this variable to control its own maximum amount of parallelism. <!-- FIXME(@fricklerhandwerk): I don't think this should even be mentioned here. A very generic example using `derivation` and `xargs` may be more appropriate to explain the mechanism. Using `mkDerivation` as an example requires being aware of that there are multiple independent layers that are completely opaque here. --> For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it will pass the `-j${NIX_BUILD_CORES}` flag to GNU Make. The value `0` means that the `builder` should use all available CPU cores in the system. > **Note** > > The number of parallel local Nix build jobs is independently controlled with the [`max-jobs`](#conf-max-jobs) setting. )", {"build-cores"}, // Don't document the machine-specific default value false}; /** * Read-only mode. Don't copy stuff to the store, don't change * the database. */ bool readOnlyMode = false; Setting<std::string> thisSystem{ this, SYSTEM, "system", R"( The system type of the current Nix installation. Nix will only build a given [derivation](@docroot@/language/derivations.md) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms). The default value is set when Nix itself is compiled for the system it will run on. The following system types are widely used, as Nix is actively supported on these platforms: - `x86_64-linux` - `x86_64-darwin` - `i686-linux` - `aarch64-linux` - `aarch64-darwin` - `armv6l-linux` - `armv7l-linux` In general, you do not have to modify this setting. 
While you can force Nix to run a Darwin-specific `builder` executable on a Linux machine, the result would obviously be wrong. This value is available in the Nix language as [`builtins.currentSystem`](@docroot@/language/builtins.md#builtins-currentSystem) if the [`eval-system`](#conf-eval-system) configuration option is set as the empty string. )"}; Setting<time_t> maxSilentTime{ this, 0, "max-silent-time", R"( This option defines the maximum number of seconds that a builder can go without producing any data on standard output or standard error. This is useful (for instance in an automated build system) to catch builds that are stuck in an infinite loop, or to catch remote builds that are hanging due to network problems. It can be overridden using the `--max-silent-time` command line switch. The value `0` means that there is no timeout. This is also the default. )", {"build-max-silent-time"}}; Setting<time_t> buildTimeout{ this, 0, "timeout", R"( This option defines the maximum number of seconds that a builder can run. This is useful (for instance in an automated build system) to catch builds that are stuck in an infinite loop but keep writing to their standard output or standard error. It can be overridden using the `--timeout` command line switch. The value `0` means that there is no timeout. This is also the default. )", {"build-timeout"}}; Setting<Strings> buildHook{this, {"nix", "__build-remote"}, "build-hook", R"( The path to the helper program that executes remote builds. Nix communicates with the build hook over `stdio` using a custom protocol to request builds that cannot be performed directly by the Nix daemon. The default value is the internal Nix binary that implements remote building. > **Important** > > Change this setting only if you really know what you’re doing. )"}; Setting<std::string> builders{ this, "@" + nixConfDir + "/machines", "builders", R"( A semicolon- or newline-separated list of build machines. 
In addition to the [usual ways of setting configuration options](@docroot@/command-ref/conf-file.md), the value can be read from a file by prefixing its absolute path with `@`. > **Example** > > This is the default setting: > > ``` > builders = @/etc/nix/machines > ``` Each machine specification consists of the following elements, separated by spaces. Only the first element is required. To leave a field at its default, set it to `-`. 1. The URI of the remote store in the format `ssh://[username@]hostname`. > **Example** > > `ssh://nix@mac` For backward compatibility, `ssh://` may be omitted. The hostname may be an alias defined in `~/.ssh/config`. 2. A comma-separated list of [Nix system types](@docroot@/development/building.md#system-type). If omitted, this defaults to the local platform type. > **Example** > > `aarch64-darwin` It is possible for a machine to support multiple platform types. > **Example** > > `i686-linux,x86_64-linux` 3. The SSH identity file to be used to log in to the remote machine. If omitted, SSH will use its regular identities. > **Example** > > `/home/user/.ssh/id_mac` 4. The maximum number of builds that Nix will execute in parallel on the machine. Typically this should be equal to the number of CPU cores. 5. The “speed factor”, indicating the relative speed of the machine as a positive integer. If there are multiple machines of the right type, Nix will prefer the fastest, taking load into account. 6. A comma-separated list of supported [system features](#conf-system-features). A machine will only be used to build a derivation if all the features in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute are supported by that machine. 7. A comma-separated list of required [system features](#conf-system-features). 
A machine will only be used to build a derivation if all of the machine’s required features appear in the derivation’s [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute. 8. The (base64-encoded) public host key of the remote machine. If omitted, SSH will use its regular `known_hosts` file. The value for this field can be obtained via `base64 -w0`. > **Example** > > Multiple builders specified on the command line: > > ```console > --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' > ``` > **Example** > > This specifies several machines that can perform `i686-linux` builds: > > ``` > nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 8 1 kvm > nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 8 2 > nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 1 2 kvm benchmark > ``` > > However, `poochie` will only build derivations that have the attribute > > ```nix > requiredSystemFeatures = [ "benchmark" ]; > ``` > > or > > ```nix > requiredSystemFeatures = [ "benchmark" "kvm" ]; > ``` > > `itchy` cannot do builds that require `kvm`, but `scratchy` does support such builds. > For regular builds, `itchy` will be preferred over `scratchy` because it has a higher speed factor. For Nix to use substituters, the calling user must be in the [`trusted-users`](#conf-trusted-users) list. > **Note** > > A build machine must be accessible via SSH and have Nix installed. > `nix` must be available in `$PATH` for the user connecting over SSH. > **Warning** > > If you are building via the Nix daemon (default), the Nix daemon user account on the local machine (that is, `root`) requires access to a user account on the remote machine (not necessarily `root`). > > If you can’t or don’t want to configure `root` to be able to access the remote machine, set [`store`](#conf-store) to any [local store](@docroot@/store/types/local-store.html), e.g. 
by passing `--store /tmp` to the command on the local machine. To build only on remote machines and disable local builds, set [`max-jobs`](#conf-max-jobs) to 0. If you want the remote machines to use substituters, set [`builders-use-substitutes`](#conf-builders-use-substituters) to `true`. )", {}, false}; Setting<bool> alwaysAllowSubstitutes{ this, false, "always-allow-substitutes", R"( If set to `true`, Nix will ignore the [`allowSubstitutes`](@docroot@/language/advanced-attributes.md) attribute in derivations and always attempt to use [available substituters](#conf-substituters). )"}; Setting<bool> buildersUseSubstitutes{ this, false, "builders-use-substitutes", R"( If set to `true`, Nix will instruct [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available. It means that remote build hosts will fetch as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all. This can drastically reduce build times if the network connection between the local machine and the remote build host is slow. )"}; Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space", "Amount of reserved disk space for the garbage collector."}; Setting<bool> fsyncMetadata{ this, true, "fsync-metadata", R"( If set to `true`, changes to the Nix store metadata (in `/nix/var/nix/db`) are synchronously flushed to disk. This improves robustness in case of system crashes, but reduces performance. The default is `true`. )"}; Setting<bool> fsyncStorePaths{this, false, "fsync-store-paths", R"( Whether to call `fsync()` on store paths before registering them, to flush them to disk. This improves robustness in case of system crashes, but reduces performance. The default is `false`. 
)"}; Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal", "Whether SQLite should use WAL mode."}; #ifndef _WIN32 // FIXME: remove this option, `fsync-store-paths` is faster. Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering", "Whether to call `sync()` before registering a path as valid."}; #endif Setting<bool> useSubstitutes{ this, true, "substitute", R"( If set to `true` (default), Nix will use binary substitutes if available. This option can be disabled to force building from source. )", {"build-use-substitutes"}}; Setting<std::string> buildUsersGroup{ this, "", "build-users-group", R"( This options specifies the Unix group containing the Nix build user accounts. In multi-user Nix installations, builds should not be performed by the Nix account since that would allow users to arbitrarily modify the Nix store and database by supplying specially crafted builders; and they cannot be performed by the calling user since that would allow him/her to influence the build result. Therefore, if this option is non-empty and specifies a valid group, builds will be performed under the user accounts that are a member of the group specified here (as listed in `/etc/group`). Those user accounts should not be used for any other purpose\! Nix will never run two builds under the same user account at the same time. This is to prevent an obvious security hole: a malicious user writing a Nix expression that modifies the build result of a legitimate Nix expression being built by another user. Therefore it is good to have as many Nix build user accounts as you can spare. (Remember: uids are cheap.) The build users should have permission to create files in the Nix store, but not delete them. Therefore, `/nix/store` should be owned by the Nix account, its group should be the group specified here, and its mode should be `1775`. 
If the build users group is empty, builds will be performed under the uid of the Nix process (that is, the uid of the caller if `NIX_REMOTE` is empty, the uid under which the Nix daemon runs if `NIX_REMOTE` is `daemon`). Obviously, this should not be used with a nix daemon accessible to untrusted clients. Defaults to `nixbld` when running as root, *empty* otherwise. )", {}, false}; Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids", R"( Whether to select UIDs for builds automatically, instead of using the users in `build-users-group`. UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS. )", {}, true, Xp::AutoAllocateUids}; Setting<uint32_t> startId{this, #if __linux__ 0x34000000, #else 56930, #endif "start-id", "The first UID and GID to use for dynamic ID allocation."}; Setting<uint32_t> uidCount{this, #if __linux__ maxIdsPerBuild * 128, #else 128, #endif "id-count", "The number of UIDs/GIDs to use for dynamic ID allocation."}; #if __linux__ Setting<bool> useCgroups{ this, false, "use-cgroups", R"( Whether to execute builds inside cgroups. This is only supported on Linux. Cgroups are required and enabled automatically for derivations that require the `uid-range` system feature. )"}; #endif Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26", "Whether to impersonate a Linux 2.6 machine on newer kernels.", {"build-impersonate-linux-26"}}; Setting<bool> keepLog{ this, true, "keep-build-log", R"( If set to `true` (the default), Nix will write the build log of a derivation (i.e. the standard output and error of its builder) to the directory `/nix/var/log/nix/drvs`. The build log can be retrieved using the command `nix-store -l path`. )", {"build-keep-log"}}; Setting<bool> compressLog{ this, true, "compress-build-log", R"( If set to `true` (the default), build logs written to `/nix/var/log/nix/drvs` will be compressed on the fly using bzip2. Otherwise, they will not be compressed. 
)", {"build-compress-log"}}; Setting<unsigned long> maxLogSize{ this, 0, "max-build-log-size", R"( This option defines the maximum number of bytes that a builder can write to its stdout/stderr. If the builder exceeds this limit, it’s killed. A value of `0` (the default) means that there is no limit. )", {"build-max-log-size"}}; Setting<unsigned int> pollInterval{this, 5, "build-poll-interval", "How often (in seconds) to poll for locks."}; Setting<bool> gcKeepOutputs{ this, false, "keep-outputs", R"( If `true`, the garbage collector will keep the outputs of non-garbage derivations. If `false` (default), outputs will be deleted unless they are GC roots themselves (or reachable from other roots). In general, outputs must be registered as roots separately. However, even if the output of a derivation is registered as a root, the collector will still delete store paths that are used only at build time (e.g., the C compiler, or source tarballs downloaded from the network). To prevent it from doing so, set this option to `true`. )", {"gc-keep-outputs"}}; Setting<bool> gcKeepDerivations{ this, true, "keep-derivations", R"( If `true` (default), the garbage collector will keep the derivations from which non-garbage store paths were built. If `false`, they will be deleted unless explicitly registered as a root (or reachable from other roots). Keeping derivation around is useful for querying and traceability (e.g., it allows you to ask with what dependencies or options a store path was built), so by default this option is on. Turn it off to save a bit of disk space (or a lot if `keep-outputs` is also turned on). )", {"gc-keep-derivations"}}; Setting<bool> autoOptimiseStore{ this, false, "auto-optimise-store", R"( If set to `true`, Nix automatically detects files in the store that have identical contents, and replaces them with hard links to a single copy. This saves disk space. 
If set to `false` (the default), you can still run `nix-store --optimise` to get rid of duplicate files. )"}; Setting<bool> envKeepDerivations{ this, false, "keep-env-derivations", R"( If `false` (default), derivations are not stored in Nix user environments. That is, the derivations of any build-time-only dependencies may be garbage-collected. If `true`, when you add a Nix derivation to a user environment, the path of the derivation is stored in the user environment. Thus, the derivation will not be garbage-collected until the user environment generation is deleted (`nix-env --delete-generations`). To prevent build-time-only dependencies from being collected, you should also turn on `keep-outputs`. The difference between this option and `keep-derivations` is that this one is “sticky”: it applies to any user environment created while this option was enabled, while `keep-derivations` only applies at the moment the garbage collector is run. )", {"env-keep-derivations"}}; Setting<SandboxMode> sandboxMode{ this, #if __linux__ smEnabled #else smDisabled #endif , "sandbox", R"( If set to `true`, builds will be performed in a *sandboxed environment*, i.e., they’re isolated from the normal file system hierarchy and will only see their dependencies in the Nix store, the temporary build directory, private versions of `/proc`, `/dev`, `/dev/shm` and `/dev/pts` (on Linux), and the paths configured with the `sandbox-paths` option. This is useful to prevent undeclared dependencies on files in directories such as `/usr/bin`. In addition, on Linux, builds run in private PID, mount, network, IPC and UTS namespaces to isolate them from other processes in the system (except that fixed-output derivations do not run in private network namespace to ensure they can access the network). Currently, sandboxing only work on Linux and macOS. 
The use of a sandbox requires that Nix is run as root (so you should use the “build users” feature to perform the actual builds under different users than root). If this option is set to `relaxed`, then fixed-output derivations and derivations that have the `__noChroot` attribute set to `true` do not run in sandboxes. The default is `true` on Linux and `false` on all other platforms. )", {"build-use-chroot", "build-use-sandbox"}}; Setting<PathSet> sandboxPaths{ this, {}, "sandbox-paths", R"( A list of paths bind-mounted into Nix sandbox environments. You can use the syntax `target=source` to mount a path in a different location in the sandbox; for instance, `/bin=/nix-bin` will mount the path `/nix-bin` as `/bin` inside the sandbox. If *source* is followed by `?`, then it is not an error if *source* does not exist; for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will only be mounted in the sandbox if it exists in the host filesystem. If the source is in the Nix store, then its closure will be added to the sandbox as well. Depending on how Nix was built, the default value for this option may be empty or provide `/bin/sh` as a bind-mount of `bash`. )", {"build-chroot-dirs", "build-sandbox-paths"}}; Setting<bool> sandboxFallback{this, true, "sandbox-fallback", "Whether to disable sandboxing when the kernel doesn't allow it."}; #ifndef _WIN32 Setting<bool> requireDropSupplementaryGroups{this, isRootUser(), "require-drop-supplementary-groups", R"( Following the principle of least privilege, Nix will attempt to drop supplementary groups when building with sandboxing. However this can fail under some circumstances. For example, if the user lacks the `CAP_SETGID` capability. Search `setgroups(2)` for `EPERM` to find more detailed information on this. If you encounter such a failure, setting this option to `false` will let you ignore it and continue. But before doing so, you should consider the security implications carefully. 
Not dropping supplementary groups means the build sandbox will be less restricted than intended. This option defaults to `true` when the user is root (since `root` usually has permissions to call setgroups) and `false` otherwise. )"}; #endif #if __linux__ Setting<std::string> sandboxShmSize{ this, "50%", "sandbox-dev-shm-size", R"( *Linux only* This option determines the maximum size of the `tmpfs` filesystem mounted on `/dev/shm` in Linux sandboxes. For the format, see the description of the `size` option of `tmpfs` in mount(8). The default is `50%`. )"}; Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir", R"( *Linux only* The build directory inside the sandbox. This directory is backed by [`build-dir`](#conf-build-dir) on the host. )"}; #endif Setting<std::optional<Path>> buildDir{this, std::nullopt, "build-dir", R"( The directory on the host, in which derivations' temporary build directories are created. If not set, Nix will use the system temporary directory indicated by the `TMPDIR` environment variable. Note that builds are often performed by the Nix daemon, so its `TMPDIR` is used, and not that of the Nix command line interface. This is also the location where [`--keep-failed`](@docroot@/command-ref/opt-common.md#opt-keep-failed) leaves its files. If Nix runs without sandbox, or if the platform does not support sandboxing with bind mounts (e.g. macOS), then the [`builder`](@docroot@/language/derivations.md#attr-builder)'s environment will contain this directory, instead of the virtual location [`sandbox-build-dir`](#conf-sandbox-build-dir). 
)"}; Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps", "Which prefixes to allow derivations to ask for access to (primarily for Darwin)."}; #if __APPLE__ Setting<bool> darwinLogSandboxViolations{this, false, "darwin-log-sandbox-violations", "Whether to log Darwin sandbox access violations to the system log."}; #endif Setting<bool> runDiffHook{ this, false, "run-diff-hook", R"( If true, enable the execution of the `diff-hook` program. When using the Nix daemon, `run-diff-hook` must be set in the `nix.conf` configuration file, and cannot be passed at the command line. )"}; OptionalPathSetting diffHook{ this, std::nullopt, "diff-hook", R"( Absolute path to an executable capable of diffing build results. The hook is executed if `run-diff-hook` is true, and the output of a build is known to not be the same. This program is not executed to determine if two results are the same. The diff hook is executed by the same user and group who ran the build. However, the diff hook does not have write access to the store path just built. The diff hook program receives three parameters: 1. A path to the previous build's results 2. A path to the current build's results 3. The path to the build's derivation 4. The path to the build's scratch directory. This directory will exist only if the build was run with `--keep-failed`. The stderr and stdout output from the diff hook will not be displayed to the user. Instead, it will print to the nix-daemon's log. When using the Nix daemon, `diff-hook` must be set in the `nix.conf` configuration file, and cannot be passed at the command line. )"}; Setting<Strings> trustedPublicKeys{ this, {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}, "trusted-public-keys", R"( A whitespace-separated list of public keys. 
At least one of the following condition must be met for Nix to accept copying a store object from another Nix store (such as a [substituter](#conf-substituters)): - the store object has been signed using a key in the trusted keys list - the [`require-sigs`](#conf-require-sigs) option has been set to `false` - the store URL is configured with `trusted=true` - the store object is [content-addressed](@docroot@/glossary.md#gloss-content-addressed-store-object) )", {"binary-cache-public-keys"}}; Setting<Strings> secretKeyFiles{ this, {}, "secret-key-files", R"( A whitespace-separated list of files containing secret (private) keys. These are used to sign locally-built paths. They can be generated using `nix-store --generate-binary-cache-key`. The corresponding public key can be distributed to other users, who can add it to `trusted-public-keys` in their `nix.conf`. )"}; Setting<unsigned int> tarballTtl{ this, 60 * 60, "tarball-ttl", R"( The number of seconds a downloaded tarball is considered fresh. If the cached tarball is stale, Nix will check whether it is still up to date using the ETag header. Nix will download a new version if the ETag header is unsupported, or the cached ETag doesn't match. Setting the TTL to `0` forces Nix to always check if the tarball is up to date. Nix caches tarballs in `$XDG_CACHE_HOME/nix/tarballs`. Files fetched via `NIX_PATH`, `fetchGit`, `fetchMercurial`, `fetchTarball`, and `fetchurl` respect this TTL. )"}; Setting<bool> requireSigs{ this, true, "require-sigs", R"( If set to `true` (the default), any non-content-addressed path added or copied to the Nix store (e.g. when substituting from a binary cache) must have a signature by a trusted key. A trusted key is one listed in `trusted-public-keys`, or a public key counterpart to a private key stored in a file listed in `secret-key-files`. Set to `false` to disable signature checking and trust all non-content-addressed paths unconditionally. 
(Content-addressed paths are inherently trustworthy and thus unaffected by this configuration option.) )"}; Setting<StringSet> extraPlatforms{ this, getDefaultExtraPlatforms(), "extra-platforms", R"( System types of executables that can be run on this machine. Nix will only build a given [derivation](@docroot@/language/derivations.md) locally when its `system` attribute equals any of the values specified here or in the [`system` option](#conf-system). Setting this can be useful to build derivations locally on compatible machines: - `i686-linux` executables can be run on `x86_64-linux` machines (set by default) - `x86_64-darwin` executables can be run on macOS `aarch64-darwin` with Rosetta 2 (set by default where applicable) - `armv6` and `armv5tel` executables can be run on `armv7` - some `aarch64` machines can also natively run 32-bit ARM code - `qemu-user` may be used to support non-native platforms (though this may be slow and buggy) Build systems will usually detect the target platform to be the current physical system and therefore produce machine code incompatible with what may be intended in the derivation. You should design your derivation's `builder` accordingly and cross-check the results when using this option against natively-built versions of your derivation. )", {}, // Don't document the machine-specific default value false}; Setting<StringSet> systemFeatures{ this, getDefaultSystemFeatures(), "system-features", R"( A set of system “features” supported by this machine. This complements the [`system`](#conf-system) and [`extra-platforms`](#conf-extra-platforms) configuration options and the corresponding [`system`](@docroot@/language/derivations.md#attr-system) attribute on derivations. A derivation can require system features in the [`requiredSystemFeatures` attribute](@docroot@/language/advanced-attributes.md#adv-attr-requiredSystemFeatures), and the machine to build the derivation must have them. 
System features are user-defined, but Nix sets the following defaults: - `apple-virt` Included on Darwin if virtualization is available. - `kvm` Included on Linux if `/dev/kvm` is accessible. - `nixos-test`, `benchmark`, `big-parallel` These historical pseudo-features are always enabled for backwards compatibility, as they are used in Nixpkgs to route Hydra builds to specific machines. - `ca-derivations` Included by default if the [`ca-derivations` experimental feature](@docroot@/development/experimental-features.md#xp-feature-ca-derivations) is enabled. This system feature is implicitly required by derivations with the [`__contentAddressed` attribute](@docroot@/language/advanced-attributes.md#adv-attr-__contentAddressed). - `recursive-nix` Included by default if the [`recursive-nix` experimental feature](@docroot@/development/experimental-features.md#xp-feature-recursive-nix) is enabled. - `uid-range` On Linux, Nix can run builds in a user namespace where they run as root (UID 0) and have 65,536 UIDs available. This is primarily useful for running containers such as `systemd-nspawn` inside a Nix build. For an example, see [`tests/systemd-nspawn/nix`][nspawn]. [nspawn]: https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix. Included by default on Linux if the [`auto-allocate-uids`](#conf-auto-allocate-uids) setting is enabled. )", {}, // Don't document the machine-specific default value false}; Setting<Strings> substituters{ this, Strings{"https://cache.nixos.org/"}, "substituters", R"( A list of [URLs of Nix stores](@docroot@/store/types/index.md#store-url-format) to be used as substituters, separated by whitespace. A substituter is an additional [store](@docroot@/glossary.md#gloss-store) from which Nix can obtain [store objects](@docroot@/store/store-object.md) instead of building them. Substituters are tried based on their priority value, which each substituter can set independently. 
Lower value means higher priority. The default is `https://cache.nixos.org`, which has a priority of 40. At least one of the following conditions must be met for Nix to use a substituter: - The substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list - The user calling Nix is in the [`trusted-users`](#conf-trusted-users) list In addition, each store path should be trusted as described in [`trusted-public-keys`](#conf-trusted-public-keys) )", {"binary-caches"}}; Setting<StringSet> trustedSubstituters{ this, {}, "trusted-substituters", R"( A list of [Nix store URLs](@docroot@/store/types/index.md#store-url-format), separated by whitespace. These are not used by default, but users of the Nix daemon can enable them by specifying [`substituters`](#conf-substituters). Unprivileged users (those set in only [`allowed-users`](#conf-allowed-users) but not [`trusted-users`](#conf-trusted-users)) can pass as `substituters` only those URLs listed in `trusted-substituters`. )", {"trusted-binary-caches"}}; Setting<unsigned int> ttlNegativeNarInfoCache{ this, 3600, "narinfo-cache-negative-ttl", R"( The TTL in seconds for negative lookups. If a store path is queried from a [substituter](#conf-substituters) but was not found, there will be a negative lookup cached in the local disk cache database for the specified duration. Set to `0` to force updating the lookup cache. To wipe the lookup cache completely: ```shell-session $ rm $HOME/.cache/nix/binary-cache-v*.sqlite* # rm /root/.cache/nix/binary-cache-v*.sqlite* ``` )"}; Setting<unsigned int> ttlPositiveNarInfoCache{ this, 30 * 24 * 3600, "narinfo-cache-positive-ttl", R"( The TTL in seconds for positive lookups. If a store path is queried from a substituter, the result of the query will be cached in the local disk cache database including some of the NAR metadata. 
The default TTL is a month, setting a shorter TTL for positive lookups can be useful for binary caches that have frequent garbage collection, in which case having a more frequent cache invalidation would prevent trying to pull the path again and failing with a hash mismatch if the build isn't reproducible. )"}; Setting<bool> printMissing{this, true, "print-missing", "Whether to print what paths need to be built or downloaded."}; Setting<std::string> preBuildHook{ this, "", "pre-build-hook", R"( If set, the path to a program that can set extra derivation-specific settings for this system. This is used for settings that can't be captured by the derivation model itself and are too variable between different versions of the same system to be hard-coded into nix. The hook is passed the derivation path and, if sandboxes are enabled, the sandbox directory. It can then modify the sandbox and send a series of commands to modify various settings to stdout. The currently recognized commands are: - `extra-sandbox-paths`\ Pass a list of files and directories to be included in the sandbox for this build. One entry per line, terminated by an empty line. Entries have the same format as `sandbox-paths`. )"}; Setting<std::string> postBuildHook{ this, "", "post-build-hook", R"( Optional. The path to a program to execute after each build. This option is only settable in the global `nix.conf`, or on the command line by trusted users. When using the nix-daemon, the daemon executes the hook as `root`. If the nix-daemon is not involved, the hook runs as the user executing the nix-build. - The hook executes after an evaluation-time build. - The hook does not execute on substituted paths. - The hook's output always goes to the user's terminal. - If the hook fails, the build succeeds but no further builds execute. - The hook executes synchronously, and blocks other builds from progressing while it runs. The program executes with no arguments. 
The program's environment contains the following environment variables: - `DRV_PATH` The derivation for the built paths. Example: `/nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv` - `OUT_PATHS` Output paths of the built derivation, separated by a space character. Example: `/nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev /nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc /nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info /nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man /nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`. )"}; Setting<unsigned int> downloadSpeed { this, 0, "download-speed", R"( Specify the maximum transfer rate in kilobytes per second you want Nix to use for downloads. )"}; Setting<std::string> netrcFile{ this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file", R"( If set to an absolute path to a `netrc` file, Nix will use the HTTP authentication credentials in this file when trying to download from a remote host through HTTP or HTTPS. Defaults to `$NIX_CONF_DIR/netrc`. The `netrc` file consists of a list of accounts in the following format: machine my-machine login my-username password my-password For the exact syntax, see [the `curl` documentation](https://ec.haxx.se/usingcurl-netrc.html). > **Note** > > This must be an absolute path, and `~` is not resolved. For > example, `~/.netrc` won't resolve to your home directory's > `.netrc`. )"}; Setting<Path> caFile{ this, getDefaultSSLCertFile(), "ssl-cert-file", R"( The path of a file containing CA certificates used to authenticate `https://` downloads. Nix by default will use the first of the following files that exists: 1. `/etc/ssl/certs/ca-certificates.crt` 2. `/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt` The path can be overridden by the following environment variables, in order of precedence: 1. `NIX_SSL_CERT_FILE` 2. 
`SSL_CERT_FILE` )"}; #if __linux__ Setting<bool> filterSyscalls{ this, true, "filter-syscalls", R"( Whether to prevent certain dangerous system calls, such as creation of setuid/setgid files or adding ACLs or extended attributes. Only disable this if you're aware of the security implications. )"}; Setting<bool> allowNewPrivileges{ this, false, "allow-new-privileges", R"( (Linux-specific.) By default, builders on Linux cannot acquire new privileges by calling setuid/setgid programs or programs that have file capabilities. For example, programs such as `sudo` or `ping` will fail. (Note that in sandbox builds, no such programs are available unless you bind-mount them into the sandbox via the `sandbox-paths` option.) You can allow the use of such programs by enabling this option. This is impure and usually undesirable, but may be useful in certain scenarios (e.g. to spin up containers or set up userspace network interfaces in tests). )"}; #endif #if HAVE_ACL_SUPPORT Setting<StringSet> ignoredAcls{ this, {"security.selinux", "system.nfs4_acl", "security.csm"}, "ignored-acls", R"( A list of ACLs that should be ignored, normally Nix attempts to remove all ACLs from files and directories in the Nix store, but some ACLs like `security.selinux` or `system.nfs4_acl` can't be removed even by root. Therefore it's best to just ignore them. )"}; #endif Setting<Strings> hashedMirrors{ this, {}, "hashed-mirrors", R"( A list of web servers used by `builtins.fetchurl` to obtain files by hash. Given a hash algorithm *ha* and a base-16 hash *h*, Nix will try to download the file from *hashed-mirror*/*ha*/*h*. This allows files to be downloaded even if they have disappeared from their original URI. 
For example, given an example mirror `http://tarballs.nixos.org/`, when building the derivation ```nix builtins.fetchurl { url = "https://example.org/foo-1.2.3.tar.xz"; sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; } ``` Nix will attempt to download this file from `http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae` first. If it is not available there, if will try the original URI. )"}; Setting<uint64_t> minFree{ this, 0, "min-free", R"( When free disk space in `/nix/store` drops below `min-free` during a build, Nix performs a garbage-collection until `max-free` bytes are available or there is no more garbage. A value of `0` (the default) disables this feature. )"}; Setting<uint64_t> maxFree{ // n.b. this is deliberately int64 max rather than uint64 max because // this goes through the Nix language JSON parser and thus needs to be // representable in Nix language integers. this, std::numeric_limits<int64_t>::max(), "max-free", R"( When a garbage collection is triggered by the `min-free` option, it stops as soon as `max-free` bytes are available. The default is infinity (i.e. delete all garbage). )"}; Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval", "Number of seconds between checking free disk space."}; Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size", "Maximum size of NARs before spilling them to disk."}; Setting<bool> allowSymlinkedStore{ this, false, "allow-symlinked-store", R"( If set to `true`, Nix will stop complaining if the store directory (typically /nix/store) contains symlink components. This risks making some builds "impure" because builders sometimes "canonicalise" paths by resolving all symlink components. Problems occur if those builds are then deployed to machines where /nix/store resolves to a different location from that of the build machine. You can enable this setting if you are sure you're not going to do that. 
)"}; Setting<bool> useXDGBaseDirectories{ this, false, "use-xdg-base-directories", R"( If set to `true`, Nix will conform to the [XDG Base Directory Specification] for files in `$HOME`. The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/command-ref/env-common.md). [XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html > **Warning** > This changes the location of some well-known symlinks that Nix creates, which might break tools that rely on the old, non-XDG-conformant locations. In particular, the following locations change: | Old | New | |-------------------|--------------------------------| | `~/.nix-profile` | `$XDG_STATE_HOME/nix/profile` | | `~/.nix-defexpr` | `$XDG_STATE_HOME/nix/defexpr` | | `~/.nix-channels` | `$XDG_STATE_HOME/nix/channels` | If you already have Nix installed and are using [profiles](@docroot@/package-management/profiles.md) or [channels](@docroot@/command-ref/nix-channel.md), you should migrate manually when you enable this option. If `$XDG_STATE_HOME` is not set, use `$HOME/.local/state/nix` instead of `$XDG_STATE_HOME/nix`. This can be achieved with the following shell commands: ```sh nix_state_home=${XDG_STATE_HOME-$HOME/.local/state}/nix mkdir -p $nix_state_home mv $HOME/.nix-profile $nix_state_home/profile mv $HOME/.nix-defexpr $nix_state_home/defexpr mv $HOME/.nix-channels $nix_state_home/channels ``` )" }; Setting<StringMap> impureEnv {this, {}, "impure-env", R"( A list of items, each in the format of: - `name=value`: Set environment variable `name` to `value`. If the user is trusted (see `trusted-users` option), when building a fixed-output derivation, environment variables set in this option will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md#adv-attr-impureEnvVars). 
This option is useful for, e.g., setting `https_proxy` for fixed-output derivations and in a multi-user Nix installation, or setting private access tokens when fetching a private repository. )", {}, // aliases true, // document default Xp::ConfigurableImpureEnv }; Setting<std::string> upgradeNixStorePathUrl{ this, "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix", "upgrade-nix-store-path-url", R"( Used by `nix upgrade-nix`, the URL of the file that contains the store paths of the latest Nix release. )" }; Setting<uint64_t> warnLargePathThreshold{ this, 0, "warn-large-path-threshold", R"( Warn when copying a path larger than this number of bytes to the Nix store (as determined by its NAR serialisation). Default is 0, which disables the warning. Set it to 1 to warn on all paths. )" }; }; // FIXME: don't use a global variable. extern Settings settings; /** * Load the configuration (from `nix.conf`, `NIX_CONFIG`, etc.) into the * given configuration object. * * Usually called with `globalConfig`. */ void loadConfFile(AbstractConfig & config); // Used by the Settings constructor std::vector<Path> getUserConfigFiles(); extern const std::string nixVersion; /** * @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests. * @note When using libexpr, and/or libmain, This is not sufficient. See initNix(). */ void initLibStore(bool loadConfig = true); /** * It's important to initialize before doing _anything_, which is why we * call upon the programmer to handle this correctly. However, we only add * this in a key locations, so as not to litter the code. */ void assertLibStoreInitialized(); }
53,899
C++
.h
1,016
43.383858
310
0.650652
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,105
posix-fs-canonicalise.hh
NixOS_nix/src/libstore/posix-fs-canonicalise.hh
#pragma once ///@file #include <sys/stat.h> #include <sys/time.h> #include "types.hh" #include "error.hh" namespace nix { typedef std::pair<dev_t, ino_t> Inode; typedef std::set<Inode> InodesSeen; /** * "Fix", or canonicalise, the meta-data of the files in a store path * after it has been built. In particular: * * - the last modification date on each file is set to 1 (i.e., * 00:00:01 1/1/1970 UTC) * * - the permissions are set of 444 or 555 (i.e., read-only with or * without execute permission; setuid bits etc. are cleared) * * - the owner and group are set to the Nix user and group, if we're * running as root. (Unix only.) * * If uidRange is not empty, this function will throw an error if it * encounters files owned by a user outside of the closed interval * [uidRange->first, uidRange->second]. */ void canonicalisePathMetaData( const Path & path, #ifndef _WIN32 std::optional<std::pair<uid_t, uid_t>> uidRange, #endif InodesSeen & inodesSeen); void canonicalisePathMetaData( const Path & path #ifndef _WIN32 , std::optional<std::pair<uid_t, uid_t>> uidRange = std::nullopt #endif ); void canonicaliseTimestampAndPermissions(const Path & path); MakeError(PathInUse, Error); }
1,246
C++
.h
41
28.170732
69
0.718828
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,106
store-cast.hh
NixOS_nix/src/libstore/store-cast.hh
#pragma once ///@file #include "store-api.hh" namespace nix { /** * Helper to try downcasting a Store with a nice method if it fails. * * This is basically an alternative to the user-facing part of * Store::unsupported that allows us to still have a nice message but * better interface design. */ template<typename T> T & require(Store & store) { auto * castedStore = dynamic_cast<T *>(&store); if (!castedStore) throw UsageError("%s not supported by store '%s'", T::operationName, store.getUri()); return *castedStore; } }
554
C++
.h
20
25.2
93
0.711321
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,107
daemon.hh
NixOS_nix/src/libstore/daemon.hh
#pragma once ///@file #include "serialise.hh" #include "store-api.hh" namespace nix::daemon { enum RecursiveFlag : bool { NotRecursive = false, Recursive = true }; void processConnection( ref<Store> store, FdSource && from, FdSink && to, TrustedFlag trusted, RecursiveFlag recursive); }
312
C++
.h
13
21.076923
69
0.721088
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,108
s3.hh
NixOS_nix/src/libstore/s3.hh
#pragma once ///@file #if ENABLE_S3 #include "ref.hh" #include <optional> #include <string> namespace Aws { namespace Client { struct ClientConfiguration; } } namespace Aws { namespace S3 { class S3Client; } } namespace nix { struct S3Helper { ref<Aws::Client::ClientConfiguration> config; ref<Aws::S3::S3Client> client; S3Helper(const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint); ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint); struct FileTransferResult { std::optional<std::string> data; unsigned int durationMs; }; FileTransferResult getObject( const std::string & bucketName, const std::string & key); }; } #endif
839
C++
.h
25
29.84
139
0.714464
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,109
local-store.hh
NixOS_nix/src/libstore/local-store.hh
#pragma once
///@file

#include "sqlite.hh"

#include "pathlocks.hh"
#include "store-api.hh"
#include "indirect-root-store.hh"
#include "sync.hh"

#include <chrono>
#include <future>
#include <string>
#include <unordered_set>

namespace nix {

/**
 * Nix store and database schema version.
 *
 * Version 1 (or 0) was Nix <=
 * 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
 * Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
 * Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0.
 */
const int nixSchemaVersion = 10;

/** Statistics gathered while hard-link-deduplicating the store. */
struct OptimiseStats
{
    unsigned long filesLinked = 0;
    uint64_t bytesFreed = 0;
};

struct LocalStoreConfig : virtual LocalFSStoreConfig
{
    using LocalFSStoreConfig::LocalFSStoreConfig;

    LocalStoreConfig(
        std::string_view scheme,
        std::string_view authority,
        const Params & params);

    Setting<bool> requireSigs{this,
        settings.requireSigs,
        "require-sigs",
        "Whether store paths copied into this store should have a trusted signature."};

    Setting<bool> readOnly{this,
        false,
        "read-only",
        R"(
          Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.

          Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.

          Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.

          > **Warning**
          > Do not use this unless the filesystem is read-only.
          >
          > Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
          > While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
        )"};

    const std::string name() override { return "Local Store"; }

    static std::set<std::string> uriSchemes()
    { return {"local"}; }

    std::string doc() override;
};

class LocalStore : public virtual LocalStoreConfig
    , public virtual IndirectRootStore
    , public virtual GcStore
{
private:

    /**
     * Lock file used for upgrading.
     */
    AutoCloseFD globalLock;

    /** Mutable state guarded as a unit by the `Sync` wrapper below. */
    struct State
    {
        /**
         * The SQLite database object.
         */
        SQLite db;

        // Prepared statements; defined only in local-store.cc (pimpl).
        struct Stmts;
        std::unique_ptr<Stmts> stmts;

        /**
         * The last time we checked whether to do an auto-GC, or an
         * auto-GC finished.
         */
        std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;

        /**
         * Whether auto-GC is running. If so, get gcFuture to wait for
         * the GC to finish.
         */
        bool gcRunning = false;
        std::shared_future<void> gcFuture;

        /**
         * How much disk space was available after the previous
         * auto-GC. If the current available disk space is below
         * minFree but not much below availAfterGC, then there is no
         * point in starting a new GC.
         */
        uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();

        std::unique_ptr<PublicKeys> publicKeys;
    };

    Sync<State> _state;

public:

    // Filesystem layout of this store; exact locations are set by the
    // constructor in local-store.cc.
    const Path dbDir;
    const Path linksDir;
    const Path reservedPath;
    const Path schemaPath;
    const Path tempRootsDir;
    const Path fnTempRoots;

private:

    const PublicKeys & getPublicKeys();

public:

    /**
     * Hack for build-remote.cc.
     */
    PathSet locksHeld;

    /**
     * Initialise the local store, upgrading the schema if
     * necessary.
     */
    LocalStore(const Params & params);
    LocalStore(
        std::string_view scheme,
        PathView path,
        const Params & params);

    ~LocalStore();

    /**
     * Implementations of abstract store API methods.
     */

    std::string getUri() override;

    bool isValidPathUncached(const StorePath & path) override;

    StorePathSet queryValidPaths(const StorePathSet & paths,
        SubstituteFlag maybeSubstitute = NoSubstitute) override;

    StorePathSet queryAllValidPaths() override;

    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

    void queryReferrers(const StorePath & path, StorePathSet & referrers) override;

    StorePathSet queryValidDerivers(const StorePath & path) override;

    std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(const StorePath & path) override;

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

    StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;

    bool pathInfoIsUntrusted(const ValidPathInfo &) override;
    bool realisationIsUntrusted(const Realisation & ) override;

    void addToStore(const ValidPathInfo & info, Source & source,
        RepairFlag repair, CheckSigsFlag checkSigs) override;

    StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        FileSerialisationMethod dumpMethod,
        ContentAddressMethod hashMethod,
        HashAlgorithm hashAlgo,
        const StorePathSet & references,
        RepairFlag repair) override;

    void addTempRoot(const StorePath & path) override;

private:

    void createTempRootsFile();

    /**
     * The file to which we write our temporary roots.
     */
    Sync<AutoCloseFD> _fdTempRoots;

    /**
     * The global GC lock.
     */
    Sync<AutoCloseFD> _fdGCLock;

    /**
     * Connection to the garbage collector.
     */
    Sync<AutoCloseFD> _fdRootsSocket;

public:

    /**
     * Implementation of IndirectRootStore::addIndirectRoot().
     *
     * The weak reference merely is a symlink to `path' from
     * /nix/var/nix/gcroots/auto/<hash of `path'>.
     */
    void addIndirectRoot(const Path & path) override;

private:

    void findTempRoots(Roots & roots, bool censor);

    AutoCloseFD openGCLock();

public:

    Roots findRoots(bool censor) override;

    void collectGarbage(const GCOptions & options, GCResults & results) override;

    /**
     * Called by `collectGarbage` to trace in reverse.
     *
     * Using this rather than `queryReferrers` directly allows us to
     * fine-tune which referrers we consider for garbage collection;
     * some store implementations take advantage of this.
     */
    virtual void queryGCReferrers(const StorePath & path, StorePathSet & referrers)
    {
        return queryReferrers(path, referrers);
    }

    /**
     * Called by `collectGarbage` to recursively delete a path.
     * The default implementation simply calls `deletePath`, but it can be
     * overridden by stores that wish to provide their own deletion behaviour.
     */
    virtual void deleteStorePath(const Path & path, uint64_t & bytesFreed);

    /**
     * Optimise the disk space usage of the Nix store by hard-linking
     * files with the same contents.
     */
    void optimiseStore(OptimiseStats & stats);

    void optimiseStore() override;

    /**
     * Optimise a single store path. Optionally, test the encountered
     * symlinks for corruption.
     */
    void optimisePath(const Path & path, RepairFlag repair);

    bool verifyStore(bool checkContents, RepairFlag repair) override;

protected:

    /**
     * Result of `verifyAllValidPaths`
     */
    struct VerificationResult {
        /**
         * Whether any errors were encountered
         */
        bool errors;

        /**
         * A set of so-far valid paths. The store objects pointed to by
         * those paths are suitable for further validation checking.
         */
        StorePathSet validPaths;
    };

    /**
     * First, unconditional step of `verifyStore`
     */
    virtual VerificationResult verifyAllValidPaths(RepairFlag repair);

public:

    /**
     * Register the validity of a path, i.e., that `path` exists, that
     * the paths referenced by it exists, and in the case of an output
     * path of a derivation, that it has been produced by a successful
     * execution of the derivation (or something equivalent).  Also
     * register the hash of the file system contents of the path.  The
     * hash must be a SHA-256 hash.
     */
    void registerValidPath(const ValidPathInfo & info);

    virtual void registerValidPaths(const ValidPathInfos & infos);

    unsigned int getProtocol() override;

    std::optional<TrustedFlag> isTrustedClient() override;

    void vacuumDB();

    void addSignatures(const StorePath & storePath, const StringSet & sigs) override;

    /**
     * If free disk space in /nix/store is below minFree, delete
     * garbage until it exceeds maxFree.
     */
    void autoGC(bool sync = true);

    /**
     * Register the store path 'output' as the output named 'outputName' of
     * derivation 'deriver'.
     */
    void registerDrvOutput(const Realisation & info) override;
    void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
    void cacheDrvOutputMapping(
        State & state,
        const uint64_t deriver,
        const std::string & outputName,
        const StorePath & output);

    std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
    std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
    void queryRealisationUncached(const DrvOutput&,
        Callback<std::shared_ptr<const Realisation>> callback) noexcept override;

    std::optional<std::string> getVersion() override;

protected:

    void verifyPath(const StorePath & path, std::function<bool(const StorePath &)> existsInStoreDir,
        StorePathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);

private:

    /**
     * Retrieve the current version of the database schema.
     * If the database does not exist yet, the version returned will be 0.
     */
    int getSchema();

    void openDB(State & state, bool create);

    void makeStoreWritable();

    uint64_t queryValidPathId(State & state, const StorePath & path);

    uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);

    void invalidatePath(State & state, const StorePath & path);

    /**
     * Delete a path from the Nix store.
     */
    void invalidatePathChecked(const StorePath & path);

    std::shared_ptr<const ValidPathInfo> queryPathInfoInternal(State & state, const StorePath & path);

    void updatePathInfo(State & state, const ValidPathInfo & info);

    PathSet queryValidPathsOld();
    ValidPathInfo queryPathInfoOld(const Path & path);

    void findRoots(const Path & path, std::filesystem::file_type type, Roots & roots);

    void findRootsNoTemp(Roots & roots, bool censor);

    void findRuntimeRoots(Roots & roots, bool censor);

    std::pair<std::filesystem::path, AutoCloseFD> createTempDirInStore();

    typedef std::unordered_set<ino_t> InodeHash;

    InodeHash loadInodeHash();
    Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
    void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);

    // Internal versions that are not wrapped in retry_sqlite.
    bool isValidPath_(State & state, const StorePath & path);
    void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);

    /**
     * Add signatures to a ValidPathInfo or Realisation using the secret keys
     * specified by the ‘secret-key-files’ option.
     */
    void signPathInfo(ValidPathInfo & info);
    void signRealisation(Realisation &);

    void addBuildLog(const StorePath & drvPath, std::string_view log) override;

    friend struct LocalDerivationGoal;
    friend struct PathSubstitutionGoal;
    friend struct SubstitutionGoal;
    friend struct DerivationGoal;
};

}
12,121
C++
.h
303
33.910891
200
0.6956
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,110
local-overlay-store.hh
NixOS_nix/src/libstore/local-overlay-store.hh
#include "local-store.hh" namespace nix { /** * Configuration for `LocalOverlayStore`. */ struct LocalOverlayStoreConfig : virtual LocalStoreConfig { LocalOverlayStoreConfig(const StringMap & params) : LocalOverlayStoreConfig("local-overlay", "", params) { } LocalOverlayStoreConfig(std::string_view scheme, PathView path, const Params & params) : StoreConfig(params) , LocalFSStoreConfig(path, params) , LocalStoreConfig(scheme, path, params) { } const Setting<std::string> lowerStoreUri{(StoreConfig*) this, "", "lower-store", R"( [Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) for the lower store. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly). Must be a store with a store dir on the file system. Must be used as OverlayFS lower layer for this store's store dir. )"}; const PathSetting upperLayer{(StoreConfig*) this, "", "upper-layer", R"( Directory containing the OverlayFS upper layer for this store's store dir. )"}; Setting<bool> checkMount{(StoreConfig*) this, true, "check-mount", R"( Check that the overlay filesystem is correctly mounted. Nix does not manage the overlayfs mount point itself, but the correct functioning of the overlay store does depend on this mount point being set up correctly. Rather than just assume this is the case, check that the lowerdir and upperdir options are what we expect them to be. This check is on by default, but can be disabled if needed. )"}; const PathSetting remountHook{(StoreConfig*) this, "", "remount-hook", R"( Script or other executable to run when overlay filesystem needs remounting. This is occasionally necessary when deleting a store path that exists in both upper and lower layers. In such a situation, bypassing OverlayFS and deleting the path in the upper layer directly is the only way to perform the deletion without creating a "whiteout". 
However this causes the OverlayFS kernel data structures to get out-of-sync, and can lead to 'stale file handle' errors; remounting solves the problem. The store directory is passed as an argument to the invoked executable. )"}; const std::string name() override { return "Experimental Local Overlay Store"; } std::optional<ExperimentalFeature> experimentalFeature() const override { return ExperimentalFeature::LocalOverlayStore; } static std::set<std::string> uriSchemes() { return { "local-overlay" }; } std::string doc() override; protected: /** * @return The host OS path corresponding to the store path for the * upper layer. * * @note The there is no guarantee a store object is actually stored * at that file path. It might be stored in the lower layer instead, * or it might not be part of this store at all. */ Path toUpperPath(const StorePath & path); }; /** * Variation of local store using OverlayFS for the store directory. * * Documentation on overridden methods states how they differ from their * `LocalStore` counterparts. */ class LocalOverlayStore : public virtual LocalOverlayStoreConfig, public virtual LocalStore { /** * The store beneath us. * * Our store dir should be an overlay fs where the lower layer * is that store's store dir, and the upper layer is some * scratch storage just for us. */ ref<LocalFSStore> lowerStore; public: LocalOverlayStore(const Params & params) : LocalOverlayStore("local-overlay", "", params) { } LocalOverlayStore(std::string_view scheme, PathView path, const Params & params); std::string getUri() override { return "local-overlay://"; } private: /** * First copy up any lower store realisation with the same key, so we * merge rather than mask it. */ void registerDrvOutput(const Realisation & info) override; /** * Check lower store if upper DB does not have. 
*/ void queryPathInfoUncached(const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override; /** * Check lower store if upper DB does not have. * * In addition, copy up metadata for lower store objects (and their * closure). (I.e. Optimistically cache in the upper DB.) */ bool isValidPathUncached(const StorePath & path) override; /** * Check the lower store and upper DB. */ void queryReferrers(const StorePath & path, StorePathSet & referrers) override; /** * Check the lower store and upper DB. */ StorePathSet queryValidDerivers(const StorePath & path) override; /** * Check lower store if upper DB does not have. */ std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override; /** * First copy up any lower store realisation with the same key, so we * merge rather than mask it. */ void registerValidPaths(const ValidPathInfos & infos) override; /** * Check lower store if upper DB does not have. */ void queryRealisationUncached(const DrvOutput&, Callback<std::shared_ptr<const Realisation>> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. */ void collectGarbage(const GCOptions & options, GCResults & results) override; /** * Check which layers the store object exists in to try to avoid * needing to remount. */ void deleteStorePath(const Path & path, uint64_t & bytesFreed) override; /** * Deduplicate by removing store objects from the upper layer that * are now in the lower layer. * * Operations on a layered store will not cause duplications, but addition of * new store objects to the lower layer can instill induce them * (there is no way to prevent that). This cleans up those * duplications. * * @note We do not yet optomise the upper layer in the normal way * (hardlink) yet. We would like to, but it requires more * refactoring of existing code to support this sustainably. */ void optimiseStore() override; /** * Check all paths registered in the upper DB. 
* * Note that this includes store objects that reside in either overlayfs layer; * just enumerating the contents of the upper layer would skip them. * * We don't verify the contents of both layers on the assumption that the lower layer is far bigger, * and also the observation that anything not in the upper db the overlayfs doesn't yet care about. */ VerificationResult verifyAllValidPaths(RepairFlag repair) override; /** * Deletion only effects the upper layer, so we ignore lower-layer referrers. */ void queryGCReferrers(const StorePath & path, StorePathSet & referrers) override; /** * Call the `remountHook` if we have done something such that the * OverlayFS needed to be remounted. See that hook's user-facing * documentation for further details. */ void remountIfNecessary(); /** * State for `remountIfNecessary` */ std::atomic_bool _remountRequired = false; }; }
7,487
C++
.h
182
35.005495
111
0.687853
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,111
worker-protocol-impl.hh
NixOS_nix/src/libstore/worker-protocol-impl.hh
#pragma once /** * @file * * Template implementations (as opposed to mere declarations). * * This file is an exmample of the "impl.hh" pattern. See the * contributing guide. */ #include "worker-protocol.hh" #include "length-prefixed-protocol-helper.hh" namespace nix { /* protocol-agnostic templates */ #define WORKER_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \ TEMPLATE T WorkerProto::Serialise< T >::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) \ { \ return LengthPrefixedProtoHelper<WorkerProto, T >::read(store, conn); \ } \ TEMPLATE void WorkerProto::Serialise< T >::write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) \ { \ LengthPrefixedProtoHelper<WorkerProto, T >::write(store, conn, t); \ } WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>) WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>) WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>) #define WORKER_USE_LENGTH_PREFIX_SERIALISER_COMMA , WORKER_USE_LENGTH_PREFIX_SERIALISER( template<typename K WORKER_USE_LENGTH_PREFIX_SERIALISER_COMMA typename V>, std::map<K WORKER_USE_LENGTH_PREFIX_SERIALISER_COMMA V>) /** * Use `CommonProto` where possible. */ template<typename T> struct WorkerProto::Serialise { static T read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { return CommonProto::Serialise<T>::read(store, CommonProto::ReadConn { .from = conn.from }); } static void write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) { CommonProto::Serialise<T>::write(store, CommonProto::WriteConn { .to = conn.to }, t); } }; /* protocol-specific templates */ }
1,817
C++
.h
49
33.183673
126
0.7112
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,112
binary-cache-store.hh
NixOS_nix/src/libstore/binary-cache-store.hh
#pragma once ///@file #include "signature/local-keys.hh" #include "store-api.hh" #include "log-store.hh" #include "pool.hh" #include <atomic> namespace nix { struct NarInfo; struct BinaryCacheStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; const Setting<std::string> compression{this, "xz", "compression", "NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."}; const Setting<bool> writeNARListing{this, false, "write-nar-listing", "Whether to write a JSON file that lists the files in each NAR."}; const Setting<bool> writeDebugInfo{this, false, "index-debug-info", R"( Whether to index DWARF debug info files by build ID. This allows [`dwarffs`](https://github.com/edolstra/dwarffs) to fetch debug info on demand )"}; const Setting<Path> secretKeyFile{this, "", "secret-key", "Path to the secret key used to sign the binary cache."}; const Setting<Path> localNarCache{this, "", "local-nar-cache", "Path to a local cache of NARs fetched from this binary cache, used by commands such as `nix store cat`."}; const Setting<bool> parallelCompression{this, false, "parallel-compression", "Enable multi-threaded compression of NARs. This is currently only available for `xz` and `zstd`."}; const Setting<int> compressionLevel{this, -1, "compression-level", R"( The *preset level* to be used when compressing NARs. The meaning and accepted values depend on the compression method selected. `-1` specifies that the default compression level should be used. )"}; }; /** * @note subclasses must implement at least one of the two * virtual getFile() methods. 
*/ class BinaryCacheStore : public virtual BinaryCacheStoreConfig, public virtual Store, public virtual LogStore { private: std::unique_ptr<Signer> signer; protected: // The prefix under which realisation infos will be stored const std::string realisationsPrefix = "realisations"; BinaryCacheStore(const Params & params); public: virtual bool fileExists(const std::string & path) = 0; virtual void upsertFile(const std::string & path, std::shared_ptr<std::basic_iostream<char>> istream, const std::string & mimeType) = 0; void upsertFile(const std::string & path, // FIXME: use std::string_view std::string && data, const std::string & mimeType); /** * Dump the contents of the specified file to a sink. */ virtual void getFile(const std::string & path, Sink & sink); /** * Fetch the specified file and call the specified callback with * the result. A subclass may implement this asynchronously. */ virtual void getFile( const std::string & path, Callback<std::optional<std::string>> callback) noexcept; std::optional<std::string> getFile(const std::string & path); public: virtual void init() override; private: std::string narMagic; std::string narInfoFileFor(const StorePath & storePath); void writeNarInfo(ref<NarInfo> narInfo); ref<const ValidPathInfo> addToStoreCommon( Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs, std::function<ValidPathInfo(HashResult)> mkInfo); public: bool isValidPathUncached(const StorePath & path) override; void queryPathInfoUncached(const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override; std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override; void addToStore(const ValidPathInfo & info, Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs) override; StorePath addToStoreFromDump( Source & dump, std::string_view name, FileSerialisationMethod dumpMethod, ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, RepairFlag 
repair) override; StorePath addToStore( std::string_view name, const SourcePath & path, ContentAddressMethod method, HashAlgorithm hashAlgo, const StorePathSet & references, PathFilter & filter, RepairFlag repair) override; void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override; void addSignatures(const StorePath & storePath, const StringSet & sigs) override; std::optional<std::string> getBuildLogExact(const StorePath & path) override; void addBuildLog(const StorePath & drvPath, std::string_view log) override; }; MakeError(NoSuchBinaryCacheFile, Error); }
4,972
C++
.h
112
38.321429
126
0.710811
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,113
outputs-spec.hh
NixOS_nix/src/libstore/outputs-spec.hh
#pragma once ///@file #include <cassert> #include <optional> #include <set> #include <variant> #include "json-impls.hh" #include "variant-wrapper.hh" namespace nix { /** * An (owned) output name. Just a type alias used to make code more * readible. */ typedef std::string OutputName; /** * A borrowed output name. Just a type alias used to make code more * readible. */ typedef std::string_view OutputNameView; struct OutputsSpec { /** * A non-empty set of outputs, specified by name */ struct Names : std::set<OutputName> { using std::set<OutputName>::set; /* These need to be "inherited manually" */ Names(const std::set<OutputName> & s) : std::set<OutputName>(s) { assert(!empty()); } /** * Needs to be "inherited manually" */ Names(std::set<OutputName> && s) : std::set<OutputName>(s) { assert(!empty()); } /* This set should always be non-empty, so we delete this constructor in order make creating empty ones by mistake harder. */ Names() = delete; }; /** * The set of all outputs, without needing to name them explicitly */ struct All : std::monostate { }; typedef std::variant<All, Names> Raw; Raw raw; bool operator == (const OutputsSpec &) const = default; // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet. bool operator < (const OutputsSpec & other) const { return raw < other.raw; } MAKE_WRAPPER_CONSTRUCTOR(OutputsSpec); /** * Force choosing a variant */ OutputsSpec() = delete; bool contains(const OutputName & output) const; /** * Create a new OutputsSpec which is the union of this and that. */ OutputsSpec union_(const OutputsSpec & that) const; /** * Whether this OutputsSpec is a subset of that. */ bool isSubsetOf(const OutputsSpec & outputs) const; /** * Parse a string of the form 'output1,...outputN' or '*', returning * the outputs spec. 
*/ static OutputsSpec parse(std::string_view s); static std::optional<OutputsSpec> parseOpt(std::string_view s); std::string to_string() const; }; struct ExtendedOutputsSpec { struct Default : std::monostate { }; using Explicit = OutputsSpec; typedef std::variant<Default, Explicit> Raw; Raw raw; bool operator == (const ExtendedOutputsSpec &) const = default; // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet. bool operator < (const ExtendedOutputsSpec &) const; MAKE_WRAPPER_CONSTRUCTOR(ExtendedOutputsSpec); /** * Force choosing a variant */ ExtendedOutputsSpec() = delete; /** * Parse a string of the form 'prefix^output1,...outputN' or * 'prefix^*', returning the prefix and the extended outputs spec. */ static std::pair<std::string_view, ExtendedOutputsSpec> parse(std::string_view s); static std::optional<std::pair<std::string_view, ExtendedOutputsSpec>> parseOpt(std::string_view s); std::string to_string() const; }; } JSON_IMPL(OutputsSpec) JSON_IMPL(ExtendedOutputsSpec)
3,231
C++
.h
97
27.989691
104
0.649049
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,114
path-with-outputs.hh
NixOS_nix/src/libstore/path-with-outputs.hh
#pragma once ///@file #include "path.hh" #include "derived-path.hh" namespace nix { struct StoreDirConfig; /** * This is a deprecated old type just for use by the old CLI, and older * versions of the RPC protocols. In new code don't use it; you want * `DerivedPath` instead. * * `DerivedPath` is better because it handles more cases, and does so more * explicitly without devious punning tricks. */ struct StorePathWithOutputs { StorePath path; std::set<std::string> outputs; std::string to_string(const StoreDirConfig & store) const; DerivedPath toDerivedPath() const; typedef std::variant<StorePathWithOutputs, StorePath, std::monostate> ParseResult; static StorePathWithOutputs::ParseResult tryFromDerivedPath(const DerivedPath &); }; std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs>); std::pair<std::string_view, StringSet> parsePathWithOutputs(std::string_view s); /** * Split a string specifying a derivation and a set of outputs * (/nix/store/hash-foo!out1,out2,...) into the derivation path * and the outputs. */ StorePathWithOutputs parsePathWithOutputs(const StoreDirConfig & store, std::string_view pathWithOutputs); class Store; StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs); }
1,334
C++
.h
34
36.794118
110
0.781493
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,115
realisation.hh
NixOS_nix/src/libstore/realisation.hh
#pragma once ///@file #include <variant> #include "hash.hh" #include "path.hh" #include "derived-path.hh" #include <nlohmann/json_fwd.hpp> #include "comparator.hh" #include "signature/signer.hh" namespace nix { class Store; struct OutputsSpec; /** * A general `Realisation` key. * * This is similar to a `DerivedPath::Opaque`, but the derivation is * identified by its "hash modulo" instead of by its store path. */ struct DrvOutput { /** * The hash modulo of the derivation. * * Computed from the derivation itself for most types of * derivations, but computed from the (fixed) content address of the * output for fixed-output derivations. */ Hash drvHash; /** * The name of the output. */ OutputName outputName; std::string to_string() const; std::string strHash() const { return drvHash.to_string(HashFormat::Base16, true); } static DrvOutput parse(const std::string &); GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); }; struct Realisation { DrvOutput id; StorePath outPath; StringSet signatures; /** * The realisations that are required for the current one to be valid. * * When importing this realisation, the store will first check that all its * dependencies exist, and map to the correct output path */ std::map<DrvOutput, StorePath> dependentRealisations; nlohmann::json toJSON() const; static Realisation fromJSON(const nlohmann::json& json, const std::string& whence); std::string fingerprint() const; void sign(const Signer &); bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; size_t checkSignatures(const PublicKeys & publicKeys) const; static std::set<Realisation> closure(Store &, const std::set<Realisation> &); static void closure(Store &, const std::set<Realisation> &, std::set<Realisation> & res); bool isCompatibleWith(const Realisation & other) const; StorePath getPath() const { return outPath; } GENERATE_CMP(Realisation, me->id, me->outPath); }; /** * Collection type for a single derivation's outputs' `Realisation`s. 
* * Since these are the outputs of a single derivation, we know the * output names are unique so we can use them as the map key. */ typedef std::map<OutputName, Realisation> SingleDrvOutputs; /** * Collection type for multiple derivations' outputs' `Realisation`s. * * `DrvOutput` is used because in general the derivations are not all * the same, so we need to identify firstly which derivation, and * secondly which output of that derivation. */ typedef std::map<DrvOutput, Realisation> DrvOutputs; /** * Filter a SingleDrvOutputs to include only specific output names * * Moves the `outputs` input. */ SingleDrvOutputs filterDrvOutputs(const OutputsSpec&, SingleDrvOutputs&&); struct OpaquePath { StorePath path; StorePath getPath() const { return path; } GENERATE_CMP(OpaquePath, me->path); }; /** * A store path with all the history of how it went into the store */ struct RealisedPath { /* * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ using Raw = std::variant<Realisation, OpaquePath>; Raw raw; using Set = std::set<RealisedPath>; RealisedPath(StorePath path) : raw(OpaquePath{path}) {} RealisedPath(Realisation r) : raw(r) {} /** * Get the raw store path associated to this */ StorePath path() const; void closure(Store& store, Set& ret) const; static void closure(Store& store, const Set& startPaths, Set& ret); Set closure(Store& store) const; GENERATE_CMP(RealisedPath, me->raw); }; class MissingRealisation : public Error { public: MissingRealisation(DrvOutput & outputId) : MissingRealisation(outputId.outputName, outputId.strHash()) {} MissingRealisation(std::string_view drv, OutputName outputName) : Error( "cannot operate on output '%s' of the " "unbuilt derivation '%s'", outputName, drv) {} }; }
4,171
C++
.h
122
30.016393
93
0.706527
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,116
path-info.hh
NixOS_nix/src/libstore/path-info.hh
#pragma once ///@file #include "signature/signer.hh" #include "path.hh" #include "hash.hh" #include "content-address.hh" #include <string> #include <optional> namespace nix { class Store; struct SubstitutablePathInfo { std::optional<StorePath> deriver; StorePathSet references; /** * 0 = unknown or inapplicable */ uint64_t downloadSize; /** * 0 = unknown */ uint64_t narSize; }; using SubstitutablePathInfos = std::map<StorePath, SubstitutablePathInfo>; /** * Information about a store object. * * See `store/store-object` and `protocols/json/store-object-info` in * the Nix manual */ struct UnkeyedValidPathInfo { /** * Path to derivation that produced this store object, if known. */ std::optional<StorePath> deriver; /** * \todo document this */ Hash narHash; /** * Other store objects this store object referes to. */ StorePathSet references; /** * When this store object was registered in the store that contains * it, if known. */ time_t registrationTime = 0; /** * 0 = unknown */ uint64_t narSize = 0; /** * internal use only: SQL primary key for on-disk store objects with * `LocalStore`. * * @todo Remove, layer violation */ uint64_t id = 0; /** * Whether the path is ultimately trusted, that is, it's a * derivation output that was built locally. */ bool ultimate = false; StringSet sigs; // note: not necessarily verified /** * If non-empty, an assertion that the path is content-addressed, * i.e., that the store path is computed from a cryptographic hash * of the contents of the path, plus some other bits of data like * the "name" part of the path. Such a path doesn't need * signatures, since we don't have to trust anybody's claim that * the path is the output of a particular derivation. (In the * extensional store model, we have to trust that the *contents* * of an output path of a derivation were actually produced by * that derivation. 
In the intensional model, we have to trust * that a particular output path was produced by a derivation; the * path then implies the contents.) * * Ideally, the content-addressability assertion would just be a Boolean, * and the store path would be computed from the name component, ‘narHash’ * and ‘references’. However, we support many types of content addresses. */ std::optional<ContentAddress> ca; UnkeyedValidPathInfo(const UnkeyedValidPathInfo & other) = default; UnkeyedValidPathInfo(Hash narHash) : narHash(narHash) { }; bool operator == (const UnkeyedValidPathInfo &) const noexcept; /** * @todo return `std::strong_ordering` once `id` is removed */ std::weak_ordering operator <=> (const UnkeyedValidPathInfo &) const noexcept; virtual ~UnkeyedValidPathInfo() { } /** * @param includeImpureInfo If true, variable elements such as the * registration time are included. */ virtual nlohmann::json toJSON( const Store & store, bool includeImpureInfo, HashFormat hashFormat) const; static UnkeyedValidPathInfo fromJSON( const Store & store, const nlohmann::json & json); }; struct ValidPathInfo : UnkeyedValidPathInfo { StorePath path; bool operator == (const ValidPathInfo &) const = default; auto operator <=> (const ValidPathInfo &) const = default; /** * Return a fingerprint of the store path to be used in binary * cache signatures. It contains the store path, the base-32 * SHA-256 hash of the NAR serialisation of the path, the size of * the NAR, and the sorted references. The size field is strictly * speaking superfluous, but might prevent endless/excessive data * attacks. */ std::string fingerprint(const Store & store) const; void sign(const Store & store, const Signer & signer); /** * @return The `ContentAddressWithReferences` that determines the * store path for a content-addressed store object, `std::nullopt` * for an input-addressed store object. 
*/ std::optional<ContentAddressWithReferences> contentAddressWithReferences() const; /** * @return true iff the path is verifiably content-addressed. */ bool isContentAddressed(const Store & store) const; static const size_t maxSigs = std::numeric_limits<size_t>::max(); /** * Return the number of signatures on this .narinfo that were * produced by one of the specified keys, or maxSigs if the path * is content-addressed. */ size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const; /** * Verify a single signature. */ bool checkSignature(const Store & store, const PublicKeys & publicKeys, const std::string & sig) const; /** * References as store path basenames, including a self reference if it has one. */ Strings shortRefs() const; ValidPathInfo(StorePath && path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(std::move(path)) { }; ValidPathInfo(const StorePath & path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(path) { }; ValidPathInfo(const Store & store, std::string_view name, ContentAddressWithReferences && ca, Hash narHash); }; static_assert(std::is_move_assignable_v<ValidPathInfo>); static_assert(std::is_copy_assignable_v<ValidPathInfo>); static_assert(std::is_copy_constructible_v<ValidPathInfo>); static_assert(std::is_move_constructible_v<ValidPathInfo>); using ValidPathInfos = std::map<StorePath, ValidPathInfo>; }
5,766
C++
.h
154
32.331169
120
0.693801
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,117
remote-store-connection.hh
NixOS_nix/src/libstore/remote-store-connection.hh
#pragma once ///@file #include "remote-store.hh" #include "worker-protocol.hh" #include "worker-protocol-connection.hh" #include "pool.hh" namespace nix { /** * Bidirectional connection (send and receive) used by the Remote Store * implementation. * * Contains `Source` and `Sink` for actual communication, along with * other information learned when negotiating the connection. */ struct RemoteStore::Connection : WorkerProto::BasicClientConnection, WorkerProto::ClientHandshakeInfo { /** * Time this connection was established. */ std::chrono::time_point<std::chrono::steady_clock> startTime; }; /** * A wrapper around Pool<RemoteStore::Connection>::Handle that marks * the connection as bad (causing it to be closed) if a non-daemon * exception is thrown before the handle is closed. Such an exception * causes a deviation from the expected protocol and therefore a * desynchronization between the client and daemon. */ struct RemoteStore::ConnectionHandle { Pool<RemoteStore::Connection>::Handle handle; bool daemonException = false; ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle) : handle(std::move(handle)) { } ConnectionHandle(ConnectionHandle && h) noexcept : handle(std::move(h.handle)) { } ~ConnectionHandle(); RemoteStore::Connection & operator * () { return *handle; } RemoteStore::Connection * operator -> () { return &*handle; } void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true); void withFramedSink(std::function<void(Sink & sink)> fun); }; }
1,625
C++
.h
46
32.086957
99
0.728954
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,118
store-dir-config.hh
NixOS_nix/src/libstore/store-dir-config.hh
#pragma once #include "path.hh" #include "hash.hh" #include "content-address.hh" #include "globals.hh" #include "config.hh" #include <map> #include <string> #include <variant> namespace nix { struct SourcePath; MakeError(BadStorePath, Error); MakeError(BadStorePathName, BadStorePath); struct StoreDirConfig : public Config { using Config::Config; StoreDirConfig() = delete; virtual ~StoreDirConfig() = default; const PathSetting storeDir_{this, settings.nixStore, "store", R"( Logical location of the Nix store, usually `/nix/store`. Note that you can only copy store paths between stores if they have the same `store` setting. )"}; const Path storeDir = storeDir_; // pure methods StorePath parseStorePath(std::string_view path) const; std::optional<StorePath> maybeParseStorePath(std::string_view path) const; std::string printStorePath(const StorePath & path) const; /** * Deprecated * * \todo remove */ StorePathSet parseStorePathSet(const PathSet & paths) const; PathSet printStorePathSet(const StorePathSet & path) const; /** * Display a set of paths in human-readable form (i.e., between quotes * and separated by commas). */ std::string showPaths(const StorePathSet & paths); /** * @return true if *path* is in the Nix store (but not the Nix * store itself). */ bool isInStore(PathView path) const; /** * @return true if *path* is a store path, i.e. a direct child of the * Nix store. */ bool isStorePath(std::string_view path) const; /** * Split a path like `/nix/store/<hash>-<name>/<bla>` into * `/nix/store/<hash>-<name>` and `/<bla>`. */ std::pair<StorePath, Path> toStorePath(PathView path) const; /** * Constructs a unique store path name. 
*/ StorePath makeStorePath(std::string_view type, std::string_view hash, std::string_view name) const; StorePath makeStorePath(std::string_view type, const Hash & hash, std::string_view name) const; StorePath makeOutputPath(std::string_view id, const Hash & hash, std::string_view name) const; StorePath makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const; StorePath makeFixedOutputPathFromCA(std::string_view name, const ContentAddressWithReferences & ca) const; /** * Read-only variant of addToStore(). It returns the store * path for the given file sytem object. */ std::pair<StorePath, Hash> computeStorePath( std::string_view name, const SourcePath & path, ContentAddressMethod method = FileIngestionMethod::NixArchive, HashAlgorithm hashAlgo = HashAlgorithm::SHA256, const StorePathSet & references = {}, PathFilter & filter = defaultPathFilter) const; }; }
2,916
C++
.h
81
30.444444
110
0.680669
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,119
keys.hh
NixOS_nix/src/libstore/keys.hh
#pragma once ///@file #include "signature/local-keys.hh" namespace nix { PublicKeys getDefaultPublicKeys(); }
114
C++
.h
6
17.333333
34
0.798077
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,120
nar-accessor.hh
NixOS_nix/src/libstore/nar-accessor.hh
#pragma once ///@file #include "source-accessor.hh" #include <functional> #include <nlohmann/json_fwd.hpp> namespace nix { struct Source; /** * Return an object that provides access to the contents of a NAR * file. */ ref<SourceAccessor> makeNarAccessor(std::string && nar); ref<SourceAccessor> makeNarAccessor(Source & source); /** * Create a NAR accessor from a NAR listing (in the format produced by * listNar()). The callback getNarBytes(offset, length) is used by the * readFile() method of the accessor to get the contents of files * inside the NAR. */ using GetNarBytes = std::function<std::string(uint64_t, uint64_t)>; ref<SourceAccessor> makeLazyNarAccessor( const std::string & listing, GetNarBytes getNarBytes); /** * Write a JSON representation of the contents of a NAR (except file * contents). */ nlohmann::json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse); }
935
C++
.h
29
30.206897
91
0.756425
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,121
ssh-store.hh
NixOS_nix/src/libstore/ssh-store.hh
#pragma once ///@file #include "common-ssh-store-config.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "remote-store.hh" namespace nix { struct SSHStoreConfig : virtual RemoteStoreConfig, virtual CommonSSHStoreConfig { using CommonSSHStoreConfig::CommonSSHStoreConfig; using RemoteStoreConfig::RemoteStoreConfig; SSHStoreConfig(std::string_view scheme, std::string_view authority, const Params & params); const Setting<Strings> remoteProgram{ this, {"nix-daemon"}, "remote-program", "Path to the `nix-daemon` executable on the remote machine."}; const std::string name() override { return "Experimental SSH Store"; } static std::set<std::string> uriSchemes() { return {"ssh-ng"}; } std::string doc() override; }; struct MountedSSHStoreConfig : virtual SSHStoreConfig, virtual LocalFSStoreConfig { using LocalFSStoreConfig::LocalFSStoreConfig; using SSHStoreConfig::SSHStoreConfig; MountedSSHStoreConfig(StringMap params); MountedSSHStoreConfig(std::string_view scheme, std::string_view host, StringMap params); const std::string name() override { return "Experimental SSH Store with filesystem mounted"; } static std::set<std::string> uriSchemes() { return {"mounted-ssh-ng"}; } std::string doc() override; std::optional<ExperimentalFeature> experimentalFeature() const override { return ExperimentalFeature::MountedSSHStore; } }; }
1,516
C++
.h
45
29.044444
110
0.72646
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,122
serve-protocol.hh
NixOS_nix/src/libstore/serve-protocol.hh
#pragma once ///@file #include "common-protocol.hh" namespace nix { #define SERVE_MAGIC_1 0x390c9deb #define SERVE_MAGIC_2 0x5452eecb #define SERVE_PROTOCOL_VERSION (2 << 8 | 7) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) struct StoreDirConfig; struct Source; // items being serialised struct BuildResult; struct UnkeyedValidPathInfo; /** * The "serve protocol", used by ssh:// stores. * * This `struct` is basically just a `namespace`; We use a type rather * than a namespace just so we can use it as a template argument. */ struct ServeProto { /** * Enumeration of all the request types for the protocol. */ enum struct Command : uint64_t; /** * Version type for the protocol. * * @todo Convert to struct with separate major vs minor fields. */ using Version = unsigned int; /** * A unidirectional read connection, to be used by the read half of the * canonical serializers below. */ struct ReadConn { Source & from; Version version; }; /** * A unidirectional write connection, to be used by the write half of the * canonical serializers below. */ struct WriteConn { Sink & to; Version version; }; /** * Stripped down serialization logic suitable for sharing with Hydra. * * @todo remove once Hydra uses Store abstraction consistently. */ struct BasicClientConnection; struct BasicServerConnection; /** * Data type for canonical pairs of serialisers for the serve protocol. * * See https://en.cppreference.com/w/cpp/language/adl for the broader * concept of what is going on here. */ template<typename T> struct Serialise; // This is the definition of `Serialise` we *want* to put here, but // do not do so. // // See `worker-protocol.hh` for a longer explanation. 
#if 0 { static T read(const StoreDirConfig & store, ReadConn conn); static void write(const StoreDirConfig & store, WriteConn conn, const T & t); }; #endif /** * Wrapper function around `ServeProto::Serialise<T>::write` that allows us to * infer the type instead of having to write it down explicitly. */ template<typename T> static void write(const StoreDirConfig & store, WriteConn conn, const T & t) { ServeProto::Serialise<T>::write(store, conn, t); } /** * Options for building shared between * `ServeProto::Command::BuildPaths` and * `ServeProto::Command::BuildDerivation`. */ struct BuildOptions; }; enum struct ServeProto::Command : uint64_t { QueryValidPaths = 1, QueryPathInfos = 2, DumpStorePath = 3, ImportPaths = 4, ExportPaths = 5, BuildPaths = 6, QueryClosure = 7, BuildDerivation = 8, AddToStoreNar = 9, }; struct ServeProto::BuildOptions { /** * Default value in this and every other field is so tests pass when * testing older deserialisers which do not set all the fields. */ time_t maxSilentTime = -1; time_t buildTimeout = -1; size_t maxLogSize = -1; size_t nrRepeats = -1; bool enforceDeterminism = -1; bool keepFailed = -1; bool operator == (const ServeProto::BuildOptions &) const = default; }; /** * Convenience for sending operation codes. * * @todo Switch to using `ServeProto::Serialize` instead probably. But * this was not done at this time so there would be less churn. */ inline Sink & operator << (Sink & sink, ServeProto::Command op) { return sink << (uint64_t) op; } /** * Convenience for debugging. * * @todo Perhaps render known opcodes more nicely. */ inline std::ostream & operator << (std::ostream & s, ServeProto::Command op) { return s << (uint64_t) op; } /** * Declare a canonical serialiser pair for the worker protocol. * * We specialise the struct merely to indicate that we are implementing * the function for the given type. * * Some sort of `template<...>` must be used with the caller for this to * be legal specialization syntax. 
See below for what that looks like in * practice. */ #define DECLARE_SERVE_SERIALISER(T) \ struct ServeProto::Serialise< T > \ { \ static T read(const StoreDirConfig & store, ServeProto::ReadConn conn); \ static void write(const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t); \ }; template<> DECLARE_SERVE_SERIALISER(BuildResult); template<> DECLARE_SERVE_SERIALISER(UnkeyedValidPathInfo); template<> DECLARE_SERVE_SERIALISER(ServeProto::BuildOptions); template<typename T> DECLARE_SERVE_SERIALISER(std::vector<T>); template<typename T> DECLARE_SERVE_SERIALISER(std::set<T>); template<typename... Ts> DECLARE_SERVE_SERIALISER(std::tuple<Ts...>); #define COMMA_ , template<typename K, typename V> DECLARE_SERVE_SERIALISER(std::map<K COMMA_ V>); #undef COMMA_ }
4,954
C++
.h
166
25.945783
99
0.690191
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,123
http-binary-cache-store.hh
NixOS_nix/src/libstore/http-binary-cache-store.hh
#include "binary-cache-store.hh" namespace nix { struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig { using BinaryCacheStoreConfig::BinaryCacheStoreConfig; HttpBinaryCacheStoreConfig(std::string_view scheme, std::string_view _cacheUri, const Params & params); Path cacheUri; const std::string name() override { return "HTTP Binary Cache Store"; } static std::set<std::string> uriSchemes() { static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; auto ret = std::set<std::string>({"http", "https"}); if (forceHttp) ret.insert("file"); return ret; } std::string doc() override; }; }
700
C++
.h
22
26.272727
107
0.667164
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,124
serve-protocol-impl.hh
NixOS_nix/src/libstore/serve-protocol-impl.hh
#pragma once /** * @file * * Template implementations (as opposed to mere declarations). * * This file is an exmample of the "impl.hh" pattern. See the * contributing guide. */ #include "serve-protocol.hh" #include "length-prefixed-protocol-helper.hh" namespace nix { /* protocol-agnostic templates */ #define SERVE_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \ TEMPLATE T ServeProto::Serialise< T >::read(const StoreDirConfig & store, ServeProto::ReadConn conn) \ { \ return LengthPrefixedProtoHelper<ServeProto, T >::read(store, conn); \ } \ TEMPLATE void ServeProto::Serialise< T >::write(const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t) \ { \ LengthPrefixedProtoHelper<ServeProto, T >::write(store, conn, t); \ } SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>) SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>) SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>) #define SERVE_USE_LENGTH_PREFIX_SERIALISER_COMMA , SERVE_USE_LENGTH_PREFIX_SERIALISER( template<typename K SERVE_USE_LENGTH_PREFIX_SERIALISER_COMMA typename V>, std::map<K SERVE_USE_LENGTH_PREFIX_SERIALISER_COMMA V>) /** * Use `CommonProto` where possible. */ template<typename T> struct ServeProto::Serialise { static T read(const StoreDirConfig & store, ServeProto::ReadConn conn) { return CommonProto::Serialise<T>::read(store, CommonProto::ReadConn { .from = conn.from }); } static void write(const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t) { CommonProto::Serialise<T>::write(store, CommonProto::WriteConn { .to = conn.to }, t); } }; /* protocol-specific templates */ }
1,799
C++
.h
49
32.816327
124
0.708214
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,125
serve-protocol-connection.hh
NixOS_nix/src/libstore/serve-protocol-connection.hh
#pragma once ///@file #include "serve-protocol.hh" #include "store-api.hh" namespace nix { struct ServeProto::BasicClientConnection { FdSink to; FdSource from; ServeProto::Version remoteVersion; /** * Establishes connection, negotiating version. * * @return the version provided by the other side of the * connection. * * @param to Taken by reference to allow for various error handling * mechanisms. * * @param from Taken by reference to allow for various error * handling mechanisms. * * @param localVersion Our version which is sent over * * @param host Just used to add context to thrown exceptions. */ static ServeProto::Version handshake(BufferedSink & to, Source & from, ServeProto::Version localVersion, std::string_view host); /** * Coercion to `ServeProto::ReadConn`. This makes it easy to use the * factored out serve protocol serializers with a * `LegacySSHStore::Connection`. * * The serve protocol connection types are unidirectional, unlike * this type. */ operator ServeProto::ReadConn() { return ServeProto::ReadConn{ .from = from, .version = remoteVersion, }; } /** * Coercion to `ServeProto::WriteConn`. This makes it easy to use the * factored out serve protocol serializers with a * `LegacySSHStore::Connection`. * * The serve protocol connection types are unidirectional, unlike * this type. */ operator ServeProto::WriteConn() { return ServeProto::WriteConn{ .to = to, .version = remoteVersion, }; } StorePathSet queryValidPaths( const StoreDirConfig & remoteStore, bool lock, const StorePathSet & paths, SubstituteFlag maybeSubstitute); std::map<StorePath, UnkeyedValidPathInfo> queryPathInfos(const StoreDirConfig & store, const StorePathSet & paths); ; void putBuildDerivationRequest( const StoreDirConfig & store, const StorePath & drvPath, const BasicDerivation & drv, const ServeProto::BuildOptions & options); /** * Get the response, must be paired with * `putBuildDerivationRequest`. 
*/ BuildResult getBuildDerivationResponse(const StoreDirConfig & store); void narFromPath(const StoreDirConfig & store, const StorePath & path, std::function<void(Source &)> fun); void importPaths(const StoreDirConfig & store, std::function<void(Sink &)> fun); }; struct ServeProto::BasicServerConnection { /** * Establishes connection, negotiating version. * * @return the version provided by the other side of the * connection. * * @param to Taken by reference to allow for various error handling * mechanisms. * * @param from Taken by reference to allow for various error * handling mechanisms. * * @param localVersion Our version which is sent over */ static ServeProto::Version handshake(BufferedSink & to, Source & from, ServeProto::Version localVersion); }; }
3,137
C++
.h
94
27.531915
119
0.678772
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,126
uds-remote-store.hh
NixOS_nix/src/libstore/uds-remote-store.hh
#pragma once ///@file #include "remote-store.hh" #include "remote-store-connection.hh" #include "indirect-root-store.hh" namespace nix { struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig { // TODO(fzakaria): Delete this constructor once moved over to the factory pattern // outlined in https://github.com/NixOS/nix/issues/10766 using LocalFSStoreConfig::LocalFSStoreConfig; using RemoteStoreConfig::RemoteStoreConfig; /** * @param authority is the socket path. */ UDSRemoteStoreConfig( std::string_view scheme, std::string_view authority, const Params & params); const std::string name() override { return "Local Daemon Store"; } std::string doc() override; /** * The path to the unix domain socket. * * The default is `settings.nixDaemonSocketFile`, but we don't write * that below, instead putting in the constructor. */ Path path; protected: static constexpr char const * scheme = "unix"; public: static std::set<std::string> uriSchemes() { return {scheme}; } }; class UDSRemoteStore : public virtual UDSRemoteStoreConfig , public virtual IndirectRootStore , public virtual RemoteStore { public: /** * @deprecated This is the old API to construct the store. */ UDSRemoteStore(const Params & params); /** * @param authority is the socket path. */ UDSRemoteStore( std::string_view scheme, std::string_view authority, const Params & params); std::string getUri() override; ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override { return LocalFSStore::getFSAccessor(requireValidPath); } void narFromPath(const StorePath & path, Sink & sink) override { LocalFSStore::narFromPath(path, sink); } /** * Implementation of `IndirectRootStore::addIndirectRoot()` which * delegates to the remote store. * * The idea is that the client makes the direct symlink, so it is * owned managed by the client's user account, and the server makes * the indirect symlink. 
*/ void addIndirectRoot(const Path & path) override; private: struct Connection : RemoteStore::Connection { AutoCloseFD fd; void closeWrite() override; }; ref<RemoteStore::Connection> openConnection() override; }; }
2,413
C++
.h
73
28.041096
85
0.699138
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,127
build-result.hh
NixOS_nix/src/libstore/build-result.hh
#pragma once ///@file #include "realisation.hh" #include "derived-path.hh" #include <string> #include <chrono> #include <optional> namespace nix { struct BuildResult { /** * @note This is directly used in the nix-store --serve protocol. * That means we need to worry about compatability across versions. * Therefore, don't remove status codes, and only add new status * codes at the end of the list. */ enum Status { Built = 0, Substituted, AlreadyValid, PermanentFailure, InputRejected, OutputRejected, /// possibly transient TransientFailure, /// no longer used CachedFailure, TimedOut, MiscFailure, DependencyFailed, LogLimitExceeded, NotDeterministic, ResolvesToAlreadyValid, NoSubstituters, } status = MiscFailure; /** * Information about the error if the build failed. * * @todo This should be an entire ErrorInfo object, not just a * string, for richer information. */ std::string errorMsg; std::string toString() const { auto strStatus = [&]() { switch (status) { case Built: return "Built"; case Substituted: return "Substituted"; case AlreadyValid: return "AlreadyValid"; case PermanentFailure: return "PermanentFailure"; case InputRejected: return "InputRejected"; case OutputRejected: return "OutputRejected"; case TransientFailure: return "TransientFailure"; case CachedFailure: return "CachedFailure"; case TimedOut: return "TimedOut"; case MiscFailure: return "MiscFailure"; case DependencyFailed: return "DependencyFailed"; case LogLimitExceeded: return "LogLimitExceeded"; case NotDeterministic: return "NotDeterministic"; case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid"; case NoSubstituters: return "NoSubstituters"; default: return "Unknown"; }; }(); return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg); } /** * How many times this build was performed. */ unsigned int timesBuilt = 0; /** * If timesBuilt > 1, whether some builds did not produce the same * result. 
(Note that 'isNonDeterministic = false' does not mean * the build is deterministic, just that we don't have evidence of * non-determinism.) */ bool isNonDeterministic = false; /** * For derivations, a mapping from the names of the wanted outputs * to actual paths. */ SingleDrvOutputs builtOutputs; /** * The start/stop times of the build (or one of the rounds, if it * was repeated). */ time_t startTime = 0, stopTime = 0; /** * User and system CPU time the build took. */ std::optional<std::chrono::microseconds> cpuUser, cpuSystem; bool operator ==(const BuildResult &) const noexcept; std::strong_ordering operator <=>(const BuildResult &) const noexcept; bool success() { return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid; } void rethrow() { throw Error("%s", errorMsg); } }; /** * A `BuildResult` together with its "primary key". */ struct KeyedBuildResult : BuildResult { /** * The derivation we built or the store path we substituted. */ DerivedPath path; // Hack to work around a gcc "may be used uninitialized" warning. KeyedBuildResult(BuildResult res, DerivedPath path) : BuildResult(std::move(res)), path(std::move(path)) { } }; }
3,849
C++
.h
116
25.818966
118
0.624058
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,128
nar-info.hh
NixOS_nix/src/libstore/nar-info.hh
#pragma once ///@file #include "types.hh" #include "hash.hh" #include "path-info.hh" namespace nix { class Store; struct NarInfo : ValidPathInfo { std::string url; std::string compression; std::optional<Hash> fileHash; uint64_t fileSize = 0; NarInfo() = delete; NarInfo(const Store & store, std::string name, ContentAddressWithReferences ca, Hash narHash) : ValidPathInfo(store, std::move(name), std::move(ca), narHash) { } NarInfo(StorePath path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { } NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } NarInfo(const Store & store, const std::string & s, const std::string & whence); bool operator ==(const NarInfo &) const = default; // TODO libc++ 16 (used by darwin) missing `std::optional::operator <=>`, can't do yet //auto operator <=>(const NarInfo &) const = default; std::string to_string(const Store & store) const; nlohmann::json toJSON( const Store & store, bool includeImpureInfo, HashFormat hashFormat) const override; static NarInfo fromJSON( const Store & store, const StorePath & path, const nlohmann::json & json); }; }
1,234
C++
.h
34
31.5
97
0.675063
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,129
store-reference.hh
NixOS_nix/src/libstore/store-reference.hh
#pragma once ///@file #include <variant> #include "types.hh" namespace nix { /** * A parsed Store URI (URI is a slight misnomer...), parsed but not yet * resolved to a specific instance and query parms validated. * * Supported values are: * * - `local`: The Nix store in /nix/store and database in * /nix/var/nix/db, accessed directly. * * - `daemon`: The Nix store accessed via a Unix domain socket * connection to nix-daemon. * * - `unix://<path>`: The Nix store accessed via a Unix domain socket * connection to nix-daemon, with the socket located at `<path>`. * * - `auto` or ``: Equivalent to `local` or `daemon` depending on * whether the user has write access to the local Nix * store/database. * * - `file://<path>`: A binary cache stored in `<path>`. * * - `https://<path>`: A binary cache accessed via HTTP. * * - `s3://<path>`: A writable binary cache stored on Amazon's Simple * Storage Service. * * - `ssh://[user@]<host>`: A remote Nix store accessed by running * `nix-store --serve` via SSH. * * You can pass parameters to the store type by appending * `?key=value&key=value&...` to the URI. */ struct StoreReference { using Params = std::map<std::string, std::string>; /** * Special store reference `""` or `"auto"` */ struct Auto { inline bool operator==(const Auto & rhs) const = default; inline auto operator<=>(const Auto & rhs) const = default; }; /** * General case, a regular `scheme://authority` URL. */ struct Specified { std::string scheme; std::string authority = ""; bool operator==(const Specified & rhs) const = default; auto operator<=>(const Specified & rhs) const = default; }; typedef std::variant<Auto, Specified> Variant; Variant variant; Params params; bool operator==(const StoreReference & rhs) const = default; /** * Render the whole store reference as a URI, including parameters. */ std::string render() const; /** * Parse a URI into a store reference. 
*/ static StoreReference parse(const std::string & uri, const Params & extraParams = Params{}); }; /** * Split URI into protocol+hierarchy part and its parameter set. */ std::pair<std::string, StoreReference::Params> splitUriAndParams(const std::string & uri); }
2,376
C++
.h
76
27.578947
96
0.655142
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,130
downstream-placeholder.hh
NixOS_nix/src/libstore/downstream-placeholder.hh
#pragma once ///@file #include "hash.hh" #include "path.hh" #include "derived-path.hh" namespace nix { /** * Downstream Placeholders are opaque and almost certainly unique values * used to allow derivations to refer to store objects which are yet to * be built and for we do not yet have store paths for. * * They correspond to `DerivedPaths` that are not `DerivedPath::Opaque`, * except for the cases involving input addressing or fixed outputs * where we do know a store path for the derivation output in advance. * * Unlike `DerivationPath`, however, `DownstreamPlaceholder` is * purposefully opaque and obfuscated. This is so they are hard to * create by accident, and so substituting them (once we know what the * path to store object is) is unlikely to capture other stuff it * shouldn't. * * We use them with `Derivation`: the `render()` method is called to * render an opaque string which can be used in the derivation, and the * resolving logic can substitute those strings for store paths when * resolving `Derivation.inputDrvs` to `BasicDerivation.inputSrcs`. */ class DownstreamPlaceholder { /** * `DownstreamPlaceholder` is just a newtype of `Hash`. * This its only field. */ Hash hash; /** * Newtype constructor */ DownstreamPlaceholder(Hash hash) : hash(hash) { } public: /** * This creates an opaque and almost certainly unique string * deterministically from the placeholder. */ std::string render() const; /** * Create a placeholder for an unknown output of a content-addressed * derivation. * * The derivation itself is known (we have a store path for it), but * the output doesn't yet have a known store path. * * @param xpSettings Stop-gap to avoid globals during unit tests. */ static DownstreamPlaceholder unknownCaOutput( const StorePath & drvPath, OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Create a placehold for the output of an unknown derivation. 
* * The derivation is not yet known because it is a dynamic * derivaiton --- it is itself an output of another derivation --- * and we just have (another) placeholder for it. * * @param xpSettings Stop-gap to avoid globals during unit tests. */ static DownstreamPlaceholder unknownDerivation( const DownstreamPlaceholder & drvPlaceholder, OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Convenience constructor that handles both cases (unknown * content-addressed output and unknown derivation), delegating as * needed to `unknownCaOutput` and `unknownDerivation`. * * Recursively builds up a placeholder from a * `SingleDerivedPath::Built.drvPath` chain. */ static DownstreamPlaceholder fromSingleDerivedPathBuilt( const SingleDerivedPath::Built & built, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }; }
3,144
C++
.h
82
33.743902
86
0.723878
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,131
sqlite.hh
NixOS_nix/src/libstore/sqlite.hh
#pragma once ///@file #include <functional> #include <string> #include "error.hh" struct sqlite3; struct sqlite3_stmt; namespace nix { enum class SQLiteOpenMode { /** * Open the database in read-write mode. * If the database does not exist, it will be created. */ Normal, /** * Open the database in read-write mode. * Fails with an error if the database does not exist. */ NoCreate, /** * Open the database in immutable mode. * In addition to the database being read-only, * no wal or journal files will be created by sqlite. * Use this mode if the database is on a read-only filesystem. * Fails with an error if the database does not exist. */ Immutable, }; /** * RAII wrapper to close a SQLite database automatically. */ struct SQLite { sqlite3 * db = 0; SQLite() { } SQLite(const Path & path, SQLiteOpenMode mode = SQLiteOpenMode::Normal); SQLite(const SQLite & from) = delete; SQLite& operator = (const SQLite & from) = delete; // NOTE: This is noexcept since we are only copying and assigning raw pointers. SQLite& operator = (SQLite && from) noexcept { db = from.db; from.db = 0; return *this; } ~SQLite(); operator sqlite3 * () { return db; } /** * Disable synchronous mode, set truncate journal mode. */ void isCache(); void exec(const std::string & stmt); uint64_t getLastInsertedRowId(); }; /** * RAII wrapper to create and destroy SQLite prepared statements. */ struct SQLiteStmt { sqlite3 * db = 0; sqlite3_stmt * stmt = 0; std::string sql; SQLiteStmt() { } SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); } void create(sqlite3 * db, const std::string & s); ~SQLiteStmt(); operator sqlite3_stmt * () { return stmt; } /** * Helper for binding / executing statements. */ class Use { friend struct SQLiteStmt; private: SQLiteStmt & stmt; unsigned int curArg = 1; Use(SQLiteStmt & stmt); public: ~Use(); /** * Bind the next parameter. 
*/ Use & operator () (std::string_view value, bool notNull = true); Use & operator () (const unsigned char * data, size_t len, bool notNull = true); Use & operator () (int64_t value, bool notNull = true); Use & bind(); // null int step(); /** * Execute a statement that does not return rows. */ void exec(); /** * For statements that return 0 or more rows. Returns true iff * a row is available. */ bool next(); std::string getStr(int col); int64_t getInt(int col); bool isNull(int col); }; Use use() { return Use(*this); } }; /** * RAII helper that ensures transactions are aborted unless explicitly * committed. */ struct SQLiteTxn { bool active = false; sqlite3 * db; SQLiteTxn(sqlite3 * db); void commit(); ~SQLiteTxn(); }; struct SQLiteError : Error { std::string path; std::string errMsg; int errNo, extendedErrNo, offset; template<typename... Args> [[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) { throw_(db, HintFmt(fs, args...)); } SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, HintFmt && hf); protected: template<typename... Args> SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, const std::string & fs, const Args & ... args) : SQLiteError(path, errMsg, errNo, extendedErrNo, offset, HintFmt(fs, args...)) { } [[noreturn]] static void throw_(sqlite3 * db, HintFmt && hf); }; MakeError(SQLiteBusy, SQLiteError); void handleSQLiteBusy(const SQLiteBusy & e, time_t & nextWarning); /** * Convenience function for retrying a SQLite transaction when the * database is busy. */ template<typename T, typename F> T retrySQLite(F && fun) { time_t nextWarning = time(0) + 1; while (true) { try { return fun(); } catch (SQLiteBusy & e) { handleSQLiteBusy(e, nextWarning); } } } }
4,301
C++
.h
148
23.851351
142
0.625213
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,132
common-protocol-impl.hh
NixOS_nix/src/libstore/common-protocol-impl.hh
#pragma once /** * @file * * Template implementations (as opposed to mere declarations). * * This file is an exmample of the "impl.hh" pattern. See the * contributing guide. */ #include "common-protocol.hh" #include "length-prefixed-protocol-helper.hh" namespace nix { /* protocol-agnostic templates */ #define COMMON_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \ TEMPLATE T CommonProto::Serialise< T >::read(const StoreDirConfig & store, CommonProto::ReadConn conn) \ { \ return LengthPrefixedProtoHelper<CommonProto, T >::read(store, conn); \ } \ TEMPLATE void CommonProto::Serialise< T >::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & t) \ { \ LengthPrefixedProtoHelper<CommonProto, T >::write(store, conn, t); \ } COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>) COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>) COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>) #define COMMA_ , COMMON_USE_LENGTH_PREFIX_SERIALISER( template<typename K COMMA_ typename V>, std::map<K COMMA_ V>) #undef COMMA_ /* protocol-specific templates */ }
1,201
C++
.h
32
34.53125
126
0.726724
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,133
path.hh
NixOS_nix/src/libstore/path.hh
#pragma once ///@file #include <string_view> #include "types.hh" namespace nix { struct Hash; /** * Check whether a name is a valid store path name. * * @throws BadStorePathName if the name is invalid. The message is of the format "name %s is not valid, for this specific reason". */ void checkName(std::string_view name); /** * \ref StorePath "Store path" is the fundamental reference type of Nix. * A store paths refers to a Store object. * * See store/store-path.html for more information on a * conceptual level. */ class StorePath { std::string baseName; public: /** * Size of the hash part of store paths, in base-32 characters. */ constexpr static size_t HashLen = 32; // i.e. 160 bits constexpr static size_t MaxPathLen = 211; StorePath() = delete; /** @throws BadStorePath */ StorePath(std::string_view baseName); /** @throws BadStorePath */ StorePath(const Hash & hash, std::string_view name); std::string_view to_string() const noexcept { return baseName; } bool operator == (const StorePath & other) const noexcept = default; auto operator <=> (const StorePath & other) const noexcept = default; /** * Check whether a file name ends with the extension for derivations. */ bool isDerivation() const noexcept; /** * Throw an exception if `isDerivation` is false. */ void requireDerivation() const; std::string_view name() const { return std::string_view(baseName).substr(HashLen + 1); } std::string_view hashPart() const { return std::string_view(baseName).substr(0, HashLen); } static StorePath dummy; static StorePath random(std::string_view name); }; typedef std::set<StorePath> StorePathSet; typedef std::vector<StorePath> StorePaths; /** * The file extension of \ref nix::Derivation derivations when serialized * into store objects. */ constexpr std::string_view drvExtension = ".drv"; } namespace std { template<> struct hash<nix::StorePath> { std::size_t operator()(const nix::StorePath & path) const noexcept { return * (std::size_t *) path.to_string().data(); } }; }
2,203
C++
.h
74
25.837838
130
0.684736
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,134
remote-fs-accessor.hh
NixOS_nix/src/libstore/remote-fs-accessor.hh
#pragma once ///@file #include "source-accessor.hh" #include "ref.hh" #include "store-api.hh" namespace nix { class RemoteFSAccessor : public SourceAccessor { ref<Store> store; std::map<std::string, ref<SourceAccessor>> nars; bool requireValidPath; Path cacheDir; std::pair<ref<SourceAccessor>, CanonPath> fetch(const CanonPath & path); friend class BinaryCacheStore; Path makeCacheFile(std::string_view hashPart, const std::string & ext); ref<SourceAccessor> addToCache(std::string_view hashPart, std::string && nar); public: RemoteFSAccessor(ref<Store> store, bool requireValidPath = true, const /* FIXME: use std::optional */ Path & cacheDir = ""); std::optional<Stat> maybeLstat(const CanonPath & path) override; DirEntries readDirectory(const CanonPath & path) override; std::string readFile(const CanonPath & path) override; std::string readLink(const CanonPath & path) override; }; }
976
C++
.h
26
33.269231
82
0.72776
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,135
builtins.hh
NixOS_nix/src/libstore/builtins.hh
#pragma once ///@file #include "derivations.hh" namespace nix { // TODO: make pluggable. void builtinFetchurl( const BasicDerivation & drv, const std::map<std::string, Path> & outputs, const std::string & netrcData, const std::string & caFileData); void builtinUnpackChannel( const BasicDerivation & drv, const std::map<std::string, Path> & outputs); }
382
C++
.h
14
24.214286
49
0.719008
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,136
derived-path-map.hh
NixOS_nix/src/libstore/derived-path-map.hh
#pragma once ///@file #include "types.hh" #include "derived-path.hh" namespace nix { /** * A simple Trie, of sorts. Conceptually a map of `SingleDerivedPath` to * values. * * Concretely, an n-ary tree, as described below. A * `SingleDerivedPath::Opaque` maps to the value of an immediate child * of the root node. A `SingleDerivedPath::Built` maps to a deeper child * node: the `SingleDerivedPath::Built::drvPath` is first mapped to a a * child node (inductively), and then the * `SingleDerivedPath::Built::output` is used to look up that child's * child via its map. In this manner, every `SingleDerivedPath` is * mapped to a child node. * * @param V A type to instantiate for each output. It should probably * should be an "optional" type so not every interior node has to have a * value. `* const Something` or `std::optional<Something>` would be * good choices for "optional" types. */ template<typename V> struct DerivedPathMap { /** * A child node (non-root node). */ struct ChildNode { /** * Value of this child node. * * @see DerivedPathMap for what `V` should be. */ V value; /** * The map type for the root node. */ using Map = std::map<OutputName, ChildNode>; /** * The map of the root node. */ Map childMap; bool operator == (const ChildNode &) const noexcept; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. // decltype(std::declval<V>() <=> std::declval<V>()) // operator <=> (const ChildNode &) const noexcept; }; /** * The map type for the root node. */ using Map = std::map<StorePath, ChildNode>; /** * The map of root node. */ Map map; bool operator == (const DerivedPathMap &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. // auto operator <=> (const DerivedPathMap &) const noexcept; /** * Find the node for `k`, creating it if needed. 
* * The node is referred to as a "slot" on the assumption that `V` is * some sort of optional type, so the given key can be set or unset * by changing this node. */ ChildNode & ensureSlot(const SingleDerivedPath & k); /** * Like `ensureSlot` but does not create the slot if it doesn't exist. * * Read the entire description of `ensureSlot` to understand an * important caveat here that "have slot" does *not* imply "key is * set in map". To ensure a key is set one would need to get the * child node (with `findSlot` or `ensureSlot`) *and* check the * `ChildNode::value`. */ ChildNode * findSlot(const SingleDerivedPath & k); }; template<> bool DerivedPathMap<std::set<std::string>>::ChildNode::operator == ( const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. #if 0 template<> std::strong_ordering DerivedPathMap<std::set<std::string>>::ChildNode::operator <=> ( const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept; template<> inline auto DerivedPathMap<std::set<std::string>>::operator <=> (const DerivedPathMap<std::set<std::string>> &) const noexcept = default; #endif extern template struct DerivedPathMap<std::set<std::string>>::ChildNode; extern template struct DerivedPathMap<std::set<std::string>>; }
3,532
C++
.h
92
33.576087
137
0.656049
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,137
length-prefixed-protocol-helper.hh
NixOS_nix/src/libstore/length-prefixed-protocol-helper.hh
#pragma once /** * @file * * Reusable serialisers for serialization container types in a * length-prefixed manner. * * Used by both the Worker and Serve protocols. */ #include "types.hh" namespace nix { struct StoreDirConfig; /** * Reusable serialisers for serialization container types in a * length-prefixed manner. * * @param T The type of the collection being serialised * * @param Inner This the most important parameter; this is the "inner" * protocol. The user of this will substitute `MyProtocol` or similar * when making a `MyProtocol::Serialiser<Collection<T>>`. Note that the * inside is allowed to call to call `Inner::Serialiser` on different * types. This is especially important for `std::map` which doesn't have * a single `T` but one `K` and one `V`. */ template<class Inner, typename T> struct LengthPrefixedProtoHelper; #define LENGTH_PREFIXED_PROTO_HELPER(Inner, T) \ struct LengthPrefixedProtoHelper< Inner, T > \ { \ static T read(const StoreDirConfig & store, typename Inner::ReadConn conn); \ static void write(const StoreDirConfig & store, typename Inner::WriteConn conn, const T & str); \ private: \ /*! \ * Read this as simply `using S = Inner::Serialise;`. \ * \ * It would be nice to use that directly, but C++ doesn't seem to allow \ * it. The `typename` keyword needed to refer to `Inner` seems to greedy \ * (low precedence), and then C++ complains that `Serialise` is not a \ * type parameter but a real type. \ * \ * Making this `S` alias seems to be the only way to avoid these issues. \ */ \ template<typename U> using S = typename Inner::template Serialise<U>; \ } template<class Inner, typename T> LENGTH_PREFIXED_PROTO_HELPER(Inner, std::vector<T>); template<class Inner, typename T> LENGTH_PREFIXED_PROTO_HELPER(Inner, std::set<T>); template<class Inner, typename... 
Ts> LENGTH_PREFIXED_PROTO_HELPER(Inner, std::tuple<Ts...>); template<class Inner, typename K, typename V> #define LENGTH_PREFIXED_PROTO_HELPER_X std::map<K, V> LENGTH_PREFIXED_PROTO_HELPER(Inner, LENGTH_PREFIXED_PROTO_HELPER_X); template<class Inner, typename T> std::vector<T> LengthPrefixedProtoHelper<Inner, std::vector<T>>::read( const StoreDirConfig & store, typename Inner::ReadConn conn) { std::vector<T> resSet; auto size = readNum<size_t>(conn.from); while (size--) { resSet.push_back(S<T>::read(store, conn)); } return resSet; } template<class Inner, typename T> void LengthPrefixedProtoHelper<Inner, std::vector<T>>::write( const StoreDirConfig & store, typename Inner::WriteConn conn, const std::vector<T> & resSet) { conn.to << resSet.size(); for (auto & key : resSet) { S<T>::write(store, conn, key); } } template<class Inner, typename T> std::set<T> LengthPrefixedProtoHelper<Inner, std::set<T>>::read( const StoreDirConfig & store, typename Inner::ReadConn conn) { std::set<T> resSet; auto size = readNum<size_t>(conn.from); while (size--) { resSet.insert(S<T>::read(store, conn)); } return resSet; } template<class Inner, typename T> void LengthPrefixedProtoHelper<Inner, std::set<T>>::write( const StoreDirConfig & store, typename Inner::WriteConn conn, const std::set<T> & resSet) { conn.to << resSet.size(); for (auto & key : resSet) { S<T>::write(store, conn, key); } } template<class Inner, typename K, typename V> std::map<K, V> LengthPrefixedProtoHelper<Inner, std::map<K, V>>::read( const StoreDirConfig & store, typename Inner::ReadConn conn) { std::map<K, V> resMap; auto size = readNum<size_t>(conn.from); while (size--) { auto k = S<K>::read(store, conn); auto v = S<V>::read(store, conn); resMap.insert_or_assign(std::move(k), std::move(v)); } return resMap; } template<class Inner, typename K, typename V> void LengthPrefixedProtoHelper<Inner, std::map<K, V>>::write( const StoreDirConfig & store, typename Inner::WriteConn conn, const std::map<K, V> & resMap) { 
conn.to << resMap.size(); for (auto & i : resMap) { S<K>::write(store, conn, i.first); S<V>::write(store, conn, i.second); } } template<class Inner, typename... Ts> std::tuple<Ts...> LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::read( const StoreDirConfig & store, typename Inner::ReadConn conn) { return std::tuple<Ts...> { S<Ts>::read(store, conn)..., }; } template<class Inner, typename... Ts> void LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::write( const StoreDirConfig & store, typename Inner::WriteConn conn, const std::tuple<Ts...> & res) { std::apply([&]<typename... Us>(const Us &... args) { (S<Us>::write(store, conn, args), ...); }, res); } }
4,869
C++
.h
142
30.485915
105
0.676577
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,138
s3-binary-cache-store.hh
NixOS_nix/src/libstore/s3-binary-cache-store.hh
#pragma once ///@file #include "binary-cache-store.hh" #include <atomic> namespace nix { struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig { std::string bucketName; using BinaryCacheStoreConfig::BinaryCacheStoreConfig; S3BinaryCacheStoreConfig(std::string_view uriScheme, std::string_view bucketName, const Params & params); const Setting<std::string> profile{ this, "", "profile", R"( The name of the AWS configuration profile to use. By default Nix will use the `default` profile. )"}; protected: constexpr static const char * defaultRegion = "us-east-1"; public: const Setting<std::string> region{ this, defaultRegion, "region", R"( The region of the S3 bucket. If your bucket is not in `us–east-1`, you should always explicitly specify the region parameter. )"}; const Setting<std::string> scheme{ this, "", "scheme", R"( The scheme used for S3 requests, `https` (default) or `http`. This option allows you to disable HTTPS for binary caches which don't support it. > **Note** > > HTTPS should be used if the cache might contain sensitive > information. )"}; const Setting<std::string> endpoint{ this, "", "endpoint", R"( The URL of the endpoint of an S3-compatible service such as MinIO. Do not specify this setting if you're using Amazon S3. > **Note** > > This endpoint must support HTTPS and will use path-based > addressing instead of virtual host based addressing. )"}; const Setting<std::string> narinfoCompression{ this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; const Setting<std::string> lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; const Setting<std::string> logCompression{ this, "", "log-compression", R"( Compression method for `log/*` files. It is recommended to use a compression method supported by most web browsers (e.g. `brotli`). 
)"}; const Setting<bool> multipartUpload{this, false, "multipart-upload", "Whether to use multi-part uploads."}; const Setting<uint64_t> bufferSize{ this, 5 * 1024 * 1024, "buffer-size", "Size (in bytes) of each part in multi-part uploads."}; const std::string name() override { return "S3 Binary Cache Store"; } static std::set<std::string> uriSchemes() { return {"s3"}; } std::string doc() override; }; class S3BinaryCacheStore : public virtual BinaryCacheStore { protected: S3BinaryCacheStore(const Params & params); public: struct Stats { std::atomic<uint64_t> put{0}; std::atomic<uint64_t> putBytes{0}; std::atomic<uint64_t> putTimeMs{0}; std::atomic<uint64_t> get{0}; std::atomic<uint64_t> getBytes{0}; std::atomic<uint64_t> getTimeMs{0}; std::atomic<uint64_t> head{0}; }; virtual const Stats & getS3Stats() = 0; }; }
3,290
C++
.h
98
26.173469
112
0.619349
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,139
content-address.hh
NixOS_nix/src/libstore/content-address.hh
#pragma once ///@file #include <variant> #include "hash.hh" #include "path.hh" #include "file-content-address.hh" #include "variant-wrapper.hh" namespace nix { /* * Content addressing method */ /** * Compute the prefix to the hash algorithm which indicates how the * files were ingested. */ std::string_view makeFileIngestionPrefix(FileIngestionMethod m); /** * An enumeration of all the ways we can content-address store objects. * * Just the type of a content address. Combine with the hash itself, and * we have a `ContentAddress` as defined below. Combine that, in turn, * with info on references, and we have `ContentAddressWithReferences`, * as defined further below. */ struct ContentAddressMethod { enum struct Raw { /** * Calculate a store path using the `FileIngestionMethod::Flat` * hash of the file system objects, and references. * * See `store-object/content-address.md#method-flat` in the * manual. */ Flat, /** * Calculate a store path using the * `FileIngestionMethod::NixArchive` hash of the file system * objects, and references. * * See `store-object/content-address.md#method-flat` in the * manual. */ NixArchive, /** * Calculate a store path using the `FileIngestionMethod::Git` * hash of the file system objects, and references. * * Part of `ExperimentalFeature::GitHashing`. * * See `store-object/content-address.md#method-git` in the * manual. */ Git, /** * Calculate a store path using the `FileIngestionMethod::Flat` * hash of the file system objects, and references, but in a * different way than `ContentAddressMethod::Raw::Flat`. * * See `store-object/content-address.md#method-text` in the * manual. */ Text, }; Raw raw; bool operator ==(const ContentAddressMethod &) const = default; auto operator <=>(const ContentAddressMethod &) const = default; MAKE_WRAPPER_CONSTRUCTOR(ContentAddressMethod); /** * Parse a content addressing method (name). * * The inverse of `render`. 
*/ static ContentAddressMethod parse(std::string_view rawCaMethod); /** * Render a content addressing method (name). * * The inverse of `parse`. */ std::string_view render() const; /** * Parse the prefix tag which indicates how the files * were ingested, with the fixed output case not prefixed for back * compat. * * @param m A string that should begin with the * prefix. On return, the remainder of the string after the * prefix. */ static ContentAddressMethod parsePrefix(std::string_view & m); /** * Render the prefix tag which indicates how the files wre ingested. * * The rough inverse of `parsePrefix()`. */ std::string_view renderPrefix() const; /** * Parse a content addressing method and hash algorithm. */ static std::pair<ContentAddressMethod, HashAlgorithm> parseWithAlgo(std::string_view rawCaMethod); /** * Render a content addressing method and hash algorithm in a * nicer way, prefixing both cases. * * The rough inverse of `parse()`. */ std::string renderWithAlgo(HashAlgorithm ha) const; /** * Get the underlying way to content-address file system objects. * * Different ways of hashing store objects may use the same method * for hashing file systeme objects. */ FileIngestionMethod getFileIngestionMethod() const; }; /* * Mini content address */ /** * We've accumulated several types of content-addressed paths over the * years; fixed-output derivations support multiple hash algorithms and * serialisation methods (flat file vs NAR). 
Thus, `ca` has one of the * following forms: * * - `TextIngestionMethod`: * `text:sha256:<sha256 hash of file contents>` * * - `FixedIngestionMethod`: * `fixed:<r?>:<hash algorithm>:<hash of file contents>` */ struct ContentAddress { /** * How the file system objects are serialized */ ContentAddressMethod method; /** * Hash of that serialization */ Hash hash; bool operator ==(const ContentAddress &) const = default; auto operator <=>(const ContentAddress &) const = default; /** * Compute the content-addressability assertion * (`ValidPathInfo::ca`) for paths created by * `Store::makeFixedOutputPath()` / `Store::addToStore()`. */ std::string render() const; static ContentAddress parse(std::string_view rawCa); static std::optional<ContentAddress> parseOpt(std::string_view rawCaOpt); std::string printMethodAlgo() const; }; /** * Render the `ContentAddress` if it exists to a string, return empty * string otherwise. */ std::string renderContentAddress(std::optional<ContentAddress> ca); /* * Full content address * * See the schema for store paths in store-api.cc */ /** * A set of references to other store objects. * * References to other store objects are tracked with store paths, self * references however are tracked with a boolean. */ struct StoreReferences { /** * References to other store objects */ StorePathSet others; /** * Reference to this store object */ bool self = false; /** * @return true iff no references, i.e. others is empty and self is * false. */ bool empty() const; /** * Returns the numbers of references, i.e. the size of others + 1 * iff self is true. */ size_t size() const; bool operator ==(const StoreReferences &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. //auto operator <=>(const StoreReferences &) const = default; }; // This matches the additional info that we need for makeTextPath struct TextInfo { /** * Hash of the contents of the text/file. 
*/ Hash hash; /** * References to other store objects only; self references * disallowed */ StorePathSet references; bool operator ==(const TextInfo &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. //auto operator <=>(const TextInfo &) const = default; }; struct FixedOutputInfo { /** * How the file system objects are serialized */ FileIngestionMethod method; /** * Hash of that serialization */ Hash hash; /** * References to other store objects or this one. */ StoreReferences references; bool operator ==(const FixedOutputInfo &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. //auto operator <=>(const FixedOutputInfo &) const = default; }; /** * Ways of content addressing but not a complete ContentAddress. * * A ContentAddress without a Hash. */ struct ContentAddressWithReferences { typedef std::variant< TextInfo, FixedOutputInfo > Raw; Raw raw; bool operator ==(const ContentAddressWithReferences &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. //auto operator <=>(const ContentAddressWithReferences &) const = default; MAKE_WRAPPER_CONSTRUCTOR(ContentAddressWithReferences); /** * Create a `ContentAddressWithReferences` from a mere * `ContentAddress`, by claiming no references. */ static ContentAddressWithReferences withoutRefs(const ContentAddress &) noexcept; /** * Create a `ContentAddressWithReferences` from 3 parts: * * @param method Way ingesting the file system data. * * @param hash Hash of ingested file system data. * * @param refs References to other store objects or oneself. * * @note note that all combinations are supported. This is a * *partial function* and exceptions will be thrown for invalid * combinations. */ static ContentAddressWithReferences fromParts( ContentAddressMethod method, Hash hash, StoreReferences refs); ContentAddressMethod getMethod() const; Hash getHash() const; }; }
8,399
C++
.h
266
26.454887
102
0.666419
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,140
parsed-derivations.hh
NixOS_nix/src/libstore/parsed-derivations.hh
#pragma once ///@file #include "derivations.hh" #include "store-api.hh" #include <nlohmann/json_fwd.hpp> namespace nix { class ParsedDerivation { StorePath drvPath; BasicDerivation & drv; std::unique_ptr<nlohmann::json> structuredAttrs; public: ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv); ~ParsedDerivation(); const nlohmann::json * getStructuredAttrs() const { return structuredAttrs.get(); } std::optional<std::string> getStringAttr(const std::string & name) const; bool getBoolAttr(const std::string & name, bool def = false) const; std::optional<Strings> getStringsAttr(const std::string & name) const; StringSet getRequiredSystemFeatures() const; bool canBuildLocally(Store & localStore) const; bool willBuildLocally(Store & localStore) const; bool substitutesAllowed() const; bool useUidRange() const; std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths); }; std::string writeStructuredAttrsShell(const nlohmann::json & json); }
1,102
C++
.h
30
32.566667
105
0.746439
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
11,141
derived-path.hh
NixOS_nix/src/libstore/derived-path.hh
#pragma once ///@file #include "path.hh" #include "outputs-spec.hh" #include "config.hh" #include "ref.hh" #include <variant> #include <nlohmann/json_fwd.hpp> namespace nix { struct StoreDirConfig; // TODO stop needing this, `toJSON` below should be pure class Store; /** * An opaque derived path. * * Opaque derived paths are just store paths, and fully evaluated. They * cannot be simplified further. Since they are opaque, they cannot be * built, but they can fetched. */ struct DerivedPathOpaque { StorePath path; std::string to_string(const StoreDirConfig & store) const; static DerivedPathOpaque parse(const StoreDirConfig & store, std::string_view); nlohmann::json toJSON(const StoreDirConfig & store) const; bool operator == (const DerivedPathOpaque &) const = default; auto operator <=> (const DerivedPathOpaque &) const = default; }; struct SingleDerivedPath; /** * A single derived path that is built from a derivation * * Built derived paths are pair of a derivation and an output name. They are * evaluated by building the derivation, and then taking the resulting output * path of the given output name. */ struct SingleDerivedPathBuilt { ref<SingleDerivedPath> drvPath; OutputName output; /** * Get the store path this is ultimately derived from (by realising * and projecting outputs). * * Note that this is *not* a property of the store object being * referred to, but just of this path --- how we happened to be * referring to that store object. In other words, this means this * function breaks "referential transparency". It should therefore * be used only with great care. */ const StorePath & getBaseStorePath() const; /** * Uses `^` as the separator */ std::string to_string(const StoreDirConfig & store) const; /** * Uses `!` as the separator */ std::string to_string_legacy(const StoreDirConfig & store) const; /** * The caller splits on the separator, so it works for both variants. * * @param xpSettings Stop-gap to avoid globals during unit tests. 
*/ static SingleDerivedPathBuilt parse( const StoreDirConfig & store, ref<SingleDerivedPath> drvPath, OutputNameView outputs, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); nlohmann::json toJSON(Store & store) const; bool operator == (const SingleDerivedPathBuilt &) const noexcept; std::strong_ordering operator <=> (const SingleDerivedPathBuilt &) const noexcept; }; using _SingleDerivedPathRaw = std::variant< DerivedPathOpaque, SingleDerivedPathBuilt >; /** * A "derived path" is a very simple sort of expression (not a Nix * language expression! But an expression in a the general sense) that * evaluates to (concrete) store path. It is either: * * - opaque, in which case it is just a concrete store path with * possibly no known derivation * * - built, in which case it is a pair of a derivation path and an * output name. */ struct SingleDerivedPath : _SingleDerivedPathRaw { using Raw = _SingleDerivedPathRaw; using Raw::Raw; using Opaque = DerivedPathOpaque; using Built = SingleDerivedPathBuilt; inline const Raw & raw() const { return static_cast<const Raw &>(*this); } bool operator == (const SingleDerivedPath &) const = default; auto operator <=> (const SingleDerivedPath &) const = default; /** * Get the store path this is ultimately derived from (by realising * and projecting outputs). * * Note that this is *not* a property of the store object being * referred to, but just of this path --- how we happened to be * referring to that store object. In other words, this means this * function breaks "referential transparency". It should therefore * be used only with great care. */ const StorePath & getBaseStorePath() const; /** * Uses `^` as the separator */ std::string to_string(const StoreDirConfig & store) const; /** * Uses `!` as the separator */ std::string to_string_legacy(const StoreDirConfig & store) const; /** * Uses `^` as the separator * * @param xpSettings Stop-gap to avoid globals during unit tests. 
*/ static SingleDerivedPath parse( const StoreDirConfig & store, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Uses `!` as the separator * * @param xpSettings Stop-gap to avoid globals during unit tests. */ static SingleDerivedPath parseLegacy( const StoreDirConfig & store, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); nlohmann::json toJSON(Store & store) const; }; static inline ref<SingleDerivedPath> makeConstantStorePathRef(StorePath drvPath) { return make_ref<SingleDerivedPath>(SingleDerivedPath::Opaque { drvPath }); } /** * A set of derived paths that are built from a derivation * * Built derived paths are pair of a derivation and some output names. * They are evaluated by building the derivation, and then replacing the * output names with the resulting outputs. * * Note that does mean a derived store paths evaluates to multiple * opaque paths, which is sort of icky as expressions are supposed to * evaluate to single values. Perhaps this should have just a single * output name. */ struct DerivedPathBuilt { ref<SingleDerivedPath> drvPath; OutputsSpec outputs; /** * Get the store path this is ultimately derived from (by realising * and projecting outputs). * * Note that this is *not* a property of the store object being * referred to, but just of this path --- how we happened to be * referring to that store object. In other words, this means this * function breaks "referential transparency". It should therefore * be used only with great care. */ const StorePath & getBaseStorePath() const; /** * Uses `^` as the separator */ std::string to_string(const StoreDirConfig & store) const; /** * Uses `!` as the separator */ std::string to_string_legacy(const StoreDirConfig & store) const; /** * The caller splits on the separator, so it works for both variants. * * @param xpSettings Stop-gap to avoid globals during unit tests. 
*/ static DerivedPathBuilt parse( const StoreDirConfig & store, ref<SingleDerivedPath>, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); nlohmann::json toJSON(Store & store) const; bool operator == (const DerivedPathBuilt &) const noexcept; // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet. bool operator < (const DerivedPathBuilt &) const noexcept; }; using _DerivedPathRaw = std::variant< DerivedPathOpaque, DerivedPathBuilt >; /** * A "derived path" is a very simple sort of expression that evaluates * to one or more (concrete) store paths. It is either: * * - opaque, in which case it is just a single concrete store path with * possibly no known derivation * * - built, in which case it is a pair of a derivation path and some * output names. */ struct DerivedPath : _DerivedPathRaw { using Raw = _DerivedPathRaw; using Raw::Raw; using Opaque = DerivedPathOpaque; using Built = DerivedPathBuilt; inline const Raw & raw() const { return static_cast<const Raw &>(*this); } bool operator == (const DerivedPath &) const = default; // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet. //auto operator <=> (const DerivedPath &) const = default; /** * Get the store path this is ultimately derived from (by realising * and projecting outputs). * * Note that this is *not* a property of the store object being * referred to, but just of this path --- how we happened to be * referring to that store object. In other words, this means this * function breaks "referential transparency". It should therefore * be used only with great care. */ const StorePath & getBaseStorePath() const; /** * Uses `^` as the separator */ std::string to_string(const StoreDirConfig & store) const; /** * Uses `!` as the separator */ std::string to_string_legacy(const StoreDirConfig & store) const; /** * Uses `^` as the separator * * @param xpSettings Stop-gap to avoid globals during unit tests. 
*/ static DerivedPath parse( const StoreDirConfig & store, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Uses `!` as the separator * * @param xpSettings Stop-gap to avoid globals during unit tests. */ static DerivedPath parseLegacy( const StoreDirConfig & store, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Convert a `SingleDerivedPath` to a `DerivedPath`. */ static DerivedPath fromSingle(const SingleDerivedPath &); nlohmann::json toJSON(Store & store) const; }; typedef std::vector<DerivedPath> DerivedPaths; /** * Used by various parser functions to require experimental features as * needed. * * Somewhat unfortunate this cannot just be an implementation detail for * this module. * * @param xpSettings Stop-gap to avoid globals during unit tests. */ void drvRequireExperiment( const SingleDerivedPath & drv, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }
9,872
C++
.h
268
32.25
86
0.702101
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
11,142
path-regex.hh
NixOS_nix/src/libstore/path-regex.hh
#pragma once ///@file namespace nix { static constexpr std::string_view nameRegexStr = // This uses a negative lookahead: (?!\.\.?(-|$)) // - deny ".", "..", or those strings followed by '-' // - when it's not those, start again at the start of the input and apply the next regex, which is [0-9a-zA-Z\+\-\._\?=]+ R"((?!\.\.?(-|$))[0-9a-zA-Z\+\-\._\?=]+)"; }
382
C++
.h
9
39.222222
127
0.555556
NixOS/nix
12,186
1,472
3,401
LGPL-2.1
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false