code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * * SPDX-License-Identifier: curl * ***************************************************************************/ #include "curl_setup.h" #ifdef USE_CURL_NTLM_CORE #include "curl_md4.h" #ifdef USE_OPENSSL #include <openssl/opensslv.h> #if OPENSSL_VERSION_NUMBER >= 0x30000000L && !defined(USE_AMISSL) /* OpenSSL 3.0.0 marks the MD4 functions as deprecated */ #define OPENSSL_NO_MD4 #else /* Cover also OPENSSL_NO_MD4 configured in openssl */ #include <openssl/opensslconf.h> #endif #endif /* USE_OPENSSL */ #ifdef USE_WOLFSSL #include <wolfssl/options.h> #define VOID_MD4_INIT #ifdef NO_MD4 #define WOLFSSL_NO_MD4 #endif #endif /* When OpenSSL or wolfSSL is available, we use their MD4 functions. 
*/ #if defined(USE_WOLFSSL) && !defined(WOLFSSL_NO_MD4) #include <wolfssl/openssl/md4.h> #elif defined(USE_OPENSSL) && !defined(OPENSSL_NO_MD4) #include <openssl/md4.h> #elif (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && \ (__MAC_OS_X_VERSION_MAX_ALLOWED >= 1040) && \ defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ (__MAC_OS_X_VERSION_MIN_REQUIRED < 101500)) || \ (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \ (__IPHONE_OS_VERSION_MAX_ALLOWED >= 20000) && \ defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \ (__IPHONE_OS_VERSION_MIN_REQUIRED < 130000)) #define AN_APPLE_OS #include <CommonCrypto/CommonDigest.h> #elif defined(USE_WIN32_CRYPTO) #include <wincrypt.h> #elif defined(USE_GNUTLS) #include <nettle/md4.h> #endif #if defined(USE_WOLFSSL) && !defined(WOLFSSL_NO_MD4) #ifdef OPENSSL_COEXIST # define MD4_CTX WOLFSSL_MD4_CTX # define MD4_Init wolfSSL_MD4_Init # define MD4_Update wolfSSL_MD4_Update # define MD4_Final wolfSSL_MD4_Final #endif #elif defined(USE_OPENSSL) && !defined(OPENSSL_NO_MD4) #elif defined(AN_APPLE_OS) typedef CC_MD4_CTX MD4_CTX; static int MD4_Init(MD4_CTX *ctx) { return CC_MD4_Init(ctx); } static void MD4_Update(MD4_CTX *ctx, const void *input, unsigned long len) { (void)CC_MD4_Update(ctx, input, (CC_LONG)len); } static void MD4_Final(unsigned char *digest, MD4_CTX *ctx) { (void)CC_MD4_Final(digest, ctx); } #elif defined(USE_WIN32_CRYPTO) struct md4_ctx { HCRYPTPROV hCryptProv; HCRYPTHASH hHash; }; typedef struct md4_ctx MD4_CTX; static int MD4_Init(MD4_CTX *ctx) { ctx->hCryptProv = 0; ctx->hHash = 0; if(!CryptAcquireContext(&ctx->hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) return 0; if(!CryptCreateHash(ctx->hCryptProv, CALG_MD4, 0, 0, &ctx->hHash)) { CryptReleaseContext(ctx->hCryptProv, 0); ctx->hCryptProv = 0; return 0; } return 1; } static void MD4_Update(MD4_CTX *ctx, const void *input, unsigned long len) { CryptHashData(ctx->hHash, (const BYTE *)input, (unsigned int)len, 0); } static void MD4_Final(unsigned char 
*digest, MD4_CTX *ctx) { unsigned long length = 0; CryptGetHashParam(ctx->hHash, HP_HASHVAL, NULL, &length, 0); if(length == MD4_DIGEST_LENGTH) CryptGetHashParam(ctx->hHash, HP_HASHVAL, digest, &length, 0); if(ctx->hHash) CryptDestroyHash(ctx->hHash); if(ctx->hCryptProv) CryptReleaseContext(ctx->hCryptProv, 0); } #elif defined(USE_GNUTLS) typedef struct md4_ctx MD4_CTX; static int MD4_Init(MD4_CTX *ctx) { md4_init(ctx); return 1; } static void MD4_Update(MD4_CTX *ctx, const void *input, unsigned long len) { md4_update(ctx, len, input); } static void MD4_Final(unsigned char *digest, MD4_CTX *ctx) { md4_digest(ctx, MD4_DIGEST_SIZE, digest); } #else /* When no other crypto library is available, or the crypto library does not * support MD4, we use this code segment this implementation of it * * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc. * MD4 Message-Digest Algorithm (RFC 1320). * * Homepage: * https://openwall.info/wiki/people/solar/software/public-domain-source-code/md4 * * Author: * Alexander Peslyak, better known as Solar Designer <solar at openwall.com> * * This software was written by Alexander Peslyak in 2001. No copyright is * claimed, and the software is hereby placed in the public domain. In case * this attempt to disclaim copyright and place the software in the public * domain is deemed null and void, then the software is Copyright (c) 2001 * Alexander Peslyak and it is hereby released to the general public under * the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There is ABSOLUTELY NO WARRANTY, express or implied. * * (This is a heavily cut-down "BSD license".) */ struct md4_ctx { uint32_t lo, hi; uint32_t a, b, c, d; unsigned char buffer[64]; uint32_t block[16]; }; typedef struct md4_ctx MD4_CTX; /* * The basic MD4 functions. 
* * F and G are optimized compared to their RFC 1320 definitions, with the * optimization for F borrowed from Colin Plumb's MD5 implementation. */ #define MD4_F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) #define MD4_G(x, y, z) (((x) & ((y) | (z))) | ((y) & (z))) #define MD4_H(x, y, z) ((x) ^ (y) ^ (z)) /* * The MD4 transformation for all three rounds. */ #define MD4_STEP(f, a, b, c, d, x, s) \ (a) += f((b), (c), (d)) + (x); \ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); /* * SET reads 4 input bytes in little-endian byte order and stores them * in a properly aligned word in host byte order. * * The check for little-endian architectures that tolerate unaligned * memory accesses is just an optimization. Nothing will break if it * does not work. */ #if defined(__i386__) || defined(__x86_64__) || defined(__vax__) #define MD4_SET(n) (*(const uint32_t *)(const void *)&ptr[(n) * 4]) #define MD4_GET(n) MD4_SET(n) #else #define MD4_SET(n) (ctx->block[(n)] = \ (uint32_t)ptr[(n) * 4] | \ ((uint32_t)ptr[(n) * 4 + 1] << 8) | \ ((uint32_t)ptr[(n) * 4 + 2] << 16) | \ ((uint32_t)ptr[(n) * 4 + 3] << 24)) #define MD4_GET(n) (ctx->block[(n)]) #endif /* * This processes one or more 64-byte data blocks, but does NOT update * the bit counters. There are no alignment requirements. 
*/ static const void *my_md4_body(MD4_CTX *ctx, const void *input, unsigned long size) { const unsigned char *ptr; uint32_t a, b, c, d; ptr = (const unsigned char *)input; a = ctx->a; b = ctx->b; c = ctx->c; d = ctx->d; do { uint32_t saved_a, saved_b, saved_c, saved_d; saved_a = a; saved_b = b; saved_c = c; saved_d = d; /* Round 1 */ MD4_STEP(MD4_F, a, b, c, d, MD4_SET(0), 3) MD4_STEP(MD4_F, d, a, b, c, MD4_SET(1), 7) MD4_STEP(MD4_F, c, d, a, b, MD4_SET(2), 11) MD4_STEP(MD4_F, b, c, d, a, MD4_SET(3), 19) MD4_STEP(MD4_F, a, b, c, d, MD4_SET(4), 3) MD4_STEP(MD4_F, d, a, b, c, MD4_SET(5), 7) MD4_STEP(MD4_F, c, d, a, b, MD4_SET(6), 11) MD4_STEP(MD4_F, b, c, d, a, MD4_SET(7), 19) MD4_STEP(MD4_F, a, b, c, d, MD4_SET(8), 3) MD4_STEP(MD4_F, d, a, b, c, MD4_SET(9), 7) MD4_STEP(MD4_F, c, d, a, b, MD4_SET(10), 11) MD4_STEP(MD4_F, b, c, d, a, MD4_SET(11), 19) MD4_STEP(MD4_F, a, b, c, d, MD4_SET(12), 3) MD4_STEP(MD4_F, d, a, b, c, MD4_SET(13), 7) MD4_STEP(MD4_F, c, d, a, b, MD4_SET(14), 11) MD4_STEP(MD4_F, b, c, d, a, MD4_SET(15), 19) /* Round 2 */ MD4_STEP(MD4_G, a, b, c, d, MD4_GET(0) + 0x5a827999, 3) MD4_STEP(MD4_G, d, a, b, c, MD4_GET(4) + 0x5a827999, 5) MD4_STEP(MD4_G, c, d, a, b, MD4_GET(8) + 0x5a827999, 9) MD4_STEP(MD4_G, b, c, d, a, MD4_GET(12) + 0x5a827999, 13) MD4_STEP(MD4_G, a, b, c, d, MD4_GET(1) + 0x5a827999, 3) MD4_STEP(MD4_G, d, a, b, c, MD4_GET(5) + 0x5a827999, 5) MD4_STEP(MD4_G, c, d, a, b, MD4_GET(9) + 0x5a827999, 9) MD4_STEP(MD4_G, b, c, d, a, MD4_GET(13) + 0x5a827999, 13) MD4_STEP(MD4_G, a, b, c, d, MD4_GET(2) + 0x5a827999, 3) MD4_STEP(MD4_G, d, a, b, c, MD4_GET(6) + 0x5a827999, 5) MD4_STEP(MD4_G, c, d, a, b, MD4_GET(10) + 0x5a827999, 9) MD4_STEP(MD4_G, b, c, d, a, MD4_GET(14) + 0x5a827999, 13) MD4_STEP(MD4_G, a, b, c, d, MD4_GET(3) + 0x5a827999, 3) MD4_STEP(MD4_G, d, a, b, c, MD4_GET(7) + 0x5a827999, 5) MD4_STEP(MD4_G, c, d, a, b, MD4_GET(11) + 0x5a827999, 9) MD4_STEP(MD4_G, b, c, d, a, MD4_GET(15) + 0x5a827999, 13) /* Round 3 */ MD4_STEP(MD4_H, a, b, c, d, 
MD4_GET(0) + 0x6ed9eba1, 3) MD4_STEP(MD4_H, d, a, b, c, MD4_GET(8) + 0x6ed9eba1, 9) MD4_STEP(MD4_H, c, d, a, b, MD4_GET(4) + 0x6ed9eba1, 11) MD4_STEP(MD4_H, b, c, d, a, MD4_GET(12) + 0x6ed9eba1, 15) MD4_STEP(MD4_H, a, b, c, d, MD4_GET(2) + 0x6ed9eba1, 3) MD4_STEP(MD4_H, d, a, b, c, MD4_GET(10) + 0x6ed9eba1, 9) MD4_STEP(MD4_H, c, d, a, b, MD4_GET(6) + 0x6ed9eba1, 11) MD4_STEP(MD4_H, b, c, d, a, MD4_GET(14) + 0x6ed9eba1, 15) MD4_STEP(MD4_H, a, b, c, d, MD4_GET(1) + 0x6ed9eba1, 3) MD4_STEP(MD4_H, d, a, b, c, MD4_GET(9) + 0x6ed9eba1, 9) MD4_STEP(MD4_H, c, d, a, b, MD4_GET(5) + 0x6ed9eba1, 11) MD4_STEP(MD4_H, b, c, d, a, MD4_GET(13) + 0x6ed9eba1, 15) MD4_STEP(MD4_H, a, b, c, d, MD4_GET(3) + 0x6ed9eba1, 3) MD4_STEP(MD4_H, d, a, b, c, MD4_GET(11) + 0x6ed9eba1, 9) MD4_STEP(MD4_H, c, d, a, b, MD4_GET(7) + 0x6ed9eba1, 11) MD4_STEP(MD4_H, b, c, d, a, MD4_GET(15) + 0x6ed9eba1, 15) a += saved_a; b += saved_b; c += saved_c; d += saved_d; ptr += 64; } while(size -= 64); ctx->a = a; ctx->b = b; ctx->c = c; ctx->d = d; return ptr; } static int MD4_Init(MD4_CTX *ctx) { ctx->a = 0x67452301; ctx->b = 0xefcdab89; ctx->c = 0x98badcfe; ctx->d = 0x10325476; ctx->lo = 0; ctx->hi = 0; return 1; } static void MD4_Update(MD4_CTX *ctx, const void *input, unsigned long len) { uint32_t saved_lo; unsigned long used; saved_lo = ctx->lo; ctx->lo = (saved_lo + len) & 0x1fffffff; if(ctx->lo < saved_lo) ctx->hi++; ctx->hi += (uint32_t)len >> 29; used = saved_lo & 0x3f; if(used) { unsigned long available = 64 - used; if(len < available) { memcpy(&ctx->buffer[used], input, len); return; } memcpy(&ctx->buffer[used], input, available); input = (const unsigned char *)input + available; len -= available; my_md4_body(ctx, ctx->buffer, 64); } if(len >= 64) { input = my_md4_body(ctx, input, len & ~(unsigned long)0x3f); len &= 0x3f; } memcpy(ctx->buffer, input, len); } static void MD4_Final(unsigned char *digest, MD4_CTX *ctx) { unsigned long used, available; used = ctx->lo & 0x3f; ctx->buffer[used++] = 0x80; 
available = 64 - used; if(available < 8) { memset(&ctx->buffer[used], 0, available); my_md4_body(ctx, ctx->buffer, 64); used = 0; available = 64; } memset(&ctx->buffer[used], 0, available - 8); ctx->lo <<= 3; ctx->buffer[56] = curlx_ultouc((ctx->lo) & 0xff); ctx->buffer[57] = curlx_ultouc((ctx->lo >> 8) & 0xff); ctx->buffer[58] = curlx_ultouc((ctx->lo >> 16) & 0xff); ctx->buffer[59] = curlx_ultouc((ctx->lo >> 24) & 0xff); ctx->buffer[60] = curlx_ultouc((ctx->hi) & 0xff); ctx->buffer[61] = curlx_ultouc((ctx->hi >> 8) & 0xff); ctx->buffer[62] = curlx_ultouc((ctx->hi >> 16) & 0xff); ctx->buffer[63] = curlx_ultouc(ctx->hi >> 24); my_md4_body(ctx, ctx->buffer, 64); digest[0] = curlx_ultouc((ctx->a) & 0xff); digest[1] = curlx_ultouc((ctx->a >> 8) & 0xff); digest[2] = curlx_ultouc((ctx->a >> 16) & 0xff); digest[3] = curlx_ultouc(ctx->a >> 24); digest[4] = curlx_ultouc((ctx->b) & 0xff); digest[5] = curlx_ultouc((ctx->b >> 8) & 0xff); digest[6] = curlx_ultouc((ctx->b >> 16) & 0xff); digest[7] = curlx_ultouc(ctx->b >> 24); digest[8] = curlx_ultouc((ctx->c) & 0xff); digest[9] = curlx_ultouc((ctx->c >> 8) & 0xff); digest[10] = curlx_ultouc((ctx->c >> 16) & 0xff); digest[11] = curlx_ultouc(ctx->c >> 24); digest[12] = curlx_ultouc((ctx->d) & 0xff); digest[13] = curlx_ultouc((ctx->d >> 8) & 0xff); digest[14] = curlx_ultouc((ctx->d >> 16) & 0xff); digest[15] = curlx_ultouc(ctx->d >> 24); memset(ctx, 0, sizeof(*ctx)); } #endif /* CRYPTO LIBS */ CURLcode Curl_md4it(unsigned char *output, const unsigned char *input, const size_t len) { MD4_CTX ctx; #ifdef VOID_MD4_INIT MD4_Init(&ctx); #else if(!MD4_Init(&ctx)) return CURLE_FAILED_INIT; #endif MD4_Update(&ctx, input, curlx_uztoui(len)); MD4_Final(output, &ctx); return CURLE_OK; } #endif /* USE_CURL_NTLM_CORE */
c
github
https://github.com/curl/curl
lib/md4.c
package main import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "strings" "testing" "time" "github.com/moby/moby/api/types/container" volumetypes "github.com/moby/moby/api/types/volume" "github.com/moby/moby/v2/daemon/volume" "github.com/moby/moby/v2/integration-cli/cli" "github.com/moby/moby/v2/integration-cli/daemon" "github.com/moby/moby/v2/internal/testutil" testdaemon "github.com/moby/moby/v2/internal/testutil/daemon" "github.com/moby/moby/v2/pkg/plugins" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) const volumePluginName = "test-external-volume-driver" type eventCounter struct { activations int creations int removals int mounts int unmounts int paths int lists int gets int caps int } type DockerExternalVolumeSuite struct { ds *DockerSuite d *daemon.Daemon *volumePlugin } func (s *DockerExternalVolumeSuite) SetUpTest(ctx context.Context, t *testing.T) { testRequires(t, testEnv.IsLocalDaemon) s.d = daemon.New(t, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) s.ec = &eventCounter{} } func (s *DockerExternalVolumeSuite) TearDownTest(ctx context.Context, t *testing.T) { if s.d != nil { s.d.Stop(t) s.ds.TearDownTest(ctx, t) } } func (s *DockerExternalVolumeSuite) SetUpSuite(ctx context.Context, t *testing.T) { s.volumePlugin = newVolumePlugin(t, volumePluginName) } type volumePlugin struct { ec *eventCounter *httptest.Server vols map[string]vol } type vol struct { Name string Mountpoint string Ninja bool // hack used to trigger a null volume return on `Get` Status map[string]any Options map[string]string } func (p *volumePlugin) Close() { p.Server.Close() } func newVolumePlugin(t *testing.T, name string) *volumePlugin { mux := http.NewServeMux() s := &volumePlugin{Server: httptest.NewServer(mux), ec: &eventCounter{}, vols: make(map[string]vol)} type pluginRequest struct { Name string Opts map[string]string ID string } type pluginResp struct { Mountpoint 
string `json:",omitempty"` Err string `json:",omitempty"` } read := func(b io.ReadCloser) (pluginRequest, error) { defer b.Close() var pr pluginRequest err := json.NewDecoder(b).Decode(&pr) return pr, err } send := func(w http.ResponseWriter, data any) { switch d := data.(type) { case error: http.Error(w, d.Error(), http.StatusInternalServerError) case string: w.Header().Set("Content-Type", plugins.VersionMimetype) _, _ = fmt.Fprintln(w, d) default: w.Header().Set("Content-Type", plugins.VersionMimetype) err := json.NewEncoder(w).Encode(&data) if err != nil { t.Logf("Error encoding plugin response: %v", err) } } } mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { s.ec.activations++ send(w, `{"Implements": ["VolumeDriver"]}`) }) mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { s.ec.creations++ pr, err := read(r.Body) if err != nil { send(w, err) return } _, isNinja := pr.Opts["ninja"] status := map[string]any{"Hello": "world"} s.vols[pr.Name] = vol{Name: pr.Name, Ninja: isNinja, Status: status, Options: pr.Opts} send(w, nil) }) mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { s.ec.lists++ vols := make([]vol, 0, len(s.vols)) for _, v := range s.vols { if v.Ninja { continue } vols = append(vols, v) } send(w, map[string][]vol{"Volumes": vols}) }) mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { s.ec.gets++ pr, err := read(r.Body) if err != nil { send(w, err) return } v, exists := s.vols[pr.Name] if !exists { send(w, `{"Err": "no such volume"}`) } if v.Ninja { send(w, map[string]vol{}) return } v.Mountpoint = hostVolumePath(pr.Name) send(w, map[string]vol{"Volume": v}) }) mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { s.ec.removals++ pr, err := read(r.Body) if err != nil { send(w, err) return } v, ok := s.vols[pr.Name] if !ok { send(w, nil) return } if err := os.RemoveAll(hostVolumePath(v.Name)); 
err != nil { send(w, &pluginResp{Err: err.Error()}) return } delete(s.vols, v.Name) send(w, nil) }) mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { s.ec.paths++ pr, err := read(r.Body) if err != nil { send(w, err) return } p := hostVolumePath(pr.Name) send(w, &pluginResp{Mountpoint: p}) }) mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { s.ec.mounts++ pr, err := read(r.Body) if err != nil { send(w, err) return } if v, exists := s.vols[pr.Name]; exists { // Use this to simulate a mount failure if _, exists := v.Options["invalidOption"]; exists { send(w, errors.New("invalid argument")) return } } p := hostVolumePath(pr.Name) if err := os.MkdirAll(p, 0o755); err != nil { send(w, &pluginResp{Err: err.Error()}) return } if err := os.WriteFile(filepath.Join(p, "test"), []byte(s.Server.URL), 0o644); err != nil { send(w, err) return } if err := os.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0o644); err != nil { send(w, err) return } send(w, &pluginResp{Mountpoint: p}) }) mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { s.ec.unmounts++ _, err := read(r.Body) if err != nil { send(w, err) return } send(w, nil) }) mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { s.ec.caps++ _, err := read(r.Body) if err != nil { send(w, err) return } send(w, `{"Capabilities": { "Scope": "global" }}`) }) err := os.MkdirAll("/etc/docker/plugins", 0o755) assert.NilError(t, err) err = os.WriteFile("/etc/docker/plugins/"+name+".spec", []byte(s.Server.URL), 0o644) assert.NilError(t, err) return s } func (s *DockerExternalVolumeSuite) TearDownSuite(ctx context.Context, t *testing.T) { s.volumePlugin.Close() err := os.RemoveAll("/etc/docker/plugins") assert.NilError(t, err) } func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *testing.T) { cli.DockerCmd(c, "volume", "create", "test") out, _, err := 
dockerCmdWithError("volume", "create", "test", "--driver", volumePluginName) assert.Assert(c, err != nil, "volume create exception name already in use with another driver") assert.Assert(c, is.Contains(out, "must be unique")) driver := cli.DockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test").Stdout() _, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(driver)) assert.NilError(c, err) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") assert.NilError(c, err, out) assert.Assert(c, is.Contains(out, s.Server.URL)) _, err = s.d.Cmd("volume", "rm", "external-volume-test") assert.NilError(c, err) p := hostVolumePath("external-volume-test") _, err = os.Lstat(p) assert.ErrorContains(c, err, "") assert.Assert(c, os.IsNotExist(err), "Expected volume path in host to not exist: %s, %v\n", p, err) assert.Equal(c, s.ec.activations, 1) assert.Equal(c, s.ec.creations, 1) assert.Equal(c, s.ec.removals, 1) assert.Equal(c, s.ec.mounts, 1) assert.Equal(c, s.ec.unmounts, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") assert.NilError(c, err, out) assert.Assert(c, is.Contains(out, s.Server.URL)) assert.Equal(c, s.ec.activations, 1) assert.Equal(c, s.ec.creations, 1) assert.Equal(c, s.ec.removals, 1) assert.Equal(c, s.ec.mounts, 1) assert.Equal(c, s.ec.unmounts, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *testing.T) { ctx 
:= testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") assert.NilError(c, err, out) out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") assert.NilError(c, err, out) out, err = s.d.Cmd("rm", "-fv", "vol-test1") assert.NilError(c, err, out) assert.Equal(c, s.ec.activations, 1) assert.Equal(c, s.ec.creations, 1) assert.Equal(c, s.ec.removals, 1) assert.Equal(c, s.ec.mounts, 2) assert.Equal(c, s.ec.unmounts, 2) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") assert.NilError(c, err, out) out, err = s.d.Cmd("rm", "-fv", "vol-test1") assert.NilError(c, err, out) assert.Equal(c, s.ec.activations, 1) assert.Equal(c, s.ec.creations, 1) assert.Equal(c, s.ec.removals, 1) assert.Equal(c, s.ec.mounts, 1) assert.Equal(c, s.ec.unmounts, 1) } func hostVolumePath(name string) string { return fmt.Sprintf("/var/lib/docker/volumes/%s", name) } // Make sure a request to use a down driver doesn't block other requests func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *testing.T) { specPath := "/etc/docker/plugins/down-driver.spec" err := os.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0o644) assert.NilError(c, err) defer os.RemoveAll(specPath) chCmd1 := make(chan struct{}) chCmd2 := make(chan error, 1) cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") cmd2 := exec.Command(dockerBinary, "volume", "create") assert.NilError(c, cmd1.Start()) defer cmd1.Process.Kill() time.Sleep(100 * time.Millisecond) // ensure API has been called assert.NilError(c, cmd2.Start()) go func() { cmd1.Wait() close(chCmd1) }() go func() { chCmd2 <- 
cmd2.Wait() }() select { case <-chCmd1: cmd2.Process.Kill() c.Fatalf("volume create with down driver finished unexpectedly") case err := <-chCmd2: assert.NilError(c, err) case <-time.After(5 * time.Second): cmd2.Process.Kill() c.Fatal("volume creates are blocked by previous create requests when previous driver is down") } } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) driverName := "test-external-volume-driver-retry" errchan := make(chan error, 1) started := make(chan struct{}) go func() { close(started) if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", driverName, "busybox:latest"); err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } close(errchan) }() <-started // wait for a retry to occur, then create spec to allow plugin to register time.Sleep(2 * time.Second) p := newVolumePlugin(c, driverName) defer p.Close() select { case err := <-errchan: assert.NilError(c, err) case <-time.After(8 * time.Second): c.Fatal("volume creates fail when plugin not immediately available") } _, err := s.d.Cmd("volume", "rm", "external-volume-test") assert.NilError(c, err) assert.Equal(c, p.ec.activations, 1) assert.Equal(c, p.ec.creations, 1) assert.Equal(c, p.ec.removals, 1) assert.Equal(c, p.ec.mounts, 1) assert.Equal(c, p.ec.unmounts, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *testing.T) { cli.DockerCmd(c, "volume", "create", "-d", volumePluginName, "foo") cli.DockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") var mounts []struct { Name string Driver string } out := inspectFieldJSON(c, "testing", "Mounts") assert.NilError(c, json.NewDecoder(strings.NewReader(out)).Decode(&mounts)) assert.Equal(c, len(mounts), 1, out) assert.Equal(c, mounts[0].Name, "foo") assert.Equal(c, mounts[0].Driver, 
volumePluginName) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *testing.T) { cli.DockerCmd(c, "volume", "create", "-d", volumePluginName, "abc3") out := cli.DockerCmd(c, "volume", "ls").Stdout() ls := strings.Split(strings.TrimSpace(out), "\n") assert.Equal(c, len(ls), 2, fmt.Sprintf("\n%s", out)) vol := strings.Fields(ls[len(ls)-1]) assert.Equal(c, len(vol), 2, fmt.Sprintf("%v", vol)) assert.Equal(c, vol[0], volumePluginName) assert.Equal(c, vol[1], "abc3") assert.Equal(c, s.ec.lists, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *testing.T) { out, _, err := dockerCmdWithError("volume", "inspect", "dummy") assert.ErrorContains(c, err, "", out) assert.Assert(c, is.Contains(out, "no such volume")) assert.Equal(c, s.ec.gets, 1) cli.DockerCmd(c, "volume", "create", "test", "-d", volumePluginName) out = cli.DockerCmd(c, "volume", "inspect", "test").Stdout() type vol struct { Status map[string]string } var st []vol assert.NilError(c, json.Unmarshal([]byte(out), &st)) assert.Equal(c, len(st), 1) assert.Equal(c, len(st[0].Status), 1, fmt.Sprintf("%v", st[0])) assert.Equal(c, st[0].Status["Hello"], "world", fmt.Sprintf("%v", st[0].Status)) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *testing.T) { cli.DockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1") s.d.Restart(c) cli.DockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true") var mounts []container.MountPoint inspectFieldAndUnmarshall(c, "test", "Mounts", &mounts) assert.Equal(c, len(mounts), 1) assert.Equal(c, mounts[0].Driver, volumePluginName) } // Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error. // Prior the daemon would panic in this scenario. 
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *testing.T) { s.d.Start(c) out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") assert.NilError(c, err, out) out, err = s.d.Cmd("volume", "inspect", "abc2") assert.ErrorContains(c, err, "", out) assert.Assert(c, is.Contains(out, "no such volume")) } // Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` // // TODO(@cpuguy83): This test is testing internal implementation. In all the cases here, there may not even be a path available because the volume is not even mounted. Consider removing this test. func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *testing.T) { s.d.Start(c) assert.Equal(c, s.ec.paths, 0) out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") assert.NilError(c, err, out) assert.Equal(c, s.ec.paths, 0) out, err = s.d.Cmd("volume", "ls") assert.NilError(c, err, out) assert.Equal(c, s.ec.paths, 0) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") assert.NilError(c, err, out) assert.Assert(c, strings.TrimSpace(out) != "") } // Check that VolumeDriver.Capabilities gets called, and only called once func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *testing.T) { s.d.Start(c) assert.Equal(c, s.ec.caps, 0) for i := range 3 { out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i)) assert.NilError(c, err, out) assert.Equal(c, s.ec.caps, 1) out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) assert.NilError(c, err) assert.Equal(c, strings.TrimSpace(out), volume.GlobalScope) } } 
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *testing.T) { ctx := testutil.GetContext(c) driverName := strings.ReplaceAll(strings.ToLower(c.Name()), "/", "_") p := newVolumePlugin(c, driverName) defer p.Close() s.d.StartWithBusybox(ctx, c) out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") assert.NilError(c, err, out) out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") assert.ErrorContains(c, err, "", out) assert.Assert(c, is.Contains(out, "must be unique")) // simulate out of band volume deletion on plugin level delete(p.vols, "test") // test re-create with same driver out, err = s.d.Cmd("volume", "create", "-d", driverName, "--opt", "foo=bar", "--name", "test") assert.NilError(c, err, out) out, err = s.d.Cmd("volume", "inspect", "test") assert.NilError(c, err, out) var vs []volumetypes.Volume err = json.Unmarshal([]byte(out), &vs) assert.NilError(c, err) assert.Equal(c, len(vs), 1) assert.Equal(c, vs[0].Driver, driverName) assert.Assert(c, vs[0].Options != nil) assert.Equal(c, vs[0].Options["foo"], "bar") assert.Equal(c, vs[0].Driver, driverName) // simulate out of band volume deletion on plugin level delete(p.vols, "test") // test create with different driver out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") assert.NilError(c, err, out) out, err = s.d.Cmd("volume", "inspect", "test") assert.NilError(c, err, out) vs = nil err = json.Unmarshal([]byte(out), &vs) assert.NilError(c, err) assert.Equal(c, len(vs), 1) assert.Equal(c, len(vs[0].Options), 0) assert.Equal(c, vs[0].Driver, "local") } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") assert.Equal(c, s.ec.unmounts, 0, out) out, _ = 
s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") assert.Equal(c, s.ec.unmounts, 0, out) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *testing.T) { ctx := testutil.GetContext(c) s.d.StartWithBusybox(ctx, c) s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test") out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", "/bin/sh", "-c", "touch /test && top") assert.Equal(c, s.ec.mounts, 1, out) out, _ = s.d.Cmd("cp", "test:/test", "/tmp/test") assert.Equal(c, s.ec.mounts, 2, out) assert.Equal(c, s.ec.unmounts, 1, out) out, _ = s.d.Cmd("kill", "test") assert.Equal(c, s.ec.unmounts, 2, out) }
go
github
https://github.com/moby/moby
integration-cli/docker_cli_external_volume_driver_test.go
#!/usr/bin/env python # -*- coding: UTF-8 -*- import xbmcplugin,xbmcgui,xbmc,xbmcaddon import os,sys,urllib def get_params(): param=[] paramstring=sys.argv[2] if len(paramstring)>=2: params=sys.argv[2] cleanedparams=params.replace('?','') if (params[len(params)-1]=='/'): params=params[0:len(params)-2] pairsofparams=cleanedparams.split('&') param={} for i in range(len(pairsofparams)): splitparams={} splitparams=pairsofparams[i].split('=') if (len(splitparams))==2: param[splitparams[0]]=splitparams[1] return param params=get_params() print(params) try: action=urllib.unquote_plus(params["action"]) except: pass print ("Action: "+action) if action == 'find': try: artist=urllib.unquote_plus(params["artist"]) album=urllib.unquote_plus(params["title"]) except: pass print('Find album with title %s from artist %s' %(album, artist)) liz=xbmcgui.ListItem('Demo album 1', thumbnailImage='DefaultAlbum.png', offscreen=True) liz.setProperty('relevance', '0.5') liz.setProperty('album.artist', artist) liz.setProperty('album.year', '2005') xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/album", listitem=liz, isFolder=True) liz=xbmcgui.ListItem('Demo album 2', thumbnailImage='DefaultVideo.png', offscreen=True) liz.setProperty('relevance', '0.3') liz.setProperty('album.artist', 'spiff') liz.setProperty('album.year', '2016') xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/album2", listitem=liz, isFolder=True) elif action == 'getdetails': try: url=urllib.unquote_plus(params["url"]) except: pass if url == '/path/to/album': liz=xbmcgui.ListItem('Demo album 1', offscreen=True) liz.setProperty('album.musicbrainzid', '123') liz.setProperty('album.artists', '2') liz.setProperty('album.artist1.name', 'Jan') liz.setProperty('album.artist1.musicbrainzid', '456') liz.setProperty('album.artist2.name', 'Banan') liz.setProperty('album.artist2.musicbrainzid', '789') liz.setProperty('album.artist_description', 'I hate this album.') liz.setProperty('album.genre', 
'rock / pop') liz.setProperty('album.styles', 'light / heavy') liz.setProperty('album.moods', 'angry / happy') liz.setProperty('album.themes', 'Morbid sexual things.. And urmumz.') liz.setProperty('album.compiliation', 'true') liz.setProperty('album.review', 'Somebody should die for making this') liz.setProperty('album.release_date', '2005-01-02') liz.setProperty('album.label', 'ArtistExploitation inc') liz.setProperty('album.type', 'what is this?') liz.setProperty('album.release_type', 'single') liz.setProperty('album.year', '2005') liz.setProperty('album.rating', '2.5') liz.setProperty('album.userrating', '4.5') liz.setProperty('album.votes', '100') liz.setProperty('album.thumbs', '2') liz.setProperty('album.thumb1.url', 'DefaultBackFanart.png') liz.setProperty('album.thumb1.aspect', '1.78') liz.setProperty('album.thumb2.url', '/home/akva/Pictures/hawaii-shirt.png') liz.setProperty('album.thumb2.aspect', '2.35') xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz) elif url == '/path/to/album2': liz=xbmcgui.ListItem('Demo album 2', offscreen=True) liz.setProperty('album.musicbrainzid', '123') liz.setProperty('album.artists', '2') liz.setProperty('album.artist1.name', 'Heise') liz.setProperty('album.artist1.musicbrainzid', '456') liz.setProperty('album.artist2.name', 'Kran') liz.setProperty('album.artist2.musicbrainzid', '789') liz.setProperty('album.artist_description', 'I love this album.') liz.setProperty('album.genre', 'classical / jazz') liz.setProperty('album.styles', 'yay / hurrah') liz.setProperty('album.moods', 'sad / excited') liz.setProperty('album.themes', 'Nice things.. 
And unicorns.') liz.setProperty('album.compiliation', 'false') liz.setProperty('album.review', 'Somebody should be rewarded for making this') liz.setProperty('album.release_date', '2015-01-02') liz.setProperty('album.label', 'Artists inc') liz.setProperty('album.type', 'what is that?') liz.setProperty('album.release_type', 'album') liz.setProperty('album.year', '2015') liz.setProperty('album.rating', '4.5') liz.setProperty('album.userrating', '3.5') liz.setProperty('album.votes', '200') liz.setProperty('album.thumbs', '2') liz.setProperty('album.thumb1.url', 'DefaultBackFanart.png') liz.setProperty('album.thumb1.aspect', '1.78') liz.setProperty('album.thumb2.url', '/home/akva/Pictures/hawaii-shirt.png') liz.setProperty('album.thumb2.aspect', '2.35') xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz) xbmcplugin.endOfDirectory(int(sys.argv[1]))
unknown
codeparrot/codeparrot-clean
from .resolver import resolver
from django.utils.importlib import import_module  # NOTE(review): appears unused here -- possibly kept for re-export; confirm


def __repr__(self):
    # Debug-friendly representation of a SQL WHERE Constraint:
    # <alias, column, field name, model name>.
    return '<%s, %s, %s, %s>' % (self.alias, self.col, self.field.name,
                                 self.field.model.__name__)

# Monkey-patch Constraint so WHERE-tree nodes print readably while debugging.
from django.db.models.sql.where import Constraint
Constraint.__repr__ = __repr__

# TODO: manipulate a copy of the query instead of the query itself. This has to
# be done because the query can be reused afterwards by the user so that a
# manipulated query can result in strange behavior for these cases!
# TODO: Add watching layer which gives suggestions for indexes via query
# inspection at runtime


class BaseCompiler(object):
    """Mixin shared by all compilers: rewrites filters via the resolver.

    NOTE(review): ``self.query`` is provided by the Django compiler class
    these mixins are combined with elsewhere -- confirm against the backend.
    """

    def convert_filters(self):
        # Rewrites the query's filters in place (see module-level TODO about
        # mutating the original query).
        resolver.convert_filters(self.query)


class SQLCompiler(BaseCompiler):
    """SELECT compiler mixin: converts filters before every execution path.

    NOTE(review): ``super()`` here resolves past BaseCompiler into Django's
    real SQLCompiler in the combined MRO -- confirm in the backend's
    compiler module.
    """

    def execute_sql(self, *args, **kwargs):
        self.convert_filters()
        return super(SQLCompiler, self).execute_sql(*args, **kwargs)

    def results_iter(self):
        self.convert_filters()
        return super(SQLCompiler, self).results_iter()

    def has_results(self):
        self.convert_filters()
        return super(SQLCompiler, self).has_results()


class SQLInsertCompiler(BaseCompiler):
    """INSERT compiler mixin: converts the insert query before delegating."""

    def execute_sql(self, return_id=False):
        resolver.convert_insert_query(self.query)
        return super(SQLInsertCompiler, self).execute_sql(return_id=return_id)


class SQLUpdateCompiler(BaseCompiler):
    # No filter conversion performed for updates (yet).
    pass


class SQLDeleteCompiler(BaseCompiler):
    # No filter conversion performed for deletes (yet).
    pass
unknown
codeparrot/codeparrot-clean
import collections
import warnings
from math import ceil

from django.utils import six
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _

try:
    # The ABC aliases in the ``collections`` namespace were removed in
    # Python 3.10; the canonical location is ``collections.abc`` (3.3+).
    from collections.abc import Sequence
except ImportError:  # Python 2
    from collections import Sequence


class UnorderedObjectListWarning(RuntimeWarning):
    """Pagination of an unordered object_list may yield inconsistent pages."""
    pass


class InvalidPage(Exception):
    """Base class for invalid-page errors."""
    pass


class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be coerced to an int."""
    pass


class EmptyPage(InvalidPage):
    """Raised when the requested page number is out of range."""
    pass


class Paginator(object):
    """Split ``object_list`` into pages of up to ``per_page`` items.

    Args:
        object_list: A sliceable sequence, or an object with a ``count()``
            method or ``__len__`` (e.g. a Django QuerySet or a list).
        per_page: Maximum number of items per page.
        orphans: If the final page would hold this many items or fewer,
            they are absorbed into the previous page instead.
        allow_empty_first_page: Whether page 1 is valid when there are no
            objects at all.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True):
        self.object_list = object_list
        self._check_object_list_is_ordered()
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page

    def validate_number(self, number):
        """Validate the given 1-based page number and return it as an int.

        Raises:
            PageNotAnInteger: If ``number`` cannot be coerced to an int.
            EmptyPage: If ``number`` is less than 1 or past the last page.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger(_('That page number is not an integer'))
        if number < 1:
            raise EmptyPage(_('That page number is less than 1'))
        if number > self.num_pages:
            if number == 1 and self.allow_empty_first_page:
                # An empty first page is explicitly permitted.
                pass
            else:
                raise EmptyPage(_('That page contains no results'))
        return number

    def page(self, number):
        """Return a :class:`Page` for the given 1-based page number."""
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        if top + self.orphans >= self.count:
            # Absorb trailing orphans into this (last) page.
            top = self.count
        return self._get_page(self.object_list[bottom:top], number, self)

    def _get_page(self, *args, **kwargs):
        """Return an instance of a single page.

        This hook can be used by subclasses to use an alternative to the
        standard :cls:`Page` object.
        """
        return Page(*args, **kwargs)

    @cached_property
    def count(self):
        """Return the total number of objects, across all pages."""
        try:
            return self.object_list.count()
        except (AttributeError, TypeError):
            # AttributeError if object_list has no count() method.
            # TypeError if object_list.count() requires arguments
            # (i.e. is of type list).
            return len(self.object_list)

    @cached_property
    def num_pages(self):
        """Return the total number of pages."""
        if self.count == 0 and not self.allow_empty_first_page:
            return 0
        # Orphans reduce the effective item count; never below one page.
        hits = max(1, self.count - self.orphans)
        return int(ceil(hits / float(self.per_page)))

    @property
    def page_range(self):
        """Return a 1-based range of page numbers, for template for-loops."""
        return six.moves.range(1, self.num_pages + 1)

    def _check_object_list_is_ordered(self):
        """Warn if self.object_list is unordered (typically a QuerySet)."""
        ordered = getattr(self.object_list, 'ordered', None)
        if ordered is not None and not ordered:
            obj_list_repr = (
                '{} {}'.format(self.object_list.model,
                               self.object_list.__class__.__name__)
                if hasattr(self.object_list, 'model')
                else '{!r}'.format(self.object_list)
            )
            warnings.warn(
                'Pagination may yield inconsistent results with an unordered '
                'object_list: {}.'.format(obj_list_repr),
                UnorderedObjectListWarning,
                stacklevel=3
            )


QuerySetPaginator = Paginator  # For backwards-compatibility.


class Page(Sequence):
    """One page of results produced by a :class:`Paginator`."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        if not isinstance(index, (slice,) + six.integer_types):
            raise TypeError(
                'Page indices must be integers or slices, not %s.'
                % type(index).__name__
            )
        # The object_list is converted to a list so that if it was a QuerySet
        # it won't be a database hit per __getitem__.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_next(self):
        """Return True if there is a page after this one."""
        return self.number < self.paginator.num_pages

    def has_previous(self):
        """Return True if there is a page before this one."""
        return self.number > 1

    def has_other_pages(self):
        """Return True if this is not the only page."""
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        """Return the validated number of the following page."""
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        """Return the validated number of the preceding page."""
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """Return the 1-based index of the first object on this page,
        relative to total objects in the paginator."""
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return (self.paginator.per_page * (self.number - 1)) + 1

    def end_index(self):
        """Return the 1-based index of the last object on this page,
        relative to total objects found (hits)."""
        # Special case for the last page because there can be orphans.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This module contains classes that help to emulate xcodebuild behavior on top of other build systems, such as make and ninja. """ import copy import gyp.common import os import os.path import re import shlex import subprocess import sys import tempfile from gyp.common import GypError class XcodeSettings(object): """A class that understands the gyp 'xcode_settings' object.""" # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached # at class-level for efficiency. _sdk_path_cache = {} _sdk_root_cache = {} # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so # cached at class-level for efficiency. _plist_cache = {} # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so # cached at class-level for efficiency. _codesigning_key_cache = {} # Populated lazily by _XcodeVersion. Shared by all XcodeSettings, so cached # at class-level for efficiency. _xcode_version_cache = () def __init__(self, spec): self.spec = spec self.isIOS = False # Per-target 'xcode_settings' are pushed down into configs earlier by gyp. # This means self.xcode_settings[config] always contains all settings # for that config -- the per-target settings as well. Settings that are # the same for all configs are implicitly per-target settings. self.xcode_settings = {} configs = spec['configurations'] for configname, config in configs.iteritems(): self.xcode_settings[configname] = config.get('xcode_settings', {}) self._ConvertConditionalKeys(configname) if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET', None): self.isIOS = True # This is only non-None temporarily during the execution of some methods. self.configname = None # Used by _AdjustLibrary to match .a and .dylib entries in libraries. 
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$') def _ConvertConditionalKeys(self, configname): """Converts or warns on conditional keys. Xcode supports conditional keys, such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation with some keys converted while the rest force a warning.""" settings = self.xcode_settings[configname] conditional_keys = [key for key in settings if key.endswith(']')] for key in conditional_keys: # If you need more, speak up at http://crbug.com/122592 if key.endswith("[sdk=iphoneos*]"): if configname.endswith("iphoneos"): new_key = key.split("[")[0] settings[new_key] = settings[key] else: print 'Warning: Conditional keys not implemented, ignoring:', \ ' '.join(conditional_keys) del settings[key] def _Settings(self): assert self.configname return self.xcode_settings[self.configname] def _Test(self, test_key, cond_key, default): return self._Settings().get(test_key, default) == cond_key def _Appendf(self, lst, test_key, format_str, default=None): if test_key in self._Settings(): lst.append(format_str % str(self._Settings()[test_key])) elif default: lst.append(format_str % str(default)) def _WarnUnimplemented(self, test_key): if test_key in self._Settings(): print 'Warning: Ignoring not yet implemented key "%s".' % test_key def _IsBundle(self): return int(self.spec.get('mac_bundle', 0)) != 0 def GetFrameworkVersion(self): """Returns the framework version of the current target. Only valid for bundles.""" assert self._IsBundle() return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A') def GetWrapperExtension(self): """Returns the bundle extension (.app, .framework, .plugin, etc). 
Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('loadable_module', 'shared_library'): default_wrapper_extension = { 'loadable_module': 'bundle', 'shared_library': 'framework', }[self.spec['type']] wrapper_extension = self.GetPerTargetSetting( 'WRAPPER_EXTENSION', default=default_wrapper_extension) return '.' + self.spec.get('product_extension', wrapper_extension) elif self.spec['type'] == 'executable': return '.' + self.spec.get('product_extension', 'app') else: assert False, "Don't know extension for '%s', target '%s'" % ( self.spec['type'], self.spec['target_name']) def GetProductName(self): """Returns PRODUCT_NAME.""" return self.spec.get('product_name', self.spec['target_name']) def GetFullProductName(self): """Returns FULL_PRODUCT_NAME.""" if self._IsBundle(): return self.GetWrapperName() else: return self._GetStandaloneBinaryPath() def GetWrapperName(self): """Returns the directory name of the bundle represented by this target. Only valid for bundles.""" assert self._IsBundle() return self.GetProductName() + self.GetWrapperExtension() def GetBundleContentsFolderPath(self): """Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.""" if self.isIOS: return self.GetWrapperName() assert self._IsBundle() if self.spec['type'] == 'shared_library': return os.path.join( self.GetWrapperName(), 'Versions', self.GetFrameworkVersion()) else: # loadable_modules have a 'Contents' folder like executables. return os.path.join(self.GetWrapperName(), 'Contents') def GetBundleResourceFolder(self): """Returns the qualified path to the bundle's resource folder. E.g. Chromium.app/Contents/Resources. Only valid for bundles.""" assert self._IsBundle() if self.isIOS: return self.GetBundleContentsFolderPath() return os.path.join(self.GetBundleContentsFolderPath(), 'Resources') def GetBundlePlistPath(self): """Returns the qualified path to the bundle's plist file. E.g. 
Chromium.app/Contents/Info.plist. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('executable', 'loadable_module'): return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist') else: return os.path.join(self.GetBundleContentsFolderPath(), 'Resources', 'Info.plist') def GetProductType(self): """Returns the PRODUCT_TYPE of this target.""" if self._IsBundle(): return { 'executable': 'com.apple.product-type.application', 'loadable_module': 'com.apple.product-type.bundle', 'shared_library': 'com.apple.product-type.framework', }[self.spec['type']] else: return { 'executable': 'com.apple.product-type.tool', 'loadable_module': 'com.apple.product-type.library.dynamic', 'shared_library': 'com.apple.product-type.library.dynamic', 'static_library': 'com.apple.product-type.library.static', }[self.spec['type']] def GetMachOType(self): """Returns the MACH_O_TYPE of this target.""" # Weird, but matches Xcode. if not self._IsBundle() and self.spec['type'] == 'executable': return '' return { 'executable': 'mh_execute', 'static_library': 'staticlib', 'shared_library': 'mh_dylib', 'loadable_module': 'mh_bundle', }[self.spec['type']] def _GetBundleBinaryPath(self): """Returns the name of the bundle binary of by this target. E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('shared_library') or self.isIOS: path = self.GetBundleContentsFolderPath() elif self.spec['type'] in ('executable', 'loadable_module'): path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS') return os.path.join(path, self.GetExecutableName()) def _GetStandaloneExecutableSuffix(self): if 'product_extension' in self.spec: return '.' 
+ self.spec['product_extension'] return { 'executable': '', 'static_library': '.a', 'shared_library': '.dylib', 'loadable_module': '.so', }[self.spec['type']] def _GetStandaloneExecutablePrefix(self): return self.spec.get('product_prefix', { 'executable': '', 'static_library': 'lib', 'shared_library': 'lib', # Non-bundled loadable_modules are called foo.so for some reason # (that is, .so and no prefix) with the xcode build -- match that. 'loadable_module': '', }[self.spec['type']]) def _GetStandaloneBinaryPath(self): """Returns the name of the non-bundle binary represented by this target. E.g. hello_world. Only valid for non-bundles.""" assert not self._IsBundle() assert self.spec['type'] in ( 'executable', 'shared_library', 'static_library', 'loadable_module'), ( 'Unexpected type %s' % self.spec['type']) target = self.spec['target_name'] if self.spec['type'] == 'static_library': if target[:3] == 'lib': target = target[3:] elif self.spec['type'] in ('loadable_module', 'shared_library'): if target[:3] == 'lib': target = target[3:] target_prefix = self._GetStandaloneExecutablePrefix() target = self.spec.get('product_name', target) target_ext = self._GetStandaloneExecutableSuffix() return target_prefix + target + target_ext def GetExecutableName(self): """Returns the executable name of the bundle represented by this target. E.g. Chromium.""" if self._IsBundle(): return self.spec.get('product_name', self.spec['target_name']) else: return self._GetStandaloneBinaryPath() def GetExecutablePath(self): """Returns the directory name of the bundle represented by this target. E.g. Chromium.app/Contents/MacOS/Chromium.""" if self._IsBundle(): return self._GetBundleBinaryPath() else: return self._GetStandaloneBinaryPath() def GetActiveArchs(self, configname): """Returns the architectures this target should be built for.""" # TODO: Look at VALID_ARCHS, ONLY_ACTIVE_ARCH; possibly set # CURRENT_ARCH / NATIVE_ARCH env vars? 
return self.xcode_settings[configname].get('ARCHS', [self._DefaultArch()]) def _GetStdout(self, cmdlist): job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE) out = job.communicate()[0] if job.returncode != 0: sys.stderr.write(out + '\n') raise GypError('Error %d running %s' % (job.returncode, cmdlist[0])) return out.rstrip('\n') def _GetSdkVersionInfoItem(self, sdk, infoitem): # xcodebuild requires Xcode and can't run on Command Line Tools-only # systems from 10.7 onward. # Since the CLT has no SDK paths anyway, returning None is the # most sensible route and should still do the right thing. try: return self._GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem]) except: pass def _SdkRoot(self, configname): if configname is None: configname = self.configname return self.GetPerConfigSetting('SDKROOT', configname, default='') def _SdkPath(self, configname=None): sdk_root = self._SdkRoot(configname) if sdk_root.startswith('/'): return sdk_root return self._XcodeSdkPath(sdk_root) def _XcodeSdkPath(self, sdk_root): if sdk_root not in XcodeSettings._sdk_path_cache: sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path') XcodeSettings._sdk_path_cache[sdk_root] = sdk_path if sdk_root: XcodeSettings._sdk_root_cache[sdk_path] = sdk_root return XcodeSettings._sdk_path_cache[sdk_root] def _AppendPlatformVersionMinFlags(self, lst): self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s') if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings(): # TODO: Implement this better? 
sdk_path_basename = os.path.basename(self._SdkPath()) if sdk_path_basename.lower().startswith('iphonesimulator'): self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET', '-mios-simulator-version-min=%s') else: self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET', '-miphoneos-version-min=%s') def GetCflags(self, configname, arch=None): """Returns flags that need to be added to .c, .cc, .m, and .mm compilations.""" # This functions (and the similar ones below) do not offer complete # emulation of all xcode_settings keys. They're implemented on demand. self.configname = configname cflags = [] sdk_root = self._SdkPath() if 'SDKROOT' in self._Settings() and sdk_root: cflags.append('-isysroot %s' % sdk_root) if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'): cflags.append('-Wconstant-conversion') if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'): cflags.append('-funsigned-char') if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'): cflags.append('-fasm-blocks') if 'GCC_DYNAMIC_NO_PIC' in self._Settings(): if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES': cflags.append('-mdynamic-no-pic') else: pass # TODO: In this case, it depends on the target. 
xcode passes # mdynamic-no-pic by default for executable and possibly static lib # according to mento if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'): cflags.append('-mpascal-strings') self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s') if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'): dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf') if dbg_format == 'dwarf': cflags.append('-gdwarf-2') elif dbg_format == 'stabs': raise NotImplementedError('stabs debug format is not supported yet.') elif dbg_format == 'dwarf-with-dsym': cflags.append('-gdwarf-2') else: raise NotImplementedError('Unknown debug format %s' % dbg_format) if self._Settings().get('GCC_STRICT_ALIASING') == 'YES': cflags.append('-fstrict-aliasing') elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO': cflags.append('-fno-strict-aliasing') if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'): cflags.append('-fvisibility=hidden') if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'): cflags.append('-Werror') if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'): cflags.append('-Wnewline-eof') self._AppendPlatformVersionMinFlags(cflags) # TODO: if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'): self._WarnUnimplemented('COPY_PHASE_STRIP') self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS') self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS') # TODO: This is exported correctly, but assigning to it is not supported. self._WarnUnimplemented('MACH_O_TYPE') self._WarnUnimplemented('PRODUCT_TYPE') if arch is not None: archs = [arch] else: archs = self._Settings().get('ARCHS', [self._DefaultArch()]) if len(archs) != 1: # TODO: Supporting fat binaries will be annoying. 
self._WarnUnimplemented('ARCHS') archs = ['i386'] cflags.append('-arch ' + archs[0]) if archs[0] in ('i386', 'x86_64'): if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse3') if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES', default='NO'): cflags.append('-mssse3') # Note 3rd 's'. if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse4.1') if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse4.2') cflags += self._Settings().get('WARNING_CFLAGS', []) if sdk_root: framework_root = sdk_root else: framework_root = '' config = self.spec['configurations'][self.configname] framework_dirs = config.get('mac_framework_dirs', []) for directory in framework_dirs: cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root)) self.configname = None return cflags def GetCflagsC(self, configname): """Returns flags that need to be added to .c, and .m compilations.""" self.configname = configname cflags_c = [] if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi': cflags_c.append('-ansi') else: self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s') cflags_c += self._Settings().get('OTHER_CFLAGS', []) self.configname = None return cflags_c def GetCflagsCC(self, configname): """Returns flags that need to be added to .cc, and .mm compilations.""" self.configname = configname cflags_cc = [] clang_cxx_language_standard = self._Settings().get( 'CLANG_CXX_LANGUAGE_STANDARD') # Note: Don't make c++0x to c++11 so that c++0x can be used with older # clangs that don't understand c++11 yet (like Xcode 4.2's). 
if clang_cxx_language_standard: cflags_cc.append('-std=%s' % clang_cxx_language_standard) self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s') if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'): cflags_cc.append('-fno-rtti') if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'): cflags_cc.append('-fno-exceptions') if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'): cflags_cc.append('-fvisibility-inlines-hidden') if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'): cflags_cc.append('-fno-threadsafe-statics') # Note: This flag is a no-op for clang, it only has an effect for gcc. if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'): cflags_cc.append('-Wno-invalid-offsetof') other_ccflags = [] for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']): # TODO: More general variable expansion. Missing in many other places too. if flag in ('$inherited', '$(inherited)', '${inherited}'): flag = '$OTHER_CFLAGS' if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'): other_ccflags += self._Settings().get('OTHER_CFLAGS', []) else: other_ccflags.append(flag) cflags_cc += other_ccflags self.configname = None return cflags_cc def _AddObjectiveCGarbageCollectionFlags(self, flags): gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported') if gc_policy == 'supported': flags.append('-fobjc-gc') elif gc_policy == 'required': flags.append('-fobjc-gc-only') def _AddObjectiveCARCFlags(self, flags): if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'): flags.append('-fobjc-arc') def _AddObjectiveCMissingPropertySynthesisFlags(self, flags): if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS', 'YES', default='NO'): flags.append('-Wobjc-missing-property-synthesis') def GetCflagsObjC(self, configname): """Returns flags that need to be added to .m compilations.""" self.configname = configname cflags_objc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objc) 
self._AddObjectiveCARCFlags(cflags_objc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc) self.configname = None return cflags_objc def GetCflagsObjCC(self, configname): """Returns flags that need to be added to .mm compilations.""" self.configname = configname cflags_objcc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objcc) self._AddObjectiveCARCFlags(cflags_objcc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc) if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'): cflags_objcc.append('-fobjc-call-cxx-cdtors') self.configname = None return cflags_objcc def GetInstallNameBase(self): """Return DYLIB_INSTALL_NAME_BASE for this target.""" # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. if (self.spec['type'] != 'shared_library' and (self.spec['type'] != 'loadable_module' or self._IsBundle())): return None install_base = self.GetPerTargetSetting( 'DYLIB_INSTALL_NAME_BASE', default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib') return install_base def _StandardizePath(self, path): """Do :standardizepath processing for path.""" # I'm not quite sure what :standardizepath does. Just call normpath(), # but don't let @executable_path/../foo collapse to foo. if '/' in path: prefix, rest = '', path if path.startswith('@'): prefix, rest = path.split('/', 1) rest = os.path.normpath(rest) # :standardizepath path = os.path.join(prefix, rest) return path def GetInstallName(self): """Return LD_DYLIB_INSTALL_NAME for this target.""" # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. 
if (self.spec['type'] != 'shared_library' and (self.spec['type'] != 'loadable_module' or self._IsBundle())): return None default_install_name = \ '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)' install_name = self.GetPerTargetSetting( 'LD_DYLIB_INSTALL_NAME', default=default_install_name) # Hardcode support for the variables used in chromium for now, to # unblock people using the make build. if '$' in install_name: assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/' '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), ( 'Variables in LD_DYLIB_INSTALL_NAME are not generally supported ' 'yet in target \'%s\' (got \'%s\')' % (self.spec['target_name'], install_name)) install_name = install_name.replace( '$(DYLIB_INSTALL_NAME_BASE:standardizepath)', self._StandardizePath(self.GetInstallNameBase())) if self._IsBundle(): # These are only valid for bundles, hence the |if|. install_name = install_name.replace( '$(WRAPPER_NAME)', self.GetWrapperName()) install_name = install_name.replace( '$(PRODUCT_NAME)', self.GetProductName()) else: assert '$(WRAPPER_NAME)' not in install_name assert '$(PRODUCT_NAME)' not in install_name install_name = install_name.replace( '$(EXECUTABLE_PATH)', self.GetExecutablePath()) return install_name def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path): """Checks if ldflag contains a filename and if so remaps it from gyp-directory-relative to build-directory-relative.""" # This list is expanded on demand. # They get matched as: # -exported_symbols_list file # -Wl,exported_symbols_list file # -Wl,exported_symbols_list,file LINKER_FILE = '(\S+)' WORD = '\S+' linker_flags = [ ['-exported_symbols_list', LINKER_FILE], # Needed for NaCl. ['-unexported_symbols_list', LINKER_FILE], ['-reexported_symbols_list', LINKER_FILE], ['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting. ] for flag_pattern in linker_flags: regex = re.compile('(?:-Wl,)?' 
+ '[ ,]'.join(flag_pattern)) m = regex.match(ldflag) if m: ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \ ldflag[m.end(1):] # Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS, # TODO(thakis): Update ffmpeg.gyp): if ldflag.startswith('-L'): ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):]) return ldflag def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None): """Returns flags that need to be passed to the linker. Args: configname: The name of the configuration to get ld flags for. product_dir: The directory where products such static and dynamic libraries are placed. This is added to the library search path. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build direcotry. """ self.configname = configname ldflags = [] # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS # can contain entries that depend on this. Explicitly absolutify these. for ldflag in self._Settings().get('OTHER_LDFLAGS', []): ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path)) if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'): ldflags.append('-Wl,-dead_strip') if self._Test('PREBINDING', 'YES', default='NO'): ldflags.append('-Wl,-prebind') self._Appendf( ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s') self._Appendf( ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s') self._AppendPlatformVersionMinFlags(ldflags) if 'SDKROOT' in self._Settings() and self._SdkPath(): ldflags.append('-isysroot ' + self._SdkPath()) for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []): ldflags.append('-L' + gyp_to_build_path(library_path)) if 'ORDER_FILE' in self._Settings(): ldflags.append('-Wl,-order_file ' + '-Wl,' + gyp_to_build_path( self._Settings()['ORDER_FILE'])) if arch is not None: archs = [arch] else: archs = self._Settings().get('ARCHS', [self._DefaultArch()]) if len(archs) != 1: # TODO: Supporting 
fat binaries will be annoying. self._WarnUnimplemented('ARCHS') archs = ['i386'] ldflags.append('-arch ' + archs[0]) # Xcode adds the product directory by default. ldflags.append('-L' + product_dir) install_name = self.GetInstallName() if install_name and self.spec['type'] != 'loadable_module': ldflags.append('-install_name ' + install_name.replace(' ', r'\ ')) for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []): ldflags.append('-Wl,-rpath,' + rpath) sdk_root = self._SdkPath() if not sdk_root: sdk_root = '' config = self.spec['configurations'][self.configname] framework_dirs = config.get('mac_framework_dirs', []) for directory in framework_dirs: ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root)) self.configname = None return ldflags def GetLibtoolflags(self, configname): """Returns flags that need to be passed to the static linker. Args: configname: The name of the configuration to get ld flags for. """ self.configname = configname libtoolflags = [] for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []): libtoolflags.append(libtoolflag) # TODO(thakis): ARCHS? self.configname = None return libtoolflags def GetPerTargetSettings(self): """Gets a list of all the per-target settings. This will only fetch keys whose values are the same across all configurations.""" first_pass = True result = {} for configname in sorted(self.xcode_settings.keys()): if first_pass: result = dict(self.xcode_settings[configname]) first_pass = False else: for key, value in self.xcode_settings[configname].iteritems(): if key not in result: continue elif result[key] != value: del result[key] return result def GetPerConfigSetting(self, setting, configname, default=None): if configname in self.xcode_settings: return self.xcode_settings[configname].get(setting, default) else: return self.GetPerTargetSetting(setting, default) def GetPerTargetSetting(self, setting, default=None): """Tries to get xcode_settings.setting from spec. 
Assumes that the setting has the same value in all configurations and throws otherwise.""" is_first_pass = True result = None for configname in sorted(self.xcode_settings.keys()): if is_first_pass: result = self.xcode_settings[configname].get(setting, None) is_first_pass = False else: assert result == self.xcode_settings[configname].get(setting, None), ( "Expected per-target setting for '%s', got per-config setting " "(target %s)" % (setting, self.spec['target_name'])) if result is None: return default return result def _GetStripPostbuilds(self, configname, output_binary, quiet): """Returns a list of shell commands that contain the shell commands neccessary to strip this target's binary. These should be run as postbuilds before the actual postbuilds run.""" self.configname = configname result = [] if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')): default_strip_style = 'debugging' if self.spec['type'] == 'loadable_module' and self._IsBundle(): default_strip_style = 'non-global' elif self.spec['type'] == 'executable': default_strip_style = 'all' strip_style = self._Settings().get('STRIP_STYLE', default_strip_style) strip_flags = { 'all': '', 'non-global': '-x', 'debugging': '-S', }[strip_style] explicit_strip_flags = self._Settings().get('STRIPFLAGS', '') if explicit_strip_flags: strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags) if not quiet: result.append('echo STRIP\\(%s\\)' % self.spec['target_name']) result.append('strip %s %s' % (strip_flags, output_binary)) self.configname = None return result def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet): """Returns a list of shell commands that contain the shell commands neccessary to massage this target's debug information. These should be run as postbuilds before the actual postbuilds run.""" self.configname = configname # For static libraries, no dSYMs are created. 
result = [] if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and self._Test( 'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and self.spec['type'] != 'static_library'): if not quiet: result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name']) result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM')) self.configname = None return result def _GetTargetPostbuilds(self, configname, output, output_binary, quiet=False): """Returns a list of shell commands that contain the shell commands to run as postbuilds for this target, before the actual postbuilds.""" # dSYMs need to build before stripping happens. return ( self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) + self._GetStripPostbuilds(configname, output_binary, quiet)) def _GetIOSPostbuilds(self, configname, output_binary): """Return a shell command to codesign the iOS output binary so it can be deployed to a device. This should be run as the very last step of the build.""" if not (self.isIOS and self.spec['type'] == "executable"): return [] settings = self.xcode_settings[configname] key = self._GetIOSCodeSignIdentityKey(settings) if not key: return [] # Warn for any unimplemented signing xcode keys. 
unimpl = ['OTHER_CODE_SIGN_FLAGS'] unimpl = set(unimpl) & set(self.xcode_settings[configname].keys()) if unimpl: print 'Warning: Some codesign keys not implemented, ignoring: %s' % ( ', '.join(sorted(unimpl))) return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % ( os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key, settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''), settings.get('CODE_SIGN_ENTITLEMENTS', ''), settings.get('PROVISIONING_PROFILE', '')) ] def _GetIOSCodeSignIdentityKey(self, settings): identity = settings.get('CODE_SIGN_IDENTITY') if not identity: return None if identity not in XcodeSettings._codesigning_key_cache: output = subprocess.check_output( ['security', 'find-identity', '-p', 'codesigning', '-v']) for line in output.splitlines(): if identity in line: fingerprint = line.split()[1] cache = XcodeSettings._codesigning_key_cache assert identity not in cache or fingerprint == cache[identity], ( "Multiple codesigning fingerprints for identity: %s" % identity) XcodeSettings._codesigning_key_cache[identity] = fingerprint return XcodeSettings._codesigning_key_cache.get(identity, '') def AddImplicitPostbuilds(self, configname, output, output_binary, postbuilds=[], quiet=False): """Returns a list of shell commands that should run before and after |postbuilds|.""" assert output_binary is not None pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet) post = self._GetIOSPostbuilds(configname, output_binary) return pre + postbuilds + post def _AdjustLibrary(self, library, config_name=None): if library.endswith('.framework'): l = '-framework ' + os.path.splitext(os.path.basename(library))[0] else: m = self.library_re.match(library) if m: l = '-l' + m.group(1) else: l = library sdk_root = self._SdkPath(config_name) if not sdk_root: sdk_root = '' return l.replace('$(SDKROOT)', sdk_root) def AdjustLibraries(self, libraries, config_name=None): """Transforms entries like 'Cocoa.framework' in libraries into entries like '-framework Cocoa', 
'libcrypto.dylib' into '-lcrypto', etc. """ libraries = [self._AdjustLibrary(library, config_name) for library in libraries] return libraries def _BuildMachineOSBuild(self): return self._GetStdout(['sw_vers', '-buildVersion']) # This method ported from the logic in Homebrew's CLT version check def _CLTVersion(self): # pkgutil output looks like # package-id: com.apple.pkg.CLTools_Executables # version: 5.0.1.0.1.1382131676 # volume: / # location: / # install-time: 1382544035 # groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo" FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI" MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables" regex = re.compile('version: (?P<version>.+)') for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]: try: output = self._GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key]) return re.search(regex, output).groupdict()['version'] except: continue def _XcodeVersion(self): # `xcodebuild -version` output looks like # Xcode 4.6.3 # Build version 4H1503 # or like # Xcode 3.2.6 # Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0 # BuildVersion: 10M2518 # Convert that to '0463', '4H1503'. if len(XcodeSettings._xcode_version_cache) == 0: try: version_list = self._GetStdout(['xcodebuild', '-version']).splitlines() # In some circumstances xcodebuild exits 0 but doesn't return # the right results; for example, a user on 10.7 or 10.8 with # a bogus path set via xcode-select # In that case this may be a CLT-only install so fall back to # checking that version. if len(version_list) < 2: raise GypError, "xcodebuild returned unexpected results" except: version = self._CLTVersion() if version: version = re.match('(\d\.\d\.?\d*)', version).groups()[0] else: raise GypError, "No Xcode or CLT version detected!" # The CLT has no build information, so we return an empty string. 
version_list = [version, ''] version = version_list[0] build = version_list[-1] # Be careful to convert "4.2" to "0420": version = version.split()[-1].replace('.', '') version = (version + '0' * (3 - len(version))).zfill(4) if build: build = build.split()[-1] XcodeSettings._xcode_version_cache = (version, build) return XcodeSettings._xcode_version_cache def _XcodeIOSDeviceFamily(self, configname): family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1') return [int(x) for x in family.split(',')] def GetExtraPlistItems(self, configname=None): """Returns a dictionary with extra items to insert into Info.plist.""" if configname not in XcodeSettings._plist_cache: cache = {} cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild() xcode, xcode_build = self._XcodeVersion() cache['DTXcode'] = xcode cache['DTXcodeBuild'] = xcode_build sdk_root = self._SdkRoot(configname) if not sdk_root: sdk_root = self._DefaultSdkRoot() cache['DTSDKName'] = sdk_root if xcode >= '0430': cache['DTSDKBuild'] = self._GetSdkVersionInfoItem( sdk_root, 'ProductBuildVersion') else: cache['DTSDKBuild'] = cache['BuildMachineOSBuild'] if self.isIOS: cache['DTPlatformName'] = cache['DTSDKName'] if configname.endswith("iphoneos"): cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem( sdk_root, 'ProductVersion') cache['CFBundleSupportedPlatforms'] = ['iPhoneOS'] else: cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator'] XcodeSettings._plist_cache[configname] = cache # Include extra plist items that are per-target, not per global # XcodeSettings. items = dict(XcodeSettings._plist_cache[configname]) if self.isIOS: items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname) return items def _DefaultSdkRoot(self): """Returns the default SDKROOT to use. Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode project, then the environment variable was empty. Starting with this version, Xcode uses the name of the newest SDK installed. 
""" if self._XcodeVersion() < '0500': return '' default_sdk_path = self._XcodeSdkPath('') default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path) if default_sdk_root: return default_sdk_root try: all_sdks = self._GetStdout(['xcodebuild', '-showsdks']) except: # If xcodebuild fails, there will be no valid SDKs return '' for line in all_sdks.splitlines(): items = line.split() if len(items) >= 3 and items[-2] == '-sdk': sdk_root = items[-1] sdk_path = self._XcodeSdkPath(sdk_root) if sdk_path == default_sdk_path: return sdk_root return '' def _DefaultArch(self): # For Mac projects, Xcode changed the default value used when ARCHS is not # set from "i386" to "x86_64". # # For iOS projects, if ARCHS is unset, it defaults to "armv7 armv7s" when # building for a device, and the simulator binaries are always build for # "i386". # # For new projects, ARCHS is set to $(ARCHS_STANDARD_INCLUDING_64_BIT), # which correspond to "armv7 armv7s arm64", and when building the simulator # the architecture is either "i386" or "x86_64" depending on the simulated # device (respectively 32-bit or 64-bit device). # # Since the value returned by this function is only used when ARCHS is not # set, then on iOS we return "i386", as the default xcode project generator # does not set ARCHS if it is not set in the .gyp file. if self.isIOS: return 'i386' version, build = self._XcodeVersion() if version >= '0500': return 'x86_64' return 'i386' class MacPrefixHeader(object): """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature. This feature consists of several pieces: * If GCC_PREFIX_HEADER is present, all compilations in that project get an additional |-include path_to_prefix_header| cflag. * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is instead compiled, and all other compilations in the project get an additional |-include path_to_compiled_header| instead. + Compiled prefix headers have the extension gch. 
There is one gch file for every language used in the project (c, cc, m, mm), since gch files for different languages aren't compatible. + gch files themselves are built with the target's normal cflags, but they obviously don't get the |-include| flag. Instead, they need a -x flag that describes their language. + All o files in the target need to depend on the gch file, to make sure it's built before any o file is built. This class helps with some of these tasks, but it needs help from the build system for writing dependencies to the gch files, for writing build commands for the gch files, and for figuring out the location of the gch files. """ def __init__(self, xcode_settings, gyp_path_to_build_path, gyp_path_to_build_output): """If xcode_settings is None, all methods on this class are no-ops. Args: gyp_path_to_build_path: A function that takes a gyp-relative path, and returns a path relative to the build directory. gyp_path_to_build_output: A function that takes a gyp-relative path and a language code ('c', 'cc', 'm', or 'mm'), and that returns a path to where the output of precompiling that path for that language should be placed (without the trailing '.gch'). """ # This doesn't support per-configuration prefix headers. Good enough # for now. self.header = None self.compile_headers = False if xcode_settings: self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER') self.compile_headers = xcode_settings.GetPerTargetSetting( 'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO' self.compiled_headers = {} if self.header: if self.compile_headers: for lang in ['c', 'cc', 'm', 'mm']: self.compiled_headers[lang] = gyp_path_to_build_output( self.header, lang) self.header = gyp_path_to_build_path(self.header) def _CompiledHeader(self, lang, arch): assert self.compile_headers h = self.compiled_headers[lang] if arch: h += '.' 
+ arch return h def GetInclude(self, lang, arch=None): """Gets the cflags to include the prefix header for language |lang|.""" if self.compile_headers and lang in self.compiled_headers: return '-include %s' % self._CompiledHeader(lang, arch) elif self.header: return '-include %s' % self.header else: return '' def _Gch(self, lang, arch): """Returns the actual file name of the prefix header for language |lang|.""" assert self.compile_headers return self._CompiledHeader(lang, arch) + '.gch' def GetObjDependencies(self, sources, objs, arch=None): """Given a list of source files and the corresponding object files, returns a list of (source, object, gch) tuples, where |gch| is the build-directory relative path to the gch file each object file depends on. |compilable[i]| has to be the source file belonging to |objs[i]|.""" if not self.header or not self.compile_headers: return [] result = [] for source, obj in zip(sources, objs): ext = os.path.splitext(source)[1] lang = { '.c': 'c', '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc', '.m': 'm', '.mm': 'mm', }.get(ext, None) if lang: result.append((source, obj, self._Gch(lang, arch))) return result def GetPchBuildCommands(self, arch=None): """Returns [(path_to_gch, language_flag, language, header)]. |path_to_gch| and |header| are relative to the build directory. """ if not self.header or not self.compile_headers: return [] return [ (self._Gch('c', arch), '-x c-header', 'c', self.header), (self._Gch('cc', arch), '-x c++-header', 'cc', self.header), (self._Gch('m', arch), '-x objective-c-header', 'm', self.header), (self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header), ] def MergeGlobalXcodeSettingsToSpec(global_dict, spec): """Merges the global xcode_settings dictionary into each configuration of the target represented by spec. For keys that are both in the global and the local xcode_settings dict, the local key gets precendence. 
""" # The xcode generator special-cases global xcode_settings and does something # that amounts to merging in the global xcode_settings into each local # xcode_settings dict. global_xcode_settings = global_dict.get('xcode_settings', {}) for config in spec['configurations'].values(): if 'xcode_settings' in config: new_settings = global_xcode_settings.copy() new_settings.update(config['xcode_settings']) config['xcode_settings'] = new_settings def IsMacBundle(flavor, spec): """Returns if |spec| should be treated as a bundle. Bundles are directories with a certain subdirectory structure, instead of just a single file. Bundle rules do not produce a binary but also package resources into that directory.""" is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac') if is_mac_bundle: assert spec['type'] != 'none', ( 'mac_bundle targets cannot have type none (target "%s")' % spec['target_name']) return is_mac_bundle def GetMacBundleResources(product_dir, xcode_settings, resources): """Yields (output, resource) pairs for every resource in |resources|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. resources: A list of bundle resources, relative to the build directory. """ dest = os.path.join(product_dir, xcode_settings.GetBundleResourceFolder()) for res in resources: output = dest # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangable. assert ' ' not in res, ( "Spaces in resource filenames not supported (%s)" % res) # Split into (path,file). res_parts = os.path.split(res) # Now split the path into (prefix,maybe.lproj). lproj_parts = os.path.split(res_parts[0]) # If the resource lives in a .lproj bundle, add that to the destination. 
if lproj_parts[1].endswith('.lproj'): output = os.path.join(output, lproj_parts[1]) output = os.path.join(output, res_parts[1]) # Compiled XIB files are referred to by .nib. if output.endswith('.xib'): output = os.path.splitext(output)[0] + '.nib' # Compiled storyboard files are referred to by .storyboardc. if output.endswith('.storyboard'): output = os.path.splitext(output)[0] + '.storyboardc' yield output, res def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path): """Returns (info_plist, dest_plist, defines, extra_env), where: * |info_plist| is the source plist path, relative to the build directory, * |dest_plist| is the destination plist path, relative to the build directory, * |defines| is a list of preprocessor defines (empty if the plist shouldn't be preprocessed, * |extra_env| is a dict of env variables that should be exported when invoking |mac_tool copy-info-plist|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build direcotry. """ info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE') if not info_plist: return None, None, [], {} # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangable. assert ' ' not in info_plist, ( "Spaces in Info.plist filenames not supported (%s)" % info_plist) info_plist = gyp_path_to_build_path(info_plist) # If explicitly set to preprocess the plist, invoke the C preprocessor and # specify any defines as -D flags. if xcode_settings.GetPerTargetSetting( 'INFOPLIST_PREPROCESS', default='NO') == 'YES': # Create an intermediate file based on the path. 
defines = shlex.split(xcode_settings.GetPerTargetSetting( 'INFOPLIST_PREPROCESSOR_DEFINITIONS', default='')) else: defines = [] dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath()) extra_env = xcode_settings.GetPerTargetSettings() return info_plist, dest_plist, defines, extra_env def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None): """Return the environment variables that Xcode would set. See http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153 for a full list. Args: xcode_settings: An XcodeSettings object. If this is None, this function returns an empty dict. built_products_dir: Absolute path to the built products dir. srcroot: Absolute path to the source root. configuration: The build configuration name. additional_settings: An optional dict with more values to add to the result. """ if not xcode_settings: return {} # This function is considered a friend of XcodeSettings, so let it reach into # its implementation details. spec = xcode_settings.spec # These are filled in on a as-needed basis. 
env = { 'BUILT_PRODUCTS_DIR' : built_products_dir, 'CONFIGURATION' : configuration, 'PRODUCT_NAME' : xcode_settings.GetProductName(), # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME 'SRCROOT' : srcroot, 'SOURCE_ROOT': '${SRCROOT}', # This is not true for static libraries, but currently the env is only # written for bundles: 'TARGET_BUILD_DIR' : built_products_dir, 'TEMP_DIR' : '${TMPDIR}', } if xcode_settings.GetPerConfigSetting('SDKROOT', configuration): env['SDKROOT'] = xcode_settings._SdkPath(configuration) else: env['SDKROOT'] = '' if spec['type'] in ( 'executable', 'static_library', 'shared_library', 'loadable_module'): env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName() env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath() env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName() mach_o_type = xcode_settings.GetMachOType() if mach_o_type: env['MACH_O_TYPE'] = mach_o_type env['PRODUCT_TYPE'] = xcode_settings.GetProductType() if xcode_settings._IsBundle(): env['CONTENTS_FOLDER_PATH'] = \ xcode_settings.GetBundleContentsFolderPath() env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \ xcode_settings.GetBundleResourceFolder() env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath() env['WRAPPER_NAME'] = xcode_settings.GetWrapperName() install_name = xcode_settings.GetInstallName() if install_name: env['LD_DYLIB_INSTALL_NAME'] = install_name install_name_base = xcode_settings.GetInstallNameBase() if install_name_base: env['DYLIB_INSTALL_NAME_BASE'] = install_name_base if not additional_settings: additional_settings = {} else: # Flatten lists to strings. 
for k in additional_settings: if not isinstance(additional_settings[k], str): additional_settings[k] = ' '.join(additional_settings[k]) additional_settings.update(env) for k in additional_settings: additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k]) return additional_settings def _NormalizeEnvVarReferences(str): """Takes a string containing variable references in the form ${FOO}, $(FOO), or $FOO, and returns a string with all variable references in the form ${FOO}. """ # $FOO -> ${FOO} str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str) # $(FOO) -> ${FOO} matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str) for match in matches: to_replace, variable = match assert '$(' not in match, '$($(FOO)) variables not supported: ' + match str = str.replace(to_replace, '${' + variable + '}') return str def ExpandEnvVars(string, expansions): """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the expansions list. If the variable expands to something that references another variable, this variable is expanded as well if it's in env -- until no variables present in env are left.""" for k, v in reversed(expansions): string = string.replace('${' + k + '}', v) string = string.replace('$(' + k + ')', v) string = string.replace('$' + k, v) return string def _TopologicallySortedEnvVarKeys(env): """Takes a dict |env| whose values are strings that can refer to other keys, for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of env such that key2 is after key1 in L if env[key2] refers to env[key1]. Throws an Exception in case of dependency cycles. """ # Since environment variables can refer to other variables, the evaluation # order is important. Below is the logic to compute the dependency graph # and sort it. regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}') def GetEdges(node): # Use a definition of edges such that user_of_variable -> used_varible. 
# This happens to be easier in this case, since a variable's # definition contains all variables it references in a single string. # We can then reverse the result of the topological sort at the end. # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) matches = set([v for v in regex.findall(env[node]) if v in env]) for dependee in matches: assert '${' not in dependee, 'Nested variables not supported: ' + dependee return matches try: # Topologically sort, and then reverse, because we used an edge definition # that's inverted from the expected result of this function (see comment # above). order = gyp.common.TopologicallySorted(env.keys(), GetEdges) order.reverse() return order except gyp.common.CycleError, e: raise GypError( 'Xcode environment variables are cyclically dependent: ' + str(e.nodes)) def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None): env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings) return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)] def GetSpecPostbuildCommands(spec, quiet=False): """Returns the list of postbuilds explicitly defined on |spec|, in a form executable by a shell.""" postbuilds = [] for postbuild in spec.get('postbuilds', []): if not quiet: postbuilds.append('echo POSTBUILD\\(%s\\) %s' % ( spec['target_name'], postbuild['postbuild_name'])) postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action'])) return postbuilds def _HasIOSTarget(targets): """Returns true if any target contains the iOS specific key IPHONEOS_DEPLOYMENT_TARGET.""" for target_dict in targets.values(): for config in target_dict['configurations'].values(): if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'): return True return False def _AddIOSDeviceConfigurations(targets): """Clone all targets and append -iphoneos to the name. 
Configure these targets to build for iOS devices.""" for target_dict in targets.values(): for config_name in target_dict['configurations'].keys(): config = target_dict['configurations'][config_name] new_config_name = config_name + '-iphoneos' new_config_dict = copy.deepcopy(config) if target_dict['toolset'] == 'target': new_config_dict['xcode_settings']['ARCHS'] = ['armv7'] new_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos' target_dict['configurations'][new_config_name] = new_config_dict return targets def CloneConfigurationForDeviceAndEmulator(target_dicts): """If |target_dicts| contains any iOS targets, automatically create -iphoneos targets for iOS device builds.""" if _HasIOSTarget(target_dicts): return _AddIOSDeviceConfigurations(target_dicts) return target_dicts
# unknown
# codeparrot/codeparrot-clean
# Copyright 2009-2012 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for representing files stored in GridFS.""" import datetime import math import os from bson.binary import Binary from bson.objectid import ObjectId from bson.py3compat import b, binary_type, string_types, text_type, StringIO from gridfs.errors import (CorruptGridFile, FileExists, NoFile, UnsupportedAPI) from pymongo import ASCENDING from pymongo.collection import Collection from pymongo.errors import DuplicateKeyError try: _SEEK_SET = os.SEEK_SET _SEEK_CUR = os.SEEK_CUR _SEEK_END = os.SEEK_END # before 2.5 except AttributeError: _SEEK_SET = 0 _SEEK_CUR = 1 _SEEK_END = 2 EMPTY = b("") NEWLN = b("\n") """Default chunk size, in bytes.""" DEFAULT_CHUNK_SIZE = 256 * 1024 def _create_property(field_name, docstring, read_only=False, closed_only=False): """Helper for creating properties to read/write to files. """ def getter(self): if closed_only and not self._closed: raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 if field_name == 'length': return self._file.get(field_name, 0) return self._file.get(field_name, None) def setter(self, value): if self._closed: self._coll.files.update({"_id": self._file["_id"]}, {"$set": {field_name: value}}, **self._coll._get_wc_override()) self._file[field_name] = value if read_only: docstring = docstring + "\n\nThis attribute is read-only." 
elif closed_only: docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " "can only be read after :meth:`close` " "has been called.") if not read_only and not closed_only: return property(getter, setter, doc=docstring) return property(getter, doc=docstring) class GridIn(object): """Class to write data to GridFS. """ def __init__(self, root_collection, **kwargs): """Write a file to GridFS Application developers should generally not need to instantiate this class directly - instead see the methods provided by :class:`~gridfs.GridFS`. Raises :class:`TypeError` if `root_collection` is not an instance of :class:`~pymongo.collection.Collection`. Any of the file level options specified in the `GridFS Spec <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as keyword arguments. Any additional keyword arguments will be set as additional fields on the file document. Valid keyword arguments include: - ``"_id"``: unique ID for this file (default: :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must not have already been used for another file - ``"filename"``: human name for the file - ``"contentType"`` or ``"content_type"``: valid mime-type for the file - ``"chunkSize"`` or ``"chunk_size"``: size of each of the chunks, in bytes (default: 256 kb) - ``"encoding"``: encoding used for this file. In Python 2, any :class:`unicode` that is written to the file will be converted to a :class:`str`. In Python 3, any :class:`str` that is written to the file will be converted to :class:`bytes`. If you turn off write-acknowledgment for performance reasons, it is critical to wrap calls to :meth:`write` and :meth:`close` within a single request: >>> from pymongo import MongoClient >>> from gridfs import GridFS >>> client = MongoClient(w=0) # turn off write acknowledgment >>> fs = GridFS(client) >>> gridin = fs.new_file() >>> request = client.start_request() >>> try: ... for i in range(10): ... gridin.write('foo') ... gridin.close() ... finally: ... 
request.end() In Python 2.5 and later this code can be simplified with a with-statement, see :doc:`/examples/requests` for more information. :Parameters: - `root_collection`: root collection to write to - `**kwargs` (optional): file level options (see above) """ if not isinstance(root_collection, Collection): raise TypeError("root_collection must be an " "instance of Collection") # Handle alternative naming if "content_type" in kwargs: kwargs["contentType"] = kwargs.pop("content_type") if "chunk_size" in kwargs: kwargs["chunkSize"] = kwargs.pop("chunk_size") # Defaults kwargs["_id"] = kwargs.get("_id", ObjectId()) kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) root_collection.chunks.ensure_index([("files_id", ASCENDING), ("n", ASCENDING)], unique=True) object.__setattr__(self, "_coll", root_collection) object.__setattr__(self, "_chunks", root_collection.chunks) object.__setattr__(self, "_file", kwargs) object.__setattr__(self, "_buffer", StringIO()) object.__setattr__(self, "_position", 0) object.__setattr__(self, "_chunk_number", 0) object.__setattr__(self, "_closed", False) @property def closed(self): """Is this file closed? 
""" return self._closed _id = _create_property("_id", "The ``'_id'`` value for this file.", read_only=True) filename = _create_property("filename", "Name of this file.") name = _create_property("filename", "Alias for `filename`.") content_type = _create_property("contentType", "Mime-type for this file.") length = _create_property("length", "Length (in bytes) of this file.", closed_only=True) chunk_size = _create_property("chunkSize", "Chunk size for this file.", read_only=True) upload_date = _create_property("uploadDate", "Date that this file was uploaded.", closed_only=True) md5 = _create_property("md5", "MD5 of the contents of this file " "(generated on the server).", closed_only=True) def __getattr__(self, name): if name in self._file: return self._file[name] raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name, value): # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: object.__setattr__(self, name, value) else: # All other attributes are part of the document in db.fs.files. # Store them to be sent to server on close() or if closed, send # them now. self._file[name] = value if self._closed: self._coll.files.update({"_id": self._file["_id"]}, {"$set": {name: value}}, **self._coll._get_wc_override()) def __flush_data(self, data): """Flush `data` to a chunk. """ if not data: return assert(len(data) <= self.chunk_size) chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} try: self._chunks.insert(chunk) except DuplicateKeyError: self._raise_file_exists(self._file['_id']) self._chunk_number += 1 self._position += len(data) def __flush_buffer(self): """Flush the buffer contents out to a chunk. """ self.__flush_data(self._buffer.getvalue()) self._buffer.close() self._buffer = StringIO() def __flush(self): """Flush the file to the database. 
""" try: self.__flush_buffer() db = self._coll.database # See PYTHON-417, "Sharded GridFS fails with exception: chunks out # of order." Inserts via mongos, even if they use a single # connection, can succeed out-of-order due to the writebackListener. # We mustn't call "filemd5" until all inserts are complete, which # we ensure by calling getLastError (and ignoring the result). db.error() md5 = db.command( "filemd5", self._id, root=self._coll.name)["md5"] self._file["md5"] = md5 self._file["length"] = self._position self._file["uploadDate"] = datetime.datetime.utcnow() return self._coll.files.insert(self._file, **self._coll._get_wc_override()) except DuplicateKeyError: self._raise_file_exists(self._id) def _raise_file_exists(self, file_id): """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) def close(self): """Flush the file and close it. A closed file cannot be written any more. Calling :meth:`close` more than once is allowed. """ if not self._closed: self.__flush() object.__setattr__(self, "_closed", True) def write(self, data): """Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). If the file has an :attr:`encoding` attribute, `data` can also be a :class:`unicode` (:class:`str` in python 3) instance, which will be encoded as :attr:`encoding` before being written. Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of :class:`str` (:class:`bytes` in python 3), a file-like object, or an instance of :class:`unicode` (:class:`str` in python 3). Unicode data is only allowed if the file has an :attr:`encoding` attribute. :Parameters: - `data`: string of bytes or file-like object to be written to the file .. 
versionadded:: 1.9 The ability to write :class:`unicode`, if the file has an :attr:`encoding` attribute. """ if self._closed: raise ValueError("cannot write to a closed file") try: # file-like read = data.read except AttributeError: # string if not isinstance(data, string_types): raise TypeError("can only write strings or file-like objects") if isinstance(data, unicode): try: data = data.encode(self.encoding) except AttributeError: raise TypeError("must specify an encoding for file in " "order to write %s" % (text_type.__name__,)) read = StringIO(data).read if self._buffer.tell() > 0: # Make sure to flush only when _buffer is complete space = self.chunk_size - self._buffer.tell() if space: to_write = read(space) self._buffer.write(to_write) if len(to_write) < space: return # EOF or incomplete self.__flush_buffer() to_write = read(self.chunk_size) while to_write and len(to_write) == self.chunk_size: self.__flush_data(to_write) to_write = read(self.chunk_size) self._buffer.write(to_write) def writelines(self, sequence): """Write a sequence of strings to the file. Does not add seperators. """ for line in sequence: self.write(line) def __enter__(self): """Support for the context manager protocol. """ return self def __exit__(self, exc_type, exc_val, exc_tb): """Support for the context manager protocol. Close the file and allow exceptions to propagate. """ self.close() # propagate exceptions return False class GridOut(object): """Class to read data out of GridFS. """ def __init__(self, root_collection, file_id=None, file_document=None): """Read a file from GridFS Application developers should generally not need to instantiate this class directly - instead see the methods provided by :class:`~gridfs.GridFS`. Either `file_id` or `file_document` must be specified, `file_document` will be given priority if present. Raises :class:`TypeError` if `root_collection` is not an instance of :class:`~pymongo.collection.Collection`. 
:Parameters: - `root_collection`: root collection to read from - `file_id`: value of ``"_id"`` for the file to read - `file_document`: file document from `root_collection.files` .. versionadded:: 1.9 The `file_document` parameter. """ if not isinstance(root_collection, Collection): raise TypeError("root_collection must be an " "instance of Collection") self.__chunks = root_collection.chunks files = root_collection.files self._file = file_document or files.find_one({"_id": file_id}) if not self._file: raise NoFile("no file in gridfs collection %r with _id %r" % (files, file_id)) self.__buffer = EMPTY self.__position = 0 _id = _create_property("_id", "The ``'_id'`` value for this file.", True) filename = _create_property("filename", "Name of this file.", True) name = _create_property("filename", "Alias for `filename`.", True) content_type = _create_property("contentType", "Mime-type for this file.", True) length = _create_property("length", "Length (in bytes) of this file.", True) chunk_size = _create_property("chunkSize", "Chunk size for this file.", True) upload_date = _create_property("uploadDate", "Date that this file was first uploaded.", True) aliases = _create_property("aliases", "List of aliases for this file.", True) metadata = _create_property("metadata", "Metadata attached to this file.", True) md5 = _create_property("md5", "MD5 of the contents of this file " "(generated on the server).", True) def __getattr__(self, name): if name in self._file: return self._file[name] raise AttributeError("GridOut object has no attribute '%s'" % name) def read(self, size=-1): """Read at most `size` bytes from the file (less if there isn't enough data). The bytes are returned as an instance of :class:`str` (:class:`bytes` in python 3). If `size` is negative or omitted all data is read. 
:Parameters: - `size` (optional): the number of bytes to read """ if size == 0: return "" remainder = int(self.length) - self.__position if size < 0 or size > remainder: size = remainder received = len(self.__buffer) chunk_number = int((received + self.__position) / self.chunk_size) chunks = [] while received < size: chunk = self.__chunks.find_one({"files_id": self._id, "n": chunk_number}) if not chunk: raise CorruptGridFile("no chunk #%d" % chunk_number) if received: chunk_data = chunk["data"] else: chunk_data = chunk["data"][self.__position % self.chunk_size:] received += len(chunk_data) chunks.append(chunk_data) chunk_number += 1 data = EMPTY.join([self.__buffer] + chunks) self.__position += size to_return = data[:size] self.__buffer = data[size:] return to_return def readline(self, size=-1): """Read one line or up to `size` bytes from the file. :Parameters: - `size` (optional): the maximum number of bytes to read .. versionadded:: 1.9 """ bytes = EMPTY while len(bytes) != size: byte = self.read(1) bytes += byte if byte == EMPTY or byte == NEWLN: break return bytes def tell(self): """Return the current position of this file. """ return self.__position def seek(self, pos, whence=_SEEK_SET): """Set the current position of this file. :Parameters: - `pos`: the position (or offset if using relative positioning) to seek to - `whence` (optional): where to seek from. :attr:`os.SEEK_SET` (``0``) for absolute file positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end. """ if whence == _SEEK_SET: new_pos = pos elif whence == _SEEK_CUR: new_pos = self.__position + pos elif whence == _SEEK_END: new_pos = int(self.length) + pos else: raise IOError(22, "Invalid value for `whence`") if new_pos < 0: raise IOError(22, "Invalid value for `pos` - must be positive") self.__position = new_pos self.__buffer = EMPTY def __iter__(self): """Return an iterator over all of this file's data. 
The iterator will return chunk-sized instances of :class:`str` (:class:`bytes` in python 3). This can be useful when serving files using a webserver that handles such an iterator efficiently. """ return GridOutIterator(self, self.__chunks) def close(self): """Make GridOut more generically file-like.""" pass def __enter__(self): """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ return self def __exit__(self, exc_type, exc_val, exc_tb): """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ return False class GridOutIterator(object): def __init__(self, grid_out, chunks): self.__id = grid_out._id self.__chunks = chunks self.__current_chunk = 0 self.__max_chunk = math.ceil(float(grid_out.length) / grid_out.chunk_size) def __iter__(self): return self def next(self): if self.__current_chunk >= self.__max_chunk: raise StopIteration chunk = self.__chunks.find_one({"files_id": self.__id, "n": self.__current_chunk}) if not chunk: raise CorruptGridFile("no chunk #%d" % self.__current_chunk) self.__current_chunk += 1 return binary_type(chunk["data"]) class GridFile(object): """No longer supported. .. versionchanged:: 1.6 The GridFile class is no longer supported. """ def __init__(self, *args, **kwargs): raise UnsupportedAPI("The GridFile class is no longer supported. " "Please use GridIn or GridOut instead.")
unknown
codeparrot/codeparrot-clean
A common challenge with combining `[chunkhash]` and Code Splitting is that the entry chunk includes the webpack runtime and with it the chunkhash mappings. This means the entry chunk is updated on every build, so its `[chunkhash]` is pretty useless because this chunk won't be cached.

A very simple solution to this problem is to create another chunk that contains only the webpack runtime (including the chunkhash map). This can be achieved with the `optimization.runtimeChunk` option. To avoid the additional request for another chunk, this pretty small chunk can be inlined into the HTML page.

The configuration required for this is:

- use `[chunkhash]` in `output.filename` (Note that this example doesn't do this because of the example generator infrastructure, but you should)
- use `[chunkhash]` in `output.chunkFilename` (Note that this example doesn't do this because of the example generator infrastructure, but you should)

# example.js

```javascript
_{{example.js}}_
```

# webpack.config.js

```javascript
_{{webpack.config.js}}_
```

# index.html

```html
<html>
	<head>
	</head>
	<body>

		<!-- inlined minimized file "runtime~main.[chunkhash].js" -->
		<script>
			_{{production:dist/runtime~main.chunkhash.js}}_
		</script>

		<script src="dist/main.[chunkhash].js"></script>

	</body>
</html>
```

# dist/runtime~main.[chunkhash].js

```javascript
_{{dist/runtime~main.chunkhash.js}}_
```

# dist/main.[chunkhash].js

```javascript
_{{dist/main.chunkhash.js}}_
```

# Info

## Unoptimized

```
_{{stdout}}_
```

## Production mode

```
_{{production:stdout}}_
```
unknown
github
https://github.com/webpack/webpack
examples/chunkhash/template.md
from model_base import ModelBase
from metrics_base import *


class H2ODimReductionModel(ModelBase):
    """Accessors for the output of an H2O dimensionality-reduction (GLRM/PCA) model.

    All metrics are read out of ``self._model_json["output"]``, which is
    populated by :class:`ModelBase`.
    """

    def num_iterations(self):
        """
        Get the number of iterations that it took to converge or reach max iterations.

        :return: number of iterations (integer)
        """
        o = self._model_json["output"]
        # model_summary is a table object; look the column up by header name
        # rather than hard-coding its position.
        return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('number_of_iterations')]

    def objective(self):
        """
        Get the final value of the objective function from the GLRM model.

        :return: final objective value (double)
        """
        o = self._model_json["output"]
        return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_objective_value')]

    def final_step(self):
        """
        Get the final step size from the GLRM model.

        :return: final step size (double)
        """
        o = self._model_json["output"]
        return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_step_size')]

    def archetypes(self):
        """
        Get the archetypes (Y) of the GLRM model.

        :return: list of archetype rows; the leading label cell of each row is dropped
        """
        o = self._model_json["output"]
        # First cell of each archetypes row is a label, not a value.
        return [list(yval)[1:] for yval in o["archetypes"].cell_values]

    def screeplot(self, type="barplot", **kwargs):
        """
        Produce the scree plot.

        :param type: type of plot. "barplot" and "lines" currently supported
        :param server: (via kwargs) if True, select the non-interactive Agg
            backend and skip the blocking matplotlib ``show()`` call
        :return: None
        """
        # Check for matplotlib; bail out gracefully if absent.
        # BUG FIX: the original called imp.find_module('matplotlib') without
        # ever importing `imp`, raising NameError instead of the ImportError
        # the handler below expects. A plain import attempt is sufficient.
        try:
            import matplotlib
            if 'server' in kwargs.keys() and kwargs['server']:
                matplotlib.use('Agg', warn=False)
            import matplotlib.pyplot as plt
        except ImportError:
            print("matplotlib is required for this function!")
            return

        # Variances are the squared singular values from the importance table
        # (skip the leading label cell).
        variances = [s**2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
        component_idx = range(1, len(variances) + 1)
        plt.xlabel('Components')
        plt.ylabel('Variances')
        plt.title('Scree Plot')
        plt.xticks(component_idx)
        if type == "barplot":
            plt.bar(component_idx, variances)
        elif type == "lines":
            plt.plot(component_idx, variances, 'b--')
        if not ('server' in kwargs.keys() and kwargs['server']):
            plt.show()
unknown
codeparrot/codeparrot-clean
package tarexport

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"time"

	c8dimages "github.com/containerd/containerd/v2/core/images"
	"github.com/containerd/containerd/v2/pkg/tracing"
	"github.com/containerd/log"
	"github.com/containerd/platforms"
	"github.com/distribution/reference"
	"github.com/docker/distribution"
	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
	"github.com/moby/moby/api/types/events"
	"github.com/moby/moby/v2/daemon/internal/image"
	v1 "github.com/moby/moby/v2/daemon/internal/image/v1"
	"github.com/moby/moby/v2/daemon/internal/ioutils"
	"github.com/moby/moby/v2/daemon/internal/layer"
	"github.com/moby/moby/v2/daemon/internal/system"
	"github.com/moby/sys/sequential"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// imageDescriptor bundles everything needed to export a single image:
// its tag references, the ordered layer diff IDs, the image object itself,
// and a held reference to its top layer (taken in takeLayerReference,
// released in releaseLayerReferences).
type imageDescriptor struct {
	refs     []reference.NamedTagged // tags to record for this image
	layers   []layer.DiffID          // layer diff IDs in config order (filled by saveImage)
	image    *image.Image            // resolved image
	layerRef layer.Layer             // held top-layer reference; nil for images with no layers
}

// saveSession holds the state of one Save call: the staging directory the
// archive is assembled in, the images to export, and de-duplication caches
// for layers and legacy configs already written.
type saveSession struct {
	*tarexporter
	outDir       string
	images       map[image.ID]*imageDescriptor
	savedLayers  map[layer.DiffID]distribution.Descriptor // layer diff ID -> descriptor already written
	savedConfigs map[string]struct{}                      // legacy v1 config IDs already written
}

// Save resolves the given image names and streams a tar archive containing
// them to outStream.
func (l *tarexporter) Save(ctx context.Context, names []string, outStream io.Writer) error {
	imgDescriptors, err := l.parseNames(ctx, names)
	if err != nil {
		return err
	}

	// Release all the image top layer references
	defer l.releaseLayerReferences(imgDescriptors)
	return (&saveSession{tarexporter: l, images: imgDescriptors}).save(ctx, outStream)
}

// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor.
// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later.
func (l *tarexporter) parseNames(ctx context.Context, names []string) (desc map[image.ID]*imageDescriptor, rErr error) {
	imgDescr := make(map[image.ID]*imageDescriptor)
	// On any error, release the references already taken so nothing leaks.
	defer func() {
		if rErr != nil {
			l.releaseLayerReferences(imgDescr)
		}
	}()

	// addAssoc registers an image ID (taking its top-layer reference on first
	// sight) and optionally records a tag reference for it, de-duplicating tags.
	addAssoc := func(id image.ID, ref reference.Named) error {
		if _, ok := imgDescr[id]; !ok {
			descr := &imageDescriptor{}
			if err := l.takeLayerReference(id, descr); err != nil {
				return err
			}
			imgDescr[id] = descr
		}

		if ref != nil {
			// Digest-pinned (canonical) references are not recorded as tags.
			if _, ok := ref.(reference.Canonical); ok {
				return nil
			}
			tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged)
			if !ok {
				return nil
			}

			for _, t := range imgDescr[id].refs {
				if tagged.String() == t.String() {
					return nil
				}
			}
			imgDescr[id].refs = append(imgDescr[id].refs, tagged)
		}
		return nil
	}

	for _, name := range names {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
		ref, err := reference.ParseAnyReference(name)
		if err != nil {
			return nil, err
		}
		namedRef, ok := ref.(reference.Named)
		if !ok {
			// Check if digest ID reference
			if digested, ok := ref.(reference.Digested); ok {
				if err := addAssoc(image.ID(digested.Digest()), nil); err != nil {
					return nil, err
				}
				continue
			}
			return nil, errors.Errorf("invalid reference: %v", name)
		}

		// A name that equals the canonical digest algorithm (e.g. "sha256")
		// is treated as a truncated image ID, not a repository name.
		if reference.FamiliarName(namedRef) == string(digest.Canonical) {
			imgID, err := l.is.Search(name)
			if err != nil {
				return nil, err
			}
			if err := addAssoc(imgID, nil); err != nil {
				return nil, err
			}
			continue
		}
		if reference.IsNameOnly(namedRef) {
			// Untagged name: export every tag of that repository; if none
			// exist, fall back to an image ID search.
			assocs := l.rs.ReferencesByName(namedRef)
			for _, assoc := range assocs {
				if err := addAssoc(image.ID(assoc.ID), assoc.Ref); err != nil {
					return nil, err
				}
			}
			if len(assocs) == 0 {
				imgID, err := l.is.Search(name)
				if err != nil {
					return nil, err
				}
				if err := addAssoc(imgID, nil); err != nil {
					return nil, err
				}
			}
			continue
		}
		id, err := l.rs.Get(namedRef)
		if err != nil {
			return nil, err
		}
		if err := addAssoc(image.ID(id), namedRef); err != nil {
			return nil, err
		}
	}
	return imgDescr, nil
}

// takeLayerReference will take/Get the image top layer reference
func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error {
	img, err := l.is.Get(id)
	if err != nil {
		return err
	}
	if err := image.CheckOS(img.OperatingSystem()); err != nil {
		return fmt.Errorf("os %q is not supported", img.OperatingSystem())
	}
	// If the caller requested a specific platform, reject images that don't match.
	if l.platform != nil {
		if !l.platformMatcher.Match(img.Platform()) {
			return errors.New("no suitable export target found for platform " + platforms.FormatAll(*l.platform))
		}
	}
	imgDescr.image = img
	topLayerID := img.RootFS.ChainID()
	// Images with no layers (empty RootFS) have nothing to reference.
	if topLayerID == "" {
		return nil
	}
	topLayer, err := l.lss.Get(topLayerID)
	if err != nil {
		return err
	}
	imgDescr.layerRef = topLayer
	return nil
}

// releaseLayerReferences will release all the image top layer references
func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error {
	for _, descr := range imgDescr {
		if descr.layerRef != nil {
			l.lss.Release(descr.layerRef)
		}
	}
	return nil
}

// save assembles the export in a temp directory — OCI blobs, an OCI manifest
// per image, the legacy repositories/manifest.json files, oci-layout and
// index.json — then streams the directory as a tar to outStream. All written
// files get fixed timestamps so the output is reproducible.
func (s *saveSession) save(ctx context.Context, outStream io.Writer) error {
	s.savedConfigs = make(map[string]struct{})
	s.savedLayers = make(map[layer.DiffID]distribution.Descriptor)

	// get image json
	tempDir, err := os.MkdirTemp("", "docker-export-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	s.outDir = tempDir
	reposLegacy := make(map[string]map[string]string)

	var manifest []manifestItem
	var parentLinks []parentLink

	var manifestDescriptors []ocispec.Descriptor

	for id, imageDescr := range s.images {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		// Writes the image config and all its layers as blobs; returns
		// descriptors for layers that come from a foreign source.
		foreignSrcs, err := s.saveImage(ctx, id)
		if err != nil {
			return err
		}

		var (
			repoTags []string
			layers   []string
			foreign  = make([]ocispec.Descriptor, 0, len(foreignSrcs))
		)

		// Layers in manifest must follow the actual layer order from config.
		for _, l := range imageDescr.layers {
			desc := foreignSrcs[l]
			foreign = append(foreign, ocispec.Descriptor{
				MediaType:   desc.MediaType,
				Digest:      desc.Digest,
				Size:        desc.Size,
				URLs:        desc.URLs,
				Annotations: desc.Annotations,
				Platform:    desc.Platform,
			})
		}

		data, err := json.Marshal(ocispec.Manifest{
			Versioned: specs.Versioned{
				SchemaVersion: 2,
			},
			MediaType: ocispec.MediaTypeImageManifest,
			Config: ocispec.Descriptor{
				MediaType: ocispec.MediaTypeImageConfig,
				Digest:    digest.Digest(imageDescr.image.ID()),
				Size:      int64(len(imageDescr.image.RawJSON())),
			},
			Layers: foreign,
		})
		if err != nil {
			return errors.Wrap(err, "error marshaling manifest")
		}
		dgst := digest.FromBytes(data)

		// Write the manifest as a content-addressed blob, with fixed
		// timestamps for reproducibility.
		mFile := filepath.Join(s.outDir, ocispec.ImageBlobsDir, dgst.Algorithm().String(), dgst.Encoded())
		if err := mkdirAllWithChtimes(filepath.Dir(mFile), 0o755, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
			return errors.Wrap(err, "error creating blob directory")
		}
		if err := os.WriteFile(mFile, data, 0o644); err != nil {
			return errors.Wrap(err, "error writing oci manifest file")
		}
		if err := system.Chtimes(mFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
			return errors.Wrap(err, "error setting oci manifest timestamp")
		}
		if err := system.Chtimes(filepath.Dir(mFile), time.Unix(0, 0), time.Unix(0, 0)); err != nil {
			return errors.Wrap(err, "error setting blob digest directory timestamp")
		}

		untaggedMfstDesc := ocispec.Descriptor{
			MediaType: ocispec.MediaTypeImageManifest,
			Digest:    dgst,
			Size:      int64(len(data)),
		}
		// One index entry per tag, annotated with the tag's name.
		for _, ref := range imageDescr.refs {
			familiarName := reference.FamiliarName(ref)
			if _, ok := reposLegacy[familiarName]; !ok {
				reposLegacy[familiarName] = make(map[string]string)
			}
			reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1].Encoded()
			repoTags = append(repoTags, reference.FamiliarString(ref))

			taggedManifest := untaggedMfstDesc
			taggedManifest.Annotations = map[string]string{
				c8dimages.AnnotationImageName: ref.String(),
				ocispec.AnnotationRefName:     ref.Tag(),
			}
			manifestDescriptors = append(manifestDescriptors, taggedManifest)
		}

		// If no ref was assigned, make sure the image is still included in index.json.
		if len(manifestDescriptors) == 0 {
			manifestDescriptors = append(manifestDescriptors, untaggedMfstDesc)
		}

		for _, lDgst := range imageDescr.layers {
			// IMPORTANT: We use path, not filepath here to ensure the layers
			// in the manifest use Unix-style forward-slashes.
			layers = append(layers, path.Join(ocispec.ImageBlobsDir, lDgst.Algorithm().String(), lDgst.Encoded()))
		}

		manifest = append(manifest, manifestItem{
			Config:       path.Join(ocispec.ImageBlobsDir, id.Digest().Algorithm().String(), id.Digest().Encoded()),
			RepoTags:     repoTags,
			Layers:       layers,
			LayerSources: foreignSrcs,
		})

		parentID, _ := s.is.GetParent(id)
		parentLinks = append(parentLinks, parentLink{id, parentID})
		s.tarexporter.loggerImgEvent.LogImageEvent(ctx, id.String(), id.String(), events.ActionSave)
	}

	// Record parent image IDs in the legacy manifest entries.
	for i, p := range validatedParentLinks(parentLinks) {
		if p.parentID != "" {
			manifest[i].Parent = p.parentID
		}
	}

	// Legacy "repositories" file, only written when there are tagged images.
	if len(reposLegacy) > 0 {
		reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
		rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
		if err != nil {
			return err
		}

		if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil {
			rf.Close()
			return err
		}

		rf.Close()

		if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
			return err
		}
	}

	// Legacy manifest.json.
	manifestPath := filepath.Join(tempDir, manifestFileName)
	f, err := os.OpenFile(manifestPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}

	if err := json.NewEncoder(f).Encode(manifest); err != nil {
		f.Close()
		return err
	}

	f.Close()

	if err := system.Chtimes(manifestPath, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
		return err
	}

	// OCI layout marker file.
	const ociLayoutContent = `{"imageLayoutVersion": "` + ocispec.ImageLayoutVersion + `"}`
	layoutPath := filepath.Join(tempDir, ocispec.ImageLayoutFile)
	if err := os.WriteFile(layoutPath, []byte(ociLayoutContent), 0o644); err != nil {
		return errors.Wrap(err, "error writing oci layout file")
	}
	if err := system.Chtimes(layoutPath, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
		return errors.Wrap(err, "error setting oci layout file timestamps")
	}

	// OCI index.json listing all manifest descriptors collected above.
	data, err := json.Marshal(ocispec.Index{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		MediaType: ocispec.MediaTypeImageIndex,
		Manifests: manifestDescriptors,
	})
	if err != nil {
		return errors.Wrap(err, "error marshaling oci index")
	}
	idxFile := filepath.Join(s.outDir, ocispec.ImageIndexFile)
	if err := os.WriteFile(idxFile, data, 0o644); err != nil {
		return errors.Wrap(err, "error writing oci index file")
	}
	if err := system.Chtimes(idxFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
		return errors.Wrap(err, "error setting oci index file timestamps")
	}

	return s.writeTar(ctx, tempDir, outStream)
}

// writeTar tars the staging directory (uncompressed) and copies it to outStream.
func (s *saveSession) writeTar(ctx context.Context, tempDir string, outStream io.Writer) error {
	ctx, span := tracing.StartSpan(ctx, "writeTar")
	defer span.End()

	fs, err := archive.Tar(tempDir, compression.None)
	if err != nil {
		span.SetStatus(err)
		return err
	}
	defer fs.Close()

	_, err = ioutils.CopyCtx(ctx, outStream, fs)
	span.SetStatus(err)
	return err
}

// saveImage writes one image's config blob and all its layers (via
// saveConfigAndLayer), generating the legacy v1 ID chain as it walks the
// diff IDs. It fills in s.images[id].layers and returns descriptors for
// layers with a foreign (URL-backed) source.
func (s *saveSession) saveImage(ctx context.Context, id image.ID) (_ map[layer.DiffID]distribution.Descriptor, outErr error) {
	ctx, span := tracing.StartSpan(ctx, "saveImage")
	span.SetAttributes(tracing.Attribute("image.id", id.String()))
	defer span.End()
	defer func() {
		span.SetStatus(outErr)
	}()

	img := s.images[id].image
	if len(img.RootFS.DiffIDs) == 0 {
		return nil, errors.New("empty export - not implemented")
	}

	// Fall back to the zero Unix time when the image has no creation time,
	// keeping output deterministic.
	ts := time.Unix(0, 0)
	if img.Created != nil {
		ts = *img.Created
	}

	var parent digest.Digest
	var layers []layer.DiffID
	var foreignSrcs map[layer.DiffID]distribution.Descriptor
	for i, diffID := range img.RootFS.DiffIDs {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
		v1ImgCreated := time.Unix(0, 0)
		v1Img := image.V1Image{
			// This is for backward compatibility used for
			// pre v1.9 docker.
			Created: &v1ImgCreated,
		}
		// The topmost layer carries the real image config.
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		// Truncate the rootfs to the layers seen so far to compute this
		// level's chain ID and v1 ID.
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return nil, err
		}

		v1Img.ID = v1ID.Encoded()
		if parent != "" {
			v1Img.Parent = parent.Encoded()
		}

		v1Img.OS = img.OS
		src, err := s.saveConfigAndLayer(ctx, rootFS.ChainID(), v1Img, &ts)
		if err != nil {
			return nil, err
		}
		layers = append(layers, diffID)
		parent = v1ID
		if src.Digest != "" {
			if foreignSrcs == nil {
				foreignSrcs = make(map[layer.DiffID]distribution.Descriptor)
			}
			foreignSrcs[img.RootFS.DiffIDs[i]] = src
		}
	}

	// Write the image config as a content-addressed blob.
	data := img.RawJSON()
	dgst := digest.FromBytes(data)

	blobDir := filepath.Join(s.outDir, ocispec.ImageBlobsDir, dgst.Algorithm().String())
	if err := mkdirAllWithChtimes(blobDir, 0o755, ts, ts); err != nil {
		return nil, err
	}
	if err := system.Chtimes(blobDir, ts, ts); err != nil {
		return nil, err
	}
	if err := system.Chtimes(filepath.Dir(blobDir), ts, ts); err != nil {
		return nil, err
	}

	configFile := filepath.Join(blobDir, dgst.Encoded())
	if err := os.WriteFile(configFile, img.RawJSON(), 0o644); err != nil {
		return nil, err
	}
	if err := system.Chtimes(configFile, ts, ts); err != nil {
		return nil, err
	}

	s.images[id].layers = layers
	return foreignSrcs, nil
}

// saveConfigAndLayer writes the legacy v1 config (once per config ID) and the
// layer tar for the given chain ID (once per diff ID, via s.savedLayers),
// returning the layer's descriptor.
func (s *saveSession) saveConfigAndLayer(ctx context.Context, id layer.ChainID, legacyImg image.V1Image, createdTime *time.Time) (_ distribution.Descriptor, outErr error) {
	ctx, span := tracing.StartSpan(ctx, "saveConfigAndLayer")
	span.SetAttributes(
		tracing.Attribute("layer.id", id.String()),
		tracing.Attribute("image.id", legacyImg.ID),
	)
	defer span.End()
	defer func() {
		span.SetStatus(outErr)
	}()

	ts := time.Unix(0, 0)
	if createdTime != nil {
		ts = *createdTime
	}

	outDir := filepath.Join(s.outDir, ocispec.ImageBlobsDir)
	if _, ok := s.savedConfigs[legacyImg.ID]; !ok {
		if err := s.saveConfig(legacyImg, outDir, createdTime); err != nil {
			return distribution.Descriptor{}, err
		}
	}

	// serialize filesystem
	l, err := s.lss.Get(id)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	lDiffID := l.DiffID()
	lDgst := lDiffID
	// Layer already written during this session: reuse its descriptor.
	if _, ok := s.savedLayers[lDiffID]; ok {
		return s.savedLayers[lDiffID], nil
	}
	layerPath := filepath.Join(outDir, lDiffID.Algorithm().String(), lDiffID.Encoded())
	defer layer.ReleaseAndLog(s.lss, l)

	if _, err = os.Stat(layerPath); err == nil {
		// This should not happen. If the layer path was already created, we should have returned early.
		// Log a warning and proceed to recreate the archive.
		log.G(context.TODO()).WithFields(log.Fields{
			"layerPath": layerPath,
			"id":        id,
			"lDgst":     lDgst,
		}).Warn("LayerPath already exists but the descriptor is not cached")
	} else if !os.IsNotExist(err) {
		return distribution.Descriptor{}, err
	}

	// We use sequential file access to avoid depleting the standby list on
	// Windows. On Linux, this equates to a regular os.Create.
	if err := mkdirAllWithChtimes(filepath.Dir(layerPath), 0o755, ts, ts); err != nil {
		return distribution.Descriptor{}, errors.Wrap(err, "could not create layer dir parent")
	}
	tarFile, err := sequential.Create(layerPath)
	if err != nil {
		return distribution.Descriptor{}, errors.Wrap(err, "error creating layer file")
	}
	defer tarFile.Close()

	arch, err := l.TarStream()
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer arch.Close()

	// Digest the tar stream while writing it out, in a single pass.
	digester := digest.Canonical.Digester()
	digestedArch := io.TeeReader(arch, digester.Hash())

	tarSize, err := ioutils.CopyCtx(ctx, tarFile, digestedArch)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	tarDigest := digester.Digest()
	if lDgst != tarDigest {
		// The stored diff ID disagrees with what we actually archived;
		// trust the archive's digest for the blob path.
		log.G(context.TODO()).WithFields(log.Fields{
			"layerDigest":  lDgst,
			"actualDigest": tarDigest,
		}).Warn("layer digest doesn't match its tar archive digest")

		lDgst = digester.Digest()
		layerPath = filepath.Join(outDir, lDgst.Algorithm().String(), lDgst.Encoded())
	}

	for _, fname := range []string{outDir, layerPath} {
		// todo: maybe save layer created timestamp?
		if err := system.Chtimes(fname, ts, ts); err != nil {
			return distribution.Descriptor{}, errors.Wrap(err, "could not set layer timestamp")
		}
	}

	// Prefer the layer's own descriptor (foreign/URL-backed layers); fill in
	// digest, size, and media type from what we just wrote otherwise.
	var desc distribution.Descriptor
	if fs, ok := l.(distribution.Describable); ok {
		desc = fs.Descriptor()
	}
	if desc.Digest == "" {
		desc.Digest = tarDigest
		desc.Size = tarSize
	}
	if desc.MediaType == "" {
		desc.MediaType = ocispec.MediaTypeImageLayer
	}

	s.savedLayers[lDiffID] = desc

	return desc, nil
}

// saveConfig writes the legacy v1 image config as a content-addressed blob
// and marks it as saved for this session.
func (s *saveSession) saveConfig(legacyImg image.V1Image, outDir string, createdTime *time.Time) error {
	imageConfig, err := json.Marshal(legacyImg)
	if err != nil {
		return err
	}

	ts := time.Unix(0, 0)
	if createdTime != nil {
		ts = *createdTime
	}

	cfgDgst := digest.FromBytes(imageConfig)
	configPath := filepath.Join(outDir, cfgDgst.Algorithm().String(), cfgDgst.Encoded())
	if err := mkdirAllWithChtimes(filepath.Dir(configPath), 0o755, ts, ts); err != nil {
		return errors.Wrap(err, "could not create layer dir parent")
	}

	if err := os.WriteFile(configPath, imageConfig, 0o644); err != nil {
		return err
	}
	if err := system.Chtimes(configPath, ts, ts); err != nil {
		return errors.Wrap(err, "could not set config timestamp")
	}

	s.savedConfigs[legacyImg.ID] = struct{}{}
	return nil
}
go
github
https://github.com/moby/moby
daemon/internal/image/tarexport/save.go
# Copyright (c) 2014 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-bitcoinrpc to module search path: import os import sys from decimal import Decimal, ROUND_DOWN import json import random import shutil import subprocess import time import re from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException COVERAGE_DIR = None def enable_coverage(dirname): """Maintain a log of which RPC calls are made during testing.""" global COVERAGE_DIR COVERAGE_DIR = dirname def get_rpc_proxy(url, node_number, timeout=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this calls to Kwargs: timeout (int): HTTP timeout in seconds Returns: AuthServiceProxy. convenience object for making RPC calls. """ proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = timeout proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename( COVERAGE_DIR, node_number) if COVERAGE_DIR else None return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) def p2p_port(n): return 11000 + n + os.getpid()%999 def rpc_port(n): return 12000 + n + os.getpid()%999 def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections, wait=1): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(wait) def sync_mempools(rpc_connections, wait=1): """ Wait until everybody has the same transactions in their 
memory pools
    """
    while True:
        # Use node 0's mempool as the reference set and count how many
        # peers currently agree with it.
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)

# Map of node index -> subprocess.Popen handle for each bitcoind this
# module has launched; used by stop_node()/wait_bitcoinds() for cleanup.
bitcoind_processes = {}

def initialize_datadir(dirname, n):
    """Create (if needed) dirname/nodeN and write a regtest bitcoin.conf
    with fixed rpc credentials (rt/rt) and per-node p2p/rpc ports.

    Returns the datadir path."""
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
        f.write("regtest=1\n");
        f.write("rpcuser=rt\n");
        f.write("rpcpassword=rt\n");
        f.write("port="+str(p2p_port(n))+"\n");
        f.write("rpcport="+str(rpc_port(n))+"\n");
    return datadir

def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.
    """
    # Only (re)build the cached chain if any of the four cache dirs is
    # missing; otherwise just copy the cache into test_dir below.
    if (not os.path.isdir(os.path.join("cache","node0"))
        or not os.path.isdir(os.path.join("cache","node1"))
        or not os.path.isdir(os.path.join("cache","node2"))
        or not os.path.isdir(os.path.join("cache","node3"))):

        #find and delete old cache directories if any exist
        for i in range(4):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))

        devnull = open(os.devnull, "w")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            # Nodes 1-3 connect to node 0 so they form one network.
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the node's RPC server is up.
            subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d" % (rpc_port(i),)
                rpcs.append(get_rpc_proxy(url, i))
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; and if rpc_port(i) itself raised, `url`
            # would be unbound in the handler.
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)

        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))

    # Copy the cached chain into the test directory for each node.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf

def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for i in range(num_nodes):
        datadir=initialize_datadir(test_dir, i)

def _rpchost_to_args(rpchost):
    '''Convert optional IP:port spec to rpcconnect/rpcport args'''
    if rpchost is None:
        return []
    # Accept either a bracketed IPv6 literal or a plain host, with an
    # optional :port suffix.
    match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
    if not match:
        raise ValueError('Invalid RPC host spec ' + rpchost)
    rpcconnect = match.group(1)
    rpcport = match.group(2)
    if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
        rpcconnect = rpcconnect[1:-1]
    rv = ['-rpcconnect=' + rpcconnect]
    if rpcport:
        rv += ['-rpcport=' + rpcport]
    return rv

def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open(os.devnull, "w")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
    # Block until the node's RPC interface is reachable.
    subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling bitcoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = get_rpc_proxy(url, i, timeout=timewait)

    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)

    return proxy

def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds, return RPC connections to them
    """
    if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
    if binary is None: binary = [ None for i in range(num_nodes) ]
    return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]

def log_filename(dirname, n_node, logname):
    # Log files live under the per-node regtest subdirectory.
    return os.path.join(dirname, "node"+str(n_node), "regtest", logname)

def stop_node(node, i):
    # Ask the node to shut down via RPC, then reap the process.
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]

def stop_nodes(nodes):
    for node in nodes:
        node.stop()
    del nodes[:] # Emptying array closes connections as a side effect

def set_node_times(nodes, t):
    # Set mocktime t (unix timestamp) on every node.
    for node in nodes:
        node.setmocktime(t)

def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()

def connect_nodes(from_connection, node_num):
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)

def connect_nodes_bi(nodes, a, b):
    # Connect in both directions so either side relays to the other.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)

def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for i in range(len(txdata["vout"])):
        if txdata["vout"][i]["value"] == amount:
            return i
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))

def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)

def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs

def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])

def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)

def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"], fee)

def assert_equal(thing1, thing2):
    # Raise AssertionError with both values if they differ.
    if thing1 != thing2:
        raise AssertionError("%s != %s"%(str(thing1),str(thing2)))

def assert_greater_than(thing1, thing2):
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))

def assert_raises(exc, fun, *args, **kwds):
    # Assert that fun(*args, **kwds) raises exactly exception type exc.
    try:
        fun(*args, **kwds)
    except exc:
        pass
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
unknown
codeparrot/codeparrot-clean
# create-report.py
#
# Copyright (C) 2012 Carlos Garcia Campos <carlosgc@gnome.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from commands import Command, register_command
from HTMLReport import HTMLReport
from Config import Config
import os
import tempfile


class CreateReport(Command):
    """Command that builds an HTML report from test results.

    Registered under the name 'create-report'; argument parsing is set up
    in __init__ via the parser provided by the Command base class.
    """

    # Metadata consumed by the command framework.
    name = 'create-report'
    usage_args = '[ options ... ] tests '
    description = 'Create report of test results'

    def __init__(self):
        # Old-style explicit base-class init (Python 2 command framework).
        Command.__init__(self)
        parser = self._get_args_parser()
        parser.add_argument('--refs-dir',
                            action = 'store', dest = 'refs_dir',
                            default = os.path.join(tempfile.gettempdir(), 'refs'),
                            help = 'Directory containing the references')
        parser.add_argument('-o', '--out-dir',
                            action = 'store', dest = 'out_dir',
                            default = os.path.join(tempfile.gettempdir(), 'out'),
                            help = 'Directory containing the results')
        parser.add_argument('-p', '--pretty-diff',
                            action = 'store_true', dest = 'pretty_diff', default = False,
                            help = 'Include pretty diff output')
        parser.add_argument('-n', '--no-browser',
                            action = 'store_false', dest = 'launch_browser', default = True,
                            help = 'Do not launch a web browser with the results')
        parser.add_argument('tests')

    def run(self, options):
        """Generate the report; returns 0 (framework exit status)."""
        config = Config()
        config.pretty_diff = options['pretty_diff']

        # 'tests' may be a directory of documents or a single document;
        # in the latter case use its parent directory.
        doc = options['tests']
        if os.path.isdir(doc):
            docs_dir = doc
        else:
            docs_dir = os.path.dirname(doc)

        report = HTMLReport(docs_dir, options['refs_dir'], options['out_dir'])
        report.create(options['launch_browser'])

        return 0

register_command('create-report', CreateReport)
unknown
codeparrot/codeparrot-clean
'use strict';

const assert = require('assert');
const common = require('../common.js');

const bench = common.createBenchmark(main, {
  n: [32],
  size: [8 << 20],
});

// Benchmark decoding a large base64 string into a pre-allocated Buffer.
function main({ n, size }) {
  const input = 'abcd'.repeat(size);
  const decodedLength = input.length * 3 / 4;

  // Force V8 to flatten the rope produced by repeat() so the timed loop
  // measures decoding rather than string flattening.
  // eslint-disable-next-line node-core/no-unescaped-regexp-dot
  input.match(/./);
  assert.strictEqual(input.length % 4, 0);

  const target = Buffer.allocUnsafe(decodedLength);
  target.write(input, 0, decodedLength, 'base64');

  let written;
  bench.start();
  let i = 0;
  while (i < n) {
    written = target.base64Write(input, 0, input.length);
    i += 1;
  }
  bench.end(n);

  // Every iteration must have decoded the full string.
  assert.strictEqual(written, decodedLength);
}
javascript
github
https://github.com/nodejs/node
benchmark/buffers/buffer-base64-decode.js
import pymc
from parmed.topologyobjects import DihedralType
import numpy as np
from simtk.unit import kilojoules_per_mole
import torsionfit.TorsionScanSet as TorsionScan


class TorsionFitModel(object):
    """pymc model

    Attributes:
    ----------
    pymc_parameters: dict() of pymc parameters
    parameters_to_optimize: list of tuples (dihedrals to optimize)
    frags: list of TorsionScanSet for fragments
    platform: OpenMM platform to use for potential energy calculations
    """

    def __init__(self, param, stream, frags, platform=None):
        """Create a PyMC model for fitting torsions.

        Parameters
        ---------
        param : parmed ParameterSet
            Set of parameters that will not be optimized.
        stream : parmed ParameterSet
            Set of parameters including those that will be optimized. Existing parameters will be used as initial
            parameters.
        frags : list of fragments
            List of small molecule fragments with QM torsion data to fit.
        platform : simtk.openmm.Platform
            OpenMM Platform to use for computing potential energies.
        """
        # Allow a single fragment to be passed without wrapping it in a list.
        if type(frags) != list:
            frags = [frags]

        self.pymc_parameters = dict()
        self.frags = frags
        self.platform = platform
        self.parameters_to_optimize = TorsionScan.to_optimize(param, stream)

        # add missing multiplicity terms to parameterSet so that the system has the same number of parameters
        self.add_missing(param)

        # Periodicity 5 is deliberately excluded (see update_param).
        multiplicities = [1, 2, 3, 4, 6]
        # Per-torsion bitmask of which multiplicities are initially enabled:
        # bit (m-1) set <=> a dihedral term with periodicity m exists in param.
        multiplicity_bitstrings = dict()

        # offset: one uniform per-fragment energy offset, keyed by the
        # fragment's first residue name.
        for frag in self.frags:
            name = '%s_offset' % frag.topology._residues[0]
            offset = pymc.Uniform(name, lower=-50, upper=50, value=0)
            self.pymc_parameters[name] = offset

        for p in self.parameters_to_optimize:
            torsion_name = p[0] + '_' + p[1] + '_' + p[2] + '_' + p[3]
            if torsion_name not in multiplicity_bitstrings.keys():
                multiplicity_bitstrings[torsion_name] = 0

            for m in multiplicities:
                # Force constant K: start at 0 unless param already has a term
                # with this periodicity, in which case seed with its phi_k and
                # mark the multiplicity bit as enabled.
                name = p[0] + '_' + p[1] + '_' + p[2] + '_' + p[3] + '_' + str(m) + '_K'
                k = pymc.Uniform(name, lower=0, upper=20, value=0)
                for i in range(len(param.dihedral_types[p])):
                    if param.dihedral_types[p][i].per == m:
                        multiplicity_bitstrings[torsion_name] += 2 ** (m - 1)
                        k = pymc.Uniform(name, lower=0, upper=20, value=param.dihedral_types[p][i].phi_k)
                        break
                self.pymc_parameters[name] = k

                # Phase as a binary choice: 0 -> 0 degrees, 1 -> 180 degrees.
                # Seed from the existing parameter's phase when present.
                name = p[0] + '_' + p[1] + '_' + p[2] + '_' + p[3] + '_' + str(m) + '_Phase'
                phase = pymc.DiscreteUniform(name, lower=0, upper=1, value=0)
                for i in range(len(param.dihedral_types[p])):
                    if param.dihedral_types[p][i].per == m:
                        if param.dihedral_types[p][i].phase == 0:
                            phase = pymc.DiscreteUniform(name, lower=0, upper=1, value=0)
                            break
                        if param.dihedral_types[p][i].phase == 180.0:
                            phase = pymc.DiscreteUniform(name, lower=0, upper=1, value=1)
                            break
                self.pymc_parameters[name] = phase

        # One discrete variable per torsion selecting which multiplicities are
        # active; 63 = 0b111111 covers bits for periodicities 1-6.
        for torsion_name in multiplicity_bitstrings.keys():
            name = torsion_name + '_multiplicity_bitstring'
            bitstring = pymc.DiscreteUniform(name, lower=0, upper=63, value=multiplicity_bitstrings[torsion_name])
            self.pymc_parameters[name] = bitstring

        # Noise model: sample log_sigma; sigma and precision (tau = sigma^-2)
        # are deterministic transforms of it.
        self.pymc_parameters['log_sigma'] = pymc.Uniform('log_sigma', lower=-10, upper=3, value=np.log(0.01))
        self.pymc_parameters['sigma'] = pymc.Lambda('sigma',
                                                    lambda log_sigma=self.pymc_parameters['log_sigma']: np.exp(
                                                        log_sigma))
        self.pymc_parameters['precision'] = pymc.Lambda('precision',
                                                        lambda log_sigma=self.pymc_parameters['log_sigma']: np.exp(
                                                            -2 * log_sigma))

        @pymc.deterministic
        def mm_energy(pymc_parameters=self.pymc_parameters, param=param):
            # Push current sampled values into param, then recompute the MM
            # energies of all fragments (in kJ/mol) as one flat array.
            mm = np.ndarray(0)
            self.update_param(param)
            for mol in self.frags:
                mol.compute_energy(param, offset=self.pymc_parameters['%s_offset' % mol.topology._residues[0]],
                                   platform=self.platform)
                mm = np.append(mm, mol.mm_energy / kilojoules_per_mole)
            return mm

        # Concatenate QM energies across fragments in the same order the MM
        # energies are appended above, and tie them together with a Normal
        # likelihood on the observed QM data.
        size = sum([len(i.qm_energy) for i in self.frags])
        qm_energy = np.ndarray(0)
        for i in range(len(frags)):
            qm_energy = np.append(qm_energy, frags[i].qm_energy)

        self.pymc_parameters['mm_energy'] = mm_energy
        self.pymc_parameters['qm_fit'] = pymc.Normal('qm_fit', mu=self.pymc_parameters['mm_energy'],
                                                     tau=self.pymc_parameters['precision'], size=size, observed=True,
                                                     value=qm_energy)

    def add_missing(self, param):
        """ Update param set with missing multiplicities.

        :param: chemistry.charmm.CharmmParameterSet
        :return: updated CharmmParameterSet with multiplicities 1-6 for parameters to optimize
        """
        multiplicities = [1, 2, 3, 4, 6]
        for p in self.parameters_to_optimize:
            reverse = tuple(reversed(p))
            per = []
            # NOTE(review): indexes the reversed key's list with the same i as
            # the forward key's list — assumes both lists always have equal
            # length; would raise IndexError otherwise. TODO confirm parmed
            # keeps forward/reverse dihedral lists in sync.
            for i in range(len(param.dihedral_types[p])):
                per.append(param.dihedral_types[p][i].per)
                per.append(param.dihedral_types[reverse][i].per)
            # Add zero-K placeholder terms for any multiplicity not present,
            # so every optimized torsion carries the same set of terms.
            for j in multiplicities:
                if j not in per:
                    param.dihedral_types[p].append(DihedralType(0, j, 0))
                    param.dihedral_types[reverse].append(DihedralType(0, j, 0))

    def update_param(self, param):
        """ Update param set based on current pymc model parameters.

        :mol: torsionfit.TorsionScanSet
        :return: updated torsionfit.TorsionScanSet parameters based on current TorsionFitModel parameters
        """
        for p in self.parameters_to_optimize:
            torsion_name = p[0] + '_' + p[1] + '_' + p[2] + '_' + p[3]
            multiplicity_bitstring = self.pymc_parameters[torsion_name + '_multiplicity_bitstring'].value
            reverse_p = tuple(reversed(p))
            for i in range(len(param.dihedral_types[p])):
                m = int(param.dihedral_types[p][i].per)
                multiplicity_bitmask = 2 ** (m - 1)  # multiplicity bitmask
                if multiplicity_bitstring & multiplicity_bitmask:
                    # Periodicity 5 is never fit; skip it even if its bit is set.
                    if m == 5:
                        continue
                    k = torsion_name + '_' + str(m) + '_K'
                    phase = torsion_name + '_' + str(m) + '_Phase'
                    pymc_variable = self.pymc_parameters[k]
                    # Apply the sampled force constant to both key orders.
                    param.dihedral_types[p][i].phi_k = pymc_variable.value
                    param.dihedral_types[reverse_p][i].phi_k = pymc_variable.value
                    pymc_variable = self.pymc_parameters[phase]
                    # NOTE(review): compares the pymc variable object itself to
                    # 1/0 (not .value, unlike phi_k above) — verify this relies
                    # on pymc's __eq__ semantics intentionally.
                    if pymc_variable == 1:
                        param.dihedral_types[p][i].phase = 180
                        param.dihedral_types[reverse_p][i].phase = 180
                        break
                    if pymc_variable == 0:
                        param.dihedral_types[p][i].phase = 0
                        param.dihedral_types[reverse_p][i].phase = 0
                        break
                else:
                    # This torsion periodicity is disabled.
                    param.dihedral_types[p][i].phi_k = 0
                    param.dihedral_types[reverse_p][i].phi_k = 0
unknown
codeparrot/codeparrot-clean
""" Unittests for importing a course via management command """ import os from path import Path as path import shutil import tempfile from django.core.management import call_command from django_comment_common.utils import are_permissions_roles_seeded from xmodule.modulestore.django import modulestore from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase class TestImport(ModuleStoreTestCase): """ Unit tests for importing a course from command line """ def create_course_xml(self, content_dir, course_id): directory = tempfile.mkdtemp(dir=content_dir) os.makedirs(os.path.join(directory, "course")) with open(os.path.join(directory, "course.xml"), "w+") as f: f.write('<course url_name="{0.run}" org="{0.org}" ' 'course="{0.course}"/>'.format(course_id)) with open(os.path.join(directory, "course", "{0.run}.xml".format(course_id)), "w+") as f: f.write('<course><chapter name="Test Chapter"></chapter></course>') return directory def setUp(self): """ Build course XML for importing """ super(TestImport, self).setUp() self.content_dir = path(tempfile.mkdtemp()) self.addCleanup(shutil.rmtree, self.content_dir) self.base_course_key = self.store.make_course_key(u'edX', u'test_import_course', u'2013_Spring') self.truncated_key = self.store.make_course_key(u'edX', u'test_import', u'2014_Spring') # Create good course xml self.good_dir = self.create_course_xml(self.content_dir, self.base_course_key) # Create course XML where TRUNCATED_COURSE.org == BASE_COURSE_ID.org # and BASE_COURSE_ID.startswith(TRUNCATED_COURSE.course) self.course_dir = self.create_course_xml(self.content_dir, self.truncated_key) def test_forum_seed(self): """ Tests that forum roles were created with import. 
""" self.assertFalse(are_permissions_roles_seeded(self.base_course_key)) call_command('import', self.content_dir, self.good_dir) self.assertTrue(are_permissions_roles_seeded(self.base_course_key)) def test_truncated_course_with_url(self): """ Check to make sure an import only blocks true duplicates: new courses with similar but not unique org/course combinations aren't blocked if the original course's course starts with the new course's org/course combinations (i.e. EDx/0.00x/Spring -> EDx/0.00/Spring) """ # Load up base course and verify it is available call_command('import', self.content_dir, self.good_dir) store = modulestore() self.assertIsNotNone(store.get_course(self.base_course_key)) # Now load up the course with a similar course_id and verify it loads call_command('import', self.content_dir, self.course_dir) self.assertIsNotNone(store.get_course(self.truncated_key)) def test_existing_course_with_different_modulestore(self): """ Checks that a course that originally existed in old mongo can be re-imported when split is the default modulestore. """ with modulestore().default_store(ModuleStoreEnum.Type.mongo): call_command('import', self.content_dir, self.good_dir) # Clear out the modulestore mappings, else when the next import command goes to create a destination # course_key, it will find the existing course and return the mongo course_key. To reproduce TNL-1362, # the destination course_key needs to be the one for split modulestore. modulestore().mappings = {} with modulestore().default_store(ModuleStoreEnum.Type.split): call_command('import', self.content_dir, self.good_dir) course = modulestore().get_course(self.base_course_key) # With the bug, this fails because the chapter's course_key is the split mongo form, # while the course's course_key is the old mongo form. self.assertEqual(unicode(course.location.course_key), unicode(course.children[0].course_key))
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF from openerp.tools.translate import _ from datetime import date, datetime, timedelta import calendar import logging _logger = logging.getLogger(__name__) # display top 3 in ranking, could be db variable MAX_VISIBILITY_RANKING = 3 def start_end_date_for_period(period, default_start_date=False, default_end_date=False): """Return the start and end date for a goal period based on today :return: (start_date, end_date), datetime.date objects, False if the period is not defined or unknown""" today = date.today() if period == 'daily': start_date = today end_date = start_date elif period == 'weekly': delta = timedelta(days=today.weekday()) start_date = today - delta end_date = start_date + timedelta(days=7) elif period == 'monthly': month_range = calendar.monthrange(today.year, today.month) start_date = today.replace(day=1) end_date = today.replace(day=month_range[1]) elif period == 'yearly': start_date = today.replace(month=1, day=1) end_date = 
today.replace(month=12, day=31) else: # period == 'once': start_date = default_start_date # for manual goal, start each time end_date = default_end_date if start_date and end_date: return (datetime.strftime(start_date, DF), datetime.strftime(end_date, DF)) else: return (start_date, end_date) class gamification_challenge(osv.Model): """Gamification challenge Set of predifined objectives assigned to people with rules for recurrence and rewards If 'user_ids' is defined and 'period' is different than 'one', the set will be assigned to the users for each period (eg: every 1st of each month if 'monthly' is selected) """ _name = 'gamification.challenge' _description = 'Gamification challenge' _inherit = 'mail.thread' def _get_next_report_date(self, cr, uid, ids, field_name, arg, context=None): """Return the next report date based on the last report date and report period. :return: a string in DEFAULT_SERVER_DATE_FORMAT representing the date""" res = {} for challenge in self.browse(cr, uid, ids, context): last = datetime.strptime(challenge.last_report_date, DF).date() if challenge.report_message_frequency == 'daily': next = last + timedelta(days=1) res[challenge.id] = next.strftime(DF) elif challenge.report_message_frequency == 'weekly': next = last + timedelta(days=7) res[challenge.id] = next.strftime(DF) elif challenge.report_message_frequency == 'monthly': month_range = calendar.monthrange(last.year, last.month) next = last.replace(day=month_range[1]) + timedelta(days=1) res[challenge.id] = next.strftime(DF) elif challenge.report_message_frequency == 'yearly': res[challenge.id] = last.replace(year=last.year + 1).strftime(DF) # frequency == 'once', reported when closed only else: res[challenge.id] = False return res def _get_categories(self, cr, uid, context=None): return [ ('hr', 'Human Ressources / Engagement'), ('other', 'Settings / Gamification Tools'), ] def _get_report_template(self, cr, uid, context=None): try: return 
self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'simple_report_template')[1] except ValueError: return False _order = 'end_date, start_date, name, id' _columns = { 'name': fields.char('Challenge Name', required=True, translate=True), 'description': fields.text('Description', translate=True), 'state': fields.selection([ ('draft', 'Draft'), ('inprogress', 'In Progress'), ('done', 'Done'), ], string='State', required=True, track_visibility='onchange'), 'manager_id': fields.many2one('res.users', string='Responsible', help="The user responsible for the challenge."), 'user_ids': fields.many2many('res.users', 'user_ids', string='Users', help="List of users participating to the challenge"), 'autojoin_group_id': fields.many2one('res.groups', string='Auto-subscription Group', help='Group of users whose members will be automatically added to user_ids once the challenge is started'), 'period': fields.selection([ ('once', 'Non recurring'), ('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('yearly', 'Yearly') ], string='Periodicity', help='Period of automatic goal assigment. If none is selected, should be launched manually.', required=True), 'start_date': fields.date('Start Date', help="The day a new challenge will be automatically started. If no periodicity is set, will use this date as the goal start date."), 'end_date': fields.date('End Date', help="The day a new challenge will be automatically closed. 
If no periodicity is set, will use this date as the goal end date."), 'invited_user_ids': fields.many2many('res.users', 'invited_user_ids', string="Suggest to users"), 'line_ids': fields.one2many('gamification.challenge.line', 'challenge_id', string='Lines', help="List of goals that will be set", required=True), 'reward_id': fields.many2one('gamification.badge', string="For Every Succeding User"), 'reward_first_id': fields.many2one('gamification.badge', string="For 1st user"), 'reward_second_id': fields.many2one('gamification.badge', string="For 2nd user"), 'reward_third_id': fields.many2one('gamification.badge', string="For 3rd user"), 'reward_failure': fields.boolean('Reward Bests if not Succeeded?'), 'visibility_mode': fields.selection([ ('personal', 'Individual Goals'), ('ranking', 'Leader Board (Group Ranking)'), ], string="Display Mode", required=True), 'report_message_frequency': fields.selection([ ('never', 'Never'), ('onchange', 'On change'), ('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('yearly', 'Yearly') ], string="Report Frequency", required=True), 'report_message_group_id': fields.many2one('mail.group', string='Send a copy to', help='Group that will receive a copy of the report in addition to the user'), 'report_template_id': fields.many2one('email.template', string="Report Template", required=True), 'remind_update_delay': fields.integer('Non-updated manual goals will be reminded after', help="Never reminded if no value or zero is specified."), 'last_report_date': fields.date('Last Report Date'), 'next_report_date': fields.function(_get_next_report_date, type='date', string='Next Report Date', store=True), 'category': fields.selection(lambda s, *a, **k: s._get_categories(*a, **k), string="Appears in", help="Define the visibility of the challenge through menus", required=True), } _defaults = { 'period': 'once', 'state': 'draft', 'visibility_mode': 'personal', 'report_message_frequency': 'never', 'last_report_date': 
fields.date.today, 'manager_id': lambda s, cr, uid, c: uid, 'category': 'hr', 'reward_failure': False, 'report_template_id': lambda s, *a, **k: s._get_report_template(*a, **k), } def create(self, cr, uid, vals, context=None): """Overwrite the create method to add the user of groups""" # add users when change the group auto-subscription if vals.get('autojoin_group_id'): new_group = self.pool.get('res.groups').browse(cr, uid, vals['autojoin_group_id'], context=context) if not vals.get('user_ids'): vals['user_ids'] = [] vals['user_ids'] += [(4, user.id) for user in new_group.users] create_res = super(gamification_challenge, self).create(cr, uid, vals, context=context) # subscribe new users to the challenge if vals.get('user_ids'): # done with browse after super to be sure catch all after orm process challenge = self.browse(cr, uid, create_res, context=context) self.message_subscribe_users(cr, uid, [challenge.id], [user.id for user in challenge.user_ids], context=context) return create_res def write(self, cr, uid, ids, vals, context=None): if isinstance(ids, (int,long)): ids = [ids] # add users when change the group auto-subscription if vals.get('autojoin_group_id'): new_group = self.pool.get('res.groups').browse(cr, uid, vals['autojoin_group_id'], context=context) if not vals.get('user_ids'): vals['user_ids'] = [] vals['user_ids'] += [(4, user.id) for user in new_group.users] if vals.get('state') == 'inprogress': # starting a challenge if not vals.get('autojoin_group_id'): # starting challenge, add users in autojoin group if not vals.get('user_ids'): vals['user_ids'] = [] for challenge in self.browse(cr, uid, ids, context=context): if challenge.autojoin_group_id: vals['user_ids'] += [(4, user.id) for user in challenge.autojoin_group_id.users] self.generate_goals_from_challenge(cr, uid, ids, context=context) elif vals.get('state') == 'done': self.check_challenge_reward(cr, uid, ids, force=True, context=context) elif vals.get('state') == 'draft': # resetting progress if 
self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', 'in', ids), ('state', 'in', ['inprogress', 'inprogress_update'])], context=context): raise osv.except_osv("Error", "You can not reset a challenge with unfinished goals.") write_res = super(gamification_challenge, self).write(cr, uid, ids, vals, context=context) # subscribe new users to the challenge if vals.get('user_ids'): # done with browse after super if changes in groups for challenge in self.browse(cr, uid, ids, context=context): self.message_subscribe_users(cr, uid, [challenge.id], [user.id for user in challenge.user_ids], context=context) return write_res ##### Update ##### def _cron_update(self, cr, uid, context=None, ids=False): """Daily cron check. - Start planned challenges (in draft and with start_date = today) - Create the missing goals (eg: modified the challenge to add lines) - Update every running challenge """ # start planned challenges planned_challenge_ids = self.search(cr, uid, [ ('state', '=', 'draft'), ('start_date', '<=', fields.date.today())]) if planned_challenge_ids: self.write(cr, uid, planned_challenge_ids, {'state': 'inprogress'}, context=context) # close planned challenges planned_challenge_ids = self.search(cr, uid, [ ('state', '=', 'inprogress'), ('end_date', '>=', fields.date.today())]) if planned_challenge_ids: self.write(cr, uid, planned_challenge_ids, {'state': 'done'}, context=context) if not ids: ids = self.search(cr, uid, [('state', '=', 'inprogress')], context=context) return self._update_all(cr, uid, ids, context=context) def _update_all(self, cr, uid, ids, context=None): """Update the challenges and related goals :param list(int) ids: the ids of the challenges to update, if False will update only challenges in progress.""" if isinstance(ids, (int,long)): ids = [ids] goal_obj = self.pool.get('gamification.goal') # we use yesterday to update the goals that just ended yesterday = date.today() - timedelta(days=1) goal_ids = goal_obj.search(cr, uid, [ 
('challenge_id', 'in', ids), '|', ('state', 'in', ('inprogress', 'inprogress_update')), '&', ('state', 'in', ('reached', 'failed')), '|', ('end_date', '>=', yesterday.strftime(DF)), ('end_date', '=', False) ], context=context) # update every running goal already generated linked to selected challenges goal_obj.update(cr, uid, goal_ids, context=context) for challenge in self.browse(cr, uid, ids, context=context): if challenge.autojoin_group_id: # check in case of new users in challenge, this happens if manager removed users in challenge manually self.write(cr, uid, [challenge.id], {'user_ids': [(4, user.id) for user in challenge.autojoin_group_id.users]}, context=context) self.generate_goals_from_challenge(cr, uid, [challenge.id], context=context) if challenge.last_report_date != fields.date.today(): # goals closed but still opened at the last report date closed_goals_to_report = goal_obj.search(cr, uid, [ ('challenge_id', '=', challenge.id), ('start_date', '>=', challenge.last_report_date), ('end_date', '<=', challenge.last_report_date) ]) if fields.date.today() >= challenge.next_report_date: self.report_progress(cr, uid, challenge, context=context) elif len(closed_goals_to_report) > 0: # some goals need a final report self.report_progress(cr, uid, challenge, subset_goal_ids=closed_goals_to_report, context=context) self.check_challenge_reward(cr, uid, ids, context=context) return True def quick_update(self, cr, uid, challenge_id, context=None): """Update all the goals of a challenge, no generation of new goals""" goal_ids = self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', '=', challenge_id)], context=context) self.pool.get('gamification.goal').update(cr, uid, goal_ids, context=context) return True def action_check(self, cr, uid, ids, context=None): """Check a challenge Create goals that haven't been created yet (eg: if added users) Recompute the current value for each goal related""" return self._update_all(cr, uid, ids=ids, context=context) def 
action_report_progress(self, cr, uid, ids, context=None): """Manual report of a goal, does not influence automatic report frequency""" if isinstance(ids, (int,long)): ids = [ids] for challenge in self.browse(cr, uid, ids, context): self.report_progress(cr, uid, challenge, context=context) return True ##### Automatic actions ##### def generate_goals_from_challenge(self, cr, uid, ids, context=None): """Generate the goals for each line and user. If goals already exist for this line and user, the line is skipped. This can be called after each change in the list of users or lines. :param list(int) ids: the list of challenge concerned""" goal_obj = self.pool.get('gamification.goal') for challenge in self.browse(cr, uid, ids, context): (start_date, end_date) = start_end_date_for_period(challenge.period) # if no periodicity, use challenge dates if not start_date and challenge.start_date: start_date = challenge.start_date if not end_date and challenge.end_date: end_date = challenge.end_date for line in challenge.line_ids: # FIXME: allow to restrict to a subset of users for user in challenge.user_ids: domain = [('line_id', '=', line.id), ('user_id', '=', user.id)] if start_date: domain.append(('start_date', '=', start_date)) # goal already existing for this line ? 
if len(goal_obj.search(cr, uid, domain, context=context)) > 0: # resume canceled goals domain.append(('state', '=', 'canceled')) canceled_goal_ids = goal_obj.search(cr, uid, domain, context=context) if canceled_goal_ids: goal_obj.write(cr, uid, canceled_goal_ids, {'state': 'inprogress'}, context=context) goal_obj.update(cr, uid, canceled_goal_ids, context=context) # skip to next user continue values = { 'definition_id': line.definition_id.id, 'line_id': line.id, 'user_id': user.id, 'target_goal': line.target_goal, 'state': 'inprogress', } if start_date: values['start_date'] = start_date if end_date: values['end_date'] = end_date # the goal is initialised over the limit to make sure we will compute it at least once if line.condition == 'higher': values['current'] = line.target_goal - 1 else: values['current'] = line.target_goal + 1 if challenge.remind_update_delay: values['remind_update_delay'] = challenge.remind_update_delay new_goal_id = goal_obj.create(cr, uid, values, context) goal_obj.update(cr, uid, [new_goal_id], context=context) return True ##### JS utilities ##### def _get_serialized_challenge_lines(self, cr, uid, challenge, user_id=False, restrict_goal_ids=False, restrict_top=False, context=None): """Return a serialised version of the goals information if the user has not completed every goal :challenge: browse record of challenge to compute :user_id: res.users id of the user retrieving progress (False if no distinction, only for ranking challenges) :restrict_goal_ids: <list(int)> compute only the results for this subset if gamification.goal ids, if False retrieve every goal of current running challenge :restrict_top: <int> for challenge lines where visibility_mode == 'ranking', retrieve only these bests results and itself, if False retrieve all restrict_goal_ids has priority over restrict_top format list # if visibility_mode == 'ranking' { 'name': <gamification.goal.description name>, 'description': <gamification.goal.description description>, 
'condition': <reach condition {lower,higher}>, 'computation_mode': <target computation {manually,count,sum,python}>, 'monetary': <{True,False}>, 'suffix': <value suffix>, 'action': <{True,False}>, 'display_mode': <{progress,boolean}>, 'target': <challenge line target>, 'own_goal_id': <gamification.goal id where user_id == uid>, 'goals': [ { 'id': <gamification.goal id>, 'rank': <user ranking>, 'user_id': <res.users id>, 'name': <res.users name>, 'state': <gamification.goal state {draft,inprogress,inprogress_update,reached,failed,canceled}>, 'completeness': <percentage>, 'current': <current value>, } ] }, # if visibility_mode == 'personal' { 'id': <gamification.goal id>, 'name': <gamification.goal.description name>, 'description': <gamification.goal.description description>, 'condition': <reach condition {lower,higher}>, 'computation_mode': <target computation {manually,count,sum,python}>, 'monetary': <{True,False}>, 'suffix': <value suffix>, 'action': <{True,False}>, 'display_mode': <{progress,boolean}>, 'target': <challenge line target>, 'state': <gamification.goal state {draft,inprogress,inprogress_update,reached,failed,canceled}>, 'completeness': <percentage>, 'current': <current value>, } """ goal_obj = self.pool.get('gamification.goal') (start_date, end_date) = start_end_date_for_period(challenge.period) res_lines = [] all_reached = True for line in challenge.line_ids: line_data = { 'name': line.definition_id.name, 'description': line.definition_id.description, 'condition': line.definition_id.condition, 'computation_mode': line.definition_id.computation_mode, 'monetary': line.definition_id.monetary, 'suffix': line.definition_id.suffix, 'action': True if line.definition_id.action_id else False, 'display_mode': line.definition_id.display_mode, 'target': line.target_goal, } domain = [ ('line_id', '=', line.id), ('state', '!=', 'draft'), ] if restrict_goal_ids: domain.append(('ids', 'in', restrict_goal_ids)) else: # if no subset goals, use the dates for 
restriction if start_date: domain.append(('start_date', '=', start_date)) if end_date: domain.append(('end_date', '=', end_date)) if challenge.visibility_mode == 'personal': if not user_id: raise osv.except_osv(_('Error!'),_("Retrieving progress for personal challenge without user information")) domain.append(('user_id', '=', user_id)) sorting = goal_obj._order limit = 1 else: line_data.update({ 'own_goal_id': False, 'goals': [], }) sorting = "completeness desc, current desc" limit = False goal_ids = goal_obj.search(cr, uid, domain, order=sorting, limit=limit, context=context) ranking = 0 for goal in goal_obj.browse(cr, uid, goal_ids, context=context): if challenge.visibility_mode == 'personal': # limit=1 so only one result line_data.update({ 'id': goal.id, 'current': goal.current, 'completeness': goal.completeness, 'state': goal.state, }) if goal.state != 'reached': all_reached = False else: ranking += 1 if user_id and goal.user_id.id == user_id: line_data['own_goal_id'] = goal.id elif restrict_top and ranking > restrict_top: # not own goal, over top, skipping continue line_data['goals'].append({ 'id': goal.id, 'user_id': goal.user_id.id, 'name': goal.user_id.name, 'rank': ranking, 'current': goal.current, 'completeness': goal.completeness, 'state': goal.state, }) if goal.state != 'reached': all_reached = False if goal_ids: res_lines.append(line_data) if all_reached: return [] return res_lines ##### Reporting ##### def report_progress(self, cr, uid, challenge, context=None, users=False, subset_goal_ids=False): """Post report about the progress of the goals :param challenge: the challenge object that need to be reported :param users: the list(res.users) of users that are concerned by the report. If False, will send the report to every user concerned (goal users and group that receive a copy). Only used for challenge with a visibility mode set to 'personal'. :param goal_ids: the list(int) of goal ids linked to the challenge for the report. 
If not specified, use the goals for the current challenge period. This parameter can be used to produce report for previous challenge periods. :param subset_goal_ids: a list(int) of goal ids to restrict the report """ if context is None: context = {} temp_obj = self.pool.get('email.template') ctx = context.copy() if challenge.visibility_mode == 'ranking': lines_boards = self._get_serialized_challenge_lines(cr, uid, challenge, user_id=False, restrict_goal_ids=subset_goal_ids, restrict_top=False, context=context) ctx.update({'challenge_lines': lines_boards}) body_html = temp_obj.render_template(cr, uid, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx) # send to every follower of the challenge self.message_post(cr, uid, challenge.id, body=body_html, context=context, subtype='mail.mt_comment') if challenge.report_message_group_id: self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id, body=body_html, context=context, subtype='mail.mt_comment') else: # generate individual reports for user in users or challenge.user_ids: goals = self._get_serialized_challenge_lines(cr, uid, challenge, user.id, restrict_goal_ids=subset_goal_ids, context=context) if not goals: continue ctx.update({'challenge_lines': goals}) body_html = temp_obj.render_template(cr, user.id, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx) # send message only to users, not on the challenge self.message_post(cr, uid, 0, body=body_html, partner_ids=[(4, user.partner_id.id)], context=context, subtype='mail.mt_comment') if challenge.report_message_group_id: self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id, body=body_html, context=context, subtype='mail.mt_comment') return self.write(cr, uid, challenge.id, {'last_report_date': fields.date.today()}, context=context) ##### Challenges ##### # TODO in trunk, remove unused parameter user_id def 
accept_challenge(self, cr, uid, challenge_ids, context=None, user_id=None): """The user accept the suggested challenge""" return self._accept_challenge(cr, uid, uid, challenge_ids, context=context) def _accept_challenge(self, cr, uid, user_id, challenge_ids, context=None): user = self.pool.get('res.users').browse(cr, uid, user_id, context=context) message = "%s has joined the challenge" % user.name self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context) self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)], 'user_ids': [(4, user_id)]}, context=context) return self.generate_goals_from_challenge(cr, SUPERUSER_ID, challenge_ids, context=context) # TODO in trunk, remove unused parameter user_id def discard_challenge(self, cr, uid, challenge_ids, context=None, user_id=None): """The user discard the suggested challenge""" return self._discard_challenge(cr, uid, uid, challenge_ids, context=context) def _discard_challenge(self, cr, uid, user_id, challenge_ids, context=None): user = self.pool.get('res.users').browse(cr, uid, user_id, context=context) message = "%s has refused the challenge" % user.name self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context) return self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': (3, user_id)}, context=context) def reply_challenge_wizard(self, cr, uid, challenge_id, context=None): result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'challenge_wizard') id = result and result[1] or False result = self.pool.get('ir.actions.act_window').read(cr, uid, [id], context=context)[0] result['res_id'] = challenge_id return result def check_challenge_reward(self, cr, uid, ids, force=False, context=None): """Actions for the end of a challenge If a reward was selected, grant it to the correct users. 
Rewards granted at: - the end date for a challenge with no periodicity - the end of a period for challenge with periodicity - when a challenge is manually closed (if no end date, a running challenge is never rewarded) """ if isinstance(ids, (int,long)): ids = [ids] context = context or {} for challenge in self.browse(cr, uid, ids, context=context): (start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date) yesterday = date.today() - timedelta(days=1) if end_date == yesterday.strftime(DF) or force: # open chatter message message_body = _("The challenge %s is finished." % challenge.name) # reward for everybody succeeding rewarded_users = [] if challenge.reward_id: for user in challenge.user_ids: reached_goal_ids = self.pool.get('gamification.goal').search(cr, uid, [ ('challenge_id', '=', challenge.id), ('user_id', '=', user.id), ('start_date', '=', start_date), ('end_date', '=', end_date), ('state', '=', 'reached') ], context=context) if len(reached_goal_ids) == len(challenge.line_ids): self.reward_user(cr, uid, user.id, challenge.reward_id.id, context) rewarded_users.append(user) if rewarded_users: message_body += _("<br/>Reward (badge %s) for every succeeding user was sent to %s." % (challenge.reward_id.name, ", ".join([user.name for user in rewarded_users]))) else: message_body += _("<br/>Nobody has succeeded to reach every goal, no badge is rewared for this challenge.") # reward bests if challenge.reward_first_id: (first_user, second_user, third_user) = self.get_top3_users(cr, uid, challenge, context) if first_user: self.reward_user(cr, uid, first_user.id, challenge.reward_first_id.id, context) message_body += _("<br/>Special rewards were sent to the top competing users. The ranking for this challenge is :") message_body += "<br/> 1. 
%s - %s" % (first_user.name, challenge.reward_first_id.name) else: message_body += _("Nobody reached the required conditions to receive special badges.") if second_user and challenge.reward_second_id: self.reward_user(cr, uid, second_user.id, challenge.reward_second_id.id, context) message_body += "<br/> 2. %s - %s" % (second_user.name, challenge.reward_second_id.name) if third_user and challenge.reward_third_id: self.reward_user(cr, uid, third_user.id, challenge.reward_second_id.id, context) message_body += "<br/> 3. %s - %s" % (third_user.name, challenge.reward_third_id.name) self.message_post(cr, uid, challenge.id, body=message_body, context=context) return True def get_top3_users(self, cr, uid, challenge, context=None): """Get the top 3 users for a defined challenge Ranking criterias: 1. succeed every goal of the challenge 2. total completeness of each goal (can be over 100) Top 3 is computed only for users succeeding every goal of the challenge, except if reward_failure is True, in which case every user is considered. :return: ('first', 'second', 'third'), tuple containing the res.users objects of the top 3 users. If no user meets the criterias for a rank, it is set to False. 
Nobody can receive a rank is noone receives the higher one (eg: if 'second' == False, 'third' will be False) """ goal_obj = self.pool.get('gamification.goal') (start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date) challengers = [] for user in challenge.user_ids: all_reached = True total_completness = 0 # every goal of the user for the running period goal_ids = goal_obj.search(cr, uid, [ ('challenge_id', '=', challenge.id), ('user_id', '=', user.id), ('start_date', '=', start_date), ('end_date', '=', end_date) ], context=context) for goal in goal_obj.browse(cr, uid, goal_ids, context=context): if goal.state != 'reached': all_reached = False if goal.definition_condition == 'higher': # can be over 100 total_completness += 100.0 * goal.current / goal.target_goal elif goal.state == 'reached': # for lower goals, can not get percentage so 0 or 100 total_completness += 100 challengers.append({'user': user, 'all_reached': all_reached, 'total_completness': total_completness}) sorted_challengers = sorted(challengers, key=lambda k: (k['all_reached'], k['total_completness']), reverse=True) if len(sorted_challengers) == 0 or (not challenge.reward_failure and not sorted_challengers[0]['all_reached']): # nobody succeeded return (False, False, False) if len(sorted_challengers) == 1 or (not challenge.reward_failure and not sorted_challengers[1]['all_reached']): # only one user succeeded return (sorted_challengers[0]['user'], False, False) if len(sorted_challengers) == 2 or (not challenge.reward_failure and not sorted_challengers[2]['all_reached']): # only one user succeeded return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], False) return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], sorted_challengers[2]['user']) def reward_user(self, cr, uid, user_id, badge_id, context=None): """Create a badge user and send the badge to him :param user_id: the user to reward :param badge_id: the concerned 
badge """ badge_user_obj = self.pool.get('gamification.badge.user') user_badge_id = badge_user_obj.create(cr, uid, {'user_id': user_id, 'badge_id': badge_id}, context=context) return badge_user_obj._send_badge(cr, uid, [user_badge_id], context=context) class gamification_challenge_line(osv.Model): """Gamification challenge line Predifined goal for 'gamification_challenge' These are generic list of goals with only the target goal defined Should only be created for the gamification_challenge object """ _name = 'gamification.challenge.line' _description = 'Gamification generic goal for challenge' _order = "sequence, id" def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None): goal_definition = self.pool.get('gamification.goal.definition') if not definition_id: return {'value': {'definition_id': False}} goal_definition = goal_definition.browse(cr, uid, definition_id, context=context) ret = { 'value': { 'condition': goal_definition.condition, 'definition_full_suffix': goal_definition.full_suffix } } return ret _columns = { 'name': fields.related('definition_id', 'name', string="Name"), 'challenge_id': fields.many2one('gamification.challenge', string='Challenge', required=True, ondelete="cascade"), 'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"), 'target_goal': fields.float('Target Value to Reach', required=True), 'sequence': fields.integer('Sequence', help='Sequence number for ordering'), 'condition': fields.related('definition_id', 'condition', type="selection", readonly=True, string="Condition", selection=[('lower', '<='), ('higher', '>=')]), 'definition_suffix': fields.related('definition_id', 'suffix', type="char", readonly=True, string="Unit"), 'definition_monetary': fields.related('definition_id', 'monetary', type="boolean", readonly=True, string="Monetary"), 'definition_full_suffix': fields.related('definition_id', 'full_suffix', type="char", readonly=True, 
string="Suffix"), } _default = { 'sequence': 1, }
# unknown
# codeparrot/codeparrot-clean
'''Application =========== The :class:`App` class is the base for creating Kivy applications. Think of it as your main entry point into the Kivy run loop. In most cases, you subclass this class and make your own app. You create an instance of your specific app class and then, when you are ready to start the application's life cycle, you call your instance's :func:`App.run` method. Creating an Application ----------------------- Method using build() override ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To initialize your app with a widget tree, override the build() method in your app class and return the widget tree you constructed. Here's an example of a very simple application that just shows a button: .. include:: ../../examples/application/app_with_build.py :literal: The file is also available in the examples folder at :file:`kivy/examples/application/app_with_build.py`. Here, no widget tree was constructed (or if you will, a tree with only the root node). Method using kv file ~~~~~~~~~~~~~~~~~~~~ You can also use the :doc:`api-kivy.lang` for creating applications. The .kv can contain rules and root widget definitions at the same time. Here is the same example as the Button one in a kv file. Contents of 'test.kv': .. include:: ../../examples/application/test.kv :literal: Contents of 'main.py': .. include:: ../../examples/application/app_with_kv.py :literal: See :file:`kivy/examples/application/app_with_kv.py`. The relation between main.py and test.kv is explained in :func:`App.load_kv`. Application configuration ------------------------- .. versionadded:: 1.0.7 Use the configuration file ~~~~~~~~~~~~~~~~~~~~~~~~~~ Your application might want to have its own configuration file. The :class:`App` is able to handle an INI file automatically. 
You add your section/key/value in the :meth:`App.build_config` method by using the `config` parameters (instance of :class:`~kivy.config.ConfigParser`:: class TestApp(App): def build_config(self, config): config.setdefaults('section1', { 'key1': 'value1', 'key2': '42' }) As soon as you add one section in the config, a file is created on the disk, and named from the mangled name of your class:. "TestApp" will give a config file-name "test.ini" with the content:: [section1] key1 = value1 key2 = 42 The "test.ini" will be automatically loaded at runtime, and you can access the configuration in your :meth:`App.build` method:: class TestApp(App): def build_config(self, config): config.setdefaults('section1', { 'key1': 'value1', 'key2': '42' }) def build(self): config = self.config return Label(text='key1 is %s and key2 is %d' % ( config.get('section1', 'key1'), config.getint('section1', 'key2'))) Create a settings panel ~~~~~~~~~~~~~~~~~~~~~~~ Your application can have a settings panel to let your user configure some of your config tokens. Here is an example done in the KinectViewer example (available in the examples directory): .. image:: images/app-settings.jpg :align: center You can add your own panels of settings by extending the :meth:`App.build_settings` method. Check the :class:`~kivy.uix.settings.Settings` about how to create a panel, because you need a JSON file / data first. Let's take as an example the previous snippet of TestApp with custom config. 
We could create a JSON like this:: [ { "type": "title", "title": "Test application" }, { "type": "options", "title": "My first key", "desc": "Description of my first key", "section": "section1", "key": "key1", "options": ["value1", "value2", "another value"] }, { "type": "numeric", "title": "My second key", "desc": "Description of my second key", "section": "section1", "key": "key2" } ] Then, we can create a panel using this JSON to create automatically all the options, and link them to our :data:`App.config` ConfigParser instance:: class TestApp(App): # ... def build_settings(self, settings): jsondata = """... put the json data here ...""" settings.add_json_panel('Test application', self.config, data=jsondata) That's all ! Now you can press F1 (default keystroke) to toggle the settings panel, or press the "settings" key on your android device. You can manually call :meth:`App.open_settings` and :meth:`App.close_settings` if you want. Every change in the panel is automatically saved in the config file. You can also use :meth:`App.build_settings` to modify properties of the settings panel. For instance, the default panel has a sidebar for switching between json panels, whose width defaults to 200dp. If you'd prefer this to be narrower, you could add:: settings.interface.menu.width = dp(100) to your :meth:`build_settings` method. You might want to know when a config value has been changed by the user, in order to adapt or reload your UI. You can overload the :meth:`on_config_change` method:: class TestApp(App): # ... def on_config_change(self, config, section, key, value): if config is self.config: token = (section, key) if token == ('section1', 'key1'): print('Our key1 have been changed to', value) elif token == ('section1', 'key2'): print('Our key2 have been changed to', value) The Kivy configuration panel is added by default to the settings instance. 
If you don't want this panel, you can declare your Application like this:: class TestApp(App): use_kivy_settings = False # ... This only removes the Kivy panel, but does not stop the settings instance from appearing. If you want to prevent the settings instance from appearing altogether, you can do this:: class TestApp(App): def open_settings(self, *largs): pass Profiling with on_start and on_stop ----------------------------------- It is often useful to profile python code in order to discover locations to optimise. The standard library profilers (http://docs.python.org/2/library/profile.html) provide multiple options for profiling code. For profiling the entire program, the natural approaches of using profile as a module or profile's run method do not work with Kivy. It is however possible to use :meth:`App.on_start` and :meth:`App.on_stop` methods:: import cProfile class MyApp(App): def on_start(self): self.profile = cProfile.Profile() self.profile.enable() def on_stop(self): self.profile.disable() self.profile.dump_stats('myapp.profile') This will create a file called `myapp.profile` when you exit your app. Customising layout ------------------ You can choose different settings widget layouts by setting :attr:`App.settings_cls`. By default, this is :class:`~kivy.uix.settings.Settings` which provides the pictured sidebar layout, but you could set it to any of the other layouts provided in :mod:`kivy.uix.settings` or create your own. See the module documentation for :mod:`kivy.uix.settings` for more information. You can customise how the settings panel is actually displayed by overriding :meth:`App.display_settings`, which is called to actually display the settings panel on the screen. By default it simply draws the panel on top of the window, but you could modify it to (for instance) show the settings in a :class:`~kivy.uix.popup.Popup` or add them to your app's :class:`~kivy.uix.screenmanager.ScreenManager` if you are using one. 
If you do so, you should also modify :meth:`App.close_settings` to exit the panel appropriately. For instance, to have the settings panel appear in a popup you can do:: def display_settings(self, settings): try: p = self.settings_popup except AttributeError: self.settings_popup = Popup(content=settings, title='Settings', size_hint=(0.8, 0.8)) p = self.settings_popup if p.content is not settings: p.content = settings p.open() def close_settings(self, *args): try: p = self.settings_popup p.dismiss() except AttributeError: pass # Settings popup doesn't exist Finally, if you want to replace the current settings panel widget, you can remove the internal references to it using :meth:`App.destroy_settings`. If you have modified :meth:`App.display_settings`, you should be careful to detect if the settings panel has been replaced. Pause mode ---------- .. versionadded:: 1.1.0 .. warning:: This mode is experimental, and designed for phones/tablets. There are some cases where your application could crash on resume. On tablets and phones, the user can switch at any moment to another application. By default, your application will reach :func:`App.on_stop` behavior. You can support the Pause mode: when switching to another application, the application goes into Pause mode and waits infinitely until the user switches back to your application. There is an issue with OpenGL on Android devices: you're not ensured that the OpenGL ES Context is restored when your app resumes. The mechanism for restoring all the OpenGL data is not yet implemented into Kivy(we are looking for device with this behavior). The current implemented Pause mechanism is: #. Kivy checks every frame, if Pause mode is activated by the Operating System, due to user switching to another application, phone shutdown or any other reason. #. :func:`App.on_pause` is called: #. If False is returned (default case), then :func:`App.on_stop` is called. #. 
beware, you have no guarantee that this event will be fired after the on_pause event has been called.
.. versionchanged:: 1.7.0 Parameter `kv_file` added. ''' title = None '''.. versionadded:: 1.0.5 Title of your application. You can set by doing:: class MyApp(App): title = 'Custom title' ''' icon = None '''.. versionadded:: 1.0.5 Icon of your application. You can set by doing:: class MyApp(App): icon = 'customicon.png' The icon can be located in the same directory as your main file. ''' use_kivy_settings = True '''.. versionadded:: 1.0.7 If True, the application settings will include also the Kivy settings. If you don't want the user to change any kivy settings from your settings UI, change this to False. ''' settings_cls = ObjectProperty(SettingsWithSpinner) '''.. versionadded:: 1.8.0 The class to use to construct the settings panel, used to construct the instance passed to :meth:`build_config`. You should use either :class:`~kivy.uix.settings.Settings`, or one of the provided subclasses with different layouts (:class:`~kivy.uix.settings.SettingsWithSidebar`, :class:`~kivy.uix.settings.SettingsWithSpinner`, :class:`~kivy.uix.settings.SettingsWithTabbedPanel`, :class:`~kivy.uix.settings.SettingsWithNoMenu`), or your own Settings subclass. See the documentation of :mod:`kivy.uix.Settings` for more information. :attr:`~App.settings_cls` is an :class:`~kivy.properties.ObjectProperty`. it defaults to :class:`~kivy.uix.settings.SettingsWithSpinner`, which displays settings panels with a spinner to switch between them. ''' # Return the current running App instance _running_app = None __events__ = ('on_start', 'on_stop', 'on_pause', 'on_resume') def __init__(self, **kwargs): App._running_app = self self._app_directory = None self._app_name = None self._app_settings = None self._app_window = None super(App, self).__init__(**kwargs) self.built = False #: Options passed to the __init__ of the App self.options = kwargs #: Instance to the :class:`~kivy.config.ConfigParser` of the application #: configuration. 
:return: None or a root :class:`~kivy.uix.widget.Widget` instance if no self.root exists.
The name of the kv file has to be the lowercase name of the class, without the 'App' postfix at the end if it exists. You can define rules and a root widget in your kv file:: <ClassName>: # this is a rule ... ClassName: # this is a root widget ... There must be only one root widget. See the :doc:`api-kivy.lang` documentation for more information on how to create kv files. If your kv file contains a root widget, it will be used as self.root, the root widget for the application. ''' # Detect filename automatically if it was not specified. if filename: filename = resource_find(filename) else: try: default_kv_directory = dirname(getfile(self.__class__)) if default_kv_directory == '': default_kv_directory = '.' except TypeError: # if it's a builtin module.. use the current dir. default_kv_directory = '.' kv_directory = self.options.get('kv_directory', default_kv_directory) clsname = self.__class__.__name__.lower() if clsname.endswith('app') and \ not isfile(join(kv_directory, '%s.kv' % clsname)): clsname = clsname[:-3] filename = join(kv_directory, '%s.kv' % clsname) # Load KV file Logger.debug('App: Loading kv <{0}>'.format(filename)) if not exists(filename): Logger.debug('App: kv <%s> not found' % filename) return False root = Builder.load_file(filename) if root: self.root = root return True def get_application_name(self): '''Return the name of the application. ''' if self.title is not None: return self.title clsname = self.__class__.__name__ if clsname.endswith('App'): clsname = clsname[:-3] return clsname def get_application_icon(self): '''Return the icon of the application. ''' if not resource_find(self.icon): return '' else: return resource_find(self.icon) def get_application_config(self, defaultpath='%(appdir)s/%(appname)s.ini'): '''.. versionadded:: 1.0.7 .. versionchanged:: 1.4.0 Customize the default path for iOS and Android platform. Add defaultpath parameter for desktop computer (not applicatable for iOS and Android.) 
Return the filename of your application configuration. Depending the platform, the application file will be stored at different places: - on iOS: <appdir>/Documents/.<appname>.ini - on Android: /sdcard/.<appname>.ini - otherwise: <appdir>/<appname>.ini When you are distributing your application on Desktop, please note than if the application is meant to be installed system-wise, then the user might not have any write-access to the application directory. You could overload this method to change the default behavior, and save the configuration file in the user directory by default:: class TestApp(App): def get_application_config(self): return super(TestApp, self).get_application_config( '~/.%(appname)s.ini') Some notes: - The tilda '~' will be expanded to the user directory. - %(appdir)s will be replaced with the application :data:`directory` - %(appname)s will be replaced with the application :data:`name` ''' if platform == 'android': defaultpath = '/sdcard/.%(appname)s.ini' elif platform == 'ios': defaultpath = '~/Documents/%(appname)s.ini' elif platform == 'win': defaultpath = defaultpath.replace('/', sep) return expanduser(defaultpath) % { 'appname': self.name, 'appdir': self.directory} def load_config(self): '''(internal) This function is used for returning a ConfigParser with the application configuration. It's doing 3 things: #. Create an instance of a ConfigParser #. Load the default configuration by calling :meth:`build_config`, then #. If exist, load the application configuration file, or create it if it's not existing. :return: ConfigParser instance ''' self.config = config = ConfigParser() self.build_config(config) # if no sections are created, that's mean the user don't have # configuration. if len(config.sections()) == 0: return # ok, the user have some sections, read the default file if exist # or write it ! 
filename = self.get_application_config() if filename is None: return config Logger.debug('App: Loading configuration <{0}>'.format(filename)) if exists(filename): try: config.read(filename) except: Logger.error('App: Corrupted config file, ignored.') self.config = config = ConfigParser() self.build_config(config) pass else: Logger.debug('App: First configuration, create <{0}>'.format( filename)) config.filename = filename config.write() return config @property def directory(self): '''.. versionadded:: 1.0.7 Return the directory where the application live ''' if self._app_directory is None: try: self._app_directory = dirname(getfile(self.__class__)) if self._app_directory == '': self._app_directory = '.' except TypeError: # if it's a builtin module.. use the current dir. self._app_directory = '.' return self._app_directory @property def user_data_dir(self): ''' .. versionadded:: 1.7.0 Returns the path to a directory in the users files system, which the application can use to store additional data. Different platforms have different conventions for where to save user data like preferences, saved games, and settings. This function implements those conventions. On iOS `~/Documents<app_name>` is returned (which is inside the apps sandbox). On Android `/sdcard/<app_name>` is returned. On Windows `%APPDATA%/<app_name>` is returned. On Mac OS X `~/Library/Application Support <app_name>` is returned. On Linux, `$XDG_CONFIG_HOME/<app_name>` is returned. 
''' data_dir = "" if platform == 'ios': data_dir = join('~/Documents', self.name) elif platform == 'android': data_dir = join('/sdcard', self.name) elif platform == 'win': data_dir = os.path.join(os.environ['APPDATA'], self.name) elif platform == 'macosx': data_dir = '~/Library/Application Support/{}'.format(self.name) else: # _platform == 'linux' or anything else...: data_dir = os.environ.get('XDG_CONFIG_HOME', '~/.config') data_dir = join(data_dir, self.name) data_dir = expanduser(data_dir) if not exists(data_dir): os.mkdir(data_dir) return data_dir @property def name(self): '''.. versionadded:: 1.0.7 Return the name of the application, based on the class name ''' if self._app_name is None: clsname = self.__class__.__name__ if clsname.endswith('App'): clsname = clsname[:-3] self._app_name = clsname.lower() return self._app_name def run(self): '''Launches the app in standalone mode. ''' if not self.built: self.load_config() self.load_kv(filename=self.options.get('kv_file')) root = self.build() if root: self.root = root if self.root: from kivy.core.window import Window Window.add_widget(self.root) # Check if the window is already created from kivy.base import EventLoop window = EventLoop.window if window: self._app_window = window window.set_title(self.get_application_name()) icon = self.get_application_icon() if icon: window.set_icon(icon) self._install_settings_keys(window) else: Logger.critical("Application: No window is created." " Terminating application run.") return self.dispatch('on_start') runTouchApp() self.stop() def stop(self, *largs): '''Stop the application. If you use this method, the whole application will stop by issuing a call to :func:`~kivy.base.stopTouchApp`. 
''' self.dispatch('on_stop') stopTouchApp() # Clear the window children for child in self._app_window.children: self._app_window.remove_widget(child) def on_start(self): '''Event handler for the on_start event, which is fired after initialization (after build() has been called), and before the application is being run. ''' pass def on_stop(self): '''Event handler for the on_stop event, which is fired when the application has finished running (e.g. the window is about to be closed). ''' pass def on_pause(self): '''Event handler called when pause mode is asked. You must return True if you can go to the Pause mode. Otherwise, return False, and your application will be stopped. You cannot control when the application is going to this mode. It's mostly used for embed devices (android/ios), and for resizing. Default is False. .. versionadded:: 1.1.0 ''' return False def on_resume(self): '''Event handler called when your application is resuming from the Pause mode. .. versionadded:: 1.1.0 .. warning:: When resuming, OpenGL Context might have been damaged / freed. This is where you should reconstruct some of your OpenGL state, like FBO content. ''' pass @staticmethod def get_running_app(): '''Return the current runned application instance. .. versionadded:: 1.1.0 ''' return App._running_app def on_config_change(self, config, section, key, value): '''Event handler fired when one configuration token have been changed by the settings page. ''' pass def open_settings(self, *largs): '''Open the application settings panel. It will be created the very first time. The settings panel will be displayed with the :meth:`display_settings` method, which by default adds the settings panel to the Window attached to your application. You should override that method if you want to display the settings panel differently. 
:return: True if the settings have been opened ''' if self._app_settings is None: self._app_settings = self.create_settings() displayed = self.display_settings(self._app_settings) if displayed: return True return False def display_settings(self, settings): '''.. versionadded:: 1.8.0 Display the settings panel. By default, the panel is drawn directly on top of the window. You can define other behaviour by overriding this method, such as adding it to a ScreenManager or Popup. You should return True if the display is successful, otherwise False. :param settings: A :class:`~kivy.uix.settings.Settings` instance. You should define how to display it. :type config: :class:`~kivy.uix.settings.Settings` ''' win = self._app_window if not win: raise Exception('No windows are set on the application, you cannot' ' open settings yet.') if settings not in win.children: win.add_widget(settings) return True return False def close_settings(self, *largs): '''Close the previously opened settings panel. :return: True if the settings have been closed ''' win = self._app_window settings = self._app_settings if win is None or settings is None: return if settings in win.children: win.remove_widget(settings) return True return False def create_settings(self): '''Create the settings panel. This method is called only one time per application life-time, and the result is cached internally. By default, it will build a setting panel according to :data:`settings_cls`, call :meth:`build_settings`, add the Kivy panel if :data:`use_kivy_settings` is True, and bind to on_close/on_config_change. If you want to plug your own way of doing settings, without Kivy panel or close/config change events, this is the method you want to overload. .. versionadded:: 1.8.0 ''' s = self.settings_cls() self.build_settings(s) if self.use_kivy_settings: s.add_kivy_panel() s.bind(on_close=self.close_settings, on_config_change=self._on_config_change) return s def destroy_settings(self): '''.. 
versionadded:: 1.8.0 Dereferences the current settings panel, if one exists. This means that when :meth:`App.open_settings` is next run, a new panel will be created and displayed. It doesn't affect any of the contents of the panel, but lets you (for instance) refresh the settings panel layout if you have changed the settings widget in response to a screen size change. If you have modified :meth:`~App.open_settings` or :meth:`~App.display_settings`, you should be careful to correctly detect if the previous settings widget has been destroyed. ''' if self._app_settings is not None: self._app_settings = None def _on_config_change(self, *largs): self.on_config_change(*largs[1:]) def _install_settings_keys(self, window): window.bind(on_keyboard=self._on_keyboard_settings) def _on_keyboard_settings(self, window, *largs): key = largs[0] setting_key = 282 # F1 # android hack, if settings key is pygame K_MENU if platform == 'android': import pygame setting_key = pygame.K_MENU if key == setting_key: # toggle settings panel if not self.open_settings(): self.close_settings() return True if key == 27: return self.close_settings()
unknown
codeparrot/codeparrot-clean
from sys import argv from sys import stdout from sys import stderr import logging from argparse import ArgumentParser parser = ArgumentParser("Sorting Particles by the halo label", description= """ Sorting particles by the Halo Label. Particles not in any halo is put to the end. The first halo offsets at 0. """, epilog= """ This script is written by Yu Feng, as part of `nbodykit'. """ ) parser.add_argument("snapfilename", help='basename of the snapshot, only runpb format is supported in this script') parser.add_argument("halolabel", help='basename of the halo label files, only nbodykit format is supported in this script') parser.add_argument("output", help='write output to this file') ns = parser.parse_args() logging.basicConfig(level=logging.DEBUG) import numpy import nbodykit from nbodykit import files from nbodykit import halos from nbodykit.corrfrompower import corrfrompower from kdcount import correlate import mpsort from mpi4py import MPI def main(): comm = MPI.COMM_WORLD if comm.rank == 0: snapfile = files.Snapshot(ns.snapfilename, files.TPMSnapshotFile) labelfile = files.Snapshot(ns.halolabel, files.HaloLabelFile) npart = snapfile.npart output = files.Snapshot.create(ns.output, files.TPMSnapshotFile, npart) comm.bcast((snapfile, labelfile, output)) else: snapfile, labelfile, output = comm.bcast(None) comm.barrier() Ntot = sum(snapfile.npart) mystart = Ntot * comm.rank // comm.size myend = Ntot * (comm.rank + 1) // comm.size for field in ['Position', 'Velocity', 'ID']: content = snapfile.read(field, mystart, myend) if len(content.shape) == 1: dtype = numpy.dtype(content.dtype) else: dtype = numpy.dtype((content.dtype, content.shape[1:])) data = numpy.empty(myend - mystart, dtype=[ ('Label', 'u8'), ('content', dtype), ]) data['content'] = content content = None data['Label'] = labelfile.read('Label', mystart, myend) nonhalo = data['Label'] == 0 data['Label'][nonhalo] = numpy.iinfo('u8').max mpsort.sort(data, orderby='Label') output.write(field, mystart, 
data['content']) main()
unknown
codeparrot/codeparrot-clean
# encoding: utf-8 # module PyQt4.QtGui # from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so # by generator 1.135 # no doc # imports import PyQt4.QtCore as __PyQt4_QtCore class QMatrix4x3(): # skipped bases: <class 'sip.simplewrapper'> """ QMatrix4x3() QMatrix4x3(QMatrix4x3) QMatrix4x3(sequence-of-float) """ def copyDataTo(self): # real signature unknown; restored from __doc__ """ QMatrix4x3.copyDataTo() -> list-of-float """ pass def data(self): # real signature unknown; restored from __doc__ """ QMatrix4x3.data() -> list-of-float """ pass def fill(self, p_float): # real signature unknown; restored from __doc__ """ QMatrix4x3.fill(float) """ pass def isIdentity(self): # real signature unknown; restored from __doc__ """ QMatrix4x3.isIdentity() -> bool """ return False def setToIdentity(self): # real signature unknown; restored from __doc__ """ QMatrix4x3.setToIdentity() """ pass def transposed(self): # real signature unknown; restored from __doc__ """ QMatrix4x3.transposed() -> QMatrix3x4 """ return QMatrix3x4 def __delitem__(self, *args, **kwargs): # real signature unknown """ Delete self[key]. """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __getitem__(self, *args, **kwargs): # real signature unknown """ Return self[key]. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __iadd__(self, *args, **kwargs): # real signature unknown """ Return self+=value. """ pass def __imul__(self, *args, **kwargs): # real signature unknown """ Return self*=value. """ pass def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads pass def __isub__(self, *args, **kwargs): # real signature unknown """ Return self-=value. 
""" pass def __itruediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __reduce__(self, *args, **kwargs): # real signature unknown pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __setitem__(self, *args, **kwargs): # real signature unknown """ Set self[key] to value. """ pass __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """list of weak references to the object (if defined)""" __hash__ = None
unknown
codeparrot/codeparrot-clean
//===- bolt/Passes/CMOVConversion.cpp ------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the CMOV conversion pass. // //===----------------------------------------------------------------------===// #include "bolt/Passes/CMOVConversion.h" #include "bolt/Core/BinaryBasicBlock.h" #include "bolt/Core/BinaryContext.h" #include "bolt/Utils/CommandLineOpts.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" #define DEBUG_TYPE "cmov" using namespace llvm; namespace opts { extern cl::OptionCategory BoltOptCategory; static cl::opt<int> BiasThreshold( "cmov-conversion-bias-threshold", cl::desc("minimum condition bias (pct) to perform a CMOV conversion, " "-1 to not account bias"), cl::ReallyHidden, cl::init(1), cl::cat(BoltOptCategory)); static cl::opt<int> MispredictionThreshold( "cmov-conversion-misprediction-threshold", cl::desc("minimum misprediction rate (pct) to perform a CMOV conversion, " "-1 to not account misprediction rate"), cl::ReallyHidden, cl::init(5), cl::cat(BoltOptCategory)); static cl::opt<bool> ConvertStackMemOperand( "cmov-conversion-convert-stack-mem-operand", cl::desc("convert moves with stack memory operand (potentially unsafe)"), cl::ReallyHidden, cl::init(false), cl::cat(BoltOptCategory)); static cl::opt<bool> ConvertBasePtrStackMemOperand( "cmov-conversion-convert-rbp-stack-mem-operand", cl::desc("convert moves with rbp stack memory operand (unsafe, must be off " "for binaries compiled with -fomit-frame-pointer)"), cl::ReallyHidden, cl::init(false), cl::cat(BoltOptCategory)); } // namespace opts namespace llvm { namespace bolt { // Return true if the CFG conforms to the following 
subgraph: // Predecessor // / \ // | RHS // \ / // LHS // Caller guarantees that LHS and RHS share the same predecessor. bool isIfThenSubgraph(const BinaryBasicBlock &LHS, const BinaryBasicBlock &RHS) { if (LHS.pred_size() != 2 || RHS.pred_size() != 1) return false; // Sanity check BinaryBasicBlock *Predecessor = *RHS.pred_begin(); assert(Predecessor && LHS.isPredecessor(Predecessor) && "invalid subgraph"); (void)Predecessor; if (!LHS.isPredecessor(&RHS)) return false; if (RHS.succ_size() != 1) return false; return true; } bool matchCFGSubgraph(BinaryBasicBlock &BB, BinaryBasicBlock *&ConditionalSucc, BinaryBasicBlock *&UnconditionalSucc, bool &IsConditionalTaken) { BinaryBasicBlock *TakenSucc = BB.getConditionalSuccessor(true); BinaryBasicBlock *FallthroughSucc = BB.getConditionalSuccessor(false); bool IsIfThenTaken = isIfThenSubgraph(*FallthroughSucc, *TakenSucc); bool IsIfThenFallthrough = isIfThenSubgraph(*TakenSucc, *FallthroughSucc); if (!IsIfThenFallthrough && !IsIfThenTaken) return false; assert((!IsIfThenFallthrough || !IsIfThenTaken) && "Invalid subgraph"); // Output parameters ConditionalSucc = IsIfThenTaken ? TakenSucc : FallthroughSucc; UnconditionalSucc = IsIfThenTaken ? FallthroughSucc : TakenSucc; IsConditionalTaken = IsIfThenTaken; return true; } // Return true if basic block instructions can be converted into cmov(s). 
bool canConvertInstructions(const BinaryContext &BC, const BinaryBasicBlock &BB, unsigned CC) { if (BB.empty()) return false; const MCInst *LastInst = BB.getLastNonPseudoInstr(); // Only pseudo instructions, can't be converted into CMOV if (LastInst == nullptr) return false; for (const MCInst &Inst : BB) { if (BC.MIB->isPseudo(Inst)) continue; // Unconditional branch as a last instruction is OK if (&Inst == LastInst && BC.MIB->isUnconditionalBranch(Inst)) continue; MCInst Cmov(Inst); // GPR move is OK if (!BC.MIB->convertMoveToConditionalMove( Cmov, CC, opts::ConvertStackMemOperand, opts::ConvertBasePtrStackMemOperand)) { LLVM_DEBUG({ dbgs() << BB.getName() << ": can't convert instruction "; BC.printInstruction(dbgs(), Cmov); }); return false; } } return true; } void convertMoves(const BinaryContext &BC, BinaryBasicBlock &BB, unsigned CC) { for (auto II = BB.begin(), IE = BB.end(); II != IE; ++II) { if (BC.MIB->isPseudo(*II)) continue; if (BC.MIB->isUnconditionalBranch(*II)) { // XXX: this invalidates II but we return immediately BB.eraseInstruction(II); return; } bool Result = BC.MIB->convertMoveToConditionalMove( *II, CC, opts::ConvertStackMemOperand, opts::ConvertBasePtrStackMemOperand); assert(Result && "unexpected instruction"); (void)Result; } } // Returns misprediction rate if the profile data is available, -1 otherwise. std::pair<int, uint64_t> calculateMispredictionRate(const BinaryBasicBlock &BB) { uint64_t TotalExecCount = 0; uint64_t TotalMispredictionCount = 0; for (auto BI : BB.branch_info()) { TotalExecCount += BI.Count; if (BI.MispredictedCount != BinaryBasicBlock::COUNT_INFERRED) TotalMispredictionCount += BI.MispredictedCount; } if (!TotalExecCount) return {-1, TotalMispredictionCount}; return {100.0f * TotalMispredictionCount / TotalExecCount, TotalMispredictionCount}; } // Returns conditional succ bias if the profile is available, -1 otherwise. 
int calculateConditionBias(const BinaryBasicBlock &BB, const BinaryBasicBlock &ConditionalSucc) { if (auto BranchStats = BB.getBranchStats(&ConditionalSucc)) return BranchStats->first; return -1; } void CMOVConversion::Stats::dumpTo(raw_ostream &OS) { OS << "converted static " << StaticPerformed << "/" << StaticPossible << formatv(" ({0:P}) ", getStaticRatio()) << "hammock(s) into CMOV sequences, with dynamic execution count " << DynamicPerformed << "/" << DynamicPossible << formatv(" ({0:P}), ", getDynamicRatio()) << "saving " << RemovedMP << "/" << PossibleMP << formatv(" ({0:P}) ", getMPRatio()) << "mispredictions\n"; } void CMOVConversion::runOnFunction(BinaryFunction &Function) { BinaryContext &BC = Function.getBinaryContext(); bool Modified = false; // Function-local stats Stats Local; // Traverse blocks in RPO, merging block with a converted cmov with its // successor. for (BinaryBasicBlock *BB : post_order(&Function)) { uint64_t BBExecCount = BB->getKnownExecutionCount(); if (BB->empty() || // The block must have instructions BBExecCount == 0 || // must be hot BB->succ_size() != 2 || // with two successors BB->hasJumpTable()) // no jump table continue; assert(BB->isValid() && "traversal internal error"); // Check branch instruction auto BranchInstrIter = BB->getLastNonPseudo(); if (BranchInstrIter == BB->rend() || !BC.MIB->isConditionalBranch(*BranchInstrIter)) continue; // Check successors BinaryBasicBlock *ConditionalSucc, *UnconditionalSucc; bool IsConditionalTaken; if (!matchCFGSubgraph(*BB, ConditionalSucc, UnconditionalSucc, IsConditionalTaken)) { LLVM_DEBUG(dbgs() << BB->getName() << ": couldn't match hammock\n"); continue; } unsigned CC = BC.MIB->getCondCode(*BranchInstrIter); if (!IsConditionalTaken) CC = BC.MIB->getInvertedCondCode(CC); // Check contents of the conditional block if (!canConvertInstructions(BC, *ConditionalSucc, CC)) continue; int ConditionBias = calculateConditionBias(*BB, *ConditionalSucc); int MispredictionRate = 0; uint64_t 
MispredictionCount = 0; std::tie(MispredictionRate, MispredictionCount) = calculateMispredictionRate(*BB); Local.StaticPossible++; Local.DynamicPossible += BBExecCount; Local.PossibleMP += MispredictionCount; // If the conditional successor is never executed, don't convert it if (ConditionBias < opts::BiasThreshold) { LLVM_DEBUG(dbgs() << BB->getName() << "->" << ConditionalSucc->getName() << " bias = " << ConditionBias << ", less than threshold " << opts::BiasThreshold << '\n'); continue; } // Check the misprediction rate of a branch if (MispredictionRate < opts::MispredictionThreshold) { LLVM_DEBUG(dbgs() << BB->getName() << " misprediction rate = " << MispredictionRate << ", less than threshold " << opts::MispredictionThreshold << '\n'); continue; } // remove conditional branch BB->eraseInstruction(std::prev(BranchInstrIter.base())); BB->removeAllSuccessors(); // Convert instructions from the conditional successor into cmov's in BB. convertMoves(BC, *ConditionalSucc, CC); BB->addInstructions(ConditionalSucc->begin(), ConditionalSucc->end()); ConditionalSucc->markValid(false); // RPO traversal guarantees that the successor is visited and merged if // necessary. Merge the unconditional successor into the current block. 
BB->addInstructions(UnconditionalSucc->begin(), UnconditionalSucc->end()); UnconditionalSucc->moveAllSuccessorsTo(BB); UnconditionalSucc->markValid(false); Local.StaticPerformed++; Local.DynamicPerformed += BBExecCount; Local.RemovedMP += MispredictionCount; Modified = true; } if (Modified) Function.eraseInvalidBBs(); if (opts::Verbosity > 1) { BC.outs() << "BOLT-INFO: CMOVConversion: " << Function << ", "; Local.dumpTo(BC.outs()); } Global = Global + Local; } Error CMOVConversion::runOnFunctions(BinaryContext &BC) { for (auto &It : BC.getBinaryFunctions()) { BinaryFunction &Function = It.second; if (!shouldOptimize(Function)) continue; runOnFunction(Function); } BC.outs() << "BOLT-INFO: CMOVConversion total: "; Global.dumpTo(BC.outs()); return Error::success(); } } // end namespace bolt } // end namespace llvm
cpp
github
https://github.com/llvm/llvm-project
bolt/lib/Passes/CMOVConversion.cpp
/////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2003, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// //----------------------------------------------------------------------------- // // class ChromaticitiesAttribute // //----------------------------------------------------------------------------- #include <ImfChromaticitiesAttribute.h> OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_ENTER using namespace OPENEXR_IMF_INTERNAL_NAMESPACE; template <> const char * ChromaticitiesAttribute::staticTypeName () { return "chromaticities"; } template <> void ChromaticitiesAttribute::writeValueTo (OPENEXR_IMF_INTERNAL_NAMESPACE::OStream &os, int version) const { Xdr::write <StreamIO> (os, _value.red.x); Xdr::write <StreamIO> (os, _value.red.y); Xdr::write <StreamIO> (os, _value.green.x); Xdr::write <StreamIO> (os, _value.green.y); Xdr::write <StreamIO> (os, _value.blue.x); Xdr::write <StreamIO> (os, _value.blue.y); Xdr::write <StreamIO> (os, _value.white.x); Xdr::write <StreamIO> (os, _value.white.y); } template <> void ChromaticitiesAttribute::readValueFrom (OPENEXR_IMF_INTERNAL_NAMESPACE::IStream &is, int size, int version) { Xdr::read <StreamIO> (is, _value.red.x); Xdr::read <StreamIO> (is, _value.red.y); Xdr::read <StreamIO> (is, _value.green.x); Xdr::read <StreamIO> (is, _value.green.y); Xdr::read <StreamIO> (is, _value.blue.x); Xdr::read <StreamIO> (is, _value.blue.y); Xdr::read <StreamIO> (is, _value.white.x); Xdr::read <StreamIO> (is, _value.white.y); } OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_EXIT
cpp
github
https://github.com/opencv/opencv
3rdparty/openexr/IlmImf/ImfChromaticitiesAttribute.cpp
"""Support for binary sensor using the PiFace Digital I/O module on a RPi.""" import voluptuous as vol from homeassistant.components import rpi_pfio from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME import homeassistant.helpers.config_validation as cv CONF_INVERT_LOGIC = "invert_logic" CONF_PORTS = "ports" CONF_SETTLE_TIME = "settle_time" DEFAULT_INVERT_LOGIC = False DEFAULT_SETTLE_TIME = 20 PORT_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_SETTLE_TIME, default=DEFAULT_SETTLE_TIME): cv.positive_int, vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean, } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_PORTS, default={}): vol.Schema({cv.positive_int: PORT_SCHEMA})} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the PiFace Digital Input devices.""" binary_sensors = [] ports = config.get(CONF_PORTS) for port, port_entity in ports.items(): name = port_entity.get(CONF_NAME) settle_time = port_entity[CONF_SETTLE_TIME] / 1000 invert_logic = port_entity[CONF_INVERT_LOGIC] binary_sensors.append( RPiPFIOBinarySensor(hass, port, name, settle_time, invert_logic) ) add_entities(binary_sensors, True) rpi_pfio.activate_listener(hass) class RPiPFIOBinarySensor(BinarySensorEntity): """Represent a binary sensor that a PiFace Digital Input.""" def __init__(self, hass, port, name, settle_time, invert_logic): """Initialize the RPi binary sensor.""" self._port = port self._name = name or DEVICE_DEFAULT_NAME self._invert_logic = invert_logic self._state = None def read_pfio(port): """Read state from PFIO.""" self._state = rpi_pfio.read_input(self._port) self.schedule_update_ha_state() rpi_pfio.edge_detect(hass, self._port, read_pfio, settle_time) @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the sensor.""" return 
self._name @property def is_on(self): """Return the state of the entity.""" return self._state != self._invert_logic def update(self): """Update the PFIO state.""" self._state = rpi_pfio.read_input(self._port)
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import os import gyp import gyp.common import gyp.msvs_emulation import json import sys generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = False generator_filelist_paths = { } generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!). generator_default_variables[dirname] = 'dir' for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) default_variables.setdefault('OS', gyp.common.GetFlavor(params)) flavor = gyp.common.GetFlavor(params) if flavor =='win': # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. 
import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True toplevel = params['options'].toplevel_dir generator_dir = os.path.relpath(params['options'].generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') qualified_out_dir = os.path.normpath(os.path.join( toplevel, generator_dir, output_dir, 'gypfiles')) global generator_filelist_paths generator_filelist_paths = { 'toplevel': toplevel, 'qualified_out_dir': qualified_out_dir, } def GenerateOutput(target_list, target_dicts, data, params): # Map of target -> list of targets it depends on. edges = {} # Queue of targets to visit. targets_to_visit = target_list[:] while len(targets_to_visit) > 0: target = targets_to_visit.pop() if target in edges: continue edges[target] = [] for dep in target_dicts[target].get('dependencies', []): edges[target].append(dep) targets_to_visit.append(dep) try: filepath = params['generator_flags']['output_dir'] except KeyError: filepath = '.' filename = os.path.join(filepath, 'dump.json') f = open(filename, 'w') json.dump(edges, f) f.close() print 'Wrote json to %s.' % filename
unknown
codeparrot/codeparrot-clean
/*! * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.dev/license */ import {RelativeLink} from './relative-link.pipe'; describe('RelativeLink', () => { let pipe: RelativeLink; beforeEach(() => { pipe = new RelativeLink(); }); it('should transform absolute url to relative', () => { const absoluteUrl = 'https://angular.dev/guide/directives#test'; const result = pipe.transform(absoluteUrl); expect(result).toBe('guide/directives#test'); }); it('should return fragment once result param is equal to `hash`', () => { const absoluteUrl = 'https://angular.dev/guide/directives#test'; const result = pipe.transform(absoluteUrl, 'hash'); expect(result).toBe('test'); }); it('should return relative url without fragment once result param is equal to `pathname`', () => { const absoluteUrl = 'https://angular.dev/guide/directives#test'; const result = pipe.transform(absoluteUrl, 'pathname'); expect(result).toBe('guide/directives'); }); });
typescript
github
https://github.com/angular/angular
adev/shared-docs/pipes/relative-link.pipe.spec.ts
# ----------------------------------------------------------------------------- # Copyright 2015 Esri # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ----------------------------------------------------------------------------- # ================================================== # RLOS.py # -------------------------------------------------- # requirments: ArcGIS X.X, Python 2.7 or Python 3.4 # author: ArcGIS Solutions # contact: # company: Esri # ================================================== # description: Radial Line of Sight (RLOS) for viewshed analysis # ================================================== # history: # 4/1/2015 - mf - update for coding standards # 4/23/2015 - mf - Updates for Extent properties # ================================================== import os import sys import traceback import math import decimal import arcpy from arcpy import env, sa observers = arcpy.GetParameterAsText(0) input_surface = arcpy.GetParameterAsText(1) output_rlos = arcpy.GetParameterAsText(2) RADIUS2_to_infinity = arcpy.GetParameterAsText(3) GCS_WGS_1984 = arcpy.GetParameterAsText(4) if RADIUS2_to_infinity is True: arcpy.AddMessage("RLOS to infinity will use horizon for calculation.") RADIUS2_to_infinity = True else: arcpy.AddMessage("RLOS will use local RADIUS2 values for calculation.") RADIUS2_to_infinity = False delete_me = [] terrestrial_refractivity_coefficient = 0.13 polygon_simplify = "SIMPLIFY" debug = True def maxVizModifiers(obs): ''' Finds maximums in the visibility 
modifiers''' maxVizMods = {} radius2Max = 0.0 offsetMax = 0.0 spotMax = None removeSPOTMVM = False isSPOTPresent = False fieldList = arcpy.ListFields(obs) # check if SPOT is in the fields for f in fieldList: if f.name == "SPOT": isSPOTPresent = True rowsMVM = arcpy.SearchCursor(obs) # find largest RADIUS2 and OFFSETA for rowMVM in rowsMVM: if rowMVM.RADIUS2 > radius2Max: radius2Max = rowMVM.RADIUS2 if rowMVM.OFFSETA > offsetMax: offsetMax = rowMVM.OFFSETA # if the SPOT field is present, continue if isSPOTPresent == True: # get the SPOT value spotValue = row.SPOT # if the SPOT value is not None/Null if spotValue != None: # if the SPOT value is greater than the previous large SPOT value if rowMVM.SPOT > spotMax: spotMax = rowMVM.SPOT else: removeSPOTMVM = True # if any of the SPOT values are None, remove the field del rowMVM del rowsMVM maxVizMods = {'SPOT':spotMax, 'OFFSETA':offsetMax, 'RADIUS2':radius2Max, 'REMOVE_SPOT':removeSPOTMVM} #arcpy.AddMessage("Observer modifier maximums: " + str(maxVizMods)) return maxVizMods def zfactor(dataset): ''' estimates a conversion between xy-plane and z coordinate ''' desc = arcpy.Describe(dataset) # if it's not geographic return 1.0 if desc.spatialReference.type != "Geographic": return 1.0 extent = desc.Extent extent_split = [extent.XMin, extent.YMin, extent.XMax, extent.YMax] top = float(extent_split[3]) bottom = float(extent_split[1]) #find the mid-latitude of the dataset if top > bottom: height = top - bottom mid = (height/2) + bottom elif top < bottom: # Unlikely, but just in case height = bottom - top mid = (height/2) + top else: # top == bottom mid = top # convert degrees to radians mid = math.radians(mid) # Find length of degree at equator based on spheroid's semi-major axis spatial_reference = desc.SpatialReference semi_major_axis = spatial_reference.semiMajorAxis # in meters equatorial_length_of_degree = ((2.0 * math.pi * float(semi_major_axis))/360.0) # function: # Z-Factor = 1.0/(111320 * cos(mid-latitude in 
radians)) decimal.getcontext().prec = 28 decimal.getcontext().rounding = decimal.ROUND_UP a = decimal.Decimal("1.0") # number of meters in one degree at equator (approximate using WGS84) #b = decimal.Decimal("111320.0") b = decimal.Decimal(str(equatorial_length_of_degree)) c = decimal.Decimal(str(math.cos(mid))) zf = a/(b * c) zf = "%06f" % (zf.__abs__()) return zf def main(): '''Main RLOS''' try: # get/set initial environment env.overwriteOutput = True installInfo = arcpy.GetInstallInfo("desktop") # get observer's vibility modifier maximums obsMaximums = maxVizModifiers(observers) removeSPOT = obsMaximums['REMOVE_SPOT'] if removeSPOT is True: arcpy.AddMessage("Observer SPOT is <NULL>, deleteing field ...") arcpy.DeleteField_management(observers, "SPOT") # Do a Minimum Bounding Geometry (MBG) on the input observers observers_mbg = os.path.join(env.scratchWorkspace, "observers_mbg") delete_me.append(observers_mbg) arcpy.AddMessage("Finding observer's minimum bounding envelope ...") # ENVELOPE would be better but would make it ArcInfo-only. 
arcpy.MinimumBoundingGeometry_management(observers, observers_mbg, "RECTANGLE_BY_AREA") # Now find the center of the (MBG) arcpy.AddMessage("Finding center of observers ...") mbgCenterPoint = os.path.join(env.scratchWorkspace, "mbgCenterPoint") mbgExtent = arcpy.Describe(observers_mbg).extent mbgSR = arcpy.Describe(observers_mbg).spatialReference mbgCenterX = mbgExtent.XMin + (mbgExtent.XMax - mbgExtent.XMin) mbgCenterY = mbgExtent.YMin + (mbgExtent.YMax - mbgExtent.YMin) arcpy.CreateFeatureclass_management(os.path.dirname(mbgCenterPoint), os.path.basename(mbgCenterPoint), "POINT", "#", "DISABLED", "DISABLED", mbgSR) mbgShapeFieldName = arcpy.Describe(mbgCenterPoint).ShapeFieldName rows = arcpy.InsertCursor(mbgCenterPoint) feat = rows.newRow() feat.setValue(mbgShapeFieldName, arcpy.Point(mbgCenterX, mbgCenterY)) rows.insertRow(feat) del rows delete_me.append(mbgCenterPoint) # Get the maximum radius of the observers maxRad = obsMaximums['RADIUS2'] maxOffset = obsMaximums['OFFSETA'] horizonDistance = 0.0 z_factor = float(zfactor(observers)) if RADIUS2_to_infinity is True: ''' if going to infinity what we really need is the distance to the horizon based on height/elevation''' arcpy.AddMessage("Finding horizon distance ...") result = arcpy.GetCellValue_management(input_surface, str(mbgCenterX) + " " + str(mbgCenterY)) centroid_elev = result.getOutput(0) R2 = float(centroid_elev) + float(maxOffset) # length, in meters, of semimajor axis of WGS_1984 spheroid. 
R = 6378137.0 horizonDistance = math.sqrt(math.pow((R + R2), 2) - math.pow(R, 2)) arcpy.AddMessage(str(horizonDistance) + " meters.") horizonExtent = (str(mbgCenterX - horizonDistance) + " " + str(mbgCenterY - horizonDistance) + " " + str(mbgCenterX + horizonDistance) + " " + str(mbgCenterY + horizonDistance)) # since we are doing infinity we can drop the RADIUS2 field arcpy.AddMessage("Analysis to edge of surface, dropping RADIUS2 field ...") arcpy.DeleteField_management(observers, "RADIUS2") else: pass # reset center of AZED using Lat/Lon of MBG center point # Project point to WGS 84 arcpy.AddMessage("Recentering Azimuthal Equidistant to centroid ...") mbgCenterWGS84 = os.path.join(env.scratchWorkspace, "mbgCenterWGS84") arcpy.Project_management(mbgCenterPoint, mbgCenterWGS84, GCS_WGS_1984) arcpy.AddXY_management(mbgCenterWGS84) pointx = 0.0 pointy = 0.0 shapeField = arcpy.Describe(mbgCenterWGS84).ShapeFieldName rows = arcpy.SearchCursor(mbgCenterWGS84) for row in rows: feat = row.getValue(shapeField) pnt = feat.getPart() pointx = pnt.X pointy = pnt.Y del row del rows # write new central meridian and latitude of origin... 
strAZED = '''PROJCS["World_Azimuthal_Equidistant", GEOGCS["GCS_WGS_1984", DATUM["D_WGS_1984", SPHEROID["WGS_1984",6378137.0,298.257223563]], PRIMEM["Greenwich",0.0], UNIT["Degree",0.0174532925199433]], PROJECTION["Azimuthal_Equidistant"], PARAMETER["False_Easting",0.0], PARAMETER["False_Northing",0.0], PARAMETER["Central_Meridian",' + str(pointx) + '], PARAMETER["Latitude_Of_Origin",' + str(pointy) + '], UNIT["Meter",1.0], AUTHORITY["ESRI",54032]]''' delete_me.append(mbgCenterWGS84) # Clip the input surface to the maximum visibilty range and extract # it to a 1000 x 1000 raster # if going to infinity then clip to horizion extent surf_extract = os.path.join(env.scratchWorkspace, "surf_extract") if RADIUS2_to_infinity is True: mbgBuffer = os.path.join(env.scratchWorkspace, "mbgBuffer") arcpy.Buffer_analysis(observers_mbg, mbgBuffer, horizonDistance) delete_me.append(mbgBuffer) surfaceSR = arcpy.Describe(input_surface).spatialReference mbgBufferPrj = os.path.join(env.scratchWorkspace, "mbgBufferPrj") arcpy.Project_management(mbgBuffer, mbgBufferPrj, surfaceSR) delete_me.append(mbgBufferPrj) mbgBufferPrjExtent = arcpy.Describe(mbgBufferPrj).extent cellSize = max(float(mbgBufferPrjExtent.width)/1000.0, float(mbgBufferPrjExtent.height)/1000.0) env.cellSize = cellSize arcpy.AddMessage("Clipping and resampling surface to analysis area with " + str(cellSize) + " meter cell size ...") arcpy.Clip_management(input_surface, "#", surf_extract, mbgBufferPrj) else: # buffer MBG by max RADIUS 2 + 10% mbgBuffer = os.path.join(env.scratchWorkspace, "mbgBuffer") arcpy.Buffer_analysis(observers_mbg, mbgBuffer, obsMaximums['RADIUS2']) delete_me.append(mbgBuffer) # project buffer to surface SR surfaceSR = arcpy.Describe(input_surface).spatialReference mbgBufferPrj = os.path.join(env.scratchWorkspace, "mbgBufferPrj") arcpy.Project_management(mbgBuffer, mbgBufferPrj, surfaceSR) delete_me.append(mbgBufferPrj) # clip surface to projected buffer arcpy.Clip_management(input_surface, "#", 
surf_extract, mbgBufferPrj) delete_me.append(surf_extract) # Project surface to the new AZED extract_prj = os.path.join(env.scratchWorkspace, "extract_prj") arcpy.AddMessage("Projecting surface ...") arcpy.ProjectRaster_management(surf_extract, extract_prj, strAZED) delete_me.append(extract_prj) # Project observers to the new AZED obs_prj = os.path.join(env.scratchWorkspace, "obs_prj") arcpy.AddMessage("Projecting observers ...") arcpy.Project_management(observers, obs_prj, strAZED) delete_me.append(obs_prj) # Project the MBG buffer to AZED obs_buf = os.path.join(env.scratchWorkspace, "obs_buf") # if RADIUS2_to_infinity == True: # arcpy.Buffer_analysis(obs_prj,obs_buf,horizonDistance) # else: # arcpy.Project_management(mbgBufferPrj,obs_buf,strAZED) arcpy.Project_management(mbgBufferPrj, obs_buf, strAZED) delete_me.append(obs_buf) # Finally ... run Viewshed arcpy.AddMessage("Calculating Viewshed ...") vshed = os.path.join(env.scratchWorkspace, "vshed") delete_me.append(vshed) outVshed = sa.Viewshed(extract_prj, obs_prj, 1.0, "CURVED_EARTH", terrestrial_refractivity_coefficient) outVshed.save(vshed) # Raster To Polygon arcpy.AddMessage("Converting to polygons ...") ras_poly = os.path.join(env.scratchWorkspace, "ras_poly") arcpy.RasterToPolygon_conversion(vshed, ras_poly, polygon_simplify) delete_me.append(ras_poly) # clip output polys to buffer if RADIUS2_to_infinity is not True: out_buf = os.path.join(env.scratchWorkspace, "out_buf") arcpy.Buffer_analysis(obs_prj, out_buf, "RADIUS2") delete_me.append(out_buf) arcpy.Clip_analysis(ras_poly, out_buf, output_rlos) else: arcpy.CopyFeatures_management(ras_poly, output_rlos) # set output arcpy.SetParameter(2, output_rlos) # cleanup arcpy.AddMessage("Removing scratch datasets:") for ds in delete_me: arcpy.AddMessage(str(ds)) arcpy.Delete_management(ds) except arcpy.ExecuteError: # Get the tool error messages msgs = arcpy.GetMessages() arcpy.AddError(msgs) # print msgs #UPDATE2to3 print(msgs) except: # Get the traceback 
object tb = sys.exc_info()[2] tbinfo = traceback.format_tb(tb)[0] # Concatenate information together concerning the error into a # message string pymsg = ("PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])) msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n" # Return python error messages for use in script tool or Python Window arcpy.AddError(pymsg) arcpy.AddError(msgs) # Print Python error messages for use in Python / Python Window # print pymsg + "\n" #UPDATE2to3 print(pymsg + "\n") # print msgs #UPDATE2to3 print(msgs) # MAIN ============================================= if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
"use strict"; module.exports = [ [/Can't resolve '.\/dependency'/, /Did you mean 'dependency\.js'\?/] ];
javascript
github
https://github.com/webpack/webpack
test/cases/errors/mjs-non-fully-specified/errors.js
"""Implementation of ARFF parsers: via LIAC-ARFF and pandas.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import itertools import re from collections import OrderedDict from collections.abc import Generator from typing import List import numpy as np import scipy as sp from sklearn.externals import _arff from sklearn.externals._arff import ArffSparseDataType from sklearn.utils._chunking import chunk_generator, get_chunk_n_rows from sklearn.utils._optional_dependencies import check_pandas_support from sklearn.utils.fixes import pd_fillna def _split_sparse_columns( arff_data: ArffSparseDataType, include_columns: List ) -> ArffSparseDataType: """Obtains several columns from sparse ARFF representation. Additionally, the column indices are re-labelled, given the columns that are not included. (e.g., when including [1, 2, 3], the columns will be relabelled to [0, 1, 2]). Parameters ---------- arff_data : tuple A tuple of three lists of equal size; first list indicating the value, second the x coordinate and the third the y coordinate. include_columns : list A list of columns to include. Returns ------- arff_data_new : tuple Subset of arff data with only the include columns indicated by the include_columns argument. 
""" arff_data_new: ArffSparseDataType = (list(), list(), list()) reindexed_columns = { column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) } for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): if col_idx in include_columns: arff_data_new[0].append(val) arff_data_new[1].append(row_idx) arff_data_new[2].append(reindexed_columns[col_idx]) return arff_data_new def _sparse_data_to_array( arff_data: ArffSparseDataType, include_columns: List ) -> np.ndarray: # turns the sparse data back into an array (can't use toarray() function, # as this does only work on numeric data) num_obs = max(arff_data[1]) + 1 y_shape = (num_obs, len(include_columns)) reindexed_columns = { column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) } # TODO: improve for efficiency y = np.empty(y_shape, dtype=np.float64) for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): if col_idx in include_columns: y[row_idx, reindexed_columns[col_idx]] = val return y def _post_process_frame(frame, feature_names, target_names): """Post process a dataframe to select the desired columns in `X` and `y`. Parameters ---------- frame : dataframe The dataframe to split into `X` and `y`. feature_names : list of str The list of feature names to populate `X`. target_names : list of str The list of target names to populate `y`. Returns ------- X : dataframe The dataframe containing the features. y : {series, dataframe} or None The series or dataframe containing the target. """ X = frame[feature_names] if len(target_names) >= 2: y = frame[target_names] elif len(target_names) == 1: y = frame[target_names[0]] else: y = None return X, y def _liac_arff_parser( gzip_file, output_arrays_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape=None, ): """ARFF parser using the LIAC-ARFF library coded purely in Python. This parser is quite slow but consumes a generator. 
Currently it is needed to parse sparse datasets. For dense datasets, it is recommended to instead use the pandas-based parser, although it does not always handles the dtypes exactly the same. Parameters ---------- gzip_file : GzipFile instance The file compressed to be read. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities ara: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ def _io_to_generator(gzip_file): for line in gzip_file: yield line.decode("utf-8") stream = _io_to_generator(gzip_file) # find which type (dense or sparse) ARFF type we will have to deal with return_type = _arff.COO if output_arrays_type == "sparse" else _arff.DENSE_GEN # we should not let LIAC-ARFF to encode the nominal attributes with NumPy # arrays to have only numerical values. 
encode_nominal = not (output_arrays_type == "pandas") arff_container = _arff.load( stream, return_type=return_type, encode_nominal=encode_nominal ) columns_to_select = feature_names_to_select + target_names_to_select categories = { name: cat for name, cat in arff_container["attributes"] if isinstance(cat, list) and name in columns_to_select } if output_arrays_type == "pandas": pd = check_pandas_support("fetch_openml with as_frame=True") columns_info = OrderedDict(arff_container["attributes"]) columns_names = list(columns_info.keys()) # calculate chunksize first_row = next(arff_container["data"]) first_df = pd.DataFrame([first_row], columns=columns_names, copy=False) row_bytes = first_df.memory_usage(deep=True).sum() chunksize = get_chunk_n_rows(row_bytes) # read arff data with chunks columns_to_keep = [col for col in columns_names if col in columns_to_select] dfs = [first_df[columns_to_keep]] for data in chunk_generator(arff_container["data"], chunksize): dfs.append( pd.DataFrame(data, columns=columns_names, copy=False)[columns_to_keep] ) # dfs[0] contains only one row, which may not have enough data to infer to # column's dtype. Here we use `dfs[1]` to configure the dtype in dfs[0] if len(dfs) >= 2: dfs[0] = dfs[0].astype(dfs[1].dtypes) # liac-arff parser does not depend on NumPy and uses None to represent # missing values. To be consistent with the pandas parser, we replace # None with np.nan. frame = pd.concat(dfs, ignore_index=True) frame = pd_fillna(pd, frame) del dfs, first_df # cast the columns frame dtypes = {} for name in frame.columns: column_dtype = openml_columns_info[name]["data_type"] if column_dtype.lower() == "integer": # Use a pandas extension array instead of np.int64 to be able # to support missing values. 
dtypes[name] = "Int64" elif column_dtype.lower() == "nominal": dtypes[name] = "category" else: dtypes[name] = frame.dtypes[name] frame = frame.astype(dtypes) X, y = _post_process_frame( frame, feature_names_to_select, target_names_to_select ) else: arff_data = arff_container["data"] feature_indices_to_select = [ int(openml_columns_info[col_name]["index"]) for col_name in feature_names_to_select ] target_indices_to_select = [ int(openml_columns_info[col_name]["index"]) for col_name in target_names_to_select ] if isinstance(arff_data, Generator): if shape is None: raise ValueError( "shape must be provided when arr['data'] is a Generator" ) if shape[0] == -1: count = -1 else: count = shape[0] * shape[1] data = np.fromiter( itertools.chain.from_iterable(arff_data), dtype="float64", count=count, ) data = data.reshape(*shape) X = data[:, feature_indices_to_select] y = data[:, target_indices_to_select] elif isinstance(arff_data, tuple): arff_data_X = _split_sparse_columns(arff_data, feature_indices_to_select) num_obs = max(arff_data[1]) + 1 X_shape = (num_obs, len(feature_indices_to_select)) X = sp.sparse.coo_matrix( (arff_data_X[0], (arff_data_X[1], arff_data_X[2])), shape=X_shape, dtype=np.float64, ) X = X.tocsr() y = _sparse_data_to_array(arff_data, target_indices_to_select) else: # This should never happen raise ValueError( f"Unexpected type for data obtained from arff: {type(arff_data)}" ) is_classification = { col_name in categories for col_name in target_names_to_select } if not is_classification: # No target pass elif all(is_classification): y = np.hstack( [ np.take( np.asarray(categories.pop(col_name), dtype="O"), y[:, i : i + 1].astype(int, copy=False), ) for i, col_name in enumerate(target_names_to_select) ] ) elif any(is_classification): raise ValueError( "Mix of nominal and non-nominal targets is not currently supported" ) # reshape y back to 1-D array, if there is only 1 target column; # back to None if there are not target columns if y.shape[1] == 1: y = 
y.reshape((-1,)) elif y.shape[1] == 0: y = None if output_arrays_type == "pandas": return X, y, frame, None return X, y, None, categories def _pandas_arff_parser( gzip_file, output_arrays_type, openml_columns_info, feature_names_to_select, target_names_to_select, read_csv_kwargs=None, ): """ARFF parser using `pandas.read_csv`. This parser uses the metadata fetched directly from OpenML and skips the metadata headers of ARFF file itself. The data is loaded as a CSV file. Parameters ---------- gzip_file : GzipFile instance The GZip compressed file with the ARFF formatted payload. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected to build `X`. target_names_to_select : list of str A list of the target names to be selected to build `y`. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. 
""" import pandas as pd # read the file until the data section to skip the ARFF metadata headers for line in gzip_file: if line.decode("utf-8").lower().startswith("@data"): break dtypes = {} for name in openml_columns_info: column_dtype = openml_columns_info[name]["data_type"] if column_dtype.lower() == "integer": # Use Int64 to infer missing values from data # XXX: this line is not covered by our tests. Is this really needed? dtypes[name] = "Int64" elif column_dtype.lower() == "nominal": dtypes[name] = "category" # since we will not pass `names` when reading the ARFF file, we need to translate # `dtypes` from column names to column indices to pass to `pandas.read_csv` dtypes_positional = { col_idx: dtypes[name] for col_idx, name in enumerate(openml_columns_info) if name in dtypes } default_read_csv_kwargs = { "header": None, "index_col": False, # always force pandas to not use the first column as index "na_values": ["?"], # missing values are represented by `?` "keep_default_na": False, # only `?` is a missing value given the ARFF specs "comment": "%", # skip line starting by `%` since they are comments "quotechar": '"', # delimiter to use for quoted strings "skipinitialspace": True, # skip spaces after delimiter to follow ARFF specs "escapechar": "\\", "dtype": dtypes_positional, } read_csv_kwargs = {**default_read_csv_kwargs, **(read_csv_kwargs or {})} frame = pd.read_csv(gzip_file, **read_csv_kwargs) try: # Setting the columns while reading the file will select the N first columns # and not raise a ParserError. Instead, we set the columns after reading the # file and raise a ParserError if the number of columns does not match the # number of columns in the metadata given by OpenML. frame.columns = [name for name in openml_columns_info] except ValueError as exc: raise pd.errors.ParserError( "The number of columns provided by OpenML does not match the number of " "columns inferred by pandas when reading the file." 
) from exc columns_to_select = feature_names_to_select + target_names_to_select columns_to_keep = [col for col in frame.columns if col in columns_to_select] frame = frame[columns_to_keep] # `pd.read_csv` automatically handles double quotes for quoting non-numeric # CSV cell values. Contrary to LIAC-ARFF, `pd.read_csv` cannot be configured to # consider either single quotes and double quotes as valid quoting chars at # the same time since this case does not occur in regular (non-ARFF) CSV files. # To mimic the behavior of LIAC-ARFF parser, we manually strip single quotes # on categories as a post-processing steps if needed. # # Note however that we intentionally do not attempt to do this kind of manual # post-processing of (non-categorical) string-typed columns because we cannot # resolve the ambiguity of the case of CSV cell with nesting quoting such as # `"'some string value'"` with pandas. single_quote_pattern = re.compile(r"^'(?P<contents>.*)'$") def strip_single_quotes(input_string): match = re.search(single_quote_pattern, input_string) if match is None: return input_string return match.group("contents") categorical_columns = [ name for name, dtype in frame.dtypes.items() if isinstance(dtype, pd.CategoricalDtype) ] for col in categorical_columns: frame[col] = frame[col].cat.rename_categories(strip_single_quotes) X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select) if output_arrays_type == "pandas": return X, y, frame, None else: X, y = X.to_numpy(), y.to_numpy() categories = { name: dtype.categories.tolist() for name, dtype in frame.dtypes.items() if isinstance(dtype, pd.CategoricalDtype) } return X, y, None, categories def load_arff_from_gzip_file( gzip_file, parser, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape=None, read_csv_kwargs=None, ): """Load a compressed ARFF file using a given parser. Parameters ---------- gzip_file : GzipFile instance The file compressed to be read. 
parser : {"pandas", "liac-arff"} The parser used to parse the ARFF file. "pandas" is recommended but only supports loading dense datasets. output_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities ara: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ if parser == "liac-arff": return _liac_arff_parser( gzip_file, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape, ) elif parser == "pandas": return _pandas_arff_parser( gzip_file, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, read_csv_kwargs, ) else: raise ValueError( f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'." )
python
github
https://github.com/scikit-learn/scikit-learn
sklearn/datasets/_arff_parser.py
/* MIT License * * Copyright (c) 2023 Brad House * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * SPDX-License-Identifier: MIT */ #include "ares_private.h" #include <limits.h> #ifdef HAVE_STDINT_H # include <stdint.h> #endif static void ares_dns_rr_free(ares_dns_rr_t *rr); static void ares_dns_qd_free_cb(void *arg) { ares_dns_qd_t *qd = arg; if (qd == NULL) { return; } ares_free(qd->name); } static void ares_dns_rr_free_cb(void *arg) { ares_dns_rr_t *rr = arg; if (rr == NULL) { return; } ares_dns_rr_free(rr); } ares_status_t ares_dns_record_create(ares_dns_record_t **dnsrec, unsigned short id, unsigned short flags, ares_dns_opcode_t opcode, ares_dns_rcode_t rcode) { if (dnsrec == NULL) { return ARES_EFORMERR; } *dnsrec = NULL; if (!ares_dns_opcode_isvalid(opcode) || !ares_dns_rcode_isvalid(rcode) || !ares_dns_flags_arevalid(flags)) { return ARES_EFORMERR; } *dnsrec = ares_malloc_zero(sizeof(**dnsrec)); if (*dnsrec == NULL) { return ARES_ENOMEM; } (*dnsrec)->id = id; (*dnsrec)->flags = flags; (*dnsrec)->opcode = opcode; (*dnsrec)->rcode = rcode; (*dnsrec)->qd = ares_array_create(sizeof(ares_dns_qd_t), ares_dns_qd_free_cb); (*dnsrec)->an = ares_array_create(sizeof(ares_dns_rr_t), ares_dns_rr_free_cb); (*dnsrec)->ns = ares_array_create(sizeof(ares_dns_rr_t), ares_dns_rr_free_cb); (*dnsrec)->ar = ares_array_create(sizeof(ares_dns_rr_t), ares_dns_rr_free_cb); if ((*dnsrec)->qd == NULL || (*dnsrec)->an == NULL || (*dnsrec)->ns == NULL || (*dnsrec)->ar == NULL) { ares_dns_record_destroy(*dnsrec); *dnsrec = NULL; return ARES_ENOMEM; } return ARES_SUCCESS; } unsigned short ares_dns_record_get_id(const ares_dns_record_t *dnsrec) { if (dnsrec == NULL) { return 0; } return dnsrec->id; } ares_bool_t ares_dns_record_set_id(ares_dns_record_t *dnsrec, unsigned short id) { if (dnsrec == NULL) { return ARES_FALSE; } dnsrec->id = id; return ARES_TRUE; } unsigned short ares_dns_record_get_flags(const ares_dns_record_t *dnsrec) { if (dnsrec == NULL) { return 0; } return dnsrec->flags; } ares_dns_opcode_t ares_dns_record_get_opcode(const ares_dns_record_t *dnsrec) { if (dnsrec 
== NULL) { return 0; } return dnsrec->opcode; } ares_dns_rcode_t ares_dns_record_get_rcode(const ares_dns_record_t *dnsrec) { if (dnsrec == NULL) { return 0; } return dnsrec->rcode; } static void ares_dns_rr_free(ares_dns_rr_t *rr) { ares_free(rr->name); switch (rr->type) { case ARES_REC_TYPE_A: case ARES_REC_TYPE_AAAA: case ARES_REC_TYPE_ANY: /* Nothing to free */ break; case ARES_REC_TYPE_NS: ares_free(rr->r.ns.nsdname); break; case ARES_REC_TYPE_CNAME: ares_free(rr->r.cname.cname); break; case ARES_REC_TYPE_SOA: ares_free(rr->r.soa.mname); ares_free(rr->r.soa.rname); break; case ARES_REC_TYPE_PTR: ares_free(rr->r.ptr.dname); break; case ARES_REC_TYPE_HINFO: ares_free(rr->r.hinfo.cpu); ares_free(rr->r.hinfo.os); break; case ARES_REC_TYPE_MX: ares_free(rr->r.mx.exchange); break; case ARES_REC_TYPE_TXT: ares_dns_multistring_destroy(rr->r.txt.strs); break; case ARES_REC_TYPE_SIG: ares_free(rr->r.sig.signers_name); ares_free(rr->r.sig.signature); break; case ARES_REC_TYPE_SRV: ares_free(rr->r.srv.target); break; case ARES_REC_TYPE_NAPTR: ares_free(rr->r.naptr.flags); ares_free(rr->r.naptr.services); ares_free(rr->r.naptr.regexp); ares_free(rr->r.naptr.replacement); break; case ARES_REC_TYPE_OPT: ares_array_destroy(rr->r.opt.options); break; case ARES_REC_TYPE_TLSA: ares_free(rr->r.tlsa.data); break; case ARES_REC_TYPE_SVCB: ares_free(rr->r.svcb.target); ares_array_destroy(rr->r.svcb.params); break; case ARES_REC_TYPE_HTTPS: ares_free(rr->r.https.target); ares_array_destroy(rr->r.https.params); break; case ARES_REC_TYPE_URI: ares_free(rr->r.uri.target); break; case ARES_REC_TYPE_CAA: ares_free(rr->r.caa.tag); ares_free(rr->r.caa.value); break; case ARES_REC_TYPE_RAW_RR: ares_free(rr->r.raw_rr.data); break; } } void ares_dns_record_destroy(ares_dns_record_t *dnsrec) { if (dnsrec == NULL) { return; } /* Free questions */ ares_array_destroy(dnsrec->qd); /* Free answers */ ares_array_destroy(dnsrec->an); /* Free authority */ ares_array_destroy(dnsrec->ns); /* Free 
additional */ ares_array_destroy(dnsrec->ar); ares_free(dnsrec); } size_t ares_dns_record_query_cnt(const ares_dns_record_t *dnsrec) { if (dnsrec == NULL) { return 0; } return ares_array_len(dnsrec->qd); } ares_status_t ares_dns_record_query_add(ares_dns_record_t *dnsrec, const char *name, ares_dns_rec_type_t qtype, ares_dns_class_t qclass) { size_t idx; ares_dns_qd_t *qd; ares_status_t status; if (dnsrec == NULL || name == NULL || !ares_dns_rec_type_isvalid(qtype, ARES_TRUE) || !ares_dns_class_isvalid(qclass, qtype, ARES_TRUE)) { return ARES_EFORMERR; } idx = ares_array_len(dnsrec->qd); status = ares_array_insert_last((void **)&qd, dnsrec->qd); if (status != ARES_SUCCESS) { return status; } qd->name = ares_strdup(name); if (qd->name == NULL) { ares_array_remove_at(dnsrec->qd, idx); return ARES_ENOMEM; } qd->qtype = qtype; qd->qclass = qclass; return ARES_SUCCESS; } ares_status_t ares_dns_record_query_set_name(ares_dns_record_t *dnsrec, size_t idx, const char *name) { char *orig_name = NULL; ares_dns_qd_t *qd; if (dnsrec == NULL || idx >= ares_array_len(dnsrec->qd) || name == NULL) { return ARES_EFORMERR; } qd = ares_array_at(dnsrec->qd, idx); orig_name = qd->name; qd->name = ares_strdup(name); if (qd->name == NULL) { qd->name = orig_name; /* LCOV_EXCL_LINE: OutOfMemory */ return ARES_ENOMEM; /* LCOV_EXCL_LINE: OutOfMemory */ } ares_free(orig_name); return ARES_SUCCESS; } ares_status_t ares_dns_record_query_set_type(ares_dns_record_t *dnsrec, size_t idx, ares_dns_rec_type_t qtype) { ares_dns_qd_t *qd; if (dnsrec == NULL || idx >= ares_array_len(dnsrec->qd) || !ares_dns_rec_type_isvalid(qtype, ARES_TRUE)) { return ARES_EFORMERR; } qd = ares_array_at(dnsrec->qd, idx); qd->qtype = qtype; return ARES_SUCCESS; } ares_status_t ares_dns_record_query_get(const ares_dns_record_t *dnsrec, size_t idx, const char **name, ares_dns_rec_type_t *qtype, ares_dns_class_t *qclass) { const ares_dns_qd_t *qd; if (dnsrec == NULL || idx >= ares_array_len(dnsrec->qd)) { return 
ARES_EFORMERR; } qd = ares_array_at(dnsrec->qd, idx); if (name != NULL) { *name = qd->name; } if (qtype != NULL) { *qtype = qd->qtype; } if (qclass != NULL) { *qclass = qd->qclass; } return ARES_SUCCESS; } size_t ares_dns_record_rr_cnt(const ares_dns_record_t *dnsrec, ares_dns_section_t sect) { if (dnsrec == NULL || !ares_dns_section_isvalid(sect)) { return 0; } switch (sect) { case ARES_SECTION_ANSWER: return ares_array_len(dnsrec->an); case ARES_SECTION_AUTHORITY: return ares_array_len(dnsrec->ns); case ARES_SECTION_ADDITIONAL: return ares_array_len(dnsrec->ar); } return 0; /* LCOV_EXCL_LINE: DefensiveCoding */ } ares_status_t ares_dns_record_rr_prealloc(ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t cnt) { ares_array_t *arr = NULL; if (dnsrec == NULL || !ares_dns_section_isvalid(sect)) { return ARES_EFORMERR; } switch (sect) { case ARES_SECTION_ANSWER: arr = dnsrec->an; break; case ARES_SECTION_AUTHORITY: arr = dnsrec->ns; break; case ARES_SECTION_ADDITIONAL: arr = dnsrec->ar; break; } if (cnt < ares_array_len(arr)) { return ARES_EFORMERR; } return ares_array_set_size(arr, cnt); } ares_status_t ares_dns_record_rr_add(ares_dns_rr_t **rr_out, ares_dns_record_t *dnsrec, ares_dns_section_t sect, const char *name, ares_dns_rec_type_t type, ares_dns_class_t rclass, unsigned int ttl) { ares_dns_rr_t *rr = NULL; ares_array_t *arr = NULL; ares_status_t status; size_t idx; if (dnsrec == NULL || name == NULL || rr_out == NULL || !ares_dns_section_isvalid(sect) || !ares_dns_rec_type_isvalid(type, ARES_FALSE) || !ares_dns_class_isvalid(rclass, type, ARES_FALSE)) { return ARES_EFORMERR; } *rr_out = NULL; switch (sect) { case ARES_SECTION_ANSWER: arr = dnsrec->an; break; case ARES_SECTION_AUTHORITY: arr = dnsrec->ns; break; case ARES_SECTION_ADDITIONAL: arr = dnsrec->ar; break; } idx = ares_array_len(arr); status = ares_array_insert_last((void **)&rr, arr); if (status != ARES_SUCCESS) { return status; /* LCOV_EXCL_LINE: OutOfMemory */ } rr->name = 
ares_strdup(name); if (rr->name == NULL) { ares_array_remove_at(arr, idx); return ARES_ENOMEM; } rr->parent = dnsrec; rr->type = type; rr->rclass = rclass; rr->ttl = ttl; *rr_out = rr; return ARES_SUCCESS; } ares_status_t ares_dns_record_rr_del(ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx) { ares_array_t *arr = NULL; if (dnsrec == NULL || !ares_dns_section_isvalid(sect)) { return ARES_EFORMERR; } switch (sect) { case ARES_SECTION_ANSWER: arr = dnsrec->an; break; case ARES_SECTION_AUTHORITY: arr = dnsrec->ns; break; case ARES_SECTION_ADDITIONAL: arr = dnsrec->ar; break; } return ares_array_remove_at(arr, idx); } ares_dns_rr_t *ares_dns_record_rr_get(ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx) { ares_array_t *arr = NULL; if (dnsrec == NULL || !ares_dns_section_isvalid(sect)) { return NULL; } switch (sect) { case ARES_SECTION_ANSWER: arr = dnsrec->an; break; case ARES_SECTION_AUTHORITY: arr = dnsrec->ns; break; case ARES_SECTION_ADDITIONAL: arr = dnsrec->ar; break; } return ares_array_at(arr, idx); } const ares_dns_rr_t * ares_dns_record_rr_get_const(const ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx) { return ares_dns_record_rr_get((void *)((size_t)dnsrec), sect, idx); } const char *ares_dns_rr_get_name(const ares_dns_rr_t *rr) { if (rr == NULL) { return NULL; } return rr->name; } ares_dns_rec_type_t ares_dns_rr_get_type(const ares_dns_rr_t *rr) { if (rr == NULL) { return 0; } return rr->type; } ares_dns_class_t ares_dns_rr_get_class(const ares_dns_rr_t *rr) { if (rr == NULL) { return 0; } return rr->rclass; } unsigned int ares_dns_rr_get_ttl(const ares_dns_rr_t *rr) { if (rr == NULL) { return 0; } return rr->ttl; } static void *ares_dns_rr_data_ptr(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, size_t **lenptr) { if (dns_rr == NULL || dns_rr->type != ares_dns_rr_key_to_rec_type(key)) { return NULL; /* LCOV_EXCL_LINE: DefensiveCoding */ } switch (key) { case ARES_RR_A_ADDR: return &dns_rr->r.a.addr; case 
ARES_RR_NS_NSDNAME: return &dns_rr->r.ns.nsdname; case ARES_RR_CNAME_CNAME: return &dns_rr->r.cname.cname; case ARES_RR_SOA_MNAME: return &dns_rr->r.soa.mname; case ARES_RR_SOA_RNAME: return &dns_rr->r.soa.rname; case ARES_RR_SOA_SERIAL: return &dns_rr->r.soa.serial; case ARES_RR_SOA_REFRESH: return &dns_rr->r.soa.refresh; case ARES_RR_SOA_RETRY: return &dns_rr->r.soa.retry; case ARES_RR_SOA_EXPIRE: return &dns_rr->r.soa.expire; case ARES_RR_SOA_MINIMUM: return &dns_rr->r.soa.minimum; case ARES_RR_PTR_DNAME: return &dns_rr->r.ptr.dname; case ARES_RR_AAAA_ADDR: return &dns_rr->r.aaaa.addr; case ARES_RR_HINFO_CPU: return &dns_rr->r.hinfo.cpu; case ARES_RR_HINFO_OS: return &dns_rr->r.hinfo.os; case ARES_RR_MX_PREFERENCE: return &dns_rr->r.mx.preference; case ARES_RR_MX_EXCHANGE: return &dns_rr->r.mx.exchange; case ARES_RR_SIG_TYPE_COVERED: return &dns_rr->r.sig.type_covered; case ARES_RR_SIG_ALGORITHM: return &dns_rr->r.sig.algorithm; case ARES_RR_SIG_LABELS: return &dns_rr->r.sig.labels; case ARES_RR_SIG_ORIGINAL_TTL: return &dns_rr->r.sig.original_ttl; case ARES_RR_SIG_EXPIRATION: return &dns_rr->r.sig.expiration; case ARES_RR_SIG_INCEPTION: return &dns_rr->r.sig.inception; case ARES_RR_SIG_KEY_TAG: return &dns_rr->r.sig.key_tag; case ARES_RR_SIG_SIGNERS_NAME: return &dns_rr->r.sig.signers_name; case ARES_RR_SIG_SIGNATURE: if (lenptr == NULL) { return NULL; } *lenptr = &dns_rr->r.sig.signature_len; return &dns_rr->r.sig.signature; case ARES_RR_TXT_DATA: return &dns_rr->r.txt.strs; case ARES_RR_SRV_PRIORITY: return &dns_rr->r.srv.priority; case ARES_RR_SRV_WEIGHT: return &dns_rr->r.srv.weight; case ARES_RR_SRV_PORT: return &dns_rr->r.srv.port; case ARES_RR_SRV_TARGET: return &dns_rr->r.srv.target; case ARES_RR_NAPTR_ORDER: return &dns_rr->r.naptr.order; case ARES_RR_NAPTR_PREFERENCE: return &dns_rr->r.naptr.preference; case ARES_RR_NAPTR_FLAGS: return &dns_rr->r.naptr.flags; case ARES_RR_NAPTR_SERVICES: return &dns_rr->r.naptr.services; case ARES_RR_NAPTR_REGEXP: 
return &dns_rr->r.naptr.regexp; case ARES_RR_NAPTR_REPLACEMENT: return &dns_rr->r.naptr.replacement; case ARES_RR_OPT_UDP_SIZE: return &dns_rr->r.opt.udp_size; case ARES_RR_OPT_VERSION: return &dns_rr->r.opt.version; case ARES_RR_OPT_FLAGS: return &dns_rr->r.opt.flags; case ARES_RR_OPT_OPTIONS: return &dns_rr->r.opt.options; case ARES_RR_TLSA_CERT_USAGE: return &dns_rr->r.tlsa.cert_usage; case ARES_RR_TLSA_SELECTOR: return &dns_rr->r.tlsa.selector; case ARES_RR_TLSA_MATCH: return &dns_rr->r.tlsa.match; case ARES_RR_TLSA_DATA: if (lenptr == NULL) { return NULL; } *lenptr = &dns_rr->r.tlsa.data_len; return &dns_rr->r.tlsa.data; case ARES_RR_SVCB_PRIORITY: return &dns_rr->r.svcb.priority; case ARES_RR_SVCB_TARGET: return &dns_rr->r.svcb.target; case ARES_RR_SVCB_PARAMS: return &dns_rr->r.svcb.params; case ARES_RR_HTTPS_PRIORITY: return &dns_rr->r.https.priority; case ARES_RR_HTTPS_TARGET: return &dns_rr->r.https.target; case ARES_RR_HTTPS_PARAMS: return &dns_rr->r.https.params; case ARES_RR_URI_PRIORITY: return &dns_rr->r.uri.priority; case ARES_RR_URI_WEIGHT: return &dns_rr->r.uri.weight; case ARES_RR_URI_TARGET: return &dns_rr->r.uri.target; case ARES_RR_CAA_CRITICAL: return &dns_rr->r.caa.critical; case ARES_RR_CAA_TAG: return &dns_rr->r.caa.tag; case ARES_RR_CAA_VALUE: if (lenptr == NULL) { return NULL; } *lenptr = &dns_rr->r.caa.value_len; return &dns_rr->r.caa.value; case ARES_RR_RAW_RR_TYPE: return &dns_rr->r.raw_rr.type; case ARES_RR_RAW_RR_DATA: if (lenptr == NULL) { return NULL; } *lenptr = &dns_rr->r.raw_rr.length; return &dns_rr->r.raw_rr.data; } return NULL; } static const void *ares_dns_rr_data_ptr_const(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, const size_t **lenptr) { /* We're going to cast off the const */ return ares_dns_rr_data_ptr((void *)((size_t)dns_rr), key, (void *)((size_t)lenptr)); } const struct in_addr *ares_dns_rr_get_addr(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { const struct in_addr *addr; if 
(ares_dns_rr_key_datatype(key) != ARES_DATATYPE_INADDR) { return NULL; } addr = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (addr == NULL) { return NULL; } return addr; } const struct ares_in6_addr *ares_dns_rr_get_addr6(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { const struct ares_in6_addr *addr; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_INADDR6) { return NULL; } addr = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (addr == NULL) { return NULL; } return addr; } unsigned char ares_dns_rr_get_u8(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { const unsigned char *u8; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_U8) { return 0; } u8 = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (u8 == NULL) { return 0; } return *u8; } unsigned short ares_dns_rr_get_u16(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { const unsigned short *u16; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_U16) { return 0; } u16 = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (u16 == NULL) { return 0; } return *u16; } unsigned int ares_dns_rr_get_u32(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { const unsigned int *u32; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_U32) { return 0; } u32 = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (u32 == NULL) { return 0; } return *u32; } const unsigned char *ares_dns_rr_get_bin(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, size_t *len) { unsigned char * const *bin = NULL; size_t const *bin_len = NULL; if ((ares_dns_rr_key_datatype(key) != ARES_DATATYPE_BIN && ares_dns_rr_key_datatype(key) != ARES_DATATYPE_BINP && ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) || len == NULL) { return NULL; } /* Array of strings, return concatenated version */ if (ares_dns_rr_key_datatype(key) == ARES_DATATYPE_ABINP) { ares_dns_multistring_t * const *strs = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (strs == NULL) { return NULL; } return ares_dns_multistring_combined(*strs, len); } 
/* Not a multi-string, just straight binary data */ bin = ares_dns_rr_data_ptr_const(dns_rr, key, &bin_len); if (bin == NULL) { return NULL; } /* Shouldn't be possible */ if (bin_len == NULL) { return NULL; } *len = *bin_len; return *bin; } size_t ares_dns_rr_get_abin_cnt(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { ares_dns_multistring_t * const *strs; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) { return 0; } strs = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (strs == NULL) { return 0; } return ares_dns_multistring_cnt(*strs); } const unsigned char *ares_dns_rr_get_abin(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, size_t idx, size_t *len) { ares_dns_multistring_t * const *strs; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) { return NULL; } strs = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (strs == NULL) { return NULL; } return ares_dns_multistring_get(*strs, idx, len); } ares_status_t ares_dns_rr_del_abin(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, size_t idx) { ares_dns_multistring_t **strs; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) { return ARES_EFORMERR; } strs = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (strs == NULL) { return ARES_EFORMERR; } return ares_dns_multistring_del(*strs, idx); } ares_status_t ares_dns_rr_add_abin(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, const unsigned char *val, size_t len) { ares_status_t status; ares_dns_datatype_t datatype = ares_dns_rr_key_datatype(key); ares_bool_t is_nullterm = (datatype == ARES_DATATYPE_ABINP) ? ARES_TRUE : ARES_FALSE; size_t alloclen = is_nullterm ? 
len + 1 : len; unsigned char *temp; ares_dns_multistring_t **strs; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) { return ARES_EFORMERR; } strs = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (strs == NULL) { return ARES_EFORMERR; } if (*strs == NULL) { *strs = ares_dns_multistring_create(); if (*strs == NULL) { return ARES_ENOMEM; } } temp = ares_malloc(alloclen); if (temp == NULL) { return ARES_ENOMEM; } memcpy(temp, val, len); /* NULL-term ABINP */ if (is_nullterm) { temp[len] = 0; } status = ares_dns_multistring_add_own(*strs, temp, len); if (status != ARES_SUCCESS) { ares_free(temp); } return status; } const char *ares_dns_rr_get_str(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { char * const *str; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_STR && ares_dns_rr_key_datatype(key) != ARES_DATATYPE_NAME) { return NULL; } str = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (str == NULL) { return NULL; } return *str; } size_t ares_dns_rr_get_opt_cnt(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key) { ares_array_t * const *opts; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_OPT) { return 0; } opts = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (opts == NULL || *opts == NULL) { return 0; } return ares_array_len(*opts); } unsigned short ares_dns_rr_get_opt(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, size_t idx, const unsigned char **val, size_t *val_len) { ares_array_t * const *opts; const ares_dns_optval_t *opt; if (val) { *val = NULL; } if (val_len) { *val_len = 0; } if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_OPT) { return 65535; } opts = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (opts == NULL || *opts == NULL) { return 65535; } opt = ares_array_at(*opts, idx); if (opt == NULL) { return 65535; } if (val) { *val = opt->val; } if (val_len) { *val_len = opt->val_len; } return opt->opt; } ares_bool_t ares_dns_rr_get_opt_byid(const ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned short opt, const 
unsigned char **val, size_t *val_len) { ares_array_t * const *opts; size_t i; size_t cnt; const ares_dns_optval_t *optptr = NULL; if (val) { *val = NULL; } if (val_len) { *val_len = 0; } if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_OPT) { return ARES_FALSE; } opts = ares_dns_rr_data_ptr_const(dns_rr, key, NULL); if (opts == NULL || *opts == NULL) { return ARES_FALSE; } cnt = ares_array_len(*opts); for (i = 0; i < cnt; i++) { optptr = ares_array_at(*opts, i); if (optptr == NULL) { return ARES_FALSE; } if (optptr->opt == opt) { break; } } if (i >= cnt || optptr == NULL) { return ARES_FALSE; } if (val) { *val = optptr->val; } if (val_len) { *val_len = optptr->val_len; } return ARES_TRUE; } ares_status_t ares_dns_rr_set_addr(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, const struct in_addr *addr) { struct in_addr *a; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_INADDR || addr == NULL) { return ARES_EFORMERR; } a = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (a == NULL) { return ARES_EFORMERR; } memcpy(a, addr, sizeof(*a)); return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_addr6(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, const struct ares_in6_addr *addr) { struct ares_in6_addr *a; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_INADDR6 || addr == NULL) { return ARES_EFORMERR; } a = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (a == NULL) { return ARES_EFORMERR; } memcpy(a, addr, sizeof(*a)); return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_u8(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned char val) { unsigned char *u8; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_U8) { return ARES_EFORMERR; } u8 = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (u8 == NULL) { return ARES_EFORMERR; } *u8 = val; return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_u16(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned short val) { unsigned short *u16; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_U16) { return ARES_EFORMERR; } u16 = 
ares_dns_rr_data_ptr(dns_rr, key, NULL); if (u16 == NULL) { return ARES_EFORMERR; } *u16 = val; return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_u32(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned int val) { unsigned int *u32; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_U32) { return ARES_EFORMERR; } u32 = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (u32 == NULL) { return ARES_EFORMERR; } *u32 = val; return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_bin_own(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned char *val, size_t len) { unsigned char **bin; size_t *bin_len = NULL; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_BIN && ares_dns_rr_key_datatype(key) != ARES_DATATYPE_BINP && ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) { return ARES_EFORMERR; } if (ares_dns_rr_key_datatype(key) == ARES_DATATYPE_ABINP) { ares_dns_multistring_t **strs = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (strs == NULL) { return ARES_EFORMERR; } if (*strs == NULL) { *strs = ares_dns_multistring_create(); if (*strs == NULL) { return ARES_ENOMEM; } } /* Clear all existing entries as this is an override */ ares_dns_multistring_clear(*strs); return ares_dns_multistring_add_own(*strs, val, len); } bin = ares_dns_rr_data_ptr(dns_rr, key, &bin_len); if (bin == NULL || bin_len == NULL) { return ARES_EFORMERR; } if (*bin) { ares_free(*bin); } *bin = val; *bin_len = len; return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_bin(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, const unsigned char *val, size_t len) { ares_status_t status; ares_dns_datatype_t datatype = ares_dns_rr_key_datatype(key); ares_bool_t is_nullterm = (datatype == ARES_DATATYPE_BINP || datatype == ARES_DATATYPE_ABINP) ? ARES_TRUE : ARES_FALSE; size_t alloclen = is_nullterm ? 
len + 1 : len; unsigned char *temp = ares_malloc(alloclen); if (temp == NULL) { return ARES_ENOMEM; } memcpy(temp, val, len); /* NULL-term BINP */ if (is_nullterm) { temp[len] = 0; } status = ares_dns_rr_set_bin_own(dns_rr, key, temp, len); if (status != ARES_SUCCESS) { ares_free(temp); } return status; } ares_status_t ares_dns_rr_set_str_own(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, char *val) { char **str; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_STR && ares_dns_rr_key_datatype(key) != ARES_DATATYPE_NAME) { return ARES_EFORMERR; } str = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (str == NULL) { return ARES_EFORMERR; } if (*str) { ares_free(*str); } *str = val; return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_str(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, const char *val) { ares_status_t status; char *temp = NULL; if (val != NULL) { temp = ares_strdup(val); if (temp == NULL) { return ARES_ENOMEM; } } status = ares_dns_rr_set_str_own(dns_rr, key, temp); if (status != ARES_SUCCESS) { ares_free(temp); } return status; } ares_status_t ares_dns_rr_set_abin_own(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, ares_dns_multistring_t *strs) { ares_dns_multistring_t **strs_ptr; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_ABINP) { return ARES_EFORMERR; } strs_ptr = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (strs_ptr == NULL) { return ARES_EFORMERR; } if (*strs_ptr != NULL) { ares_dns_multistring_destroy(*strs_ptr); } *strs_ptr = strs; return ARES_SUCCESS; } static void ares_dns_opt_free_cb(void *arg) { ares_dns_optval_t *opt = arg; if (opt == NULL) { return; } ares_free(opt->val); } ares_status_t ares_dns_rr_set_opt_own(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned short opt, unsigned char *val, size_t val_len) { ares_array_t **options; ares_dns_optval_t *optptr = NULL; size_t idx; size_t cnt; ares_status_t status; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_OPT) { return ARES_EFORMERR; } options = ares_dns_rr_data_ptr(dns_rr, 
key, NULL); if (options == NULL) { return ARES_EFORMERR; } if (*options == NULL) { *options = ares_array_create(sizeof(ares_dns_optval_t), ares_dns_opt_free_cb); } if (*options == NULL) { return ARES_ENOMEM; } cnt = ares_array_len(*options); for (idx = 0; idx < cnt; idx++) { optptr = ares_array_at(*options, idx); if (optptr == NULL) { return ARES_EFORMERR; } if (optptr->opt == opt) { break; } } /* Duplicate entry, replace */ if (idx != cnt && optptr != NULL) { goto done; } status = ares_array_insert_last((void **)&optptr, *options); if (status != ARES_SUCCESS) { return status; } done: ares_free(optptr->val); optptr->opt = opt; optptr->val = val; optptr->val_len = val_len; return ARES_SUCCESS; } ares_status_t ares_dns_rr_set_opt(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned short opt, const unsigned char *val, size_t val_len) { unsigned char *temp = NULL; ares_status_t status; if (val != NULL) { temp = ares_malloc(val_len + 1); if (temp == NULL) { return ARES_ENOMEM; } memcpy(temp, val, val_len); temp[val_len] = 0; } status = ares_dns_rr_set_opt_own(dns_rr, key, opt, temp, val_len); if (status != ARES_SUCCESS) { ares_free(temp); } return status; } ares_status_t ares_dns_rr_del_opt_byid(ares_dns_rr_t *dns_rr, ares_dns_rr_key_t key, unsigned short opt) { ares_array_t **options; const ares_dns_optval_t *optptr; size_t idx; size_t cnt; if (ares_dns_rr_key_datatype(key) != ARES_DATATYPE_OPT) { return ARES_EFORMERR; } options = ares_dns_rr_data_ptr(dns_rr, key, NULL); if (options == NULL) { return ARES_EFORMERR; } /* No options */ if (*options == NULL) { return ARES_SUCCESS; } cnt = ares_array_len(*options); for (idx = 0; idx < cnt; idx++) { optptr = ares_array_at_const(*options, idx); if (optptr == NULL) { return ARES_ENOTFOUND; } if (optptr->opt == opt) { return ares_array_remove_at(*options, idx); } } return ARES_ENOTFOUND; } char *ares_dns_addr_to_ptr(const struct ares_addr *addr) { ares_buf_t *buf = NULL; const unsigned char *ptr = NULL; size_t ptr_len = 0; 
size_t i; ares_status_t status; static const unsigned char hexbytes[] = "0123456789abcdef"; if (addr->family != AF_INET && addr->family != AF_INET6) { goto fail; } buf = ares_buf_create(); if (buf == NULL) { goto fail; } if (addr->family == AF_INET) { ptr = (const unsigned char *)&addr->addr.addr4; ptr_len = 4; } else { ptr = (const unsigned char *)&addr->addr.addr6; ptr_len = 16; } for (i = ptr_len; i > 0; i--) { if (addr->family == AF_INET) { status = ares_buf_append_num_dec(buf, (size_t)ptr[i - 1], 0); } else { unsigned char c; c = ptr[i - 1] & 0xF; status = ares_buf_append_byte(buf, hexbytes[c]); if (status != ARES_SUCCESS) { goto fail; } status = ares_buf_append_byte(buf, '.'); if (status != ARES_SUCCESS) { goto fail; } c = (ptr[i - 1] >> 4) & 0xF; status = ares_buf_append_byte(buf, hexbytes[c]); } if (status != ARES_SUCCESS) { goto fail; } status = ares_buf_append_byte(buf, '.'); if (status != ARES_SUCCESS) { goto fail; } } if (addr->family == AF_INET) { status = ares_buf_append(buf, (const unsigned char *)"in-addr.arpa", 12); } else { status = ares_buf_append(buf, (const unsigned char *)"ip6.arpa", 8); } if (status != ARES_SUCCESS) { goto fail; } return ares_buf_finish_str(buf, NULL); fail: ares_buf_destroy(buf); return NULL; } ares_dns_rr_t *ares_dns_get_opt_rr(ares_dns_record_t *rec) { size_t i; for (i = 0; i < ares_dns_record_rr_cnt(rec, ARES_SECTION_ADDITIONAL); i++) { ares_dns_rr_t *rr = ares_dns_record_rr_get(rec, ARES_SECTION_ADDITIONAL, i); if (ares_dns_rr_get_type(rr) == ARES_REC_TYPE_OPT) { return rr; } } return NULL; } const ares_dns_rr_t *ares_dns_get_opt_rr_const(const ares_dns_record_t *rec) { size_t i; for (i = 0; i < ares_dns_record_rr_cnt(rec, ARES_SECTION_ADDITIONAL); i++) { const ares_dns_rr_t *rr = ares_dns_record_rr_get_const(rec, ARES_SECTION_ADDITIONAL, i); if (ares_dns_rr_get_type(rr) == ARES_REC_TYPE_OPT) { return rr; } } return NULL; } /* Construct a DNS record for a name with given class and type. 
Used internally * by ares_search() and ares_create_query(). */ ares_status_t ares_dns_record_create_query(ares_dns_record_t **dnsrec, const char *name, ares_dns_class_t dnsclass, ares_dns_rec_type_t type, unsigned short id, ares_dns_flags_t flags, size_t max_udp_size) { ares_status_t status; ares_dns_rr_t *rr = NULL; if (dnsrec == NULL) { return ARES_EFORMERR; } *dnsrec = NULL; /* Per RFC 7686, reject queries for ".onion" domain names with NXDOMAIN */ if (ares_is_onion_domain(name)) { status = ARES_ENOTFOUND; goto done; } status = ares_dns_record_create(dnsrec, id, (unsigned short)flags, ARES_OPCODE_QUERY, ARES_RCODE_NOERROR); if (status != ARES_SUCCESS) { goto done; } status = ares_dns_record_query_add(*dnsrec, name, type, dnsclass); if (status != ARES_SUCCESS) { goto done; } /* max_udp_size > 0 indicates EDNS, so send OPT RR as an additional record */ if (max_udp_size > 0) { /* max_udp_size must fit into a 16 bit unsigned integer field on the OPT * RR, so check here that it fits */ if (max_udp_size > 65535) { status = ARES_EFORMERR; goto done; } status = ares_dns_record_rr_add(&rr, *dnsrec, ARES_SECTION_ADDITIONAL, "", ARES_REC_TYPE_OPT, ARES_CLASS_IN, 0); if (status != ARES_SUCCESS) { goto done; } status = ares_dns_rr_set_u16(rr, ARES_RR_OPT_UDP_SIZE, (unsigned short)max_udp_size); if (status != ARES_SUCCESS) { goto done; } status = ares_dns_rr_set_u8(rr, ARES_RR_OPT_VERSION, 0); if (status != ARES_SUCCESS) { goto done; } status = ares_dns_rr_set_u16(rr, ARES_RR_OPT_FLAGS, 0); if (status != ARES_SUCCESS) { goto done; } } done: if (status != ARES_SUCCESS) { ares_dns_record_destroy(*dnsrec); *dnsrec = NULL; } return status; } ares_status_t ares_dns_record_duplicate_ex(ares_dns_record_t **dest, const ares_dns_record_t *src) { unsigned char *data = NULL; size_t data_len = 0; ares_status_t status; if (dest == NULL || src == NULL) { return ARES_EFORMERR; } *dest = NULL; status = ares_dns_write(src, &data, &data_len); if (status != ARES_SUCCESS) { return status; } 
status = ares_dns_parse(data, data_len, 0, dest); ares_free(data); return status; } ares_dns_record_t *ares_dns_record_duplicate(const ares_dns_record_t *dnsrec) { ares_dns_record_t *dest = NULL; ares_dns_record_duplicate_ex(&dest, dnsrec); return dest; }
c
github
https://github.com/nodejs/node
deps/cares/src/lib/record/ares_dns_record.c
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from six import iteritems, string_types

from ansible.errors import AnsibleError
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.splitter import parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping, AnsibleUnicode
from ansible.plugins import module_loader, lookup_loader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.conditional import Conditional
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable

__all__ = ['Task']

try:
    from __main__ import display
    display = display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class Task(Base, Conditional, Taggable, Become):

    """
    A task is a language feature that represents a call to a module, with
    given arguments and other parameters. A handler is a subclass of a task.

    Usage:

       Task.load(datastructure) -> Task
       Task.something(...)
    """

    # =================================================================================
    # ATTRIBUTES
    # load_<attribute_name> and
    # validate_<attribute_name>
    # will be used if defined
    # might be possible to define others

    _args = FieldAttribute(isa='dict', default=dict())
    _action = FieldAttribute(isa='string')

    _always_run = FieldAttribute(isa='bool')
    _any_errors_fatal = FieldAttribute(isa='bool')
    _async = FieldAttribute(isa='int', default=0)
    _changed_when = FieldAttribute(isa='string')
    _delay = FieldAttribute(isa='int', default=5)
    _delegate_to = FieldAttribute(isa='string')
    _failed_when = FieldAttribute(isa='string')
    _first_available_file = FieldAttribute(isa='list')
    _ignore_errors = FieldAttribute(isa='bool')

    _loop = FieldAttribute(isa='string', private=True)
    _loop_args = FieldAttribute(isa='list', private=True)
    _local_action = FieldAttribute(isa='string')

    _name = FieldAttribute(isa='string', default='')

    _notify = FieldAttribute(isa='list')
    _poll = FieldAttribute(isa='int')
    _register = FieldAttribute(isa='string')
    _retries = FieldAttribute(isa='int', default=1)
    _run_once = FieldAttribute(isa='bool')
    _until = FieldAttribute(isa='list')  # ?

    def __init__(self, block=None, role=None, task_include=None):
        ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
        self._block = block
        self._role = role
        self._task_include = task_include

        super(Task, self).__init__()

    def get_name(self):
        ''' return the name of the task, prefixed with the owning role's name if any '''

        if self._role and self.name:
            return "%s : %s" % (self._role.get_name(), self.name)
        elif self.name:
            return self.name
        else:
            # no explicit name: synthesize one from the action and its args
            flattened_args = self._merge_kv(self.args)
            if self._role:
                return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
            else:
                return "%s %s" % (self.action, flattened_args)

    def _merge_kv(self, ds):
        '''
        Flatten a k=v mapping (or pass a string through unchanged) into a
        single display string for get_name(). Keys starting with '_' are
        internal and are skipped. Returns "" for None.
        '''
        if ds is None:
            return ""
        elif isinstance(ds, string_types):
            # FIX: was `basestring`, which does not exist on Python 3; use the
            # six compatibility alias already imported at the top of the file.
            return ds
        elif isinstance(ds, dict):
            buf = ""
            for (k, v) in iteritems(ds):
                if k.startswith('_'):
                    continue
                buf = buf + "%s=%s " % (k, v)
            buf = buf.strip()
            return buf

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        ''' alternate constructor: build a Task and populate it from a datastructure '''
        t = Task(block=block, role=role, task_include=task_include)
        return t.load_data(data, variable_manager=variable_manager, loader=loader)

    def __repr__(self):
        ''' returns a human readable representation of the task '''
        return "TASK: %s" % self.get_name()

    def _preprocess_loop(self, ds, new_ds, k, v):
        ''' take a lookup plugin name (with_<name>) and store it correctly '''

        loop_name = k.replace("with_", "")
        if new_ds.get('loop') is not None:
            raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
        if v is None:
            raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
        new_ds['loop'] = loop_name
        new_ds['loop_args'] = v

    def preprocess_data(self, ds):
        '''
        tasks are especially complex arguments so need pre-processing.
        keep it short.
        '''

        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds)
        (action, args, connection) = args_parser.parse()

        new_ds['action'] = action
        new_ds['args'] = args
        new_ds['connection'] = connection

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.pop('vars'))
        else:
            new_ds['vars'] = dict()

        for (k, v) in iteritems(ds):
            if k in ('action', 'local_action', 'args', 'connection') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were
                # determined by the ModuleArgsParser() above
                continue
            elif k.replace("with_", "") in lookup_loader:
                self._preprocess_loop(ds, new_ds, k, v)
            else:
                # pre-2.0 syntax allowed variables for include statements at the
                # top level of the task, so we move those into the 'vars' dictionary
                # here, and show a deprecation message as we will remove this at
                # some point in the future.
                if action == 'include' and k not in self._get_base_attributes() and k not in self.DEPRECATED_ATTRIBUTES:
                    # FIX: was `self._display.deprecated(...)` but Task has no
                    # `_display` attribute — use the module-level `display`,
                    # as `_load_any_errors_fatal` below already does.
                    display.deprecated("Specifying include variables at the top-level of the task is deprecated. Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\nfor currently supported syntax regarding included files and variables")
                    new_ds['vars'][k] = v
                else:
                    new_ds[k] = v

        return super(Task, self).preprocess_data(new_ds)

    def _load_any_errors_fatal(self, attr, value):
        '''
        Exists only to show a deprecation warning, as this attribute is not valid
        at the task level.
        '''
        display.deprecated("Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only")
        return None

    def post_validate(self, templar):
        '''
        Override of base class post_validate, to also do final validation on
        the block and task include (if any) to which this task belongs.
        '''

        if self._block:
            self._block.post_validate(templar)
        if self._task_include:
            self._task_include.post_validate(templar)

        super(Task, self).post_validate(templar)

    def _post_validate_loop_args(self, attr, value, templar):
        '''
        Override post validation for the loop args field, which is templated
        specially in the TaskExecutor class when evaluating loops.
        '''
        return value

    def _post_validate_environment(self, attr, value, templar):
        '''
        Override post validation of vars on the play, as we don't want to
        template these too early.
        '''
        if value is None:
            return dict()
        for env_item in value:
            if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys():
                # FIX: was `self._display.deprecated(...)`; Task has no
                # `_display` attribute — use the module-level `display`.
                display.deprecated("Using bare variables for environment is deprecated. Update your playbooks so that the environment value uses the full variable syntax ('{{foo}}')")
                break
        return templar.template(value, convert_bare=True)

    def get_vars(self):
        ''' collect variables from the block/include chain plus this task's own vars '''
        all_vars = dict()
        if self._block:
            all_vars.update(self._block.get_vars())
        if self._task_include:
            all_vars.update(self._task_include.get_vars())

        all_vars.update(self.vars)

        # 'tags' and 'when' are handled by their own mechanisms, not as vars
        if 'tags' in all_vars:
            del all_vars['tags']
        if 'when' in all_vars:
            del all_vars['when']
        return all_vars

    def copy(self, exclude_block=False):
        ''' deep-ish copy; the role is shared, block/include are copied unless excluded '''
        new_me = super(Task, self).copy()

        new_me._block = None
        if self._block and not exclude_block:
            new_me._block = self._block.copy()

        new_me._role = None
        if self._role:
            new_me._role = self._role

        new_me._task_include = None
        if self._task_include:
            new_me._task_include = self._task_include.copy(exclude_block=exclude_block)

        return new_me

    def serialize(self):
        ''' serialize this task plus its parent block/role/include, for worker hand-off '''
        data = super(Task, self).serialize()

        if self._block:
            data['block'] = self._block.serialize()

        if self._role:
            data['role'] = self._role.serialize()

        if self._task_include:
            data['task_include'] = self._task_include.serialize()

        return data

    def deserialize(self, data):
        ''' restore state produced by serialize(); consumes the nested keys it handles '''

        # import is here to avoid import loops
        #from ansible.playbook.task_include import TaskInclude

        block_data = data.get('block')
        if block_data:
            b = Block()
            b.deserialize(block_data)
            self._block = b
            del data['block']

        role_data = data.get('role')
        if role_data:
            r = Role()
            r.deserialize(role_data)
            self._role = r
            del data['role']

        ti_data = data.get('task_include')
        if ti_data:
            #ti = TaskInclude()
            ti = Task()
            ti.deserialize(ti_data)
            self._task_include = ti
            del data['task_include']

        super(Task, self).deserialize(data)

    def evaluate_conditional(self, templar, all_vars):
        ''' a task is skipped if its own, its block's, or its include's conditional fails '''
        if self._block is not None:
            if not self._block.evaluate_conditional(templar, all_vars):
                return False
        if self._task_include is not None:
            if not self._task_include.evaluate_conditional(templar, all_vars):
                return False
        return super(Task, self).evaluate_conditional(templar, all_vars)

    def set_loader(self, loader):
        '''
        Sets the loader on this object and recursively on parent, child objects.
        This is used primarily after the Task has been serialized/deserialized, which
        does not preserve the loader.
        '''

        self._loader = loader

        if self._block:
            self._block.set_loader(loader)
        if self._task_include:
            self._task_include.set_loader(loader)

    def _get_parent_attribute(self, attr, extend=False):
        '''
        Generic logic to get the attribute or parent attribute for a task value.
        '''
        value = self._attributes[attr]
        if self._block and (value is None or extend):
            parent_value = getattr(self._block, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value
        if self._task_include and (value is None or extend):
            parent_value = getattr(self._task_include, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value
        return value

    def _get_attr_environment(self):
        '''
        Override for the 'environment' getattr fetcher, used from Base.
        Falls back to the parent block/include when not set on the task itself.
        '''
        environment = self._attributes['environment']
        if environment is None:
            environment = self._get_parent_attribute('environment')
        return environment
unknown
codeparrot/codeparrot-clean
""" 34. Generic relations Generic relations let an object have a foreign key to any object through a content-type/object-id field. A ``GenericForeignKey`` field can point to any object, be it animal, vegetable, or mineral. The canonical example is tags (although this example implementation is *far* from complete). """ from __future__ import unicode_literals from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class TaggedItem(models.Model): """A tag on an item.""" tag = models.SlugField() content_type = models.ForeignKey(ContentType) object_id = models.PositiveIntegerField() content_object = generic.GenericForeignKey() class Meta: ordering = ["tag", "content_type__name"] def __str__(self): return self.tag class ValuableTaggedItem(TaggedItem): value = models.PositiveIntegerField() @python_2_unicode_compatible class Comparison(models.Model): """ A model that tests having multiple GenericForeignKeys """ comparative = models.CharField(max_length=50) content_type1 = models.ForeignKey(ContentType, related_name="comparative1_set") object_id1 = models.PositiveIntegerField() content_type2 = models.ForeignKey(ContentType, related_name="comparative2_set") object_id2 = models.PositiveIntegerField() first_obj = generic.GenericForeignKey(ct_field="content_type1", fk_field="object_id1") other_obj = generic.GenericForeignKey(ct_field="content_type2", fk_field="object_id2") def __str__(self): return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj) @python_2_unicode_compatible class Animal(models.Model): common_name = models.CharField(max_length=150) latin_name = models.CharField(max_length=150) tags = generic.GenericRelation(TaggedItem) comparisons = generic.GenericRelation(Comparison, object_id_field="object_id1", content_type_field="content_type1") def __str__(self): return 
self.common_name @python_2_unicode_compatible class Vegetable(models.Model): name = models.CharField(max_length=150) is_yucky = models.BooleanField(default=True) tags = generic.GenericRelation(TaggedItem) def __str__(self): return self.name @python_2_unicode_compatible class Mineral(models.Model): name = models.CharField(max_length=150) hardness = models.PositiveSmallIntegerField() # note the lack of an explicit GenericRelation here... def __str__(self): return self.name class GeckoManager(models.Manager): def get_queryset(self): return super(GeckoManager, self).get_queryset().filter(has_tail=True) class Gecko(models.Model): has_tail = models.BooleanField() objects = GeckoManager() # To test fix for #11263 class Rock(Mineral): tags = generic.GenericRelation(TaggedItem)
unknown
codeparrot/codeparrot-clean
/** @type {import('tailwindcss').Config} */ module.exports = { content: ["./app/**/*.{js,ts,jsx,tsx}"], theme: { extend: {}, }, plugins: [], };
javascript
github
https://github.com/vercel/next.js
examples/radix-ui/tailwind.config.js
The type of a const parameter references other generic parameters. The type of
a const generic parameter must be concrete; it cannot depend on another generic
parameter of the same item.

Erroneous code example:

```compile_fail,E0770
fn foo<T, const N: T>() {} // error!
```

To fix this error, use a concrete type for the const parameter:

```
fn foo<T, const N: usize>() {}
```
unknown
github
https://github.com/rust-lang/rust
compiler/rustc_error_codes/src/error_codes/E0770.md
from __future__ import absolute_import, division, unicode_literals import re from xml.sax.saxutils import escape, unescape from .tokenizer import HTMLTokenizer from .constants import tokenTypes class HTMLSanitizerMixin(object): """ sanitization of XHTML+MathML+SVG and of inline style attributes.""" acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video'] mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', 'munderover', 'none'] svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse', 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use'] acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', 'background', 'balance', 
'bgcolor', 'bgproperties', 'border', 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'step', 'style', 'summary', 'suppress', 'tabindex', 'target', 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', 'xml:lang'] mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', 'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth', 'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize', 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', 'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'] svg_attributes = ['accent-height', 
'accumulate', 'additive', 'alphabetic', 'arabic-form', 'ascent', 'attributeName', 'attributeType', 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', 'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end', 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset', 'opacity', 'orient', 'origin', 'overline-position', 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness', 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan'] attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster', 'xlink:href', 'xml:base'] svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill', 'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end', 'mask', 'stroke'] 
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter', 'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref', 'set', 'use'] acceptable_css_properties = ['azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width'] acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow'] acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity'] acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc', 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal', 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', 'ssh', 'sftp', 'rtsp', 'afs'] # subclasses may define their own versions of these constants allowed_elements = acceptable_elements + mathml_elements + svg_elements allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes allowed_css_properties = acceptable_css_properties allowed_css_keywords = acceptable_css_keywords allowed_svg_properties = 
acceptable_svg_properties allowed_protocols = acceptable_protocols # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style # attributes are parsed, and a restricted set, # specified by # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through. # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified # in ALLOWED_PROTOCOLS are allowed. # # sanitize_html('<script> do_nasty_stuff() </script>') # => &lt;script> do_nasty_stuff() &lt;/script> # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') # => <a>Click here for $100</a> def sanitize_token(self, token): # accommodate filters which use token_type differently token_type = token["type"] if token_type in list(tokenTypes.keys()): token_type = tokenTypes[token_type] if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"]): if token["name"] in self.allowed_elements: return self.allowed_token(token, token_type) else: return self.disallowed_token(token, token_type) elif token_type == tokenTypes["Comment"]: pass else: return token def allowed_token(self, token, token_type): if "data" in token: attrs = dict([(name, val) for name, val in token["data"][::-1] if name in self.allowed_attributes]) for attr in self.attr_val_is_uri: if attr not in attrs: continue val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '', unescape(attrs[attr])).lower() # remove replacement characters from unescaped characters val_unescaped = val_unescaped.replace("\ufffd", "") if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and (val_unescaped.split(':')[0] not in self.allowed_protocols)): del attrs[attr] for attr in self.svg_attr_val_allows_ref: if attr in attrs: attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', ' ', unescape(attrs[attr])) if (token["name"] in self.svg_allow_local_href and 'xlink:href' in attrs and re.search('^\s*[^#\s].*', attrs['xlink:href'])): del 
attrs['xlink:href'] if 'style' in attrs: attrs['style'] = self.sanitize_css(attrs['style']) token["data"] = [[name, val] for name, val in list(attrs.items())] return token def disallowed_token(self, token, token_type): if token_type == tokenTypes["EndTag"]: token["data"] = "</%s>" % token["name"] elif token["data"]: attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]]) token["data"] = "<%s%s>" % (token["name"], attrs) else: token["data"] = "<%s>" % token["name"] if token.get("selfClosing"): token["data"] = token["data"][:-1] + "/>" if token["type"] in list(tokenTypes.keys()): token["type"] = "Characters" else: token["type"] = tokenTypes["Characters"] del token["name"] return token def sanitize_css(self, style): # disallow urls style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return '' clean = [] for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style): if not value: continue if prop.lower() in self.allowed_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']: for keyword in value.split(): if keyword not in self.acceptable_css_keywords and \ not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): break else: clean.append(prop + ': ' + value + ';') elif prop.lower() in self.allowed_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin): def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, lowercaseElementName=False, lowercaseAttrName=False, parser=None): # Change case matching defaults as we only output lowercase html anyway # This solution doesn't seem ideal... 
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet, lowercaseElementName, lowercaseAttrName, parser=parser) def __iter__(self): for token in HTMLTokenizer.__iter__(self): token = self.sanitize_token(token) if token: yield token
unknown
codeparrot/codeparrot-clean
from xml.dom.minidom import parseString

from django.contrib.auth.decorators import login_required, permission_required
from django.core import mail
from django.forms import fields
from django.forms.forms import Form, ValidationError
from django.forms.formsets import BaseFormSet, formset_factory
from django.http import (
    HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed,
    HttpResponseNotFound, HttpResponseRedirect,
)
from django.shortcuts import render_to_response
from django.template import Context, Template
from django.test import Client
from django.utils.decorators import method_decorator
from django.utils.six.moves.urllib.parse import urlencode


def get_view(request):
    "A simple view that expects a GET request, and returns a rendered template"
    t = Template('This is a test. {{ var }} is the value.', name='GET Template')
    c = Context({'var': request.GET.get('var', 42)})
    return HttpResponse(t.render(c))


def trace_view(request):
    """
    A simple view that expects a TRACE request and echoes its status line.

    TRACE requests should not have an entity; the view will return a 400
    status response if it is present.
    """
    if request.method.upper() != "TRACE":
        return HttpResponseNotAllowed("TRACE")
    elif request.body:
        return HttpResponseBadRequest("TRACE requests MUST NOT include an entity")
    else:
        protocol = request.META["SERVER_PROTOCOL"]
        t = Template(
            '{{ method }} {{ uri }} {{ version }}',
            name="TRACE Template",
        )
        c = Context({
            'method': request.method,
            'uri': request.path,
            'version': protocol,
        })
        return HttpResponse(t.render(c))


def post_view(request):
    """A view that expects a POST, and returns a different template depending
    on whether any POST data is available
    """
    if request.method == 'POST':
        if request.POST:
            t = Template('Data received: {{ data }} is the value.', name='POST Template')
            c = Context({'data': request.POST['value']})
        else:
            t = Template('Viewing POST page.', name='Empty POST Template')
            c = Context()
    else:
        t = Template('Viewing GET page.', name='Empty GET Template')
        c = Context()
    return HttpResponse(t.render(c))


def view_with_header(request):
    "A view that has a custom header"
    response = HttpResponse()
    response['X-DJANGO-TEST'] = 'Slartibartfast'
    return response


def raw_post_view(request):
    """A view which expects raw XML to be posted and returns content extracted
    from the XML"""
    if request.method == 'POST':
        root = parseString(request.body)
        first_book = root.firstChild.firstChild
        title, author = [n.firstChild.nodeValue for n in first_book.childNodes]
        t = Template("{{ title }} - {{ author }}", name="Book template")
        c = Context({"title": title, "author": author})
    else:
        t = Template("GET request.", name="Book GET template")
        c = Context()

    return HttpResponse(t.render(c))


def redirect_view(request):
    "A view that redirects all requests to the GET view"
    if request.GET:
        # Preserve the original query string across the redirect.
        query = '?' + urlencode(request.GET, True)
    else:
        query = ''
    return HttpResponseRedirect('/get_view/' + query)


def view_with_secure(request):
    "A view that indicates if the request was secure"
    response = HttpResponse()
    response.test_was_secure_request = request.is_secure()
    response.test_server_port = request.META.get('SERVER_PORT', 80)
    return response


def double_redirect_view(request):
    "A view that redirects all requests to a redirection view"
    return HttpResponseRedirect('/permanent_redirect_view/')


def bad_view(request):
    "A view that returns a 404 with some error content"
    return HttpResponseNotFound('Not found!. This page contains some MAGIC content')


TestChoices = (
    ('a', 'First Choice'),
    ('b', 'Second Choice'),
    ('c', 'Third Choice'),
    ('d', 'Fourth Choice'),
    ('e', 'Fifth Choice')
)


class TestForm(Form):
    text = fields.CharField()
    email = fields.EmailField()
    value = fields.IntegerField()
    single = fields.ChoiceField(choices=TestChoices)
    multi = fields.MultipleChoiceField(choices=TestChoices)

    def clean(self):
        # Allow tests to trigger a non-field error on demand.
        cleaned_data = self.cleaned_data
        if cleaned_data.get("text") == "Raise non-field error":
            raise ValidationError("Non-field error.")
        return cleaned_data


def form_view(request):
    "A view that tests a simple form"
    if request.method == 'POST':
        form = TestForm(request.POST)
        if form.is_valid():
            t = Template('Valid POST data.', name='Valid POST Template')
            c = Context()
        else:
            t = Template('Invalid POST data. {{ form.errors }}', name='Invalid POST Template')
            c = Context({'form': form})
    else:
        form = TestForm(request.GET)
        t = Template('Viewing base form. {{ form }}.', name='Form GET Template')
        c = Context({'form': form})

    return HttpResponse(t.render(c))


def form_view_with_template(request):
    "A view that tests a simple form"
    if request.method == 'POST':
        form = TestForm(request.POST)
        if form.is_valid():
            message = 'POST data OK'
        else:
            message = 'POST data has errors'
    else:
        form = TestForm()
        message = 'GET form page'
    return render_to_response('form_view.html',
        {
            'form': form,
            'message': message
        }
    )


class BaseTestFormSet(BaseFormSet):
    def clean(self):
        """Checks that no two email addresses are the same."""
        if any(self.errors):
            # Don't bother validating the formset unless each form is valid
            return

        emails = []
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            email = form.cleaned_data['email']
            if email in emails:
                raise ValidationError(
                    "Forms in a set must have distinct email addresses."
                )
            emails.append(email)


TestFormSet = formset_factory(TestForm, BaseTestFormSet)


def formset_view(request):
    "A view that tests a simple formset"
    if request.method == 'POST':
        formset = TestFormSet(request.POST)
        if formset.is_valid():
            t = Template('Valid POST data.', name='Valid POST Template')
            c = Context()
        else:
            t = Template('Invalid POST data. {{ my_formset.errors }}',
                         name='Invalid POST Template')
            c = Context({'my_formset': formset})
    else:
        # FIX: was `TestForm(request.GET)` — a single form rendered under the
        # 'my_formset' name; build the formset to match the POST branch.
        formset = TestFormSet(request.GET)
        t = Template('Viewing base formset. {{ my_formset }}.',
                     name='Formset GET Template')
        c = Context({'my_formset': formset})
    return HttpResponse(t.render(c))


def login_protected_view(request):
    "A simple view that is login protected."
    t = Template('This is a login protected test. Username is {{ user.username }}.',
                 name='Login Template')
    c = Context({'user': request.user})
    return HttpResponse(t.render(c))
login_protected_view = login_required(login_protected_view)


def login_protected_view_changed_redirect(request):
    "A simple view that is login protected with a custom redirect field set"
    t = Template('This is a login protected test. Username is {{ user.username }}.',
                 name='Login Template')
    c = Context({'user': request.user})
    return HttpResponse(t.render(c))
login_protected_view_changed_redirect = (
    login_required(redirect_field_name="redirect_to")(login_protected_view_changed_redirect)
)


def _permission_protected_view(request):
    "A simple view that is permission protected."
    t = Template('This is a permission protected test. '
                 'Username is {{ user.username }}. '
                 'Permissions are {{ user.get_all_permissions }}.',
                 name='Permissions Template')
    c = Context({'user': request.user})
    return HttpResponse(t.render(c))
permission_protected_view = permission_required('permission_not_granted')(_permission_protected_view)
permission_protected_view_exception = (
    permission_required('permission_not_granted', raise_exception=True)(_permission_protected_view)
)


class _ViewManager(object):
    @method_decorator(login_required)
    def login_protected_view(self, request):
        t = Template('This is a login protected test using a method. '
                     'Username is {{ user.username }}.',
                     name='Login Method Template')
        c = Context({'user': request.user})
        return HttpResponse(t.render(c))

    @method_decorator(permission_required('permission_not_granted'))
    def permission_protected_view(self, request):
        t = Template('This is a permission protected test using a method. '
                     'Username is {{ user.username }}. '
                     'Permissions are {{ user.get_all_permissions }}.',
                     name='Permissions Template')
        c = Context({'user': request.user})
        return HttpResponse(t.render(c))

_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view


def session_view(request):
    "A view that modifies the session"
    request.session['tobacconist'] = 'hovercraft'
    t = Template('This is a view that modifies the session.',
                 name='Session Modifying View Template')
    c = Context()
    return HttpResponse(t.render(c))


def broken_view(request):
    """A view which just raises an exception, simulating a broken view."""
    raise KeyError("Oops! Looks like you wrote some bad code.")


def mail_sending_view(request):
    mail.EmailMessage(
        "Test message",
        "This is a test email",
        "from@example.com",
        ['first@example.com', 'second@example.com']).send()
    return HttpResponse("Mail sent")


def mass_mail_sending_view(request):
    m1 = mail.EmailMessage(
        'First Test message',
        'This is the first test email',
        'from@example.com',
        ['first@example.com', 'second@example.com'])
    m2 = mail.EmailMessage(
        'Second Test message',
        'This is the second test email',
        'from@example.com',
        ['second@example.com', 'third@example.com'])

    c = mail.get_connection()
    c.send_messages([m1, m2])

    return HttpResponse("Mail sent")


def nesting_exception_view(request):
    """
    A view that uses a nested client to call another view and then raises an
    exception.
    """
    client = Client()
    client.get('/get_view/')
    raise Exception('exception message')


def django_project_redirect(request):
    return HttpResponseRedirect('https://www.djangoproject.com/')
codeparrot/codeparrot-clean
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import Constraint, WhereNode
from django.contrib.gis.db.models.fields import GeometryField


class GeoConstraint(Constraint):
    """
    This subclass overrides `process` to better handle geographic SQL
    construction.
    """
    def __init__(self, init_constraint):
        # Copy the identifying attributes from the plain Constraint being
        # wrapped; deliberately no super().__init__() call.
        self.alias = init_constraint.alias
        self.col = init_constraint.col
        self.field = init_constraint.field

    def process(self, lookup_type, value, connection):
        """
        Return the ((alias, column, db_type), params) pair used to build the
        spatial SQL for this constraint.

        Raises ValueError if `value` is an F-expression that does not
        resolve to a geographic field.
        """
        if isinstance(value, SQLEvaluator):
            # Make sure the F Expression destination field exists, and
            # set an `srid` attribute with the same as that of the
            # destination.
            geo_fld = GeoWhereNode._check_geo_field(value.opts, value.expression.name)
            if not geo_fld:
                raise ValueError('No geographic field found in expression.')
            value.srid = geo_fld.srid
        db_type = self.field.db_type(connection=connection)
        params = self.field.get_db_prep_lookup(lookup_type, value,
                                               connection=connection)
        return (self.alias, self.col, db_type), params


class GeoWhereNode(WhereNode):
    """
    Used to represent the SQL where-clause for spatial databases --
    these are tied to the GeoQuery class that created it.
    """
    def _prepare_data(self, data):
        # Swap in a GeoConstraint for constraints on geometry fields so that
        # make_atom() can emit spatial SQL for them.
        if isinstance(data, (list, tuple)):
            obj, lookup_type, value = data
            if isinstance(obj, Constraint) and isinstance(obj.field, GeometryField):
                data = (GeoConstraint(obj), lookup_type, value)
        return super(GeoWhereNode, self)._prepare_data(data)

    def make_atom(self, child, qn, connection):
        lvalue, lookup_type, value_annot, params_or_value = child
        if isinstance(lvalue, GeoConstraint):
            # Geographic constraint: delegate SQL generation to the backend's
            # spatial operations.
            data, params = lvalue.process(lookup_type, params_or_value, connection)
            spatial_sql, spatial_params = connection.ops.spatial_lookup_sql(
                data, lookup_type, params_or_value, lvalue.field, qn)
            return spatial_sql, spatial_params + params
        else:
            return super(GeoWhereNode, self).make_atom(child, qn, connection)

    @classmethod
    def _check_geo_field(cls, opts, lookup):
        """
        Utility for checking the given lookup with the given model options.
        The lookup is a string either specifying the geographic field, e.g.
        'point', 'the_geom', or a related lookup on a geographic field like
        'address__point'.

        If a GeometryField exists according to the given lookup on the model
        options, it will be returned.  Otherwise returns False.
        """
        # This takes into account the situation where the lookup is a
        # lookup to a related geographic field, e.g., 'address__point'.
        field_list = lookup.split(LOOKUP_SEP)

        # Reversing so list operates like a queue of related lookups,
        # and popping the top lookup.
        field_list.reverse()
        fld_name = field_list.pop()

        try:
            geo_fld = opts.get_field(fld_name)
            # If the field list is still around, then it means that the
            # lookup was for a geometry field across a relationship --
            # thus we keep on getting the related model options and the
            # model field associated with the next field in the list
            # until there's no more left.
            while field_list:
                opts = geo_fld.rel.to._meta
                geo_fld = opts.get_field(field_list.pop())
        except (FieldDoesNotExist, AttributeError):
            # AttributeError covers traversing a non-relational field
            # (no `rel.to`).
            return False

        # Finally, make sure we got a Geographic field and return.
        if isinstance(geo_fld, GeometryField):
            return geo_fld
        else:
            return False
unknown
codeparrot/codeparrot-clean
"""Redis cache backend.""" import pickle import random import re import django from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache from django.utils.functional import cached_property from django.utils.module_loading import import_string class RedisSerializer: def __init__(self, protocol=None): self.protocol = pickle.HIGHEST_PROTOCOL if protocol is None else protocol def dumps(self, obj): # For better incr() and decr() atomicity, don't pickle integers. # Using type() rather than isinstance() matches only integers and not # subclasses like bool. if type(obj) is int: return obj return pickle.dumps(obj, self.protocol) def loads(self, data): try: return int(data) except ValueError: return pickle.loads(data) class RedisCacheClient: def __init__( self, servers, serializer=None, pool_class=None, parser_class=None, **options, ): import redis self._lib = redis self._servers = servers self._pools = {} self._client = self._lib.Redis if isinstance(pool_class, str): pool_class = import_string(pool_class) self._pool_class = pool_class or self._lib.ConnectionPool if isinstance(serializer, str): serializer = import_string(serializer) if callable(serializer): serializer = serializer() self._serializer = serializer or RedisSerializer() if isinstance(parser_class, str): parser_class = import_string(parser_class) parser_class = parser_class or self._lib.connection.DefaultParser version = django.get_version() if hasattr(self._lib, "DriverInfo"): driver_info = self._lib.DriverInfo().add_upstream_driver("django", version) driver_info_options = {"driver_info": driver_info} else: # DriverInfo is not available in this redis-py version. driver_info_options = {"lib_name": f"redis-py(django_v{version})"} self._pool_options = { "parser_class": parser_class, **driver_info_options, **options, } def _get_connection_pool_index(self, write): # Write to the first server. Read from other servers if there are more, # otherwise read from the first server. 
if write or len(self._servers) == 1: return 0 return random.randint(1, len(self._servers) - 1) def _get_connection_pool(self, write): index = self._get_connection_pool_index(write) if index not in self._pools: self._pools[index] = self._pool_class.from_url( self._servers[index], **self._pool_options, ) return self._pools[index] def get_client(self, key=None, *, write=False): # key is used so that the method signature remains the same and custom # cache client can be implemented which might require the key to select # the server, e.g. sharding. pool = self._get_connection_pool(write) return self._client(connection_pool=pool) def add(self, key, value, timeout): client = self.get_client(key, write=True) value = self._serializer.dumps(value) if timeout == 0: if ret := bool(client.set(key, value, nx=True)): client.delete(key) return ret else: return bool(client.set(key, value, ex=timeout, nx=True)) def get(self, key, default): client = self.get_client(key) value = client.get(key) return default if value is None else self._serializer.loads(value) def set(self, key, value, timeout): client = self.get_client(key, write=True) value = self._serializer.dumps(value) if timeout == 0: client.delete(key) else: client.set(key, value, ex=timeout) def touch(self, key, timeout): client = self.get_client(key, write=True) if timeout is None: return bool(client.persist(key)) else: return bool(client.expire(key, timeout)) def delete(self, key): client = self.get_client(key, write=True) return bool(client.delete(key)) def get_many(self, keys): client = self.get_client(None) ret = client.mget(keys) return { k: self._serializer.loads(v) for k, v in zip(keys, ret) if v is not None } def has_key(self, key): client = self.get_client(key) return bool(client.exists(key)) def incr(self, key, delta): client = self.get_client(key, write=True) if not client.exists(key): raise ValueError("Key '%s' not found." 
% key) return client.incr(key, delta) def set_many(self, data, timeout): client = self.get_client(None, write=True) pipeline = client.pipeline() pipeline.mset({k: self._serializer.dumps(v) for k, v in data.items()}) if timeout is not None: # Setting timeout for each key as redis does not support timeout # with mset(). for key in data: pipeline.expire(key, timeout) pipeline.execute() def delete_many(self, keys): client = self.get_client(None, write=True) client.delete(*keys) def clear(self): client = self.get_client(None, write=True) return bool(client.flushdb()) class RedisCache(BaseCache): def __init__(self, server, params): super().__init__(params) if isinstance(server, str): self._servers = re.split("[;,]", server) else: self._servers = server self._class = RedisCacheClient self._options = params.get("OPTIONS", {}) @cached_property def _cache(self): return self._class(self._servers, **self._options) def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT): if timeout == DEFAULT_TIMEOUT: timeout = self.default_timeout # The key will be made persistent if None used as a timeout. # Non-positive values will cause the key to be deleted. 
return None if timeout is None else max(0, int(timeout)) def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): key = self.make_and_validate_key(key, version=version) return self._cache.add(key, value, self.get_backend_timeout(timeout)) def get(self, key, default=None, version=None): key = self.make_and_validate_key(key, version=version) return self._cache.get(key, default) def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): key = self.make_and_validate_key(key, version=version) self._cache.set(key, value, self.get_backend_timeout(timeout)) def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None): key = self.make_and_validate_key(key, version=version) return self._cache.touch(key, self.get_backend_timeout(timeout)) def delete(self, key, version=None): key = self.make_and_validate_key(key, version=version) return self._cache.delete(key) def get_many(self, keys, version=None): key_map = { self.make_and_validate_key(key, version=version): key for key in keys } ret = self._cache.get_many(key_map.keys()) return {key_map[k]: v for k, v in ret.items()} def has_key(self, key, version=None): key = self.make_and_validate_key(key, version=version) return self._cache.has_key(key) def incr(self, key, delta=1, version=None): key = self.make_and_validate_key(key, version=version) return self._cache.incr(key, delta) def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None): if not data: return [] safe_data = {} for key, value in data.items(): key = self.make_and_validate_key(key, version=version) safe_data[key] = value self._cache.set_many(safe_data, self.get_backend_timeout(timeout)) return [] def delete_many(self, keys, version=None): if not keys: return safe_keys = [self.make_and_validate_key(key, version=version) for key in keys] self._cache.delete_many(safe_keys) def clear(self): return self._cache.clear()
python
github
https://github.com/django/django
django/core/cache/backends/redis.py
lockfileVersion: '9.0' settings: autoInstallPeers: true excludeLinksFromLockfile: false importers: .: dependencies: '@angular/common': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel '@angular/compiler': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel '@angular/core': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel '@angular/localize': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel '@angular/platform-browser': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel '@angular/router': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel rxjs: specifier: ^7.0.0 version: 7.8.2 tslib: specifier: ^2.3.0 version: 2.8.1 zone.js: specifier: 0.16.0 version: 0.16.0 devDependencies: '@angular-devkit/build-angular': specifier: 21.1.0-rc.0 version: 21.1.0-rc.0(@angular/compiler-cli@in-existing-linked-by-bazel)(@angular/compiler@in-existing-linked-by-bazel)(@angular/core@in-existing-linked-by-bazel)(@angular/localize@in-existing-linked-by-bazel)(@angular/platform-browser@in-existing-linked-by-bazel)(@types/node@20.19.33)(jiti@2.6.1)(typescript@5.9.3) '@angular/cli': specifier: 21.1.0-rc.0 version: 21.1.0-rc.0(@types/node@20.19.33)(hono@4.11.9) '@angular/compiler-cli': specifier: link:./in-existing-linked-by-bazel version: link:in-existing-linked-by-bazel '@types/node': specifier: ^20.14.8 version: 20.19.33 typescript: specifier: 5.9.3 version: 5.9.3 packages: '@algolia/abtesting@1.12.2': resolution: {integrity: sha512-oWknd6wpfNrmRcH0vzed3UPX0i17o4kYLM5OMITyMVM2xLgaRbIafoxL0e8mcrNNb0iORCJA0evnNDKRYth5WQ==} engines: {node: '>= 14.0.0'} '@algolia/client-abtesting@5.46.2': resolution: {integrity: sha512-oRSUHbylGIuxrlzdPA8FPJuwrLLRavOhAmFGgdAvMcX47XsyM+IOGa9tc7/K5SPvBqn4nhppOCEz7BrzOPWc4A==} engines: {node: '>= 14.0.0'} '@algolia/client-analytics@5.46.2': 
resolution: {integrity: sha512-EPBN2Oruw0maWOF4OgGPfioTvd+gmiNwx0HmD9IgmlS+l75DatcBkKOPNJN+0z3wBQWUO5oq602ATxIfmTQ8bA==} engines: {node: '>= 14.0.0'} '@algolia/client-common@5.46.2': resolution: {integrity: sha512-Hj8gswSJNKZ0oyd0wWissqyasm+wTz1oIsv5ZmLarzOZAp3vFEda8bpDQ8PUhO+DfkbiLyVnAxsPe4cGzWtqkg==} engines: {node: '>= 14.0.0'} '@algolia/client-insights@5.46.2': resolution: {integrity: sha512-6dBZko2jt8FmQcHCbmNLB0kCV079Mx/DJcySTL3wirgDBUH7xhY1pOuUTLMiGkqM5D8moVZTvTdRKZUJRkrwBA==} engines: {node: '>= 14.0.0'} '@algolia/client-personalization@5.46.2': resolution: {integrity: sha512-1waE2Uqh/PHNeDXGn/PM/WrmYOBiUGSVxAWqiJIj73jqPqvfzZgzdakHscIVaDl6Cp+j5dwjsZ5LCgaUr6DtmA==} engines: {node: '>= 14.0.0'} '@algolia/client-query-suggestions@5.46.2': resolution: {integrity: sha512-EgOzTZkyDcNL6DV0V/24+oBJ+hKo0wNgyrOX/mePBM9bc9huHxIY2352sXmoZ648JXXY2x//V1kropF/Spx83w==} engines: {node: '>= 14.0.0'} '@algolia/client-search@5.46.2': resolution: {integrity: sha512-ZsOJqu4HOG5BlvIFnMU0YKjQ9ZI6r3C31dg2jk5kMWPSdhJpYL9xa5hEe7aieE+707dXeMI4ej3diy6mXdZpgA==} engines: {node: '>= 14.0.0'} '@algolia/ingestion@1.46.2': resolution: {integrity: sha512-1Uw2OslTWiOFDtt83y0bGiErJYy5MizadV0nHnOoHFWMoDqWW0kQoMFI65pXqRSkVvit5zjXSLik2xMiyQJDWQ==} engines: {node: '>= 14.0.0'} '@algolia/monitoring@1.46.2': resolution: {integrity: sha512-xk9f+DPtNcddWN6E7n1hyNNsATBCHIqAvVGG2EAGHJc4AFYL18uM/kMTiOKXE/LKDPyy1JhIerrh9oYb7RBrgw==} engines: {node: '>= 14.0.0'} '@algolia/recommend@5.46.2': resolution: {integrity: sha512-NApbTPj9LxGzNw4dYnZmj2BoXiAc8NmbbH6qBNzQgXklGklt/xldTvu+FACN6ltFsTzoNU6j2mWNlHQTKGC5+Q==} engines: {node: '>= 14.0.0'} '@algolia/requester-browser-xhr@5.46.2': resolution: {integrity: sha512-ekotpCwpSp033DIIrsTpYlGUCF6momkgupRV/FA3m62SreTSZUKjgK6VTNyG7TtYfq9YFm/pnh65bATP/ZWJEg==} engines: {node: '>= 14.0.0'} '@algolia/requester-fetch@5.46.2': resolution: {integrity: sha512-gKE+ZFi/6y7saTr34wS0SqYFDcjHW4Wminv8PDZEi0/mE99+hSrbKgJWxo2ztb5eqGirQTgIh1AMVacGGWM1iw==} engines: {node: '>= 
14.0.0'} '@algolia/requester-node-http@5.46.2': resolution: {integrity: sha512-ciPihkletp7ttweJ8Zt+GukSVLp2ANJHU+9ttiSxsJZThXc4Y2yJ8HGVWesW5jN1zrsZsezN71KrMx/iZsOYpg==} engines: {node: '>= 14.0.0'} '@ampproject/remapping@2.3.0': resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} engines: {node: '>=6.0.0'} '@angular-devkit/architect@0.2101.0-rc.0': resolution: {integrity: sha512-QohNOLzTorQejqb4Kr5GFB9gCiaTuarmTza53g8oYjSMzB/zLDDN6O5e7SW6lp+wdfIla2mnUWyVBJSTNOa+Hg==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} hasBin: true '@angular-devkit/build-angular@21.1.0-rc.0': resolution: {integrity: sha512-NjAPiPJaCB7jXGbX2Js+HMtebF9z24HMBkLZYTa/Moypfqpx0HMMC+Gn/1DrLw6xT39uET1JQRlUFtBkXFsRYw==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} peerDependencies: '@angular/compiler-cli': ^21.0.0 || ^21.1.0-next.0 '@angular/core': ^21.0.0 || ^21.1.0-next.0 '@angular/localize': ^21.0.0 || ^21.1.0-next.0 '@angular/platform-browser': ^21.0.0 || ^21.1.0-next.0 '@angular/platform-server': ^21.0.0 || ^21.1.0-next.0 '@angular/service-worker': ^21.0.0 || ^21.1.0-next.0 '@angular/ssr': ^21.1.0-rc.0 '@web/test-runner': ^0.20.0 browser-sync: ^3.0.2 jest: ^30.2.0 jest-environment-jsdom: ^30.2.0 karma: ^6.3.0 ng-packagr: ^21.0.0 || ^21.1.0-next.0 protractor: ^7.0.0 tailwindcss: ^2.0.0 || ^3.0.0 || ^4.0.0 typescript: '>=5.9 <6.0' peerDependenciesMeta: '@angular/core': optional: true '@angular/localize': optional: true '@angular/platform-browser': optional: true '@angular/platform-server': optional: true '@angular/service-worker': optional: true '@angular/ssr': optional: true '@web/test-runner': optional: true browser-sync: optional: true jest: optional: true jest-environment-jsdom: optional: true karma: optional: true ng-packagr: optional: true protractor: optional: true tailwindcss: optional: true 
'@angular-devkit/build-webpack@0.2101.0-rc.0': resolution: {integrity: sha512-PkP9v/3PBpNA6RYY+79pG4c1BlalWKjC62Ccbfl1zPNGLKZ0Ie7LTsygACP3xRMkKd3dWtCsp7gGPTPlAmJU+g==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} peerDependencies: webpack: ^5.30.0 webpack-dev-server: ^5.0.2 '@angular-devkit/core@21.1.0-rc.0': resolution: {integrity: sha512-BCMware2kmXEnmWJE8rCcX6p3LdqkGGR0GoEMafCuayM3EqOdJkBsQb8EptKypuZ7y7UREKjfBGs9Q8+WPFcng==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} peerDependencies: chokidar: ^5.0.0 peerDependenciesMeta: chokidar: optional: true '@angular-devkit/schematics@21.1.0-rc.0': resolution: {integrity: sha512-TcRuOpJzOAm8Z5YRNJS4qMxdjuYEXIrmpiwH3qyE4fhWKGOpZoDRgkDGEL5D0glb+nuD4Hwa167A2FBOCVe8zQ==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} '@angular/build@21.1.0-rc.0': resolution: {integrity: sha512-RAK4QHzWlKxHgSfcauwT9WC9aRYCvLiTb3rGwLWk9a/7uWuclqC6Fpw6m8U1t8znF0uXLoxM85RVYxm1fm/rQw==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} peerDependencies: '@angular/compiler': ^21.0.0 || ^21.1.0-next.0 '@angular/compiler-cli': ^21.0.0 || ^21.1.0-next.0 '@angular/core': ^21.0.0 || ^21.1.0-next.0 '@angular/localize': ^21.0.0 || ^21.1.0-next.0 '@angular/platform-browser': ^21.0.0 || ^21.1.0-next.0 '@angular/platform-server': ^21.0.0 || ^21.1.0-next.0 '@angular/service-worker': ^21.0.0 || ^21.1.0-next.0 '@angular/ssr': ^21.1.0-rc.0 karma: ^6.4.0 less: ^4.2.0 ng-packagr: ^21.0.0 || ^21.1.0-next.0 postcss: ^8.4.0 tailwindcss: ^2.0.0 || ^3.0.0 || ^4.0.0 tslib: ^2.3.0 typescript: '>=5.9 <6.0' vitest: ^4.0.8 peerDependenciesMeta: '@angular/core': optional: true '@angular/localize': optional: true '@angular/platform-browser': optional: true '@angular/platform-server': optional: true '@angular/service-worker': optional: true 
'@angular/ssr': optional: true karma: optional: true less: optional: true ng-packagr: optional: true postcss: optional: true tailwindcss: optional: true vitest: optional: true '@angular/cli@21.1.0-rc.0': resolution: {integrity: sha512-Gqc4zXxnV0TXEurqyBvlLnLMx+bPw3Kpp7a4xBVT6C5ZRzR+KbA4Rpwuqt5D6eYt8byMVLeqARhSFtoRKvH0qQ==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} hasBin: true '@babel/code-frame@7.29.0': resolution: {integrity: sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==} engines: {node: '>=6.9.0'} '@babel/compat-data@7.29.0': resolution: {integrity: sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==} engines: {node: '>=6.9.0'} '@babel/core@7.28.5': resolution: {integrity: sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==} engines: {node: '>=6.9.0'} '@babel/generator@7.28.5': resolution: {integrity: sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==} engines: {node: '>=6.9.0'} '@babel/generator@7.29.1': resolution: {integrity: sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==} engines: {node: '>=6.9.0'} '@babel/helper-annotate-as-pure@7.27.3': resolution: {integrity: sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==} engines: {node: '>=6.9.0'} '@babel/helper-compilation-targets@7.28.6': resolution: {integrity: sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==} engines: {node: '>=6.9.0'} '@babel/helper-create-class-features-plugin@7.28.6': resolution: {integrity: sha512-dTOdvsjnG3xNT9Y0AUg1wAl38y+4Rl4sf9caSQZOXdNqVn+H+HbbJ4IyyHaIqNR6SW9oJpA/RuRjsjCw2IdIow==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/helper-create-regexp-features-plugin@7.28.5': 
resolution: {integrity: sha512-N1EhvLtHzOvj7QQOUCCS3NrPJP8c5W6ZXCHDn7Yialuy1iu4r5EmIYkXlKNqT99Ciw+W0mDqWoR6HWMZlFP3hw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/helper-define-polyfill-provider@0.6.6': resolution: {integrity: sha512-mOAsxeeKkUKayvZR3HeTYD/fICpCPLJrU5ZjelT/PA6WHtNDBOE436YiaEUvHN454bRM3CebhDsIpieCc4texA==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 '@babel/helper-globals@7.28.0': resolution: {integrity: sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==} engines: {node: '>=6.9.0'} '@babel/helper-member-expression-to-functions@7.28.5': resolution: {integrity: sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==} engines: {node: '>=6.9.0'} '@babel/helper-module-imports@7.28.6': resolution: {integrity: sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==} engines: {node: '>=6.9.0'} '@babel/helper-module-transforms@7.28.6': resolution: {integrity: sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/helper-optimise-call-expression@7.27.1': resolution: {integrity: sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==} engines: {node: '>=6.9.0'} '@babel/helper-plugin-utils@7.28.6': resolution: {integrity: sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==} engines: {node: '>=6.9.0'} '@babel/helper-remap-async-to-generator@7.27.1': resolution: {integrity: sha512-7fiA521aVw8lSPeI4ZOD3vRFkoqkJcS+z4hFo82bFSH/2tNd6eJ5qCVMS5OzDmZh/kaHQeBaeyxK6wljcPtveA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/helper-replace-supers@7.28.6': resolution: {integrity: sha512-mq8e+laIk94/yFec3DxSjCRD2Z0TAjhVbEJY3UQrlwVo15Lmt7C2wAUbK4bjnTs4APkwsYLTahXRraQXhb1WCg==} 
engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/helper-skip-transparent-expression-wrappers@7.27.1': resolution: {integrity: sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==} engines: {node: '>=6.9.0'} '@babel/helper-split-export-declaration@7.24.7': resolution: {integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==} engines: {node: '>=6.9.0'} '@babel/helper-string-parser@7.27.1': resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} engines: {node: '>=6.9.0'} '@babel/helper-validator-identifier@7.28.5': resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} engines: {node: '>=6.9.0'} '@babel/helper-validator-option@7.27.1': resolution: {integrity: sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==} engines: {node: '>=6.9.0'} '@babel/helper-wrap-function@7.28.6': resolution: {integrity: sha512-z+PwLziMNBeSQJonizz2AGnndLsP2DeGHIxDAn+wdHOGuo4Fo1x1HBPPXeE9TAOPHNNWQKCSlA2VZyYyyibDnQ==} engines: {node: '>=6.9.0'} '@babel/helpers@7.28.6': resolution: {integrity: sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==} engines: {node: '>=6.9.0'} '@babel/parser@7.29.0': resolution: {integrity: sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==} engines: {node: '>=6.0.0'} hasBin: true '@babel/plugin-bugfix-firefox-class-in-computed-class-key@7.28.5': resolution: {integrity: sha512-87GDMS3tsmMSi/3bWOte1UblL+YUTFMV8SZPZ2eSEL17s74Cw/l63rR6NmGVKMYW2GYi85nE+/d6Hw5N0bEk2Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-bugfix-safari-class-field-initializer-scope@7.27.1': resolution: {integrity: 
sha512-qNeq3bCKnGgLkEXUuFry6dPlGfCdQNZbn7yUAPCInwAJHMU7THJfrBSozkcWq5sNM6RcF3S8XyQL2A52KNR9IA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.27.1': resolution: {integrity: sha512-g4L7OYun04N1WyqMNjldFwlfPCLVkgB54A/YCXICZYBsvJJE3kByKv9c9+R/nAfmIfjl2rKYLNyMHboYbZaWaA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.27.1': resolution: {integrity: sha512-oO02gcONcD5O1iTLi/6frMJBIwWEHceWGSGqrpCmEL8nogiS6J9PBlE48CaK20/Jx1LuRml9aDftLgdjXT8+Cw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.13.0 '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@7.28.6': resolution: {integrity: sha512-a0aBScVTlNaiUe35UtfxAN7A/tehvvG4/ByO6+46VPKTRSlfnAFsgKy0FUh+qAkQrDTmhDkT+IBOKlOoMUxQ0g==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2': resolution: {integrity: sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-syntax-import-assertions@7.28.6': resolution: {integrity: sha512-pSJUpFHdx9z5nqTSirOCMtYVP2wFgoWhP0p3g8ONK/4IHhLIBd0B9NYqAvIUAhq+OkhO4VM1tENCt0cjlsNShw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-syntax-import-attributes@7.28.6': resolution: {integrity: sha512-jiLC0ma9XkQT3TKJ9uYvlakm66Pamywo+qwL+oL8HJOvc6TWdZXVfhqJr8CCzbSGUAbDOzlGHJC1U+vRfLQDvw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-syntax-unicode-sets-regex@7.18.6': resolution: {integrity: sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 
'@babel/plugin-transform-arrow-functions@7.27.1': resolution: {integrity: sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-async-generator-functions@7.28.0': resolution: {integrity: sha512-BEOdvX4+M765icNPZeidyADIvQ1m1gmunXufXxvRESy/jNNyfovIqUyE7MVgGBjWktCoJlzvFA1To2O4ymIO3Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-async-to-generator@7.27.1': resolution: {integrity: sha512-NREkZsZVJS4xmTr8qzE5y8AfIPqsdQfRuUiLRTEzb7Qii8iFWCyDKaUV2c0rCuh4ljDZ98ALHP/PetiBV2nddA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-block-scoped-functions@7.27.1': resolution: {integrity: sha512-cnqkuOtZLapWYZUYM5rVIdv1nXYuFVIltZ6ZJ7nIj585QsjKM5dhL2Fu/lICXZ1OyIAFc7Qy+bvDAtTXqGrlhg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-block-scoping@7.28.6': resolution: {integrity: sha512-tt/7wOtBmwHPNMPu7ax4pdPz6shjFrmHDghvNC+FG9Qvj7D6mJcoRQIF5dy4njmxR941l6rgtvfSB2zX3VlUIw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-class-properties@7.28.6': resolution: {integrity: sha512-dY2wS3I2G7D697VHndN91TJr8/AAfXQNt5ynCTI/MpxMsSzHp+52uNivYT5wCPax3whc47DR8Ba7cmlQMg24bw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-class-static-block@7.28.6': resolution: {integrity: sha512-rfQ++ghVwTWTqQ7w8qyDxL1XGihjBss4CmTgGRCTAC9RIbhVpyp4fOeZtta0Lbf+dTNIVJer6ych2ibHwkZqsQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.12.0 '@babel/plugin-transform-classes@7.28.6': resolution: {integrity: sha512-EF5KONAqC5zAqT783iMGuM2ZtmEBy+mJMOKl2BCvPZ2lVrwvXnB6o+OBWCS+CoeCCpVRF2sA2RBKUxvT8tQT5Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-computed-properties@7.28.6': resolution: 
{integrity: sha512-bcc3k0ijhHbc2lEfpFHgx7eYw9KNXqOerKWfzbxEHUGKnS3sz9C4CNL9OiFN1297bDNfUiSO7DaLzbvHQQQ1BQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-destructuring@7.28.5': resolution: {integrity: sha512-Kl9Bc6D0zTUcFUvkNuQh4eGXPKKNDOJQXVyyM4ZAQPMveniJdxi8XMJwLo+xSoW3MIq81bD33lcUe9kZpl0MCw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-dotall-regex@7.28.6': resolution: {integrity: sha512-SljjowuNKB7q5Oayv4FoPzeB74g3QgLt8IVJw9ADvWy3QnUb/01aw8I4AVv8wYnPvQz2GDDZ/g3GhcNyDBI4Bg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-duplicate-keys@7.27.1': resolution: {integrity: sha512-MTyJk98sHvSs+cvZ4nOauwTTG1JeonDjSGvGGUNHreGQns+Mpt6WX/dVzWBHgg+dYZhkC4X+zTDfkTU+Vy9y7Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-duplicate-named-capturing-groups-regex@7.29.0': resolution: {integrity: sha512-zBPcW2lFGxdiD8PUnPwJjag2J9otbcLQzvbiOzDxpYXyCuYX9agOwMPGn1prVH0a4qzhCKu24rlH4c1f7yA8rw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-transform-dynamic-import@7.27.1': resolution: {integrity: sha512-MHzkWQcEmjzzVW9j2q8LGjwGWpG2mjwaaB0BNQwst3FIjqsg8Ct/mIZlvSPJvfi9y2AC8mi/ktxbFVL9pZ1I4A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-explicit-resource-management@7.28.6': resolution: {integrity: sha512-Iao5Konzx2b6g7EPqTy40UZbcdXE126tTxVFr/nAIj+WItNxjKSYTEw3RC+A2/ZetmdJsgueL1KhaMCQHkLPIg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-exponentiation-operator@7.28.6': resolution: {integrity: sha512-WitabqiGjV/vJ0aPOLSFfNY1u9U3R7W36B03r5I2KoNix+a3sOhJ3pKFB3R5It9/UiK78NiO0KE9P21cMhlPkw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-export-namespace-from@7.27.1': resolution: {integrity: 
sha512-tQvHWSZ3/jH2xuq/vZDy0jNn+ZdXJeM8gHvX4lnJmsc3+50yPlWdZXIc5ay+umX+2/tJIqHqiEqcJvxlmIvRvQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-for-of@7.27.1': resolution: {integrity: sha512-BfbWFFEJFQzLCQ5N8VocnCtA8J1CLkNTe2Ms2wocj75dd6VpiqS5Z5quTYcUoo4Yq+DN0rtikODccuv7RU81sw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-function-name@7.27.1': resolution: {integrity: sha512-1bQeydJF9Nr1eBCMMbC+hdwmRlsv5XYOMu03YSWFwNs0HsAmtSxxF1fyuYPqemVldVyFmlCU7w8UE14LupUSZQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-json-strings@7.28.6': resolution: {integrity: sha512-Nr+hEN+0geQkzhbdgQVPoqr47lZbm+5fCUmO70722xJZd0Mvb59+33QLImGj6F+DkK3xgDi1YVysP8whD6FQAw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-literals@7.27.1': resolution: {integrity: sha512-0HCFSepIpLTkLcsi86GG3mTUzxV5jpmbv97hTETW3yzrAij8aqlD36toB1D0daVFJM8NK6GvKO0gslVQmm+zZA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-logical-assignment-operators@7.28.6': resolution: {integrity: sha512-+anKKair6gpi8VsM/95kmomGNMD0eLz1NQ8+Pfw5sAwWH9fGYXT50E55ZpV0pHUHWf6IUTWPM+f/7AAff+wr9A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-member-expression-literals@7.27.1': resolution: {integrity: sha512-hqoBX4dcZ1I33jCSWcXrP+1Ku7kdqXf1oeah7ooKOIiAdKQ+uqftgCFNOSzA5AMS2XIHEYeGFg4cKRCdpxzVOQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-modules-amd@7.27.1': resolution: {integrity: sha512-iCsytMg/N9/oFq6n+gFTvUYDZQOMK5kEdeYxmxt91fcJGycfxVP9CnrxoliM0oumFERba2i8ZtwRUCMhvP1LnA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-modules-commonjs@7.28.6': resolution: {integrity: 
sha512-jppVbf8IV9iWWwWTQIxJMAJCWBuuKx71475wHwYytrRGQ2CWiDvYlADQno3tcYpS/T2UUWFQp3nVtYfK/YBQrA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-modules-systemjs@7.29.0': resolution: {integrity: sha512-PrujnVFbOdUpw4UHiVwKvKRLMMic8+eC0CuNlxjsyZUiBjhFdPsewdXCkveh2KqBA9/waD0W1b4hXSOBQJezpQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-modules-umd@7.27.1': resolution: {integrity: sha512-iQBE/xC5BV1OxJbp6WG7jq9IWiD+xxlZhLrdwpPkTX3ydmXdvoCpyfJN7acaIBZaOqTfr76pgzqBJflNbeRK+w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-named-capturing-groups-regex@7.29.0': resolution: {integrity: sha512-1CZQA5KNAD6ZYQLPw7oi5ewtDNxH/2vuCh+6SmvgDfhumForvs8a1o9n0UrEoBD8HU4djO2yWngTQlXl1NDVEQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-transform-new-target@7.27.1': resolution: {integrity: sha512-f6PiYeqXQ05lYq3TIfIDu/MtliKUbNwkGApPUvyo6+tc7uaR4cPjPe7DFPr15Uyycg2lZU6btZ575CuQoYh7MQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-nullish-coalescing-operator@7.28.6': resolution: {integrity: sha512-3wKbRgmzYbw24mDJXT7N+ADXw8BC/imU9yo9c9X9NKaLF1fW+e5H1U5QjMUBe4Qo4Ox/o++IyUkl1sVCLgevKg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-numeric-separator@7.28.6': resolution: {integrity: sha512-SJR8hPynj8outz+SlStQSwvziMN4+Bq99it4tMIf5/Caq+3iOc0JtKyse8puvyXkk3eFRIA5ID/XfunGgO5i6w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-object-rest-spread@7.28.6': resolution: {integrity: sha512-5rh+JR4JBC4pGkXLAcYdLHZjXudVxWMXbB6u6+E9lRL5TrGVbHt1TjxGbZ8CkmYw9zjkB7jutzOROArsqtncEA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-object-super@7.27.1': resolution: {integrity: 
sha512-SFy8S9plRPbIcxlJ8A6mT/CxFdJx/c04JEctz4jf8YZaVS2px34j7NXRrlGlHkN/M2gnpL37ZpGRGVFLd3l8Ng==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-optional-catch-binding@7.28.6': resolution: {integrity: sha512-R8ja/Pyrv0OGAvAXQhSTmWyPJPml+0TMqXlO5w+AsMEiwb2fg3WkOvob7UxFSL3OIttFSGSRFKQsOhJ/X6HQdQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-optional-chaining@7.28.6': resolution: {integrity: sha512-A4zobikRGJTsX9uqVFdafzGkqD30t26ck2LmOzAuLL8b2x6k3TIqRiT2xVvA9fNmFeTX484VpsdgmKNA0bS23w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-parameters@7.27.7': resolution: {integrity: sha512-qBkYTYCb76RRxUM6CcZA5KRu8K4SM8ajzVeUgVdMVO9NN9uI/GaVmBg/WKJJGnNokV9SY8FxNOVWGXzqzUidBg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-private-methods@7.28.6': resolution: {integrity: sha512-piiuapX9CRv7+0st8lmuUlRSmX6mBcVeNQ1b4AYzJxfCMuBfB0vBXDiGSmm03pKJw1v6cZ8KSeM+oUnM6yAExg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-private-property-in-object@7.28.6': resolution: {integrity: sha512-b97jvNSOb5+ehyQmBpmhOCiUC5oVK4PMnpRvO7+ymFBoqYjeDHIU9jnrNUuwHOiL9RpGDoKBpSViarV+BU+eVA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-property-literals@7.27.1': resolution: {integrity: sha512-oThy3BCuCha8kDZ8ZkgOg2exvPYUlprMukKQXI1r1pJ47NCvxfkEy8vK+r/hT9nF0Aa4H1WUPZZjHTFtAhGfmQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-regenerator@7.29.0': resolution: {integrity: sha512-FijqlqMA7DmRdg/aINBSs04y8XNTYw/lr1gJ2WsmBnnaNw1iS43EPkJW+zK7z65auG3AWRFXWj+NcTQwYptUog==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-regexp-modifiers@7.28.6': resolution: {integrity: 
sha512-QGWAepm9qxpaIs7UM9FvUSnCGlb8Ua1RhyM4/veAxLwt3gMat/LSGrZixyuj4I6+Kn9iwvqCyPTtbdxanYoWYg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/plugin-transform-reserved-words@7.27.1': resolution: {integrity: sha512-V2ABPHIJX4kC7HegLkYoDpfg9PVmuWy/i6vUM5eGK22bx4YVFD3M5F0QQnWQoDs6AGsUWTVOopBiMFQgHaSkVw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-runtime@7.28.5': resolution: {integrity: sha512-20NUVgOrinudkIBzQ2bNxP08YpKprUkRTiRSd2/Z5GOdPImJGkoN4Z7IQe1T5AdyKI1i5L6RBmluqdSzvaq9/w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-shorthand-properties@7.27.1': resolution: {integrity: sha512-N/wH1vcn4oYawbJ13Y/FxcQrWk63jhfNa7jef0ih7PHSIHX2LB7GWE1rkPrOnka9kwMxb6hMl19p7lidA+EHmQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-spread@7.28.6': resolution: {integrity: sha512-9U4QObUC0FtJl05AsUcodau/RWDytrU6uKgkxu09mLR9HLDAtUMoPuuskm5huQsoktmsYpI+bGmq+iapDcriKA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-sticky-regex@7.27.1': resolution: {integrity: sha512-lhInBO5bi/Kowe2/aLdBAawijx+q1pQzicSgnkB6dUPc1+RC8QmJHKf2OjvU+NZWitguJHEaEmbV6VWEouT58g==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-template-literals@7.27.1': resolution: {integrity: sha512-fBJKiV7F2DxZUkg5EtHKXQdbsbURW3DZKQUWphDum0uRP6eHGGa/He9mc0mypL680pb+e/lDIthRohlv8NCHkg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-typeof-symbol@7.27.1': resolution: {integrity: sha512-RiSILC+nRJM7FY5srIyc4/fGIwUhyDuuBSdWn4y6yT6gm652DpCHZjIipgn6B7MQ1ITOUnAKWixEUjQRIBIcLw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-unicode-escapes@7.27.1': resolution: {integrity: sha512-Ysg4v6AmF26k9vpfFuTZg8HRfVWzsh1kVfowA23y9j/Gu6dOuahdUVhkLqpObp3JIv27MLSii6noRnuKN8H0Mg==} 
engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-unicode-property-regex@7.28.6': resolution: {integrity: sha512-4Wlbdl/sIZjzi/8St0evF0gEZrgOswVO6aOzqxh1kDZOl9WmLrHq2HtGhnOJZmHZYKP8WZ1MDLCt5DAWwRo57A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-unicode-regex@7.27.1': resolution: {integrity: sha512-xvINq24TRojDuyt6JGtHmkVkrfVV3FPT16uytxImLeBZqW3/H52yN+kM1MGuyPkIQxrzKwPHs5U/MP3qKyzkGw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/plugin-transform-unicode-sets-regex@7.28.6': resolution: {integrity: sha512-/wHc/paTUmsDYN7SZkpWxogTOBNnlx7nBQYfy6JJlCT7G3mVhltk3e++N7zV0XfgGsrqBxd4rJQt9H16I21Y1Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 '@babel/preset-env@7.28.5': resolution: {integrity: sha512-S36mOoi1Sb6Fz98fBfE+UZSpYw5mJm0NUHtIKrOuNcqeFauy1J6dIvXm2KRVKobOSaGq4t/hBXdN4HGU3wL9Wg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 '@babel/preset-modules@0.1.6-no-external-plugins': resolution: {integrity: sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==} peerDependencies: '@babel/core': ^7.0.0-0 || ^8.0.0-0 <8.0.0 '@babel/runtime@7.28.4': resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==} engines: {node: '>=6.9.0'} '@babel/template@7.28.6': resolution: {integrity: sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==} engines: {node: '>=6.9.0'} '@babel/traverse@7.29.0': resolution: {integrity: sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==} engines: {node: '>=6.9.0'} '@babel/types@7.29.0': resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} engines: {node: '>=6.9.0'} '@discoveryjs/json-ext@0.6.3': resolution: {integrity: 
sha512-4B4OijXeVNOPZlYA2oEwWOTkzyltLao+xbotHQeqN++Rv27Y6s818+n2Qkp8q+Fxhn0t/5lA5X1Mxktud8eayQ==} engines: {node: '>=14.17.0'} '@emnapi/core@1.8.1': resolution: {integrity: sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==} '@emnapi/runtime@1.8.1': resolution: {integrity: sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==} '@emnapi/wasi-threads@1.1.0': resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==} '@esbuild/aix-ppc64@0.27.2': resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==} engines: {node: '>=18'} cpu: [ppc64] os: [aix] '@esbuild/android-arm64@0.27.2': resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==} engines: {node: '>=18'} cpu: [arm64] os: [android] '@esbuild/android-arm@0.27.2': resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==} engines: {node: '>=18'} cpu: [arm] os: [android] '@esbuild/android-x64@0.27.2': resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==} engines: {node: '>=18'} cpu: [x64] os: [android] '@esbuild/darwin-arm64@0.27.2': resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==} engines: {node: '>=18'} cpu: [arm64] os: [darwin] '@esbuild/darwin-x64@0.27.2': resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==} engines: {node: '>=18'} cpu: [x64] os: [darwin] '@esbuild/freebsd-arm64@0.27.2': resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==} engines: {node: '>=18'} cpu: [arm64] os: [freebsd] 
'@esbuild/freebsd-x64@0.27.2': resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==} engines: {node: '>=18'} cpu: [x64] os: [freebsd] '@esbuild/linux-arm64@0.27.2': resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==} engines: {node: '>=18'} cpu: [arm64] os: [linux] '@esbuild/linux-arm@0.27.2': resolution: {integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==} engines: {node: '>=18'} cpu: [arm] os: [linux] '@esbuild/linux-ia32@0.27.2': resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==} engines: {node: '>=18'} cpu: [ia32] os: [linux] '@esbuild/linux-loong64@0.27.2': resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==} engines: {node: '>=18'} cpu: [loong64] os: [linux] '@esbuild/linux-mips64el@0.27.2': resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==} engines: {node: '>=18'} cpu: [mips64el] os: [linux] '@esbuild/linux-ppc64@0.27.2': resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==} engines: {node: '>=18'} cpu: [ppc64] os: [linux] '@esbuild/linux-riscv64@0.27.2': resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==} engines: {node: '>=18'} cpu: [riscv64] os: [linux] '@esbuild/linux-s390x@0.27.2': resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==} engines: {node: '>=18'} cpu: [s390x] os: [linux] '@esbuild/linux-x64@0.27.2': resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==} engines: {node: '>=18'} cpu: 
[x64] os: [linux] '@esbuild/netbsd-arm64@0.27.2': resolution: {integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==} engines: {node: '>=18'} cpu: [arm64] os: [netbsd] '@esbuild/netbsd-x64@0.27.2': resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==} engines: {node: '>=18'} cpu: [x64] os: [netbsd] '@esbuild/openbsd-arm64@0.27.2': resolution: {integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==} engines: {node: '>=18'} cpu: [arm64] os: [openbsd] '@esbuild/openbsd-x64@0.27.2': resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==} engines: {node: '>=18'} cpu: [x64] os: [openbsd] '@esbuild/openharmony-arm64@0.27.2': resolution: {integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==} engines: {node: '>=18'} cpu: [arm64] os: [openharmony] '@esbuild/sunos-x64@0.27.2': resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==} engines: {node: '>=18'} cpu: [x64] os: [sunos] '@esbuild/win32-arm64@0.27.2': resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==} engines: {node: '>=18'} cpu: [arm64] os: [win32] '@esbuild/win32-ia32@0.27.2': resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==} engines: {node: '>=18'} cpu: [ia32] os: [win32] '@esbuild/win32-x64@0.27.2': resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==} engines: {node: '>=18'} cpu: [x64] os: [win32] '@hono/node-server@1.19.9': resolution: {integrity: sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==} engines: {node: 
'>=18.14.1'} peerDependencies: hono: ^4 '@inquirer/ansi@1.0.2': resolution: {integrity: sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==} engines: {node: '>=18'} '@inquirer/checkbox@4.3.2': resolution: {integrity: sha512-VXukHf0RR1doGe6Sm4F0Em7SWYLTHSsbGfJdS9Ja2bX5/D5uwVOEjr07cncLROdBvmnvCATYEWlHqYmXv2IlQA==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/confirm@5.1.21': resolution: {integrity: sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/core@10.3.2': resolution: {integrity: sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/editor@4.2.23': resolution: {integrity: sha512-aLSROkEwirotxZ1pBaP8tugXRFCxW94gwrQLxXfrZsKkfjOYC1aRvAZuhpJOb5cu4IBTJdsCigUlf2iCOu4ZDQ==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/expand@4.0.23': resolution: {integrity: sha512-nRzdOyFYnpeYTTR2qFwEVmIWypzdAx/sIkCMeTNTcflFOovfqUk+HcFhQQVBftAh9gmGrpFj6QcGEqrDMDOiew==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/external-editor@1.0.3': resolution: {integrity: sha512-RWbSrDiYmO4LbejWY7ttpxczuwQyZLBUyygsA9Nsv95hpzUWwnNTVQmAq3xuh7vNwCp07UTmE5i11XAEExx4RA==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/figures@1.0.15': resolution: {integrity: sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==} engines: {node: '>=18'} '@inquirer/input@4.3.1': 
resolution: {integrity: sha512-kN0pAM4yPrLjJ1XJBjDxyfDduXOuQHrBB8aLDMueuwUGn+vNpF7Gq7TvyVxx8u4SHlFFj4trmj+a2cbpG4Jn1g==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/number@3.0.23': resolution: {integrity: sha512-5Smv0OK7K0KUzUfYUXDXQc9jrf8OHo4ktlEayFlelCjwMXz0299Y8OrI+lj7i4gCBY15UObk76q0QtxjzFcFcg==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/password@4.0.23': resolution: {integrity: sha512-zREJHjhT5vJBMZX/IUbyI9zVtVfOLiTO66MrF/3GFZYZ7T4YILW5MSkEYHceSii/KtRk+4i3RE7E1CUXA2jHcA==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/prompts@7.10.1': resolution: {integrity: sha512-Dx/y9bCQcXLI5ooQ5KyvA4FTgeo2jYj/7plWfV5Ak5wDPKQZgudKez2ixyfz7tKXzcJciTxqLeK7R9HItwiByg==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/rawlist@4.1.11': resolution: {integrity: sha512-+LLQB8XGr3I5LZN/GuAHo+GpDJegQwuPARLChlMICNdwW7OwV2izlCSCxN6cqpL0sMXmbKbFcItJgdQq5EBXTw==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/search@3.2.2': resolution: {integrity: sha512-p2bvRfENXCZdWF/U2BXvnSI9h+tuA8iNqtUKb9UWbmLYCRQxd8WkvwWvYn+3NgYaNwdUkHytJMGG4MMLucI1kA==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/select@4.4.2': resolution: {integrity: sha512-l4xMuJo55MAe+N7Qr4rX90vypFwCajSakx59qe/tMaC1aEHWLyw68wF4o0A4SLAY4E0nd+Vt+EyskeDIqu1M6w==} engines: {node: '>=18'} peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@inquirer/type@3.0.10': resolution: {integrity: sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==} engines: {node: '>=18'} 
peerDependencies: '@types/node': '>=18' peerDependenciesMeta: '@types/node': optional: true '@isaacs/balanced-match@4.0.1': resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} engines: {node: 20 || >=22} '@isaacs/brace-expansion@5.0.1': resolution: {integrity: sha512-WMz71T1JS624nWj2n2fnYAuPovhv7EUhk69R6i9dsVyzxt5eM3bjwvgk9L+APE1TRscGysAVMANkB0jh0LQZrQ==} engines: {node: 20 || >=22} '@isaacs/fs-minipass@4.0.1': resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} engines: {node: '>=18.0.0'} '@istanbuljs/schema@0.1.3': resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} engines: {node: '>=8'} '@jridgewell/gen-mapping@0.3.13': resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} '@jridgewell/remapping@2.3.5': resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==} '@jridgewell/resolve-uri@3.1.2': resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} engines: {node: '>=6.0.0'} '@jridgewell/source-map@0.3.11': resolution: {integrity: sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==} '@jridgewell/sourcemap-codec@1.5.5': resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} '@jridgewell/trace-mapping@0.3.31': resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} '@jsonjoy.com/base64@1.1.2': resolution: {integrity: sha512-q6XAnWQDIMA3+FTiOYajoYqySkO+JSat0ytXGSuRdq9uXE7o92gzuQwQM14xaCRlBLGq3v5miDGC4vkVTn54xA==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/base64@17.67.0': 
resolution: {integrity: sha512-5SEsJGsm15aP8TQGkDfJvz9axgPwAEm98S5DxOuYe8e1EbfajcDmgeXXzccEjh+mLnjqEKrkBdjHWS5vFNwDdw==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/buffers@1.2.1': resolution: {integrity: sha512-12cdlDwX4RUM3QxmUbVJWqZ/mrK6dFQH4Zxq6+r1YXKXYBNgZXndx2qbCJwh3+WWkCSn67IjnlG3XYTvmvYtgA==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/buffers@17.67.0': resolution: {integrity: sha512-tfExRpYxBvi32vPs9ZHaTjSP4fHAfzSmcahOfNxtvGHcyJel+aibkPlGeBB+7AoC6hL7lXIE++8okecBxx7lcw==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/codegen@1.0.0': resolution: {integrity: sha512-E8Oy+08cmCf0EK/NMxpaJZmOxPqM+6iSe2S4nlSBrPZOORoDJILxtbSUEDKQyTamm/BVAhIGllOBNU79/dwf0g==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/codegen@17.67.0': resolution: {integrity: sha512-idnkUplROpdBOV0HMcwhsCUS5TRUi9poagdGs70A6S4ux9+/aPuKbh8+UYRTLYQHtXvAdNfQWXDqZEx5k4Dj2Q==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-core@4.56.10': resolution: {integrity: sha512-PyAEA/3cnHhsGcdY+AmIU+ZPqTuZkDhCXQ2wkXypdLitSpd6d5Ivxhnq4wa2ETRWFVJGabYynBWxIijOswSmOw==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-fsa@4.56.10': resolution: {integrity: sha512-/FVK63ysNzTPOnCCcPoPHt77TOmachdMS422txM4KhxddLdbW1fIbFMYH0AM0ow/YchCyS5gqEjKLNyv71j/5Q==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-node-builtins@4.56.10': resolution: {integrity: sha512-uUnKz8R0YJyKq5jXpZtkGV9U0pJDt8hmYcLRrPjROheIfjMXsz82kXMgAA/qNg0wrZ1Kv+hrg7azqEZx6XZCVw==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-node-to-fsa@4.56.10': resolution: {integrity: sha512-oH+O6Y4lhn9NyG6aEoFwIBNKZeYy66toP5LJcDOMBgL99BKQMUf/zWJspdRhMdn/3hbzQsZ8EHHsuekbFLGUWw==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-node-utils@4.56.10': resolution: {integrity: 
sha512-8EuPBgVI2aDPwFdaNQeNpHsyqPi3rr+85tMNG/lHvQLiVjzoZsvxA//Xd8aB567LUhy4QS03ptT+unkD/DIsNg==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-node@4.56.10': resolution: {integrity: sha512-7R4Gv3tkUdW3dXfXiOkqxkElxKNVdd8BDOWC0/dbERd0pXpPY+s2s1Mino+aTvkGrFPiY+mmVxA7zhskm4Ue4Q==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-print@4.56.10': resolution: {integrity: sha512-JW4fp5mAYepzFsSGrQ48ep8FXxpg4niFWHdF78wDrFGof7F3tKDJln72QFDEn/27M1yHd4v7sKHHVPh78aWcEw==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/fs-snapshot@4.56.10': resolution: {integrity: sha512-DkR6l5fj7+qj0+fVKm/OOXMGfDFCGXLfyHkORH3DF8hxkpDgIHbhf/DwncBMs2igu/ST7OEkexn1gIqoU6Y+9g==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/json-pack@1.21.0': resolution: {integrity: sha512-+AKG+R2cfZMShzrF2uQw34v3zbeDYUqnQ+jg7ORic3BGtfw9p/+N6RJbq/kkV8JmYZaINknaEQ2m0/f693ZPpg==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/json-pack@17.67.0': resolution: {integrity: sha512-t0ejURcGaZsn1ClbJ/3kFqSOjlryd92eQY465IYrezsXmPcfHPE/av4twRSxf6WE+TkZgLY+71vCZbiIiFKA/w==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/json-pointer@1.0.2': resolution: {integrity: sha512-Fsn6wM2zlDzY1U+v4Nc8bo3bVqgfNTGcn6dMgs6FjrEnt4ZCe60o6ByKRjOGlI2gow0aE/Q41QOigdTqkyK5fg==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/json-pointer@17.67.0': resolution: {integrity: sha512-+iqOFInH+QZGmSuaybBUNdh7yvNrXvqR+h3wjXm0N/3JK1EyyFAeGJvqnmQL61d1ARLlk/wJdFKSL+LHJ1eaUA==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/util@1.9.0': resolution: {integrity: sha512-pLuQo+VPRnN8hfPqUTLTHk126wuYdXVxE6aDmjSeV4NCAgyxWbiOIeNJVtID3h1Vzpoi9m4jXezf73I6LgabgQ==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' '@jsonjoy.com/util@17.67.0': resolution: {integrity: sha512-6+8xBaz1rLSohlGh68D1pdw3AwDi9xydm8QNlAFkvnavCJYSze+pxoW2VKP8p308jtlMRLs5NTHfPlZLd4w7ew==} engines: {node: 
'>=10.0'} peerDependencies: tslib: '2' '@leichtgewicht/ip-codec@2.0.5': resolution: {integrity: sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==} '@listr2/prompt-adapter-inquirer@3.0.5': resolution: {integrity: sha512-WELs+hj6xcilkloBXYf9XXK8tYEnKsgLj01Xl5ONUJpKjmT5hGVUzNUS5tooUxs7pGMrw+jFD/41WpqW4V3LDA==} engines: {node: '>=20.0.0'} peerDependencies: '@inquirer/prompts': '>= 3 < 8' listr2: 9.0.5 '@lmdb/lmdb-darwin-arm64@3.4.4': resolution: {integrity: sha512-XaKL705gDWd6XVls3ATDj13ZdML/LqSIxwgnYpG8xTzH2ifArx8fMMDdvqGE/Emd+W6R90W2fveZcJ0AyS8Y0w==} cpu: [arm64] os: [darwin] '@lmdb/lmdb-darwin-x64@3.4.4': resolution: {integrity: sha512-GPHGEVcwJlkD01GmIr7B4kvbIcUDS2+kBadVEd7lU4can1RZaZQLDDBJRrrNfS2Kavvl0VLI/cMv7UASAXGrww==} cpu: [x64] os: [darwin] '@lmdb/lmdb-linux-arm64@3.4.4': resolution: {integrity: sha512-mALqr7DE42HsiwVTKpQWxacjHoJk+e9p00RWIJqTACh/hpucxp/0lK/XMh5XzWnU/TDCZLukq1+vNqnNumTP/Q==} cpu: [arm64] os: [linux] '@lmdb/lmdb-linux-arm@3.4.4': resolution: {integrity: sha512-cmev5/dZr5ACKri9f6GU6lZCXTjMhV72xujlbOhFCgFXrt4W0TxGsmY8kA1BITvH60JBKE50cSxsiulybAbrrw==} cpu: [arm] os: [linux] '@lmdb/lmdb-linux-x64@3.4.4': resolution: {integrity: sha512-QjLs8OcmCNcraAcLoZyFlo0atzBJniQLLwhtR+ymQqS5kLYpV5RqwriL87BW+ZiR9ZiGgZx3evrz5vnWPtJ1fQ==} cpu: [x64] os: [linux] '@lmdb/lmdb-win32-arm64@3.4.4': resolution: {integrity: sha512-tr/pwHDlZ33forLGAr0tI04cRmP4SgF93yHbb+2zvZiDEyln5yMHhbKDySxY66aUOkhvBvTuHq9q/3YmTj6ZHQ==} cpu: [arm64] os: [win32] '@lmdb/lmdb-win32-x64@3.4.4': resolution: {integrity: sha512-KRzfocJzB/mgoTCqnMawuLSKheHRVTqWfSmouIgYpFs6Hx4zvZSvsZKSCEb5gHmICy7qsx9l06jk3MFTtiFVAQ==} cpu: [x64] os: [win32] '@modelcontextprotocol/sdk@1.25.2': resolution: {integrity: sha512-LZFeo4F9M5qOhC/Uc1aQSrBHxMrvxett+9KLHt7OhcExtoiRN9DKgbZffMP/nxjutWDQpfMDfP3nkHI4X9ijww==} engines: {node: '>=18'} peerDependencies: '@cfworker/json-schema': ^4.1.1 zod: ^3.25 || ^4.0 peerDependenciesMeta: '@cfworker/json-schema': optional: true 
'@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': resolution: {integrity: sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==} cpu: [arm64] os: [darwin] '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': resolution: {integrity: sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==} cpu: [x64] os: [darwin] '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': resolution: {integrity: sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==} cpu: [arm64] os: [linux] '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': resolution: {integrity: sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==} cpu: [arm] os: [linux] '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': resolution: {integrity: sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==} cpu: [x64] os: [linux] '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': resolution: {integrity: sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==} cpu: [x64] os: [win32] '@napi-rs/nice-android-arm-eabi@1.1.1': resolution: {integrity: sha512-kjirL3N6TnRPv5iuHw36wnucNqXAO46dzK9oPb0wj076R5Xm8PfUVA9nAFB5ZNMmfJQJVKACAPd/Z2KYMppthw==} engines: {node: '>= 10'} cpu: [arm] os: [android] '@napi-rs/nice-android-arm64@1.1.1': resolution: {integrity: sha512-blG0i7dXgbInN5urONoUCNf+DUEAavRffrO7fZSeoRMJc5qD+BJeNcpr54msPF6qfDD6kzs9AQJogZvT2KD5nw==} engines: {node: '>= 10'} cpu: [arm64] os: [android] '@napi-rs/nice-darwin-arm64@1.1.1': resolution: {integrity: sha512-s/E7w45NaLqTGuOjC2p96pct4jRfo61xb9bU1unM/MJ/RFkKlJyJDx7OJI/O0ll/hrfpqKopuAFDV8yo0hfT7A==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] '@napi-rs/nice-darwin-x64@1.1.1': resolution: {integrity: sha512-dGoEBnVpsdcC+oHHmW1LRK5eiyzLwdgNQq3BmZIav+9/5WTZwBYX7r5ZkQC07Nxd3KHOCkgbHSh4wPkH1N1LiQ==} engines: {node: 
'>= 10'} cpu: [x64] os: [darwin] '@napi-rs/nice-freebsd-x64@1.1.1': resolution: {integrity: sha512-kHv4kEHAylMYmlNwcQcDtXjklYp4FCf0b05E+0h6nDHsZ+F0bDe04U/tXNOqrx5CmIAth4vwfkjjUmp4c4JktQ==} engines: {node: '>= 10'} cpu: [x64] os: [freebsd] '@napi-rs/nice-linux-arm-gnueabihf@1.1.1': resolution: {integrity: sha512-E1t7K0efyKXZDoZg1LzCOLxgolxV58HCkaEkEvIYQx12ht2pa8hoBo+4OB3qh7e+QiBlp1SRf+voWUZFxyhyqg==} engines: {node: '>= 10'} cpu: [arm] os: [linux] '@napi-rs/nice-linux-arm64-gnu@1.1.1': resolution: {integrity: sha512-CIKLA12DTIZlmTaaKhQP88R3Xao+gyJxNWEn04wZwC2wmRapNnxCUZkVwggInMJvtVElA+D4ZzOU5sX4jV+SmQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] libc: [glibc] '@napi-rs/nice-linux-arm64-musl@1.1.1': resolution: {integrity: sha512-+2Rzdb3nTIYZ0YJF43qf2twhqOCkiSrHx2Pg6DJaCPYhhaxbLcdlV8hCRMHghQ+EtZQWGNcS2xF4KxBhSGeutg==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] libc: [musl] '@napi-rs/nice-linux-ppc64-gnu@1.1.1': resolution: {integrity: sha512-4FS8oc0GeHpwvv4tKciKkw3Y4jKsL7FRhaOeiPei0X9T4Jd619wHNe4xCLmN2EMgZoeGg+Q7GY7BsvwKpL22Tg==} engines: {node: '>= 10'} cpu: [ppc64] os: [linux] libc: [glibc] '@napi-rs/nice-linux-riscv64-gnu@1.1.1': resolution: {integrity: sha512-HU0nw9uD4FO/oGCCk409tCi5IzIZpH2agE6nN4fqpwVlCn5BOq0MS1dXGjXaG17JaAvrlpV5ZeyZwSon10XOXw==} engines: {node: '>= 10'} cpu: [riscv64] os: [linux] libc: [glibc] '@napi-rs/nice-linux-s390x-gnu@1.1.1': resolution: {integrity: sha512-2YqKJWWl24EwrX0DzCQgPLKQBxYDdBxOHot1KWEq7aY2uYeX+Uvtv4I8xFVVygJDgf6/92h9N3Y43WPx8+PAgQ==} engines: {node: '>= 10'} cpu: [s390x] os: [linux] libc: [glibc] '@napi-rs/nice-linux-x64-gnu@1.1.1': resolution: {integrity: sha512-/gaNz3R92t+dcrfCw/96pDopcmec7oCcAQ3l/M+Zxr82KT4DljD37CpgrnXV+pJC263JkW572pdbP3hP+KjcIg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] libc: [glibc] '@napi-rs/nice-linux-x64-musl@1.1.1': resolution: {integrity: sha512-xScCGnyj/oppsNPMnevsBe3pvNaoK7FGvMjT35riz9YdhB2WtTG47ZlbxtOLpjeO9SqqQ2J2igCmz6IJOD5JYw==} engines: {node: '>= 10'} cpu: [x64] os: 
[linux] libc: [musl] '@napi-rs/nice-openharmony-arm64@1.1.1': resolution: {integrity: sha512-6uJPRVwVCLDeoOaNyeiW0gp2kFIM4r7PL2MczdZQHkFi9gVlgm+Vn+V6nTWRcu856mJ2WjYJiumEajfSm7arPQ==} engines: {node: '>= 10'} cpu: [arm64] os: [openharmony] '@napi-rs/nice-win32-arm64-msvc@1.1.1': resolution: {integrity: sha512-uoTb4eAvM5B2aj/z8j+Nv8OttPf2m+HVx3UjA5jcFxASvNhQriyCQF1OB1lHL43ZhW+VwZlgvjmP5qF3+59atA==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] '@napi-rs/nice-win32-ia32-msvc@1.1.1': resolution: {integrity: sha512-CNQqlQT9MwuCsg1Vd/oKXiuH+TcsSPJmlAFc5frFyX/KkOh0UpBLEj7aoY656d5UKZQMQFP7vJNa1DNUNORvug==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] '@napi-rs/nice-win32-x64-msvc@1.1.1': resolution: {integrity: sha512-vB+4G/jBQCAh0jelMTY3+kgFy00Hlx2f2/1zjMoH821IbplbWZOkLiTYXQkygNTzQJTq5cvwBDgn2ppHD+bglQ==} engines: {node: '>= 10'} cpu: [x64] os: [win32] '@napi-rs/nice@1.1.1': resolution: {integrity: sha512-xJIPs+bYuc9ASBl+cvGsKbGrJmS6fAKaSZCnT0lhahT5rhA2VVy9/EcIgd2JhtEuFOJNx7UHNn/qiTPTY4nrQw==} engines: {node: '>= 10'} '@napi-rs/wasm-runtime@1.1.1': resolution: {integrity: sha512-p64ah1M1ld8xjWv3qbvFwHiFVWrq1yFvV4f7w+mzaqiR4IlSgkqhcRdHwsGgomwzBH51sRY4NEowLxnaBjcW/A==} '@ngtools/webpack@21.1.0-rc.0': resolution: {integrity: sha512-ZFLz0m03wGSjQsp1U0rwnTEzW9Nv8fFfk4e1Yu+28VjlVcMxRujAtJ0jEXwQ4QsrSU9er7ZFacvPArAOzfWIXA==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} peerDependencies: '@angular/compiler-cli': ^21.0.0 || ^21.1.0-next.0 typescript: '>=5.9 <6.0' webpack: ^5.54.0 '@npmcli/agent@4.0.0': resolution: {integrity: sha512-kAQTcEN9E8ERLVg5AsGwLNoFb+oEG6engbqAU2P43gD4JEIkNGMHdVQ096FsOAAYpZPB0RSt0zgInKIAS1l5QA==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/fs@5.0.0': resolution: {integrity: sha512-7OsC1gNORBEawOa5+j2pXN9vsicaIOH5cPXxoR6fJOmH6/EXpJB2CajXOu1fPRFun2m1lktEFX11+P89hqO/og==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/git@7.0.1': resolution: {integrity: 
sha512-+XTFxK2jJF/EJJ5SoAzXk3qwIDfvFc5/g+bD274LZ7uY7LE8sTfG6Z8rOanPl2ZEvZWqNvmEdtXC25cE54VcoA==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/installed-package-contents@4.0.0': resolution: {integrity: sha512-yNyAdkBxB72gtZ4GrwXCM0ZUedo9nIbOMKfGjt6Cu6DXf0p8y1PViZAKDC8q8kv/fufx0WTjRBdSlyrvnP7hmA==} engines: {node: ^20.17.0 || >=22.9.0} hasBin: true '@npmcli/node-gyp@5.0.0': resolution: {integrity: sha512-uuG5HZFXLfyFKqg8QypsmgLQW7smiRjVc45bqD/ofZZcR/uxEjgQU8qDPv0s9TEeMUiAAU/GC5bR6++UdTirIQ==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/package-json@7.0.4': resolution: {integrity: sha512-0wInJG3j/K40OJt/33ax47WfWMzZTm6OQxB9cDhTt5huCP2a9g2GnlsxmfN+PulItNPIpPrZ+kfwwUil7eHcZQ==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/promise-spawn@9.0.1': resolution: {integrity: sha512-OLUaoqBuyxeTqUvjA3FZFiXUfYC1alp3Sa99gW3EUDz3tZ3CbXDdcZ7qWKBzicrJleIgucoWamWH1saAmH/l2Q==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/redact@4.0.0': resolution: {integrity: sha512-gOBg5YHMfZy+TfHArfVogwgfBeQnKbbGo3pSUyK/gSI0AVu+pEiDVcKlQb0D8Mg1LNRZILZ6XG8I5dJ4KuAd9Q==} engines: {node: ^20.17.0 || >=22.9.0} '@npmcli/run-script@10.0.3': resolution: {integrity: sha512-ER2N6itRkzWbbtVmZ9WKaWxVlKlOeBFF1/7xx+KA5J1xKa4JjUwBdb6tDpk0v1qA+d+VDwHI9qmLcXSWcmi+Rw==} engines: {node: ^20.17.0 || >=22.9.0} '@oxc-project/types@0.106.0': resolution: {integrity: sha512-QdsH3rZq480VnOHSHgPYOhjL8O8LBdcnSjM408BpPCCUc0JYYZPG9Gafl9i3OcGk/7137o+gweb4cCv3WAUykg==} '@parcel/watcher-android-arm64@2.5.6': resolution: {integrity: sha512-YQxSS34tPF/6ZG7r/Ih9xy+kP/WwediEUsqmtf0cuCV5TPPKw/PQHRhueUo6JdeFJaqV3pyjm0GdYjZotbRt/A==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [android] '@parcel/watcher-darwin-arm64@2.5.6': resolution: {integrity: sha512-Z2ZdrnwyXvvvdtRHLmM4knydIdU9adO3D4n/0cVipF3rRiwP+3/sfzpAwA/qKFL6i1ModaabkU7IbpeMBgiVEA==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [darwin] '@parcel/watcher-darwin-x64@2.5.6': resolution: {integrity: 
sha512-HgvOf3W9dhithcwOWX9uDZyn1lW9R+7tPZ4sug+NGrGIo4Rk1hAXLEbcH1TQSqxts0NYXXlOWqVpvS1SFS4fRg==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [darwin] '@parcel/watcher-freebsd-x64@2.5.6': resolution: {integrity: sha512-vJVi8yd/qzJxEKHkeemh7w3YAn6RJCtYlE4HPMoVnCpIXEzSrxErBW5SJBgKLbXU3WdIpkjBTeUNtyBVn8TRng==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [freebsd] '@parcel/watcher-linux-arm-glibc@2.5.6': resolution: {integrity: sha512-9JiYfB6h6BgV50CCfasfLf/uvOcJskMSwcdH1PHH9rvS1IrNy8zad6IUVPVUfmXr+u+Km9IxcfMLzgdOudz9EQ==} engines: {node: '>= 10.0.0'} cpu: [arm] os: [linux] libc: [glibc] '@parcel/watcher-linux-arm-musl@2.5.6': resolution: {integrity: sha512-Ve3gUCG57nuUUSyjBq/MAM0CzArtuIOxsBdQ+ftz6ho8n7s1i9E1Nmk/xmP323r2YL0SONs1EuwqBp2u1k5fxg==} engines: {node: '>= 10.0.0'} cpu: [arm] os: [linux] libc: [musl] '@parcel/watcher-linux-arm64-glibc@2.5.6': resolution: {integrity: sha512-f2g/DT3NhGPdBmMWYoxixqYr3v/UXcmLOYy16Bx0TM20Tchduwr4EaCbmxh1321TABqPGDpS8D/ggOTaljijOA==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [linux] libc: [glibc] '@parcel/watcher-linux-arm64-musl@2.5.6': resolution: {integrity: sha512-qb6naMDGlbCwdhLj6hgoVKJl2odL34z2sqkC7Z6kzir8b5W65WYDpLB6R06KabvZdgoHI/zxke4b3zR0wAbDTA==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [linux] libc: [musl] '@parcel/watcher-linux-x64-glibc@2.5.6': resolution: {integrity: sha512-kbT5wvNQlx7NaGjzPFu8nVIW1rWqV780O7ZtkjuWaPUgpv2NMFpjYERVi0UYj1msZNyCzGlaCWEtzc+exjMGbQ==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [linux] libc: [glibc] '@parcel/watcher-linux-x64-musl@2.5.6': resolution: {integrity: sha512-1JRFeC+h7RdXwldHzTsmdtYR/Ku8SylLgTU/reMuqdVD7CtLwf0VR1FqeprZ0eHQkO0vqsbvFLXUmYm/uNKJBg==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [linux] libc: [musl] '@parcel/watcher-win32-arm64@2.5.6': resolution: {integrity: sha512-3ukyebjc6eGlw9yRt678DxVF7rjXatWiHvTXqphZLvo7aC5NdEgFufVwjFfY51ijYEWpXbqF5jtrK275z52D4Q==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [win32] '@parcel/watcher-win32-ia32@2.5.6': resolution: 
{integrity: sha512-k35yLp1ZMwwee3Ez/pxBi5cf4AoBKYXj00CZ80jUz5h8prpiaQsiRPKQMxoLstNuqe2vR4RNPEAEcjEFzhEz/g==} engines: {node: '>= 10.0.0'} cpu: [ia32] os: [win32] '@parcel/watcher-win32-x64@2.5.6': resolution: {integrity: sha512-hbQlYcCq5dlAX9Qx+kFb0FHue6vbjlf0FrNzSKdYK2APUf7tGfGxQCk2ihEREmbR6ZMc0MVAD5RIX/41gpUzTw==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [win32] '@parcel/watcher@2.5.6': resolution: {integrity: sha512-tmmZ3lQxAe/k/+rNnXQRawJ4NjxO2hqiOLTHvWchtGZULp4RyFeh6aU4XdOYBFe2KE1oShQTv4AblOs2iOrNnQ==} engines: {node: '>= 10.0.0'} '@rolldown/binding-android-arm64@1.0.0-beta.58': resolution: {integrity: sha512-mWj5eE4Qc8TbPdGGaaLvBb9XfDPvE1EmZkJQgiGKwchkWH4oAJcRAKMTw7ZHnb1L+t7Ah41sBkAecaIsuUgsug==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] '@rolldown/binding-darwin-arm64@1.0.0-beta.58': resolution: {integrity: sha512-wFxUymI/5R8bH8qZFYDfAxAN9CyISEIYke+95oZPiv6EWo88aa5rskjVcCpKA532R+klFmdqjbbaD56GNmTF4Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] '@rolldown/binding-darwin-x64@1.0.0-beta.58': resolution: {integrity: sha512-ybp3MkPj23VDV9PhtRwdU5qrGhlViWRV5BjKwO6epaSlUD5lW0WyY+roN3ZAzbma/9RrMTgZ/a/gtQq8YXOcqw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] '@rolldown/binding-freebsd-x64@1.0.0-beta.58': resolution: {integrity: sha512-Evxj3yh7FWvyklUYZa0qTVT9N2zX9TPDqGF056hl8hlCZ9/ndQ2xMv6uw9PD1VlLpukbsqL+/C6M0qwipL0QMg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.58': resolution: {integrity: sha512-tYeXprDOrEgVHUbPXH6MPso4cM/c6RTkmJNICMQlYdki4hGMh92aj3yU6CKs+4X5gfG0yj5kVUw/L4M685SYag==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.58': resolution: {integrity: sha512-N78vmZzP6zG967Ohr+MasCjmKtis0geZ1SOVmxrA0/bklTQSzH5kHEjW5Qn+i1taFno6GEre1E40v0wuWsNOQw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] libc: [glibc] 
'@rolldown/binding-linux-arm64-musl@1.0.0-beta.58': resolution: {integrity: sha512-l+p4QVtG72C7wI2SIkNQw/KQtSjuYwS3rV6AKcWrRBF62ClsFUcif5vLaZIEbPrCXu5OFRXigXFJnxYsVVZqdQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] libc: [musl] '@rolldown/binding-linux-x64-gnu@1.0.0-beta.58': resolution: {integrity: sha512-urzJX0HrXxIh0FfxwWRjfPCMeInU9qsImLQxHBgLp5ivji1EEUnOfux8KxPPnRQthJyneBrN2LeqUix9DYrNaQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] libc: [glibc] '@rolldown/binding-linux-x64-musl@1.0.0-beta.58': resolution: {integrity: sha512-7ijfVK3GISnXIwq/1FZo+KyAUJjL3kWPJ7rViAL6MWeEBhEgRzJ0yEd9I8N9aut8Y8ab+EKFJyRNMWZuUBwQ0A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] libc: [musl] '@rolldown/binding-openharmony-arm64@1.0.0-beta.58': resolution: {integrity: sha512-/m7sKZCS+cUULbzyJTIlv8JbjNohxbpAOA6cM+lgWgqVzPee3U6jpwydrib328JFN/gF9A99IZEnuGYqEDJdww==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] '@rolldown/binding-wasm32-wasi@1.0.0-beta.58': resolution: {integrity: sha512-6SZk7zMgv+y3wFFQ9qE5P9NnRHcRsptL1ypmudD26PDY+PvFCvfHRkJNfclWnvacVGxjowr7JOL3a9fd1wWhUw==} engines: {node: '>=14.0.0'} cpu: [wasm32] '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.58': resolution: {integrity: sha512-sFqfYPnBZ6xBhMkadB7UD0yjEDRvs7ipR3nCggblN+N4ODCXY6qhg/bKL39+W+dgQybL7ErD4EGERVbW9DAWvg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] '@rolldown/binding-win32-x64-msvc@1.0.0-beta.58': resolution: {integrity: sha512-AnFWJdAqB8+IDPcGrATYs67Kik/6tnndNJV2jGRmwlbeNiQQ8GhRJU8ETRlINfII0pqi9k4WWLnb00p1QCxw/Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] '@rolldown/pluginutils@1.0.0-beta.58': resolution: {integrity: sha512-qWhDs6yFGR5xDfdrwiSa3CWGIHxD597uGE/A9xGqytBjANvh4rLCTTkq7szhMV4+Ygh+PMS90KVJ8xWG/TkX4w==} '@rollup/rollup-android-arm-eabi@4.57.1': resolution: {integrity: sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==} cpu: [arm] 
os: [android] '@rollup/rollup-android-arm64@4.57.1': resolution: {integrity: sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==} cpu: [arm64] os: [android] '@rollup/rollup-darwin-arm64@4.57.1': resolution: {integrity: sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==} cpu: [arm64] os: [darwin] '@rollup/rollup-darwin-x64@4.57.1': resolution: {integrity: sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==} cpu: [x64] os: [darwin] '@rollup/rollup-freebsd-arm64@4.57.1': resolution: {integrity: sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==} cpu: [arm64] os: [freebsd] '@rollup/rollup-freebsd-x64@4.57.1': resolution: {integrity: sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==} cpu: [x64] os: [freebsd] '@rollup/rollup-linux-arm-gnueabihf@4.57.1': resolution: {integrity: sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==} cpu: [arm] os: [linux] libc: [glibc] '@rollup/rollup-linux-arm-musleabihf@4.57.1': resolution: {integrity: sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==} cpu: [arm] os: [linux] libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.57.1': resolution: {integrity: sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==} cpu: [arm64] os: [linux] libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.57.1': resolution: {integrity: sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==} cpu: [arm64] os: [linux] libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.57.1': resolution: {integrity: sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==} cpu: [loong64] os: [linux] libc: [glibc] '@rollup/rollup-linux-loong64-musl@4.57.1': 
resolution: {integrity: sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==} cpu: [loong64] os: [linux] libc: [musl] '@rollup/rollup-linux-ppc64-gnu@4.57.1': resolution: {integrity: sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==} cpu: [ppc64] os: [linux] libc: [glibc] '@rollup/rollup-linux-ppc64-musl@4.57.1': resolution: {integrity: sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==} cpu: [ppc64] os: [linux] libc: [musl] '@rollup/rollup-linux-riscv64-gnu@4.57.1': resolution: {integrity: sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==} cpu: [riscv64] os: [linux] libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.57.1': resolution: {integrity: sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==} cpu: [riscv64] os: [linux] libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.57.1': resolution: {integrity: sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==} cpu: [s390x] os: [linux] libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.57.1': resolution: {integrity: sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==} cpu: [x64] os: [linux] libc: [glibc] '@rollup/rollup-linux-x64-musl@4.57.1': resolution: {integrity: sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==} cpu: [x64] os: [linux] libc: [musl] '@rollup/rollup-openbsd-x64@4.57.1': resolution: {integrity: sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==} cpu: [x64] os: [openbsd] '@rollup/rollup-openharmony-arm64@4.57.1': resolution: {integrity: sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==} cpu: [arm64] os: [openharmony] '@rollup/rollup-win32-arm64-msvc@4.57.1': resolution: 
{integrity: sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==} cpu: [arm64] os: [win32] '@rollup/rollup-win32-ia32-msvc@4.57.1': resolution: {integrity: sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==} cpu: [ia32] os: [win32] '@rollup/rollup-win32-x64-gnu@4.57.1': resolution: {integrity: sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==} cpu: [x64] os: [win32] '@rollup/rollup-win32-x64-msvc@4.57.1': resolution: {integrity: sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==} cpu: [x64] os: [win32] '@schematics/angular@21.1.0-rc.0': resolution: {integrity: sha512-2xFCC2wGZZlnxKcf8+CJUaYC0cW/Zb8BO26LoGRLZLgW3E58YwizOa72DzqkCqGF6FDaVoh4V5U7RCtVSPtQGw==} engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0, npm: ^6.11.0 || ^7.5.6 || >=8.0.0, yarn: '>= 1.13.0'} '@sigstore/bundle@4.0.0': resolution: {integrity: sha512-NwCl5Y0V6Di0NexvkTqdoVfmjTaQwoLM236r89KEojGmq/jMls8S+zb7yOwAPdXvbwfKDlP+lmXgAL4vKSQT+A==} engines: {node: ^20.17.0 || >=22.9.0} '@sigstore/core@3.1.0': resolution: {integrity: sha512-o5cw1QYhNQ9IroioJxpzexmPjfCe7gzafd2RY3qnMpxr4ZEja+Jad/U8sgFpaue6bOaF+z7RVkyKVV44FN+N8A==} engines: {node: ^20.17.0 || >=22.9.0} '@sigstore/protobuf-specs@0.5.0': resolution: {integrity: sha512-MM8XIwUjN2bwvCg1QvrMtbBmpcSHrkhFSCu1D11NyPvDQ25HEc4oG5/OcQfd/Tlf/OxmKWERDj0zGE23jQaMwA==} engines: {node: ^18.17.0 || >=20.5.0} '@sigstore/sign@4.1.0': resolution: {integrity: sha512-Vx1RmLxLGnSUqx/o5/VsCjkuN5L7y+vxEEwawvc7u+6WtX2W4GNa7b9HEjmcRWohw/d6BpATXmvOwc78m+Swdg==} engines: {node: ^20.17.0 || >=22.9.0} '@sigstore/tuf@4.0.1': resolution: {integrity: sha512-OPZBg8y5Vc9yZjmWCHrlWPMBqW5yd8+wFNl+thMdtcWz3vjVSoJQutF8YkrzI0SLGnkuFof4HSsWUhXrf219Lw==} engines: {node: ^20.17.0 || >=22.9.0} '@sigstore/verify@3.1.0': resolution: {integrity: 
sha512-mNe0Iigql08YupSOGv197YdHpPPr+EzDZmfCgMc7RPNaZTw5aLN01nBl6CHJOh3BGtnMIj83EeN4butBchc8Ag==} engines: {node: ^20.17.0 || >=22.9.0} '@tufjs/canonical-json@2.0.0': resolution: {integrity: sha512-yVtV8zsdo8qFHe+/3kw81dSLyF7D576A5cCFCi4X7B39tWT7SekaEFUnvnWJHz+9qO7qJTah1JbrDjWKqFtdWA==} engines: {node: ^16.14.0 || >=18.0.0} '@tufjs/models@4.1.0': resolution: {integrity: sha512-Y8cK9aggNRsqJVaKUlEYs4s7CvQ1b1ta2DVPyAimb0I2qhzjNk+A+mxvll/klL0RlfuIUei8BF7YWiua4kQqww==} engines: {node: ^20.17.0 || >=22.9.0} '@tybys/wasm-util@0.10.1': resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} '@types/body-parser@1.19.6': resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==} '@types/bonjour@3.5.13': resolution: {integrity: sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==} '@types/connect-history-api-fallback@1.5.4': resolution: {integrity: sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==} '@types/connect@3.4.38': resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} '@types/eslint-scope@3.7.7': resolution: {integrity: sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==} '@types/eslint@9.6.1': resolution: {integrity: sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==} '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} '@types/express-serve-static-core@4.19.8': resolution: {integrity: sha512-02S5fmqeoKzVZCHPZid4b8JH2eM5HzQLZWN2FohQEy/0eXTq8VXZfSN6Pcr3F6N9R/vNrj7cpgbhjie6m/1tCA==} '@types/express@4.17.25': resolution: {integrity: 
sha512-dVd04UKsfpINUnK0yBoYHDF3xu7xVH4BuDotC/xGuycx4CgbP48X/KF/586bcObxT0HENHXEU8Nqtu6NR+eKhw==} '@types/http-errors@2.0.5': resolution: {integrity: sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==} '@types/http-proxy@1.17.17': resolution: {integrity: sha512-ED6LB+Z1AVylNTu7hdzuBqOgMnvG/ld6wGCG8wFnAzKX5uyW2K3WD52v0gnLCTK/VLpXtKckgWuyScYK6cSPaw==} '@types/json-schema@7.0.15': resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} '@types/mime@1.3.5': resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} '@types/node-forge@1.3.14': resolution: {integrity: sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==} '@types/node@20.19.33': resolution: {integrity: sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==} '@types/qs@6.14.0': resolution: {integrity: sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==} '@types/range-parser@1.2.7': resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} '@types/retry@0.12.2': resolution: {integrity: sha512-XISRgDJ2Tc5q4TRqvgJtzsRkFYNJzZrhTdtMoGVBttwzzQJkPnS3WWTFc7kuDRoPtPakl+T+OfdEUjYJj7Jbow==} '@types/send@0.17.6': resolution: {integrity: sha512-Uqt8rPBE8SY0RK8JB1EzVOIZ32uqy8HwdxCnoCOsYrvnswqmFZ/k+9Ikidlk/ImhsdvBsloHbAlewb2IEBV/Og==} '@types/send@1.2.1': resolution: {integrity: sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==} '@types/serve-index@1.9.4': resolution: {integrity: sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==} '@types/serve-static@1.15.10': resolution: {integrity: 
sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw==} '@types/sockjs@0.3.36': resolution: {integrity: sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==} '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} '@vitejs/plugin-basic-ssl@2.1.0': resolution: {integrity: sha512-dOxxrhgyDIEUADhb/8OlV9JIqYLgos03YorAueTIeOUskLJSEsfwCByjbu98ctXitUN3znXKp0bYD/WHSudCeA==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} peerDependencies: vite: ^6.0.0 || ^7.0.0 '@webassemblyjs/ast@1.14.1': resolution: {integrity: sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==} '@webassemblyjs/floating-point-hex-parser@1.13.2': resolution: {integrity: sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==} '@webassemblyjs/helper-api-error@1.13.2': resolution: {integrity: sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==} '@webassemblyjs/helper-buffer@1.14.1': resolution: {integrity: sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==} '@webassemblyjs/helper-numbers@1.13.2': resolution: {integrity: sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==} '@webassemblyjs/helper-wasm-bytecode@1.13.2': resolution: {integrity: sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==} '@webassemblyjs/helper-wasm-section@1.14.1': resolution: {integrity: sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==} '@webassemblyjs/ieee754@1.13.2': resolution: {integrity: sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==} '@webassemblyjs/leb128@1.13.2': resolution: {integrity: 
sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==} '@webassemblyjs/utf8@1.13.2': resolution: {integrity: sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==} '@webassemblyjs/wasm-edit@1.14.1': resolution: {integrity: sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==} '@webassemblyjs/wasm-gen@1.14.1': resolution: {integrity: sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==} '@webassemblyjs/wasm-opt@1.14.1': resolution: {integrity: sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==} '@webassemblyjs/wasm-parser@1.14.1': resolution: {integrity: sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==} '@webassemblyjs/wast-printer@1.14.1': resolution: {integrity: sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==} '@xtuc/ieee754@1.2.0': resolution: {integrity: sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==} '@xtuc/long@4.2.2': resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} '@yarnpkg/lockfile@1.1.0': resolution: {integrity: sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==} abbrev@4.0.0: resolution: {integrity: sha512-a1wflyaL0tHtJSmLSOVybYhy22vRih4eduhhrkcjgrWGnRfrZtovJ2FRjxuTtkkj47O/baf0R86QU5OuYpz8fA==} engines: {node: ^20.17.0 || >=22.9.0} accepts@1.3.8: resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} engines: {node: '>= 0.6'} accepts@2.0.0: resolution: {integrity: sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==} engines: {node: '>= 0.6'} acorn-import-phases@1.0.4: resolution: 
{integrity: sha512-wKmbr/DDiIXzEOiWrTTUcDm24kQ2vGfZQvM2fwg2vXqR5uW6aapr7ObPtj1th32b9u90/Pf4AItvdTh42fBmVQ==} engines: {node: '>=10.13.0'} peerDependencies: acorn: ^8.14.0 acorn@8.15.0: resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} engines: {node: '>=0.4.0'} hasBin: true adjust-sourcemap-loader@4.0.0: resolution: {integrity: sha512-OXwN5b9pCUXNQHJpwwD2qP40byEmSgzj8B4ydSN0uMNYWiFmJ6x6KwUllMmfk8Rwu/HJDFR7U8ubsWBoN0Xp0A==} engines: {node: '>=8.9'} agent-base@7.1.4: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} ajv-formats@2.1.1: resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} peerDependencies: ajv: ^8.0.0 peerDependenciesMeta: ajv: optional: true ajv-formats@3.0.1: resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} peerDependencies: ajv: ^8.0.0 peerDependenciesMeta: ajv: optional: true ajv-keywords@5.1.0: resolution: {integrity: sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==} peerDependencies: ajv: ^8.8.2 ajv@8.17.1: resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} algoliasearch@5.46.2: resolution: {integrity: sha512-qqAXW9QvKf2tTyhpDA4qXv1IfBwD2eduSW6tUEBFIfCeE9gn9HQ9I5+MaKoenRuHrzk5sQoNh1/iof8mY7uD6Q==} engines: {node: '>= 14.0.0'} ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} ansi-escapes@7.3.0: resolution: {integrity: sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==} engines: {node: '>=18'} ansi-html-community@0.0.8: resolution: {integrity: 
sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==} engines: {'0': node >= 0.8.0} hasBin: true ansi-regex@5.0.1: resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} engines: {node: '>=8'} ansi-regex@6.2.2: resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==} engines: {node: '>=12'} ansi-styles@4.3.0: resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} engines: {node: '>=8'} ansi-styles@6.2.3: resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==} engines: {node: '>=12'} anymatch@3.1.3: resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} engines: {node: '>= 8'} argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} array-flatten@1.1.1: resolution: {integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==} autoprefixer@10.4.23: resolution: {integrity: sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==} engines: {node: ^10 || ^12 || >=14} hasBin: true peerDependencies: postcss: ^8.1.0 babel-loader@10.0.0: resolution: {integrity: sha512-z8jt+EdS61AMw22nSfoNJAZ0vrtmhPRVi6ghL3rCeRZI8cdNYFiV5xeV3HbE7rlZZNmGH8BVccwWt8/ED0QOHA==} engines: {node: ^18.20.0 || ^20.10.0 || >=22.0.0} peerDependencies: '@babel/core': ^7.12.0 webpack: '>=5.61.0' babel-plugin-polyfill-corejs2@0.4.15: resolution: {integrity: sha512-hR3GwrRwHUfYwGfrisXPIDP3JcYfBrW7wKE7+Au6wDYl7fm/ka1NEII6kORzxNU556JjfidZeBsO10kYvtV1aw==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 babel-plugin-polyfill-corejs3@0.13.0: resolution: {integrity: 
sha512-U+GNwMdSFgzVmfhNm8GJUX88AadB3uo9KpJqS3FaqNIPKgySuvMb+bHPsOmmuWyIcuqZj/pzt1RUIUZns4y2+A==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 babel-plugin-polyfill-regenerator@0.6.6: resolution: {integrity: sha512-hYm+XLYRMvupxiQzrvXUj7YyvFFVfv5gI0R71AJzudg1g2AI2vyCPPIFEBjk162/wFzti3inBHo7isWFuEVS/A==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 baseline-browser-mapping@2.9.19: resolution: {integrity: sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==} hasBin: true batch@0.6.1: resolution: {integrity: sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==} beasties@0.3.5: resolution: {integrity: sha512-NaWu+f4YrJxEttJSm16AzMIFtVldCvaJ68b1L098KpqXmxt9xOLtKoLkKxb8ekhOrLqEJAbvT6n6SEvB/sac7A==} engines: {node: '>=14.0.0'} big.js@5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} binary-extensions@2.3.0: resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} engines: {node: '>=8'} body-parser@1.20.4: resolution: {integrity: sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} body-parser@2.2.2: resolution: {integrity: sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==} engines: {node: '>=18'} bonjour-service@1.3.0: resolution: {integrity: sha512-3YuAUiSkWykd+2Azjgyxei8OWf8thdn8AITIog2M4UICzoqfjlqr64WIjEXZllf/W6vK1goqleSR6brGomxQqA==} boolbase@1.0.0: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} braces@3.0.3: resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} engines: {node: '>=8'} browserslist@4.28.1: resolution: {integrity: 
sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==} engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} hasBin: true buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} bundle-name@4.1.0: resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} engines: {node: '>=18'} bytes@3.1.2: resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} cacache@20.0.3: resolution: {integrity: sha512-3pUp4e8hv07k1QlijZu6Kn7c9+ZpWWk4j3F8N3xPuCExULobqJydKYOTj1FTq58srkJsXvO7LbGAH4C0ZU3WGw==} engines: {node: ^20.17.0 || >=22.9.0} call-bind-apply-helpers@1.0.2: resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} engines: {node: '>= 0.4'} call-bound@1.0.4: resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} engines: {node: '>= 0.4'} callsites@3.1.0: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} caniuse-lite@1.0.30001769: resolution: {integrity: sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg==} chalk@5.6.2: resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} chardet@2.1.1: resolution: {integrity: sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ==} chokidar@3.6.0: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} chokidar@4.0.3: resolution: {integrity: 
sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} engines: {node: '>= 14.16.0'} chownr@3.0.0: resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} engines: {node: '>=18'} chrome-trace-event@1.0.4: resolution: {integrity: sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==} engines: {node: '>=6.0'} cli-cursor@5.0.0: resolution: {integrity: sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==} engines: {node: '>=18'} cli-spinners@3.4.0: resolution: {integrity: sha512-bXfOC4QcT1tKXGorxL3wbJm6XJPDqEnij2gQ2m7ESQuE+/z9YFIWnl/5RpTiKWbMq3EVKR4fRLJGn6DVfu0mpw==} engines: {node: '>=18.20'} cli-truncate@5.1.1: resolution: {integrity: sha512-SroPvNHxUnk+vIW/dOSfNqdy1sPEFkrTk6TUtqLCnBlo3N7TNYYkzzN7uSD6+jVjrdO4+p8nH7JzH6cIvUem6A==} engines: {node: '>=20'} cli-width@4.1.0: resolution: {integrity: sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==} engines: {node: '>= 12'} cliui@9.0.1: resolution: {integrity: sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w==} engines: {node: '>=20'} clone-deep@4.0.1: resolution: {integrity: sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==} engines: {node: '>=6'} color-convert@2.0.1: resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} engines: {node: '>=7.0.0'} color-name@1.1.4: resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} colorette@2.0.20: resolution: {integrity: sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==} commander@2.20.3: resolution: {integrity: 
sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} compressible@2.0.18: resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} engines: {node: '>= 0.6'} compression@1.8.1: resolution: {integrity: sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==} engines: {node: '>= 0.8.0'} connect-history-api-fallback@2.0.0: resolution: {integrity: sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==} engines: {node: '>=0.8'} content-disposition@0.5.4: resolution: {integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==} engines: {node: '>= 0.6'} content-disposition@1.0.1: resolution: {integrity: sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==} engines: {node: '>=18'} content-type@1.0.5: resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} engines: {node: '>= 0.6'} convert-source-map@1.9.0: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} convert-source-map@2.0.0: resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} cookie-signature@1.0.7: resolution: {integrity: sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==} cookie-signature@1.2.2: resolution: {integrity: sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==} engines: {node: '>=6.6.0'} cookie@0.7.2: resolution: {integrity: sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} engines: {node: '>= 0.6'} copy-anything@2.0.6: resolution: {integrity: 
sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==} copy-webpack-plugin@13.0.1: resolution: {integrity: sha512-J+YV3WfhY6W/Xf9h+J1znYuqTye2xkBUIGyTPWuBAT27qajBa5mR4f8WBmfDY3YjRftT2kqZZiLi1qf0H+UOFw==} engines: {node: '>= 18.12.0'} peerDependencies: webpack: ^5.1.0 core-js-compat@3.48.0: resolution: {integrity: sha512-OM4cAF3D6VtH/WkLtWvyNC56EZVXsZdU3iqaMG2B4WvYrlqU831pc4UtG5yp0sE9z8Y02wVN7PjW5Zf9Gt0f1Q==} core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} cors@2.8.6: resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} engines: {node: '>= 0.10'} cosmiconfig@9.0.0: resolution: {integrity: sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==} engines: {node: '>=14'} peerDependencies: typescript: '>=4.9.5' peerDependenciesMeta: typescript: optional: true cross-spawn@7.0.6: resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} css-loader@7.1.2: resolution: {integrity: sha512-6WvYYn7l/XEGN8Xu2vWFt9nVzrCn39vKyTEFf/ExEyoksJjjSZV/0/35XPlMbpnr6VGhZIUg5yJrL8tGfes/FA==} engines: {node: '>= 18.12.0'} peerDependencies: '@rspack/core': 0.x || 1.x webpack: ^5.27.0 peerDependenciesMeta: '@rspack/core': optional: true webpack: optional: true css-select@6.0.0: resolution: {integrity: sha512-rZZVSLle8v0+EY8QAkDWrKhpgt6SA5OtHsgBnsj6ZaLb5dmDVOWUDtQitd9ydxxvEjhewNudS6eTVU7uOyzvXw==} css-what@7.0.0: resolution: {integrity: sha512-wD5oz5xibMOPHzy13CyGmogB3phdvcDaB5t0W/Nr5Z2O/agcB8YwOz6e2Lsp10pNDzBoDO9nVa3RGs/2BttpHQ==} engines: {node: '>= 6'} cssesc@3.0.0: resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} engines: {node: '>=4'} hasBin: true debug@2.6.9: resolution: {integrity: 
sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==} peerDependencies: supports-color: '*' peerDependenciesMeta: supports-color: optional: true debug@4.4.3: resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' peerDependenciesMeta: supports-color: optional: true default-browser-id@5.0.1: resolution: {integrity: sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==} engines: {node: '>=18'} default-browser@5.5.0: resolution: {integrity: sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw==} engines: {node: '>=18'} define-lazy-prop@3.0.0: resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} engines: {node: '>=12'} depd@1.1.2: resolution: {integrity: sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==} engines: {node: '>= 0.6'} depd@2.0.0: resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} engines: {node: '>= 0.8'} destroy@1.2.0: resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} detect-libc@2.1.2: resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} engines: {node: '>=8'} detect-node@2.1.0: resolution: {integrity: sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==} dns-packet@5.6.1: resolution: {integrity: sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==} engines: {node: '>=6'} dom-serializer@2.0.0: resolution: {integrity: 
sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} domelementtype@2.3.0: resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} domhandler@5.0.3: resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} engines: {node: '>= 4'} domutils@3.2.2: resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} dunder-proto@1.0.1: resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} engines: {node: '>= 0.4'} ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} electron-to-chromium@1.5.286: resolution: {integrity: sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==} emoji-regex@10.6.0: resolution: {integrity: sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==} emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} emojis-list@3.0.0: resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==} engines: {node: '>= 4'} encodeurl@2.0.0: resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} engines: {node: '>= 0.8'} encoding@0.1.13: resolution: {integrity: sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==} enhanced-resolve@5.19.0: resolution: {integrity: sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg==} engines: {node: '>=10.13.0'} entities@4.5.0: resolution: {integrity: 
sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} entities@6.0.1: resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} engines: {node: '>=0.12'} entities@7.0.1: resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} engines: {node: '>=0.12'} env-paths@2.2.1: resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==} engines: {node: '>=6'} environment@1.1.0: resolution: {integrity: sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==} engines: {node: '>=18'} err-code@2.0.3: resolution: {integrity: sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==} errno@0.1.8: resolution: {integrity: sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==} hasBin: true error-ex@1.3.4: resolution: {integrity: sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==} es-define-property@1.0.1: resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} engines: {node: '>= 0.4'} es-errors@1.3.0: resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} es-module-lexer@2.0.0: resolution: {integrity: sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==} es-object-atoms@1.1.1: resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} engines: {node: '>= 0.4'} esbuild-wasm@0.27.2: resolution: {integrity: sha512-eUTnl8eh+v8UZIZh4MrMOKDAc8Lm7+NqP3pyuTORGFY1s/o9WoiJgKnwXy+te2J3hX7iRbFSHEyig7GsPeeJyw==} engines: 
{node: '>=18'} hasBin: true esbuild@0.27.2: resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==} engines: {node: '>=18'} hasBin: true escalade@3.2.0: resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} engines: {node: '>=6'} escape-html@1.0.3: resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} eslint-scope@5.1.1: resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==} engines: {node: '>=8.0.0'} esrecurse@4.3.0: resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} engines: {node: '>=4.0'} estraverse@4.3.0: resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} engines: {node: '>=4.0'} estraverse@5.3.0: resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} engines: {node: '>=4.0'} esutils@2.0.3: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} etag@1.8.1: resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} engines: {node: '>= 0.6'} eventemitter3@4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} eventemitter3@5.0.4: resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==} events@3.3.0: resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} engines: {node: '>=0.8.x'} eventsource-parser@3.0.6: resolution: {integrity: 
sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} engines: {node: '>=18.0.0'} eventsource@3.0.7: resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} engines: {node: '>=18.0.0'} exponential-backoff@3.1.3: resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==} express-rate-limit@7.5.1: resolution: {integrity: sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==} engines: {node: '>= 16'} peerDependencies: express: '>= 4.11' express@4.22.1: resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==} engines: {node: '>= 0.10.0'} express@5.2.1: resolution: {integrity: sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==} engines: {node: '>= 18'} fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} fast-uri@3.1.0: resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} faye-websocket@0.11.4: resolution: {integrity: sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==} engines: {node: '>=0.8.0'} fdir@6.5.0: resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} engines: {node: '>=12.0.0'} peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: picomatch: optional: true fill-range@7.1.1: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} finalhandler@1.3.2: resolution: {integrity: sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==} engines: {node: '>= 
0.8'} finalhandler@2.1.1: resolution: {integrity: sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==} engines: {node: '>= 18.0.0'} find-up@5.0.0: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} flat@5.0.2: resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} hasBin: true follow-redirects@1.15.11: resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} engines: {node: '>=4.0'} peerDependencies: debug: '*' peerDependenciesMeta: debug: optional: true forwarded@0.2.0: resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} engines: {node: '>= 0.6'} fraction.js@5.3.4: resolution: {integrity: sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==} fresh@0.5.2: resolution: {integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==} engines: {node: '>= 0.6'} fresh@2.0.0: resolution: {integrity: sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==} engines: {node: '>= 0.8'} fs-minipass@3.0.3: resolution: {integrity: sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} fsevents@2.3.3: resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} gensync@1.0.0-beta.2: resolution: {integrity: 
sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} get-east-asian-width@1.4.0: resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} engines: {node: '>=18'} get-intrinsic@1.3.0: resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} engines: {node: '>= 0.4'} get-proto@1.0.1: resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} engines: {node: '>= 0.4'} glob-parent@5.1.2: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} glob-parent@6.0.2: resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} engines: {node: '>=10.13.0'} glob-to-regex.js@1.2.0: resolution: {integrity: sha512-QMwlOQKU/IzqMUOAZWubUOT8Qft+Y0KQWnX9nK3ch0CJg0tTp4TvGZsTfudYKv2NzoQSyPcnA6TYeIQ3jGichQ==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' glob-to-regexp@0.4.1: resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==} glob@13.0.2: resolution: {integrity: sha512-035InabNu/c1lW0tzPhAgapKctblppqsKKG9ZaNzbr+gXwWMjXoiyGSyB9sArzrjG7jY+zntRq5ZSUYemrnWVQ==} engines: {node: 20 || >=22} gopd@1.2.0: resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} engines: {node: '>= 0.4'} graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} handle-thing@2.0.1: resolution: {integrity: 
sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==} has-flag@4.0.0: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} has-symbols@1.1.0: resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} engines: {node: '>= 0.4'} hasown@2.0.2: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} hono@4.11.9: resolution: {integrity: sha512-Eaw2YTGM6WOxA6CXbckaEvslr2Ne4NFsKrvc0v97JD5awbmeBLO5w9Ho9L9kmKonrwF9RJlW6BxT1PVv/agBHQ==} engines: {node: '>=16.9.0'} hosted-git-info@9.0.2: resolution: {integrity: sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg==} engines: {node: ^20.17.0 || >=22.9.0} hpack.js@2.1.6: resolution: {integrity: sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==} htmlparser2@10.1.0: resolution: {integrity: sha512-VTZkM9GWRAtEpveh7MSF6SjjrpNVNNVJfFup7xTY3UpFtm67foy9HDVXneLtFVt4pMz5kZtgNcvCniNFb1hlEQ==} http-cache-semantics@4.2.0: resolution: {integrity: sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==} http-deceiver@1.2.7: resolution: {integrity: sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==} http-errors@1.8.1: resolution: {integrity: sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==} engines: {node: '>= 0.6'} http-errors@2.0.1: resolution: {integrity: sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==} engines: {node: '>= 0.8'} http-parser-js@0.5.10: resolution: {integrity: sha512-Pysuw9XpUq5dVc/2SMHpuTY01RFl8fttgcyunjL7eEMhGM3cI4eOmiCycJDVCo/7O7ClfQD3SaI6ftDzqOXYMA==} http-proxy-agent@7.0.2: 
resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} engines: {node: '>= 14'} http-proxy-middleware@2.0.9: resolution: {integrity: sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==} engines: {node: '>=12.0.0'} peerDependencies: '@types/express': ^4.17.13 peerDependenciesMeta: '@types/express': optional: true http-proxy-middleware@3.0.5: resolution: {integrity: sha512-GLZZm1X38BPY4lkXA01jhwxvDoOkkXqjgVyUzVxiEK4iuRu03PZoYHhHRwxnfhQMDuaxi3vVri0YgSro/1oWqg==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} http-proxy@1.18.1: resolution: {integrity: sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==} engines: {node: '>=8.0.0'} https-proxy-agent@7.0.6: resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} engines: {node: '>= 14'} hyperdyperid@1.2.0: resolution: {integrity: sha512-Y93lCzHYgGWdrJ66yIktxiaGULYc6oGiABxhcO5AufBeOyoIdZF7bIfLaOrbM0iGIOXQQgxxRrFEnb+Y6w1n4A==} engines: {node: '>=10.18'} iconv-lite@0.4.24: resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} engines: {node: '>=0.10.0'} iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} iconv-lite@0.7.2: resolution: {integrity: sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==} engines: {node: '>=0.10.0'} icss-utils@5.1.0: resolution: {integrity: sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==} engines: {node: ^10 || ^12 || >= 14} peerDependencies: postcss: ^8.1.0 ignore-walk@8.0.0: resolution: {integrity: sha512-FCeMZT4NiRQGh+YkeKMtWrOmBgWjHjMJ26WQWrRQyoyzqevdaGSakUaJW5xQYmjLlUVk2qUnCjYVBax9EKKg8A==} engines: {node: 
^20.17.0 || >=22.9.0} image-size@0.5.5: resolution: {integrity: sha512-6TDAlDPZxUFCv+fuOkIoXT/V/f3Qbq8e37p+YOiYrUv3v9cc3/6x78VdfPgFVaB9dZYeLUfKgHRebpkm/oP2VQ==} engines: {node: '>=0.10.0'} hasBin: true immutable@5.1.4: resolution: {integrity: sha512-p6u1bG3YSnINT5RQmx/yRZBpenIl30kVxkTLDyHLIMk0gict704Q9n+thfDI7lTRm9vXdDYutVzXhzcThxTnXA==} import-fresh@3.3.1: resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} engines: {node: '>=6'} imurmurhash@0.1.4: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} ini@6.0.0: resolution: {integrity: sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ==} engines: {node: ^20.17.0 || >=22.9.0} ip-address@10.1.0: resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==} engines: {node: '>= 12'} ipaddr.js@1.9.1: resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} engines: {node: '>= 0.10'} ipaddr.js@2.3.0: resolution: {integrity: sha512-Zv/pA+ciVFbCSBBjGfaKUya/CcGmUHzTydLMaTwrUUEM2DIEO3iZvueGxmacvmN50fGpGVKeTXpb2LcYQxeVdg==} engines: {node: '>= 10'} is-arrayish@0.2.1: resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} is-binary-path@2.1.0: resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} engines: {node: '>=8'} is-core-module@2.16.1: resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} engines: {node: '>= 0.4'} is-docker@3.0.0: resolution: {integrity: 
sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} hasBin: true is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} is-fullwidth-code-point@3.0.0: resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} engines: {node: '>=8'} is-fullwidth-code-point@5.1.0: resolution: {integrity: sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==} engines: {node: '>=18'} is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} is-in-ssh@1.0.0: resolution: {integrity: sha512-jYa6Q9rH90kR1vKB6NM7qqd1mge3Fx4Dhw5TVlK1MUBqhEOuCagrEHMevNuCcbECmXZ0ThXkRm+Ymr51HwEPAw==} engines: {node: '>=20'} is-inside-container@1.0.0: resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} engines: {node: '>=14.16'} hasBin: true is-interactive@2.0.0: resolution: {integrity: sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==} engines: {node: '>=12'} is-network-error@1.3.0: resolution: {integrity: sha512-6oIwpsgRfnDiyEDLMay/GqCl3HoAtH5+RUKW29gYkL0QA+ipzpDLA16yQs7/RHCSu+BwgbJaOUqa4A99qNVQVw==} engines: {node: '>=16'} is-number@7.0.0: resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} is-plain-obj@3.0.0: resolution: {integrity: sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==} engines: {node: '>=10'} is-plain-object@2.0.4: resolution: {integrity: 
sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==} engines: {node: '>=0.10.0'} is-plain-object@5.0.0: resolution: {integrity: sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==} engines: {node: '>=0.10.0'} is-promise@4.0.0: resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} is-unicode-supported@2.1.0: resolution: {integrity: sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==} engines: {node: '>=18'} is-what@3.14.1: resolution: {integrity: sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==} is-wsl@3.1.0: resolution: {integrity: sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} engines: {node: '>=16'} isarray@1.0.0: resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} isexe@4.0.0: resolution: {integrity: sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw==} engines: {node: '>=20'} isobject@3.0.1: resolution: {integrity: sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} engines: {node: '>=0.10.0'} istanbul-lib-coverage@3.2.2: resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} engines: {node: '>=8'} istanbul-lib-instrument@6.0.3: resolution: {integrity: sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==} engines: {node: '>=10'} jest-worker@27.5.1: resolution: {integrity: sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==} engines: 
{node: '>= 10.13.0'} jiti@2.6.1: resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} hasBin: true jose@6.1.3: resolution: {integrity: sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==} js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} js-yaml@4.1.1: resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} hasBin: true jsesc@3.1.0: resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} engines: {node: '>=6'} hasBin: true json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} json-parse-even-better-errors@5.0.0: resolution: {integrity: sha512-ZF1nxZ28VhQouRWhUcVlUIN3qwSgPuswK05s/HIaoetAoE/9tngVmCHjSxmSQPav1nd+lPtTL0YZ/2AFdR/iYQ==} engines: {node: ^20.17.0 || >=22.9.0} json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} json-schema-typed@8.0.2: resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} json5@2.2.3: resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} engines: {node: '>=6'} hasBin: true jsonc-parser@3.3.1: resolution: {integrity: sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==} jsonparse@1.3.1: resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==} engines: {'0': node >= 0.2.0} karma-source-map-support@1.4.0: resolution: {integrity: 
sha512-RsBECncGO17KAoJCYXjv+ckIz+Ii9NCi+9enk+rq6XC81ezYkb4/RHE6CTXdA7IOJqoF3wcaLfVG0CPmE5ca6A==} kind-of@6.0.3: resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} engines: {node: '>=0.10.0'} launch-editor@2.12.0: resolution: {integrity: sha512-giOHXoOtifjdHqUamwKq6c49GzBdLjvxrd2D+Q4V6uOHopJv7p9VJxikDsQ/CBXZbEITgUqSVHXLTG3VhPP1Dg==} less-loader@12.3.0: resolution: {integrity: sha512-0M6+uYulvYIWs52y0LqN4+QM9TqWAohYSNTo4htE8Z7Cn3G/qQMEmktfHmyJT23k+20kU9zHH2wrfFXkxNLtVw==} engines: {node: '>= 18.12.0'} peerDependencies: '@rspack/core': 0.x || 1.x less: ^3.5.0 || ^4.0.0 webpack: ^5.0.0 peerDependenciesMeta: '@rspack/core': optional: true webpack: optional: true less@4.4.2: resolution: {integrity: sha512-j1n1IuTX1VQjIy3tT7cyGbX7nvQOsFLoIqobZv4ttI5axP923gA44zUj6miiA6R5Aoms4sEGVIIcucXUbRI14g==} engines: {node: '>=14'} hasBin: true license-webpack-plugin@4.0.2: resolution: {integrity: sha512-771TFWFD70G1wLTC4oU2Cw4qvtmNrIw+wRvBtn+okgHl7slJVi7zfNcdmqDL72BojM30VNJ2UHylr1o77U37Jw==} peerDependencies: webpack: '*' peerDependenciesMeta: webpack: optional: true lines-and-columns@1.2.4: resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} listr2@9.0.5: resolution: {integrity: sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==} engines: {node: '>=20.0.0'} lmdb@3.4.4: resolution: {integrity: sha512-+Y2DqovevLkb6DrSQ6SXTYLEd6kvlRbhsxzgJrk7BUfOVA/mt21ak6pFDZDKxiAczHMWxrb02kXBTSTIA0O94A==} hasBin: true loader-runner@4.3.1: resolution: {integrity: sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==} engines: {node: '>=6.11.5'} loader-utils@2.0.4: resolution: {integrity: sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==} engines: {node: '>=8.9.0'} loader-utils@3.3.1: resolution: {integrity: 
sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==} engines: {node: '>= 12.13.0'} locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} lodash.debounce@4.0.8: resolution: {integrity: sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==} log-symbols@7.0.1: resolution: {integrity: sha512-ja1E3yCr9i/0hmBVaM0bfwDjnGy8I/s6PP4DFp+yP+a+mrHO4Rm7DtmnqROTUkHIkqffC84YY7AeqX6oFk0WFg==} engines: {node: '>=18'} log-update@6.1.0: resolution: {integrity: sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==} engines: {node: '>=18'} lru-cache@11.2.6: resolution: {integrity: sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==} engines: {node: 20 || >=22} lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} magic-string@0.30.21: resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} make-dir@2.1.0: resolution: {integrity: sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==} engines: {node: '>=6'} make-fetch-happen@15.0.3: resolution: {integrity: sha512-iyyEpDty1mwW3dGlYXAJqC/azFn5PPvgKVwXayOGBSmKLxhKZ9fg4qIan2ePpp1vJIwfFiO34LAPZgq9SZW9Aw==} engines: {node: ^20.17.0 || >=22.9.0} math-intrinsics@1.1.0: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} media-typer@0.3.0: resolution: {integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==} engines: {node: '>= 0.6'} media-typer@1.1.0: resolution: {integrity: 
sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==} engines: {node: '>= 0.8'} memfs@4.56.10: resolution: {integrity: sha512-eLvzyrwqLHnLYalJP7YZ3wBe79MXktMdfQbvMrVD80K+NhrIukCVBvgP30zTJYEEDh9hZ/ep9z0KOdD7FSHo7w==} peerDependencies: tslib: '2' merge-descriptors@1.0.3: resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} merge-descriptors@2.0.0: resolution: {integrity: sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==} engines: {node: '>=18'} merge-stream@2.0.0: resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} methods@1.1.2: resolution: {integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==} engines: {node: '>= 0.6'} micromatch@4.0.8: resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} engines: {node: '>=8.6'} mime-db@1.52.0: resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} mime-db@1.54.0: resolution: {integrity: sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==} engines: {node: '>= 0.6'} mime-types@2.1.35: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} mime-types@3.0.2: resolution: {integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==} engines: {node: '>=18'} mime@1.6.0: resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} engines: {node: '>=4'} hasBin: true mimic-function@5.0.1: resolution: {integrity: 
sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==} engines: {node: '>=18'} mini-css-extract-plugin@2.9.4: resolution: {integrity: sha512-ZWYT7ln73Hptxqxk2DxPU9MmapXRhxkJD6tkSR04dnQxm8BGu2hzgKLugK5yySD97u/8yy7Ma7E76k9ZdvtjkQ==} engines: {node: '>= 12.13.0'} peerDependencies: webpack: ^5.0.0 minimalistic-assert@1.0.1: resolution: {integrity: sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==} minimatch@10.1.2: resolution: {integrity: sha512-fu656aJ0n2kcXwsnwnv9g24tkU5uSmOlTjd6WyyaKm2Z+h1qmY6bAjrcaIxF/BslFqbZ8UBtbJi7KgQOZD2PTw==} engines: {node: 20 || >=22} minipass-collect@2.0.1: resolution: {integrity: sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==} engines: {node: '>=16 || 14 >=14.17'} minipass-fetch@5.0.1: resolution: {integrity: sha512-yHK8pb0iCGat0lDrs/D6RZmCdaBT64tULXjdxjSMAqoDi18Q3qKEUTHypHQZQd9+FYpIS+lkvpq6C/R6SbUeRw==} engines: {node: ^20.17.0 || >=22.9.0} minipass-flush@1.0.5: resolution: {integrity: sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==} engines: {node: '>= 8'} minipass-pipeline@1.2.4: resolution: {integrity: sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==} engines: {node: '>=8'} minipass-sized@2.0.0: resolution: {integrity: sha512-zSsHhto5BcUVM2m1LurnXY6M//cGhVaegT71OfOXoprxT6o780GZd792ea6FfrQkuU4usHZIUczAQMRUE2plzA==} engines: {node: '>=8'} minipass@3.3.6: resolution: {integrity: sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==} engines: {node: '>=8'} minipass@7.1.2: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} minizlib@3.1.0: resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==} engines: 
{node: '>= 18'} mrmime@2.0.1: resolution: {integrity: sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==} engines: {node: '>=10'} ms@2.0.0: resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} msgpackr-extract@3.0.3: resolution: {integrity: sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==} hasBin: true msgpackr@1.11.8: resolution: {integrity: sha512-bC4UGzHhVvgDNS7kn9tV8fAucIYUBuGojcaLiz7v+P63Lmtm0Xeji8B/8tYKddALXxJLpwIeBmUN3u64C4YkRA==} multicast-dns@7.2.5: resolution: {integrity: sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==} hasBin: true mute-stream@2.0.0: resolution: {integrity: sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==} engines: {node: ^18.17.0 || >=20.5.0} nanoid@3.3.11: resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true needle@3.3.1: resolution: {integrity: sha512-6k0YULvhpw+RoLNiQCRKOl09Rv1dPLr8hHnVjHqdolKwDrdNyk+Hmrthi4lIGPPz3r39dLx0hsF5s40sZ3Us4Q==} engines: {node: '>= 4.4.x'} hasBin: true negotiator@0.6.3: resolution: {integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==} engines: {node: '>= 0.6'} negotiator@0.6.4: resolution: {integrity: sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==} engines: {node: '>= 0.6'} negotiator@1.0.0: resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} engines: {node: '>= 0.6'} neo-async@2.6.2: resolution: {integrity: 
sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} node-addon-api@6.1.0: resolution: {integrity: sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA==} node-addon-api@7.1.1: resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==} node-forge@1.3.3: resolution: {integrity: sha512-rLvcdSyRCyouf6jcOIPe/BgwG/d7hKjzMKOas33/pHEr6gbq18IK9zV7DiPvzsz0oBJPme6qr6H6kGZuI9/DZg==} engines: {node: '>= 6.13.0'} node-gyp-build-optional-packages@5.2.2: resolution: {integrity: sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==} hasBin: true node-gyp@12.2.0: resolution: {integrity: sha512-q23WdzrQv48KozXlr0U1v9dwO/k59NHeSzn6loGcasyf0UnSrtzs8kRxM+mfwJSf0DkX0s43hcqgnSO4/VNthQ==} engines: {node: ^20.17.0 || >=22.9.0} hasBin: true node-releases@2.0.27: resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==} nopt@9.0.0: resolution: {integrity: sha512-Zhq3a+yFKrYwSBluL4H9XP3m3y5uvQkB/09CwDruCiRmR/UJYnn9W4R48ry0uGC70aeTPKLynBtscP9efFFcPw==} engines: {node: ^20.17.0 || >=22.9.0} hasBin: true normalize-path@3.0.0: resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} engines: {node: '>=0.10.0'} npm-bundled@5.0.0: resolution: {integrity: sha512-JLSpbzh6UUXIEoqPsYBvVNVmyrjVZ1fzEFbqxKkTJQkWBO3xFzFT+KDnSKQWwOQNbuWRwt5LSD6HOTLGIWzfrw==} engines: {node: ^20.17.0 || >=22.9.0} npm-install-checks@8.0.0: resolution: {integrity: sha512-ScAUdMpyzkbpxoNekQ3tNRdFI8SJ86wgKZSQZdUxT+bj0wVFpsEMWnkXP0twVe1gJyNF5apBWDJhhIbgrIViRA==} engines: {node: ^20.17.0 || >=22.9.0} npm-normalize-package-bin@5.0.0: resolution: {integrity: sha512-CJi3OS4JLsNMmr2u07OJlhcrPxCeOeP/4xq67aWNai6TNWWbTrlNDgl8NcFKVlcBKp18GPj+EzbNIgrBfZhsag==} engines: {node: ^20.17.0 || >=22.9.0} npm-package-arg@13.0.2: 
resolution: {integrity: sha512-IciCE3SY3uE84Ld8WZU23gAPPV9rIYod4F+rc+vJ7h7cwAJt9Vk6TVsK60ry7Uj3SRS3bqRRIGuTp9YVlk6WNA==} engines: {node: ^20.17.0 || >=22.9.0} npm-packlist@10.0.3: resolution: {integrity: sha512-zPukTwJMOu5X5uvm0fztwS5Zxyvmk38H/LfidkOMt3gbZVCyro2cD/ETzwzVPcWZA3JOyPznfUN/nkyFiyUbxg==} engines: {node: ^20.17.0 || >=22.9.0} npm-pick-manifest@11.0.3: resolution: {integrity: sha512-buzyCfeoGY/PxKqmBqn1IUJrZnUi1VVJTdSSRPGI60tJdUhUoSQFhs0zycJokDdOznQentgrpf8LayEHyyYlqQ==} engines: {node: ^20.17.0 || >=22.9.0} npm-registry-fetch@19.1.1: resolution: {integrity: sha512-TakBap6OM1w0H73VZVDf44iFXsOS3h+L4wVMXmbWOQroZgFhMch0juN6XSzBNlD965yIKvWg2dfu7NSiaYLxtw==} engines: {node: ^20.17.0 || >=22.9.0} nth-check@2.1.1: resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} object-inspect@1.13.4: resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} engines: {node: '>= 0.4'} obuf@1.1.2: resolution: {integrity: sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==} on-finished@2.4.1: resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} engines: {node: '>= 0.8'} on-headers@1.1.0: resolution: {integrity: sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==} engines: {node: '>= 0.8'} once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} onetime@7.0.0: resolution: {integrity: sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==} engines: {node: '>=18'} open@10.2.0: resolution: {integrity: 
sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} engines: {node: '>=18'} open@11.0.0: resolution: {integrity: sha512-smsWv2LzFjP03xmvFoJ331ss6h+jixfA4UUV/Bsiyuu4YJPfN+FIQGOIiv4w9/+MoHkfkJ22UIaQWRVFRfH6Vw==} engines: {node: '>=20'} ora@9.0.0: resolution: {integrity: sha512-m0pg2zscbYgWbqRR6ABga5c3sZdEon7bSgjnlXC64kxtxLOyjRcbbUkLj7HFyy/FTD+P2xdBWu8snGhYI0jc4A==} engines: {node: '>=20'} ordered-binary@1.6.1: resolution: {integrity: sha512-QkCdPooczexPLiXIrbVOPYkR3VO3T6v2OyKRkR1Xbhpy7/LAVXwahnRCgRp78Oe/Ehf0C/HATAxfSr6eA1oX+w==} p-limit@3.1.0: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} p-locate@5.0.0: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} p-map@7.0.4: resolution: {integrity: sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==} engines: {node: '>=18'} p-retry@6.2.1: resolution: {integrity: sha512-hEt02O4hUct5wtwg4H4KcWgDdm+l1bOaEy/hWzd8xtXB9BqxTWBBhb+2ImAtH4Cv4rPjV76xN3Zumqk3k3AhhQ==} engines: {node: '>=16.17'} pacote@21.0.4: resolution: {integrity: sha512-RplP/pDW0NNNDh3pnaoIWYPvNenS7UqMbXyvMqJczosiFWTeGGwJC2NQBLqKf4rGLFfwCOnntw1aEp9Jiqm1MA==} engines: {node: ^20.17.0 || >=22.9.0} hasBin: true parent-module@1.0.1: resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} engines: {node: '>=6'} parse-json@5.2.0: resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} engines: {node: '>=8'} parse-node-version@1.0.1: resolution: {integrity: sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==} engines: {node: '>= 0.10'} parse5-html-rewriting-stream@8.0.0: resolution: {integrity: 
sha512-wzh11mj8KKkno1pZEu+l2EVeWsuKDfR5KNWZOTsslfUX8lPDZx77m9T0kIoAVkFtD1nx6YF8oh4BnPHvxMtNMw==} parse5-sax-parser@8.0.0: resolution: {integrity: sha512-/dQ8UzHZwnrzs3EvDj6IkKrD/jIZyTlB+8XrHJvcjNgRdmWruNdN9i9RK/JtxakmlUdPwKubKPTCqvbTgzGhrw==} parse5@8.0.0: resolution: {integrity: sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==} parseurl@1.3.3: resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} engines: {node: '>= 0.8'} path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} path-key@3.1.1: resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} engines: {node: '>=8'} path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} path-scurry@2.0.1: resolution: {integrity: sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==} engines: {node: 20 || >=22} path-to-regexp@0.1.12: resolution: {integrity: sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==} path-to-regexp@8.3.0: resolution: {integrity: sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==} picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} picomatch@2.3.1: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: '>=8.6'} picomatch@4.0.3: resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} pify@4.0.1: resolution: {integrity: 
sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==} engines: {node: '>=6'} piscina@5.1.4: resolution: {integrity: sha512-7uU4ZnKeQq22t9AsmHGD2w4OYQGonwFnTypDypaWi7Qr2EvQIFVtG8J5D/3bE7W123Wdc9+v4CZDu5hJXVCtBg==} engines: {node: '>=20.x'} pkce-challenge@5.0.1: resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} engines: {node: '>=16.20.0'} postcss-loader@8.2.0: resolution: {integrity: sha512-tHX+RkpsXVcc7st4dSdDGliI+r4aAQDuv+v3vFYHixb6YgjreG5AG4SEB0kDK8u2s6htqEEpKlkhSBUTvWKYnA==} engines: {node: '>= 18.12.0'} peerDependencies: '@rspack/core': 0.x || 1.x postcss: ^7.0.0 || ^8.0.1 webpack: ^5.0.0 peerDependenciesMeta: '@rspack/core': optional: true webpack: optional: true postcss-media-query-parser@0.2.3: resolution: {integrity: sha512-3sOlxmbKcSHMjlUXQZKQ06jOswE7oVkXPxmZdoB1r5l0q6gTFTQSHxNxOrCccElbW7dxNytifNEo8qidX2Vsig==} postcss-modules-extract-imports@3.1.0: resolution: {integrity: sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==} engines: {node: ^10 || ^12 || >= 14} peerDependencies: postcss: ^8.1.0 postcss-modules-local-by-default@4.2.0: resolution: {integrity: sha512-5kcJm/zk+GJDSfw+V/42fJ5fhjL5YbFDl8nVdXkJPLLW+Vf9mTD5Xe0wqIaDnLuL2U6cDNpTr+UQ+v2HWIBhzw==} engines: {node: ^10 || ^12 || >= 14} peerDependencies: postcss: ^8.1.0 postcss-modules-scope@3.2.1: resolution: {integrity: sha512-m9jZstCVaqGjTAuny8MdgE88scJnCiQSlSrOWcTQgM2t32UBe+MUmFSO5t7VMSfAf/FJKImAxBav8ooCHJXCJA==} engines: {node: ^10 || ^12 || >= 14} peerDependencies: postcss: ^8.1.0 postcss-modules-values@4.0.0: resolution: {integrity: sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==} engines: {node: ^10 || ^12 || >= 14} peerDependencies: postcss: ^8.1.0 postcss-selector-parser@7.1.1: resolution: {integrity: 
sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==} engines: {node: '>=4'} postcss-value-parser@4.2.0: resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} postcss@8.5.6: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} powershell-utils@0.1.0: resolution: {integrity: sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A==} engines: {node: '>=20'} proc-log@6.1.0: resolution: {integrity: sha512-iG+GYldRf2BQ0UDUAd6JQ/RwzaQy6mXmsk/IzlYyal4A4SNFw54MeH4/tLkF4I5WoWG9SQwuqWzS99jaFQHBuQ==} engines: {node: ^20.17.0 || >=22.9.0} process-nextick-args@2.0.1: resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} promise-retry@2.0.1: resolution: {integrity: sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==} engines: {node: '>=10'} proxy-addr@2.0.7: resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} engines: {node: '>= 0.10'} prr@1.0.1: resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} qs@6.14.1: resolution: {integrity: sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==} engines: {node: '>=0.6'} randombytes@2.1.0: resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} range-parser@1.2.1: resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} engines: {node: '>= 0.6'} raw-body@2.5.3: resolution: {integrity: sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==} 
engines: {node: '>= 0.8'} raw-body@3.0.2: resolution: {integrity: sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==} engines: {node: '>= 0.10'} readable-stream@2.3.8: resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==} readable-stream@3.6.2: resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} engines: {node: '>= 6'} readdirp@3.6.0: resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} engines: {node: '>=8.10.0'} readdirp@4.1.2: resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} engines: {node: '>= 14.18.0'} regenerate-unicode-properties@10.2.2: resolution: {integrity: sha512-m03P+zhBeQd1RGnYxrGyDAPpWX/epKirLrp8e3qevZdVkKtnCrjjWczIbYc8+xd6vcTStVlqfycTx1KR4LOr0g==} engines: {node: '>=4'} regenerate@1.4.2: resolution: {integrity: sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==} regex-parser@2.3.1: resolution: {integrity: sha512-yXLRqatcCuKtVHsWrNg0JL3l1zGfdXeEvDa0bdu4tCDQw0RpMDZsqbkyRTUnKMR0tXF627V2oEWjBEaEdqTwtQ==} regexpu-core@6.4.0: resolution: {integrity: sha512-0ghuzq67LI9bLXpOX/ISfve/Mq33a4aFRzoQYhnnok1JOFpmE/A2TBGkNVenOGEeSBCjIiWcc6MVOG5HEQv0sA==} engines: {node: '>=4'} regjsgen@0.8.0: resolution: {integrity: sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==} regjsparser@0.13.0: resolution: {integrity: sha512-NZQZdC5wOE/H3UT28fVGL+ikOZcEzfMGk/c3iN9UGxzWHMa1op7274oyiUVrAG4B2EuFhus8SvkaYnhvW92p9Q==} hasBin: true require-from-string@2.0.2: resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} engines: {node: '>=0.10.0'} requires-port@1.0.0: resolution: {integrity: 
sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} resolve-from@4.0.0: resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} engines: {node: '>=4'} resolve-url-loader@5.0.0: resolution: {integrity: sha512-uZtduh8/8srhBoMx//5bwqjQ+rfYOUq8zC9NrMUGtjBiGTtFJM42s58/36+hTqeqINcnYe08Nj3LkK9lW4N8Xg==} engines: {node: '>=12'} resolve@1.22.11: resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==} engines: {node: '>= 0.4'} hasBin: true restore-cursor@5.1.0: resolution: {integrity: sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==} engines: {node: '>=18'} retry@0.12.0: resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==} engines: {node: '>= 4'} retry@0.13.1: resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} engines: {node: '>= 4'} rfdc@1.4.1: resolution: {integrity: sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==} rolldown@1.0.0-beta.58: resolution: {integrity: sha512-v1FCjMZCan7f+xGAHBi+mqiE4MlH7I+SXEHSQSJoMOGNNB2UYtvMiejsq9YuUOiZjNeUeV/a21nSFbrUR+4ZCQ==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true rollup@4.57.1: resolution: {integrity: sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true router@2.2.0: resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} engines: {node: '>= 18'} run-applescript@7.1.0: resolution: {integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} engines: {node: '>=18'} rxjs@7.8.2: resolution: {integrity: 
sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} safe-buffer@5.1.2: resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} safe-buffer@5.2.1: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} sass-loader@16.0.6: resolution: {integrity: sha512-sglGzId5gmlfxNs4gK2U3h7HlVRfx278YK6Ono5lwzuvi1jxig80YiuHkaDBVsYIKFhx8wN7XSCI0M2IDS/3qA==} engines: {node: '>= 18.12.0'} peerDependencies: '@rspack/core': 0.x || 1.x node-sass: ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0 || ^9.0.0 sass: ^1.3.0 sass-embedded: '*' webpack: ^5.0.0 peerDependenciesMeta: '@rspack/core': optional: true node-sass: optional: true sass: optional: true sass-embedded: optional: true webpack: optional: true sass@1.97.1: resolution: {integrity: sha512-uf6HoO8fy6ClsrShvMgaKUn14f2EHQLQRtpsZZLeU/Mv0Q1K5P0+x2uvH6Cub39TVVbWNSrraUhDAoFph6vh0A==} engines: {node: '>=14.0.0'} hasBin: true sax@1.4.4: resolution: {integrity: sha512-1n3r/tGXO6b6VXMdFT54SHzT9ytu9yr7TaELowdYpMqY/Ao7EnlQGmAQ1+RatX7Tkkdm6hONI2owqNx2aZj5Sw==} engines: {node: '>=11.0.0'} schema-utils@4.3.3: resolution: {integrity: sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==} engines: {node: '>= 10.13.0'} select-hose@2.0.0: resolution: {integrity: sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==} selfsigned@2.4.1: resolution: {integrity: sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==} engines: {node: '>=10'} semver@5.7.2: resolution: {integrity: sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==} hasBin: true semver@6.3.1: resolution: {integrity: 
sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} hasBin: true semver@7.7.3: resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} engines: {node: '>=10'} hasBin: true send@0.19.2: resolution: {integrity: sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==} engines: {node: '>= 0.8.0'} send@1.2.1: resolution: {integrity: sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==} engines: {node: '>= 18'} serialize-javascript@6.0.2: resolution: {integrity: sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==} serve-index@1.9.2: resolution: {integrity: sha512-KDj11HScOaLmrPxl70KYNW1PksP4Nb/CLL2yvC+Qd2kHMPEEpfc4Re2e4FOay+bC/+XQl/7zAcWON3JVo5v3KQ==} engines: {node: '>= 0.8.0'} serve-static@1.16.3: resolution: {integrity: sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==} engines: {node: '>= 0.8.0'} serve-static@2.2.1: resolution: {integrity: sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==} engines: {node: '>= 18'} setprototypeof@1.2.0: resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} shallow-clone@3.0.1: resolution: {integrity: sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==} engines: {node: '>=8'} shebang-command@2.0.0: resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} engines: {node: '>=8'} shebang-regex@3.0.0: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} shell-quote@1.8.3: resolution: {integrity: 
sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==} engines: {node: '>= 0.4'} side-channel-list@1.0.0: resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} engines: {node: '>= 0.4'} side-channel-map@1.0.1: resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} engines: {node: '>= 0.4'} side-channel-weakmap@1.0.2: resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} engines: {node: '>= 0.4'} side-channel@1.1.0: resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} engines: {node: '>= 0.4'} signal-exit@4.1.0: resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} engines: {node: '>=14'} sigstore@4.1.0: resolution: {integrity: sha512-/fUgUhYghuLzVT/gaJoeVehLCgZiUxPCPMcyVNY0lIf/cTCz58K/WTI7PefDarXxp9nUKpEwg1yyz3eSBMTtgA==} engines: {node: ^20.17.0 || >=22.9.0} slice-ansi@7.1.2: resolution: {integrity: sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w==} engines: {node: '>=18'} smart-buffer@4.2.0: resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==} engines: {node: '>= 6.0.0', npm: '>= 3.0.0'} sockjs@0.3.24: resolution: {integrity: sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==} socks-proxy-agent@8.0.5: resolution: {integrity: sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==} engines: {node: '>= 14'} socks@2.8.7: resolution: {integrity: sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==} engines: {node: '>= 10.0.0', npm: '>= 3.0.0'} 
source-map-js@1.2.1: resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} engines: {node: '>=0.10.0'} source-map-loader@5.0.0: resolution: {integrity: sha512-k2Dur7CbSLcAH73sBcIkV5xjPV4SzqO1NJ7+XaQl8if3VODDUj3FNchNGpqgJSKbvUfJuhVdv8K2Eu8/TNl2eA==} engines: {node: '>= 18.12.0'} peerDependencies: webpack: ^5.72.1 source-map-support@0.5.21: resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} source-map@0.6.1: resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} engines: {node: '>=0.10.0'} source-map@0.7.6: resolution: {integrity: sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==} engines: {node: '>= 12'} spdx-correct@3.2.0: resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} spdx-exceptions@2.5.0: resolution: {integrity: sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==} spdx-expression-parse@3.0.1: resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} spdx-license-ids@3.0.22: resolution: {integrity: sha512-4PRT4nh1EImPbt2jASOKHX7PB7I+e4IWNLvkKFDxNhJlfjbYlleYQh285Z/3mPTHSAK/AvdMmw5BNNuYH8ShgQ==} spdy-transport@3.0.0: resolution: {integrity: sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==} spdy@4.0.2: resolution: {integrity: sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==} engines: {node: '>=6.0.0'} ssri@13.0.1: resolution: {integrity: sha512-QUiRf1+u9wPTL/76GTYlKttDEBWV1ga9ZXW8BG6kfdeyyM8LGPix9gROyg9V2+P0xNyF3X2Go526xKFdMZrHSQ==} engines: {node: ^20.17.0 || >=22.9.0} statuses@1.5.0: resolution: {integrity: 
sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==} engines: {node: '>= 0.6'} statuses@2.0.2: resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} stdin-discarder@0.2.2: resolution: {integrity: sha512-UhDfHmA92YAlNnCfhmq0VeNL5bDbiZGg7sZ2IvPsXubGkiNa9EC+tUTsjBRsYUAz87btI6/1wf4XoVvQ3uRnmQ==} engines: {node: '>=18'} string-width@4.2.3: resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} engines: {node: '>=8'} string-width@7.2.0: resolution: {integrity: sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==} engines: {node: '>=18'} string-width@8.1.1: resolution: {integrity: sha512-KpqHIdDL9KwYk22wEOg/VIqYbrnLeSApsKT/bSj6Ez7pn3CftUiLAv2Lccpq1ALcpLV9UX1Ppn92npZWu2w/aw==} engines: {node: '>=20'} string_decoder@1.1.1: resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==} string_decoder@1.3.0: resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} strip-ansi@6.0.1: resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} engines: {node: '>=8'} strip-ansi@7.1.2: resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==} engines: {node: '>=12'} supports-color@8.1.1: resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} engines: {node: '>=10'} supports-preserve-symlinks-flag@1.0.0: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} tapable@2.3.0: resolution: {integrity: 
sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} engines: {node: '>=6'} tar@7.5.7: resolution: {integrity: sha512-fov56fJiRuThVFXD6o6/Q354S7pnWMJIVlDBYijsTNx6jKSE4pvrDTs6lUnmGvNyfJwFQQwWy3owKz1ucIhveQ==} engines: {node: '>=18'} terser-webpack-plugin@5.3.16: resolution: {integrity: sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q==} engines: {node: '>= 10.13.0'} peerDependencies: '@swc/core': '*' esbuild: '*' uglify-js: '*' webpack: ^5.1.0 peerDependenciesMeta: '@swc/core': optional: true esbuild: optional: true uglify-js: optional: true terser@5.44.1: resolution: {integrity: sha512-t/R3R/n0MSwnnazuPpPNVO60LX0SKL45pyl9YlvxIdkH0Of7D5qM2EVe+yASRIlY5pZ73nclYJfNANGWPwFDZw==} engines: {node: '>=10'} hasBin: true thingies@2.5.0: resolution: {integrity: sha512-s+2Bwztg6PhWUD7XMfeYm5qliDdSiZm7M7n8KjTkIsm3l/2lgVRc2/Gx/v+ZX8lT4FMA+i8aQvhcWylldc+ZNw==} engines: {node: '>=10.18'} peerDependencies: tslib: ^2 thunky@1.1.0: resolution: {integrity: sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==} tinyglobby@0.2.15: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} toidentifier@1.0.1: resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} engines: {node: '>=0.6'} tree-dump@1.1.0: resolution: {integrity: sha512-rMuvhU4MCDbcbnleZTFezWsaZXRFemSqAM+7jPnzUl1fo9w3YEKOxAeui0fz3OI4EU4hf23iyA7uQRVko+UaBA==} engines: {node: '>=10.0'} peerDependencies: tslib: '2' tree-kill@1.2.2: resolution: {integrity: sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==} hasBin: true tslib@2.8.1: 
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} tuf-js@4.1.0: resolution: {integrity: sha512-50QV99kCKH5P/Vs4E2Gzp7BopNV+KzTXqWeaxrfu5IQJBOULRsTIS9seSsOVT8ZnGXzCyx55nYWAi4qJzpZKEQ==} engines: {node: ^20.17.0 || >=22.9.0} type-is@1.6.18: resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} engines: {node: '>= 0.6'} type-is@2.0.1: resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} engines: {node: '>= 0.6'} typed-assert@1.0.9: resolution: {integrity: sha512-KNNZtayBCtmnNmbo5mG47p1XsCyrx6iVqomjcZnec/1Y5GGARaxPs6r49RnSPeUP3YjNYiU9sQHAtY4BBvnZwg==} typescript@5.9.3: resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} engines: {node: '>=14.17'} hasBin: true undici-types@6.21.0: resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} undici@7.18.0: resolution: {integrity: sha512-CfPufgPFHCYu0W4h1NiKW9+tNJ39o3kWm7Cm29ET1enSJx+AERfz7A2wAr26aY0SZbYzZlTBQtcHy15o60VZfQ==} engines: {node: '>=20.18.1'} unicode-canonical-property-names-ecmascript@2.0.1: resolution: {integrity: sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==} engines: {node: '>=4'} unicode-match-property-ecmascript@2.0.0: resolution: {integrity: sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==} engines: {node: '>=4'} unicode-match-property-value-ecmascript@2.2.1: resolution: {integrity: sha512-JQ84qTuMg4nVkx8ga4A16a1epI9H6uTXAknqxkGF/aFfRLw1xC/Bp24HNLaZhHSkWd3+84t8iXnp1J0kYcZHhg==} engines: {node: '>=4'} unicode-property-aliases-ecmascript@2.2.0: resolution: {integrity: sha512-hpbDzxUY9BFwX+UeBnxv3Sh1q7HFxj48DTmXchNgRa46lO8uj3/1iEn3MiNUYTg1g9ctIqXCCERn8gYZhHC5lQ==} engines: {node: 
'>=4'} unique-filename@5.0.0: resolution: {integrity: sha512-2RaJTAvAb4owyjllTfXzFClJ7WsGxlykkPvCr9pA//LD9goVq+m4PPAeBgNodGZ7nSrntT/auWpJ6Y5IFXcfjg==} engines: {node: ^20.17.0 || >=22.9.0} unique-slug@6.0.0: resolution: {integrity: sha512-4Lup7Ezn8W3d52/xBhZBVdx323ckxa7DEvd9kPQHppTkLoJXw6ltrBCyj5pnrxj0qKDxYMJ56CoxNuFCscdTiw==} engines: {node: ^20.17.0 || >=22.9.0} unpipe@1.0.0: resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} engines: {node: '>= 0.8'} update-browserslist-db@1.2.3: resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==} hasBin: true peerDependencies: browserslist: '>= 4.21.0' util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} utils-merge@1.0.1: resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} engines: {node: '>= 0.4.0'} uuid@8.3.2: resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} hasBin: true validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} validate-npm-package-name@7.0.2: resolution: {integrity: sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A==} engines: {node: ^20.17.0 || >=22.9.0} vary@1.1.2: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} vite@7.3.0: resolution: {integrity: sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: '@types/node': ^20.19.0 || >=22.12.0 jiti: '>=1.21.0' less: ^4.0.0 lightningcss: ^1.21.0 sass: 
^1.70.0 sass-embedded: ^1.70.0 stylus: '>=0.54.8' sugarss: ^5.0.0 terser: ^5.16.0 tsx: ^4.8.1 yaml: ^2.4.2 peerDependenciesMeta: '@types/node': optional: true jiti: optional: true less: optional: true lightningcss: optional: true sass: optional: true sass-embedded: optional: true stylus: optional: true sugarss: optional: true terser: optional: true tsx: optional: true yaml: optional: true watchpack@2.5.0: resolution: {integrity: sha512-e6vZvY6xboSwLz2GD36c16+O/2Z6fKvIf4pOXptw2rY9MVwE/TXc6RGqxD3I3x0a28lwBY7DE+76uTPSsBrrCA==} engines: {node: '>=10.13.0'} watchpack@2.5.1: resolution: {integrity: sha512-Zn5uXdcFNIA1+1Ei5McRd+iRzfhENPCe7LeABkJtNulSxjma+l7ltNx55BWZkRlwRnpOgHqxnjyaDgJnNXnqzg==} engines: {node: '>=10.13.0'} wbuf@1.7.3: resolution: {integrity: sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==} weak-lru-cache@1.2.2: resolution: {integrity: sha512-DEAoo25RfSYMuTGc9vPJzZcZullwIqRDSI9LOy+fkCJPi6hykCnfKaXTuPBDuXAUcqHXyOgFtHNp/kB2FjYHbw==} webpack-dev-middleware@7.4.5: resolution: {integrity: sha512-uxQ6YqGdE4hgDKNf7hUiPXOdtkXvBJXrfEGYSx7P7LC8hnUYGK70X6xQXUvXeNyBDDcsiQXpG2m3G9vxowaEuA==} engines: {node: '>= 18.12.0'} peerDependencies: webpack: ^5.0.0 peerDependenciesMeta: webpack: optional: true webpack-dev-server@5.2.2: resolution: {integrity: sha512-QcQ72gh8a+7JO63TAx/6XZf/CWhgMzu5m0QirvPfGvptOusAxG12w2+aua1Jkjr7hzaWDnJ2n6JFeexMHI+Zjg==} engines: {node: '>= 18.12.0'} hasBin: true peerDependencies: webpack: ^5.0.0 webpack-cli: '*' peerDependenciesMeta: webpack: optional: true webpack-cli: optional: true webpack-merge@6.0.1: resolution: {integrity: sha512-hXXvrjtx2PLYx4qruKl+kyRSLc52V+cCvMxRjmKwoA+CBbbF5GfIBtR6kCvl0fYGqTUPKB+1ktVmTHqMOzgCBg==} engines: {node: '>=18.0.0'} webpack-sources@3.3.3: resolution: {integrity: sha512-yd1RBzSGanHkitROoPFd6qsrxt+oFhg/129YzheDGqeustzX0vTZJZsSsQjVQC4yzBQ56K55XU8gaNCtIzOnTg==} engines: {node: '>=10.13.0'} webpack-subresource-integrity@5.1.0: resolution: {integrity: 
sha512-sacXoX+xd8r4WKsy9MvH/q/vBtEHr86cpImXwyg74pFIpERKt6FmB8cXpeuh0ZLgclOlHI4Wcll7+R5L02xk9Q==} engines: {node: '>= 12'} peerDependencies: html-webpack-plugin: '>= 5.0.0-beta.1 < 6' webpack: ^5.12.0 peerDependenciesMeta: html-webpack-plugin: optional: true webpack@5.104.1: resolution: {integrity: sha512-Qphch25abbMNtekmEGJmeRUhLDbe+QfiWTiqpKYkpCOWY64v9eyl+KRRLmqOFA2AvKPpc9DC6+u2n76tQLBoaA==} engines: {node: '>=10.13.0'} hasBin: true peerDependencies: webpack-cli: '*' peerDependenciesMeta: webpack-cli: optional: true websocket-driver@0.7.4: resolution: {integrity: sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==} engines: {node: '>=0.8.0'} websocket-extensions@0.1.4: resolution: {integrity: sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==} engines: {node: '>=0.8.0'} which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} hasBin: true which@6.0.1: resolution: {integrity: sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg==} engines: {node: ^20.17.0 || >=22.9.0} hasBin: true wildcard@2.0.1: resolution: {integrity: sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==} wrap-ansi@6.2.0: resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} engines: {node: '>=8'} wrap-ansi@9.0.2: resolution: {integrity: sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==} engines: {node: '>=18'} wrappy@1.0.2: resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} ws@8.19.0: resolution: {integrity: sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==} engines: {node: '>=10.0.0'} peerDependencies: 
bufferutil: ^4.0.1 utf-8-validate: '>=5.0.2' peerDependenciesMeta: bufferutil: optional: true utf-8-validate: optional: true wsl-utils@0.1.0: resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} engines: {node: '>=18'} wsl-utils@0.3.1: resolution: {integrity: sha512-g/eziiSUNBSsdDJtCLB8bdYEUMj4jR7AGeUo96p/3dTafgjHhpF4RiCFPiRILwjQoDXx5MqkBr4fwWtR3Ky4Wg==} engines: {node: '>=20'} y18n@5.0.8: resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} engines: {node: '>=10'} yallist@3.1.1: resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} yallist@4.0.0: resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} yallist@5.0.0: resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} engines: {node: '>=18'} yargs-parser@22.0.0: resolution: {integrity: sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw==} engines: {node: ^20.19.0 || ^22.12.0 || >=23} yargs@18.0.0: resolution: {integrity: sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg==} engines: {node: ^20.19.0 || ^22.12.0 || >=23} yocto-queue@0.1.0: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} yoctocolors-cjs@2.1.3: resolution: {integrity: sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==} engines: {node: '>=18'} yoctocolors@2.1.2: resolution: {integrity: sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==} engines: {node: '>=18'} zod-to-json-schema@3.25.1: resolution: {integrity: 
sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==} peerDependencies: zod: ^3.25 || ^4 zod@4.3.5: resolution: {integrity: sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==} zone.js@0.16.0: resolution: {integrity: sha512-LqLPpIQANebrlxY6jKcYKdgN5DTXyyHAKnnWWjE5pPfEQ4n7j5zn7mOEEpwNZVKGqx3kKKmvplEmoBrvpgROTA==} snapshots: '@algolia/abtesting@1.12.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/client-abtesting@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/client-analytics@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/client-common@5.46.2': {} '@algolia/client-insights@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/client-personalization@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/client-query-suggestions@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/client-search@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/ingestion@1.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 
'@algolia/requester-node-http': 5.46.2 '@algolia/monitoring@1.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/recommend@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 '@algolia/requester-browser-xhr@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-fetch@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@algolia/requester-node-http@5.46.2': dependencies: '@algolia/client-common': 5.46.2 '@ampproject/remapping@2.3.0': dependencies: '@jridgewell/gen-mapping': 0.3.13 '@jridgewell/trace-mapping': 0.3.31 '@angular-devkit/architect@0.2101.0-rc.0': dependencies: '@angular-devkit/core': 21.1.0-rc.0 rxjs: 7.8.2 transitivePeerDependencies: - chokidar '@angular-devkit/build-angular@21.1.0-rc.0(@angular/compiler-cli@in-existing-linked-by-bazel)(@angular/compiler@in-existing-linked-by-bazel)(@angular/core@in-existing-linked-by-bazel)(@angular/localize@in-existing-linked-by-bazel)(@angular/platform-browser@in-existing-linked-by-bazel)(@types/node@20.19.33)(jiti@2.6.1)(typescript@5.9.3)': dependencies: '@ampproject/remapping': 2.3.0 '@angular-devkit/architect': 0.2101.0-rc.0 '@angular-devkit/build-webpack': 0.2101.0-rc.0(webpack-dev-server@5.2.2(tslib@2.8.1)(webpack@5.104.1))(webpack@5.104.1(esbuild@0.27.2)) '@angular-devkit/core': 21.1.0-rc.0 '@angular/build': 21.1.0-rc.0(@angular/compiler-cli@in-existing-linked-by-bazel)(@angular/compiler@in-existing-linked-by-bazel)(@angular/core@in-existing-linked-by-bazel)(@angular/localize@in-existing-linked-by-bazel)(@angular/platform-browser@in-existing-linked-by-bazel)(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(postcss@8.5.6)(terser@5.44.1)(tslib@2.8.1)(typescript@5.9.3) '@angular/compiler-cli': link:in-existing-linked-by-bazel '@babel/core': 7.28.5 
'@babel/generator': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-split-export-declaration': 7.24.7 '@babel/plugin-transform-async-generator-functions': 7.28.0(@babel/core@7.28.5) '@babel/plugin-transform-async-to-generator': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-runtime': 7.28.5(@babel/core@7.28.5) '@babel/preset-env': 7.28.5(@babel/core@7.28.5) '@babel/runtime': 7.28.4 '@discoveryjs/json-ext': 0.6.3 '@ngtools/webpack': 21.1.0-rc.0(@angular/compiler-cli@in-existing-linked-by-bazel)(typescript@5.9.3)(webpack@5.104.1(esbuild@0.27.2)) ansi-colors: 4.1.3 autoprefixer: 10.4.23(postcss@8.5.6) babel-loader: 10.0.0(@babel/core@7.28.5)(webpack@5.104.1(esbuild@0.27.2)) browserslist: 4.28.1 copy-webpack-plugin: 13.0.1(webpack@5.104.1(esbuild@0.27.2)) css-loader: 7.1.2(webpack@5.104.1(esbuild@0.27.2)) esbuild-wasm: 0.27.2 http-proxy-middleware: 3.0.5 istanbul-lib-instrument: 6.0.3 jsonc-parser: 3.3.1 karma-source-map-support: 1.4.0 less: 4.4.2 less-loader: 12.3.0(less@4.4.2)(webpack@5.104.1(esbuild@0.27.2)) license-webpack-plugin: 4.0.2(webpack@5.104.1(esbuild@0.27.2)) loader-utils: 3.3.1 mini-css-extract-plugin: 2.9.4(webpack@5.104.1(esbuild@0.27.2)) open: 11.0.0 ora: 9.0.0 picomatch: 4.0.3 piscina: 5.1.4 postcss: 8.5.6 postcss-loader: 8.2.0(postcss@8.5.6)(typescript@5.9.3)(webpack@5.104.1(esbuild@0.27.2)) resolve-url-loader: 5.0.0 rxjs: 7.8.2 sass: 1.97.1 sass-loader: 16.0.6(sass@1.97.1)(webpack@5.104.1(esbuild@0.27.2)) semver: 7.7.3 source-map-loader: 5.0.0(webpack@5.104.1(esbuild@0.27.2)) source-map-support: 0.5.21 terser: 5.44.1 tinyglobby: 0.2.15 tree-kill: 1.2.2 tslib: 2.8.1 typescript: 5.9.3 webpack: 5.104.1(esbuild@0.27.2) webpack-dev-middleware: 7.4.5(tslib@2.8.1)(webpack@5.104.1) webpack-dev-server: 5.2.2(tslib@2.8.1)(webpack@5.104.1) webpack-merge: 6.0.1 webpack-subresource-integrity: 5.1.0(webpack@5.104.1(esbuild@0.27.2)) optionalDependencies: '@angular/core': link:in-existing-linked-by-bazel '@angular/localize': 
link:in-existing-linked-by-bazel '@angular/platform-browser': link:in-existing-linked-by-bazel esbuild: 0.27.2 transitivePeerDependencies: - '@angular/compiler' - '@rspack/core' - '@swc/core' - '@types/node' - bufferutil - chokidar - debug - html-webpack-plugin - jiti - lightningcss - node-sass - sass-embedded - stylus - sugarss - supports-color - tsx - uglify-js - utf-8-validate - vitest - webpack-cli - yaml '@angular-devkit/build-webpack@0.2101.0-rc.0(webpack-dev-server@5.2.2(tslib@2.8.1)(webpack@5.104.1))(webpack@5.104.1(esbuild@0.27.2))': dependencies: '@angular-devkit/architect': 0.2101.0-rc.0 rxjs: 7.8.2 webpack: 5.104.1(esbuild@0.27.2) webpack-dev-server: 5.2.2(tslib@2.8.1)(webpack@5.104.1) transitivePeerDependencies: - chokidar '@angular-devkit/core@21.1.0-rc.0': dependencies: ajv: 8.17.1 ajv-formats: 3.0.1(ajv@8.17.1) jsonc-parser: 3.3.1 picomatch: 4.0.3 rxjs: 7.8.2 source-map: 0.7.6 '@angular-devkit/schematics@21.1.0-rc.0': dependencies: '@angular-devkit/core': 21.1.0-rc.0 jsonc-parser: 3.3.1 magic-string: 0.30.21 ora: 9.0.0 rxjs: 7.8.2 transitivePeerDependencies: - chokidar '@angular/build@21.1.0-rc.0(@angular/compiler-cli@in-existing-linked-by-bazel)(@angular/compiler@in-existing-linked-by-bazel)(@angular/core@in-existing-linked-by-bazel)(@angular/localize@in-existing-linked-by-bazel)(@angular/platform-browser@in-existing-linked-by-bazel)(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(postcss@8.5.6)(terser@5.44.1)(tslib@2.8.1)(typescript@5.9.3)': dependencies: '@ampproject/remapping': 2.3.0 '@angular-devkit/architect': 0.2101.0-rc.0 '@angular/compiler': link:in-existing-linked-by-bazel '@angular/compiler-cli': link:in-existing-linked-by-bazel '@babel/core': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-split-export-declaration': 7.24.7 '@inquirer/confirm': 5.1.21(@types/node@20.19.33) '@vitejs/plugin-basic-ssl': 2.1.0(vite@7.3.0(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(sass@1.97.1)(terser@5.44.1)) beasties: 0.3.5 browserslist: 
4.28.1 esbuild: 0.27.2 https-proxy-agent: 7.0.6 istanbul-lib-instrument: 6.0.3 jsonc-parser: 3.3.1 listr2: 9.0.5 magic-string: 0.30.21 mrmime: 2.0.1 parse5-html-rewriting-stream: 8.0.0 picomatch: 4.0.3 piscina: 5.1.4 rolldown: 1.0.0-beta.58 sass: 1.97.1 semver: 7.7.3 source-map-support: 0.5.21 tinyglobby: 0.2.15 tslib: 2.8.1 typescript: 5.9.3 undici: 7.18.0 vite: 7.3.0(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(sass@1.97.1)(terser@5.44.1) watchpack: 2.5.0 optionalDependencies: '@angular/core': link:in-existing-linked-by-bazel '@angular/localize': link:in-existing-linked-by-bazel '@angular/platform-browser': link:in-existing-linked-by-bazel less: 4.4.2 lmdb: 3.4.4 postcss: 8.5.6 transitivePeerDependencies: - '@types/node' - chokidar - jiti - lightningcss - sass-embedded - stylus - sugarss - supports-color - terser - tsx - yaml '@angular/cli@21.1.0-rc.0(@types/node@20.19.33)(hono@4.11.9)': dependencies: '@angular-devkit/architect': 0.2101.0-rc.0 '@angular-devkit/core': 21.1.0-rc.0 '@angular-devkit/schematics': 21.1.0-rc.0 '@inquirer/prompts': 7.10.1(@types/node@20.19.33) '@listr2/prompt-adapter-inquirer': 3.0.5(@inquirer/prompts@7.10.1(@types/node@20.19.33))(@types/node@20.19.33)(listr2@9.0.5) '@modelcontextprotocol/sdk': 1.25.2(hono@4.11.9)(zod@4.3.5) '@schematics/angular': 21.1.0-rc.0 '@yarnpkg/lockfile': 1.1.0 algoliasearch: 5.46.2 ini: 6.0.0 jsonc-parser: 3.3.1 listr2: 9.0.5 npm-package-arg: 13.0.2 pacote: 21.0.4 parse5-html-rewriting-stream: 8.0.0 resolve: 1.22.11 semver: 7.7.3 yargs: 18.0.0 zod: 4.3.5 transitivePeerDependencies: - '@cfworker/json-schema' - '@types/node' - chokidar - hono - supports-color '@babel/code-frame@7.29.0': dependencies: '@babel/helper-validator-identifier': 7.28.5 js-tokens: 4.0.0 picocolors: 1.1.1 '@babel/compat-data@7.29.0': {} '@babel/core@7.28.5': dependencies: '@babel/code-frame': 7.29.0 '@babel/generator': 7.28.5 '@babel/helper-compilation-targets': 7.28.6 '@babel/helper-module-transforms': 7.28.6(@babel/core@7.28.5) 
'@babel/helpers': 7.28.6 '@babel/parser': 7.29.0 '@babel/template': 7.28.6 '@babel/traverse': 7.29.0 '@babel/types': 7.29.0 '@jridgewell/remapping': 2.3.5 convert-source-map: 2.0.0 debug: 4.4.3 gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 transitivePeerDependencies: - supports-color '@babel/generator@7.28.5': dependencies: '@babel/parser': 7.29.0 '@babel/types': 7.29.0 '@jridgewell/gen-mapping': 0.3.13 '@jridgewell/trace-mapping': 0.3.31 jsesc: 3.1.0 '@babel/generator@7.29.1': dependencies: '@babel/parser': 7.29.0 '@babel/types': 7.29.0 '@jridgewell/gen-mapping': 0.3.13 '@jridgewell/trace-mapping': 0.3.31 jsesc: 3.1.0 '@babel/helper-annotate-as-pure@7.27.3': dependencies: '@babel/types': 7.29.0 '@babel/helper-compilation-targets@7.28.6': dependencies: '@babel/compat-data': 7.29.0 '@babel/helper-validator-option': 7.27.1 browserslist: 4.28.1 lru-cache: 5.1.1 semver: 6.3.1 '@babel/helper-create-class-features-plugin@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-member-expression-to-functions': 7.28.5 '@babel/helper-optimise-call-expression': 7.27.1 '@babel/helper-replace-supers': 7.28.6(@babel/core@7.28.5) '@babel/helper-skip-transparent-expression-wrappers': 7.27.1 '@babel/traverse': 7.29.0 semver: 6.3.1 transitivePeerDependencies: - supports-color '@babel/helper-create-regexp-features-plugin@7.28.5(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 regexpu-core: 6.4.0 semver: 6.3.1 '@babel/helper-define-polyfill-provider@0.6.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-compilation-targets': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 debug: 4.4.3 lodash.debounce: 4.0.8 resolve: 1.22.11 transitivePeerDependencies: - supports-color '@babel/helper-globals@7.28.0': {} '@babel/helper-member-expression-to-functions@7.28.5': dependencies: '@babel/traverse': 7.29.0 '@babel/types': 7.29.0 transitivePeerDependencies: - 
supports-color '@babel/helper-module-imports@7.28.6': dependencies: '@babel/traverse': 7.29.0 '@babel/types': 7.29.0 transitivePeerDependencies: - supports-color '@babel/helper-module-transforms@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-imports': 7.28.6 '@babel/helper-validator-identifier': 7.28.5 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/helper-optimise-call-expression@7.27.1': dependencies: '@babel/types': 7.29.0 '@babel/helper-plugin-utils@7.28.6': {} '@babel/helper-remap-async-to-generator@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-wrap-function': 7.28.6 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/helper-replace-supers@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-member-expression-to-functions': 7.28.5 '@babel/helper-optimise-call-expression': 7.27.1 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/helper-skip-transparent-expression-wrappers@7.27.1': dependencies: '@babel/traverse': 7.29.0 '@babel/types': 7.29.0 transitivePeerDependencies: - supports-color '@babel/helper-split-export-declaration@7.24.7': dependencies: '@babel/types': 7.29.0 '@babel/helper-string-parser@7.27.1': {} '@babel/helper-validator-identifier@7.28.5': {} '@babel/helper-validator-option@7.27.1': {} '@babel/helper-wrap-function@7.28.6': dependencies: '@babel/template': 7.28.6 '@babel/traverse': 7.29.0 '@babel/types': 7.29.0 transitivePeerDependencies: - supports-color '@babel/helpers@7.28.6': dependencies: '@babel/template': 7.28.6 '@babel/types': 7.29.0 '@babel/parser@7.29.0': dependencies: '@babel/types': 7.29.0 '@babel/plugin-bugfix-firefox-class-in-computed-class-key@7.28.5(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/traverse': 7.29.0 transitivePeerDependencies: - 
supports-color '@babel/plugin-bugfix-safari-class-field-initializer-scope@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-skip-transparent-expression-wrappers': 7.27.1 '@babel/plugin-transform-optional-chaining': 7.28.6(@babel/core@7.28.5) transitivePeerDependencies: - supports-color '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-import-assertions@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-syntax-import-attributes@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-syntax-unicode-sets-regex@7.18.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-arrow-functions@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-async-generator-functions@7.28.0(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-remap-async-to-generator': 7.27.1(@babel/core@7.28.5) '@babel/traverse': 7.29.0 
transitivePeerDependencies: - supports-color '@babel/plugin-transform-async-to-generator@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-imports': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-remap-async-to-generator': 7.27.1(@babel/core@7.28.5) transitivePeerDependencies: - supports-color '@babel/plugin-transform-block-scoped-functions@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-block-scoping@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-class-properties@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-class-features-plugin': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-class-static-block@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-class-features-plugin': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-classes@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-compilation-targets': 7.28.6 '@babel/helper-globals': 7.28.0 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-replace-supers': 7.28.6(@babel/core@7.28.5) '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/plugin-transform-computed-properties@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/template': 7.28.6 '@babel/plugin-transform-destructuring@7.28.5(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/plugin-transform-dotall-regex@7.28.6(@babel/core@7.28.5)': 
dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-duplicate-keys@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-duplicate-named-capturing-groups-regex@7.29.0(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-dynamic-import@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-explicit-resource-management@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-destructuring': 7.28.5(@babel/core@7.28.5) transitivePeerDependencies: - supports-color '@babel/plugin-transform-exponentiation-operator@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-export-namespace-from@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-for-of@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-skip-transparent-expression-wrappers': 7.27.1 transitivePeerDependencies: - supports-color '@babel/plugin-transform-function-name@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-compilation-targets': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/plugin-transform-json-strings@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-literals@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 
'@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-logical-assignment-operators@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-member-expression-literals@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-modules-amd@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-transforms': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-modules-commonjs@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-transforms': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-modules-systemjs@7.29.0(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-transforms': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-validator-identifier': 7.28.5 '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/plugin-transform-modules-umd@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-transforms': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-named-capturing-groups-regex@7.29.0(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-new-target@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-nullish-coalescing-operator@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 
'@babel/plugin-transform-numeric-separator@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-object-rest-spread@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-compilation-targets': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-destructuring': 7.28.5(@babel/core@7.28.5) '@babel/plugin-transform-parameters': 7.27.7(@babel/core@7.28.5) '@babel/traverse': 7.29.0 transitivePeerDependencies: - supports-color '@babel/plugin-transform-object-super@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-replace-supers': 7.28.6(@babel/core@7.28.5) transitivePeerDependencies: - supports-color '@babel/plugin-transform-optional-catch-binding@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-optional-chaining@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-skip-transparent-expression-wrappers': 7.27.1 transitivePeerDependencies: - supports-color '@babel/plugin-transform-parameters@7.27.7(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-private-methods@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-class-features-plugin': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-private-property-in-object@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-create-class-features-plugin': 7.28.6(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 transitivePeerDependencies: - supports-color '@babel/plugin-transform-property-literals@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 
7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-regenerator@7.29.0(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-regexp-modifiers@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-reserved-words@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-runtime@7.28.5(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-module-imports': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 babel-plugin-polyfill-corejs2: 0.4.15(@babel/core@7.28.5) babel-plugin-polyfill-corejs3: 0.13.0(@babel/core@7.28.5) babel-plugin-polyfill-regenerator: 0.6.6(@babel/core@7.28.5) semver: 6.3.1 transitivePeerDependencies: - supports-color '@babel/plugin-transform-shorthand-properties@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-spread@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-skip-transparent-expression-wrappers': 7.27.1 transitivePeerDependencies: - supports-color '@babel/plugin-transform-sticky-regex@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-template-literals@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-typeof-symbol@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-unicode-escapes@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-unicode-property-regex@7.28.6(@babel/core@7.28.5)': 
dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-unicode-regex@7.27.1(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-unicode-sets-regex@7.28.6(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-create-regexp-features-plugin': 7.28.5(@babel/core@7.28.5) '@babel/helper-plugin-utils': 7.28.6 '@babel/preset-env@7.28.5(@babel/core@7.28.5)': dependencies: '@babel/compat-data': 7.29.0 '@babel/core': 7.28.5 '@babel/helper-compilation-targets': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 '@babel/helper-validator-option': 7.27.1 '@babel/plugin-bugfix-firefox-class-in-computed-class-key': 7.28.5(@babel/core@7.28.5) '@babel/plugin-bugfix-safari-class-field-initializer-scope': 7.27.1(@babel/core@7.28.5) '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression': 7.27.1(@babel/core@7.28.5) '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining': 7.27.1(@babel/core@7.28.5) '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly': 7.28.6(@babel/core@7.28.5) '@babel/plugin-proposal-private-property-in-object': 7.21.0-placeholder-for-preset-env.2(@babel/core@7.28.5) '@babel/plugin-syntax-import-assertions': 7.28.6(@babel/core@7.28.5) '@babel/plugin-syntax-import-attributes': 7.28.6(@babel/core@7.28.5) '@babel/plugin-syntax-unicode-sets-regex': 7.18.6(@babel/core@7.28.5) '@babel/plugin-transform-arrow-functions': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-async-generator-functions': 7.28.0(@babel/core@7.28.5) '@babel/plugin-transform-async-to-generator': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-block-scoped-functions': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-block-scoping': 7.28.6(@babel/core@7.28.5) 
'@babel/plugin-transform-class-properties': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-class-static-block': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-classes': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-computed-properties': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-destructuring': 7.28.5(@babel/core@7.28.5) '@babel/plugin-transform-dotall-regex': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-duplicate-keys': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-duplicate-named-capturing-groups-regex': 7.29.0(@babel/core@7.28.5) '@babel/plugin-transform-dynamic-import': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-explicit-resource-management': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-exponentiation-operator': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-export-namespace-from': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-for-of': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-function-name': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-json-strings': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-literals': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-logical-assignment-operators': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-member-expression-literals': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-modules-amd': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-modules-commonjs': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-modules-systemjs': 7.29.0(@babel/core@7.28.5) '@babel/plugin-transform-modules-umd': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-named-capturing-groups-regex': 7.29.0(@babel/core@7.28.5) '@babel/plugin-transform-new-target': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-nullish-coalescing-operator': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-numeric-separator': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-object-rest-spread': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-object-super': 
7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-optional-catch-binding': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-optional-chaining': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-parameters': 7.27.7(@babel/core@7.28.5) '@babel/plugin-transform-private-methods': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-private-property-in-object': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-property-literals': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-regenerator': 7.29.0(@babel/core@7.28.5) '@babel/plugin-transform-regexp-modifiers': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-reserved-words': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-shorthand-properties': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-spread': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-sticky-regex': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-template-literals': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-typeof-symbol': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-unicode-escapes': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-unicode-property-regex': 7.28.6(@babel/core@7.28.5) '@babel/plugin-transform-unicode-regex': 7.27.1(@babel/core@7.28.5) '@babel/plugin-transform-unicode-sets-regex': 7.28.6(@babel/core@7.28.5) '@babel/preset-modules': 0.1.6-no-external-plugins(@babel/core@7.28.5) babel-plugin-polyfill-corejs2: 0.4.15(@babel/core@7.28.5) babel-plugin-polyfill-corejs3: 0.13.0(@babel/core@7.28.5) babel-plugin-polyfill-regenerator: 0.6.6(@babel/core@7.28.5) core-js-compat: 3.48.0 semver: 6.3.1 transitivePeerDependencies: - supports-color '@babel/preset-modules@0.1.6-no-external-plugins(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 '@babel/helper-plugin-utils': 7.28.6 '@babel/types': 7.29.0 esutils: 2.0.3 '@babel/runtime@7.28.4': {} '@babel/template@7.28.6': dependencies: '@babel/code-frame': 7.29.0 '@babel/parser': 7.29.0 '@babel/types': 7.29.0 '@babel/traverse@7.29.0': dependencies: 
'@babel/code-frame': 7.29.0 '@babel/generator': 7.29.1 '@babel/helper-globals': 7.28.0 '@babel/parser': 7.29.0 '@babel/template': 7.28.6 '@babel/types': 7.29.0 debug: 4.4.3 transitivePeerDependencies: - supports-color '@babel/types@7.29.0': dependencies: '@babel/helper-string-parser': 7.27.1 '@babel/helper-validator-identifier': 7.28.5 '@discoveryjs/json-ext@0.6.3': {} '@emnapi/core@1.8.1': dependencies: '@emnapi/wasi-threads': 1.1.0 tslib: 2.8.1 optional: true '@emnapi/runtime@1.8.1': dependencies: tslib: 2.8.1 optional: true '@emnapi/wasi-threads@1.1.0': dependencies: tslib: 2.8.1 optional: true '@esbuild/aix-ppc64@0.27.2': optional: true '@esbuild/android-arm64@0.27.2': optional: true '@esbuild/android-arm@0.27.2': optional: true '@esbuild/android-x64@0.27.2': optional: true '@esbuild/darwin-arm64@0.27.2': optional: true '@esbuild/darwin-x64@0.27.2': optional: true '@esbuild/freebsd-arm64@0.27.2': optional: true '@esbuild/freebsd-x64@0.27.2': optional: true '@esbuild/linux-arm64@0.27.2': optional: true '@esbuild/linux-arm@0.27.2': optional: true '@esbuild/linux-ia32@0.27.2': optional: true '@esbuild/linux-loong64@0.27.2': optional: true '@esbuild/linux-mips64el@0.27.2': optional: true '@esbuild/linux-ppc64@0.27.2': optional: true '@esbuild/linux-riscv64@0.27.2': optional: true '@esbuild/linux-s390x@0.27.2': optional: true '@esbuild/linux-x64@0.27.2': optional: true '@esbuild/netbsd-arm64@0.27.2': optional: true '@esbuild/netbsd-x64@0.27.2': optional: true '@esbuild/openbsd-arm64@0.27.2': optional: true '@esbuild/openbsd-x64@0.27.2': optional: true '@esbuild/openharmony-arm64@0.27.2': optional: true '@esbuild/sunos-x64@0.27.2': optional: true '@esbuild/win32-arm64@0.27.2': optional: true '@esbuild/win32-ia32@0.27.2': optional: true '@esbuild/win32-x64@0.27.2': optional: true '@hono/node-server@1.19.9(hono@4.11.9)': dependencies: hono: 4.11.9 '@inquirer/ansi@1.0.2': {} '@inquirer/checkbox@4.3.2(@types/node@20.19.33)': dependencies: '@inquirer/ansi': 1.0.2 
'@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/figures': 1.0.15 '@inquirer/type': 3.0.10(@types/node@20.19.33) yoctocolors-cjs: 2.1.3 optionalDependencies: '@types/node': 20.19.33 '@inquirer/confirm@5.1.21(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) optionalDependencies: '@types/node': 20.19.33 '@inquirer/core@10.3.2(@types/node@20.19.33)': dependencies: '@inquirer/ansi': 1.0.2 '@inquirer/figures': 1.0.15 '@inquirer/type': 3.0.10(@types/node@20.19.33) cli-width: 4.1.0 mute-stream: 2.0.0 signal-exit: 4.1.0 wrap-ansi: 6.2.0 yoctocolors-cjs: 2.1.3 optionalDependencies: '@types/node': 20.19.33 '@inquirer/editor@4.2.23(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/external-editor': 1.0.3(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) optionalDependencies: '@types/node': 20.19.33 '@inquirer/expand@4.0.23(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) yoctocolors-cjs: 2.1.3 optionalDependencies: '@types/node': 20.19.33 '@inquirer/external-editor@1.0.3(@types/node@20.19.33)': dependencies: chardet: 2.1.1 iconv-lite: 0.7.2 optionalDependencies: '@types/node': 20.19.33 '@inquirer/figures@1.0.15': {} '@inquirer/input@4.3.1(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) optionalDependencies: '@types/node': 20.19.33 '@inquirer/number@3.0.23(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) optionalDependencies: '@types/node': 20.19.33 '@inquirer/password@4.0.23(@types/node@20.19.33)': dependencies: '@inquirer/ansi': 1.0.2 '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) optionalDependencies: '@types/node': 20.19.33 
'@inquirer/prompts@7.10.1(@types/node@20.19.33)': dependencies: '@inquirer/checkbox': 4.3.2(@types/node@20.19.33) '@inquirer/confirm': 5.1.21(@types/node@20.19.33) '@inquirer/editor': 4.2.23(@types/node@20.19.33) '@inquirer/expand': 4.0.23(@types/node@20.19.33) '@inquirer/input': 4.3.1(@types/node@20.19.33) '@inquirer/number': 3.0.23(@types/node@20.19.33) '@inquirer/password': 4.0.23(@types/node@20.19.33) '@inquirer/rawlist': 4.1.11(@types/node@20.19.33) '@inquirer/search': 3.2.2(@types/node@20.19.33) '@inquirer/select': 4.4.2(@types/node@20.19.33) optionalDependencies: '@types/node': 20.19.33 '@inquirer/rawlist@4.1.11(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) yoctocolors-cjs: 2.1.3 optionalDependencies: '@types/node': 20.19.33 '@inquirer/search@3.2.2(@types/node@20.19.33)': dependencies: '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/figures': 1.0.15 '@inquirer/type': 3.0.10(@types/node@20.19.33) yoctocolors-cjs: 2.1.3 optionalDependencies: '@types/node': 20.19.33 '@inquirer/select@4.4.2(@types/node@20.19.33)': dependencies: '@inquirer/ansi': 1.0.2 '@inquirer/core': 10.3.2(@types/node@20.19.33) '@inquirer/figures': 1.0.15 '@inquirer/type': 3.0.10(@types/node@20.19.33) yoctocolors-cjs: 2.1.3 optionalDependencies: '@types/node': 20.19.33 '@inquirer/type@3.0.10(@types/node@20.19.33)': optionalDependencies: '@types/node': 20.19.33 '@isaacs/balanced-match@4.0.1': {} '@isaacs/brace-expansion@5.0.1': dependencies: '@isaacs/balanced-match': 4.0.1 '@isaacs/fs-minipass@4.0.1': dependencies: minipass: 7.1.2 '@istanbuljs/schema@0.1.3': {} '@jridgewell/gen-mapping@0.3.13': dependencies: '@jridgewell/sourcemap-codec': 1.5.5 '@jridgewell/trace-mapping': 0.3.31 '@jridgewell/remapping@2.3.5': dependencies: '@jridgewell/gen-mapping': 0.3.13 '@jridgewell/trace-mapping': 0.3.31 '@jridgewell/resolve-uri@3.1.2': {} '@jridgewell/source-map@0.3.11': dependencies: 
'@jridgewell/gen-mapping': 0.3.13 '@jridgewell/trace-mapping': 0.3.31 '@jridgewell/sourcemap-codec@1.5.5': {} '@jridgewell/trace-mapping@0.3.31': dependencies: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.5 '@jsonjoy.com/base64@1.1.2(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/base64@17.67.0(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/buffers@1.2.1(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/buffers@17.67.0(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/codegen@1.0.0(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/codegen@17.67.0(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/fs-core@4.56.10(tslib@2.8.1)': dependencies: '@jsonjoy.com/fs-node-builtins': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) thingies: 2.5.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/fs-fsa@4.56.10(tslib@2.8.1)': dependencies: '@jsonjoy.com/fs-core': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-builtins': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) thingies: 2.5.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/fs-node-builtins@4.56.10(tslib@2.8.1)': dependencies: tslib: 2.8.1 '@jsonjoy.com/fs-node-to-fsa@4.56.10(tslib@2.8.1)': dependencies: '@jsonjoy.com/fs-fsa': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-builtins': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/fs-node-utils@4.56.10(tslib@2.8.1)': dependencies: '@jsonjoy.com/fs-node-builtins': 4.56.10(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/fs-node@4.56.10(tslib@2.8.1)': dependencies: '@jsonjoy.com/fs-core': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-builtins': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-print': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-snapshot': 4.56.10(tslib@2.8.1) glob-to-regex.js: 1.2.0(tslib@2.8.1) thingies: 2.5.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/fs-print@4.56.10(tslib@2.8.1)': dependencies: 
'@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) tree-dump: 1.1.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/fs-snapshot@4.56.10(tslib@2.8.1)': dependencies: '@jsonjoy.com/buffers': 17.67.0(tslib@2.8.1) '@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) '@jsonjoy.com/json-pack': 17.67.0(tslib@2.8.1) '@jsonjoy.com/util': 17.67.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/json-pack@1.21.0(tslib@2.8.1)': dependencies: '@jsonjoy.com/base64': 1.1.2(tslib@2.8.1) '@jsonjoy.com/buffers': 1.2.1(tslib@2.8.1) '@jsonjoy.com/codegen': 1.0.0(tslib@2.8.1) '@jsonjoy.com/json-pointer': 1.0.2(tslib@2.8.1) '@jsonjoy.com/util': 1.9.0(tslib@2.8.1) hyperdyperid: 1.2.0 thingies: 2.5.0(tslib@2.8.1) tree-dump: 1.1.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/json-pack@17.67.0(tslib@2.8.1)': dependencies: '@jsonjoy.com/base64': 17.67.0(tslib@2.8.1) '@jsonjoy.com/buffers': 17.67.0(tslib@2.8.1) '@jsonjoy.com/codegen': 17.67.0(tslib@2.8.1) '@jsonjoy.com/json-pointer': 17.67.0(tslib@2.8.1) '@jsonjoy.com/util': 17.67.0(tslib@2.8.1) hyperdyperid: 1.2.0 thingies: 2.5.0(tslib@2.8.1) tree-dump: 1.1.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/json-pointer@1.0.2(tslib@2.8.1)': dependencies: '@jsonjoy.com/codegen': 1.0.0(tslib@2.8.1) '@jsonjoy.com/util': 1.9.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/json-pointer@17.67.0(tslib@2.8.1)': dependencies: '@jsonjoy.com/util': 17.67.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/util@1.9.0(tslib@2.8.1)': dependencies: '@jsonjoy.com/buffers': 1.2.1(tslib@2.8.1) '@jsonjoy.com/codegen': 1.0.0(tslib@2.8.1) tslib: 2.8.1 '@jsonjoy.com/util@17.67.0(tslib@2.8.1)': dependencies: '@jsonjoy.com/buffers': 17.67.0(tslib@2.8.1) '@jsonjoy.com/codegen': 17.67.0(tslib@2.8.1) tslib: 2.8.1 '@leichtgewicht/ip-codec@2.0.5': {} '@listr2/prompt-adapter-inquirer@3.0.5(@inquirer/prompts@7.10.1(@types/node@20.19.33))(@types/node@20.19.33)(listr2@9.0.5)': dependencies: '@inquirer/prompts': 7.10.1(@types/node@20.19.33) '@inquirer/type': 3.0.10(@types/node@20.19.33) listr2: 9.0.5 
transitivePeerDependencies: - '@types/node' '@lmdb/lmdb-darwin-arm64@3.4.4': optional: true '@lmdb/lmdb-darwin-x64@3.4.4': optional: true '@lmdb/lmdb-linux-arm64@3.4.4': optional: true '@lmdb/lmdb-linux-arm@3.4.4': optional: true '@lmdb/lmdb-linux-x64@3.4.4': optional: true '@lmdb/lmdb-win32-arm64@3.4.4': optional: true '@lmdb/lmdb-win32-x64@3.4.4': optional: true '@modelcontextprotocol/sdk@1.25.2(hono@4.11.9)(zod@4.3.5)': dependencies: '@hono/node-server': 1.19.9(hono@4.11.9) ajv: 8.17.1 ajv-formats: 3.0.1(ajv@8.17.1) content-type: 1.0.5 cors: 2.8.6 cross-spawn: 7.0.6 eventsource: 3.0.7 eventsource-parser: 3.0.6 express: 5.2.1 express-rate-limit: 7.5.1(express@5.2.1) jose: 6.1.3 json-schema-typed: 8.0.2 pkce-challenge: 5.0.1 raw-body: 3.0.2 zod: 4.3.5 zod-to-json-schema: 3.25.1(zod@4.3.5) transitivePeerDependencies: - hono - supports-color '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': optional: true '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': optional: true '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': optional: true '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': optional: true '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': optional: true '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': optional: true '@napi-rs/nice-android-arm-eabi@1.1.1': optional: true '@napi-rs/nice-android-arm64@1.1.1': optional: true '@napi-rs/nice-darwin-arm64@1.1.1': optional: true '@napi-rs/nice-darwin-x64@1.1.1': optional: true '@napi-rs/nice-freebsd-x64@1.1.1': optional: true '@napi-rs/nice-linux-arm-gnueabihf@1.1.1': optional: true '@napi-rs/nice-linux-arm64-gnu@1.1.1': optional: true '@napi-rs/nice-linux-arm64-musl@1.1.1': optional: true '@napi-rs/nice-linux-ppc64-gnu@1.1.1': optional: true '@napi-rs/nice-linux-riscv64-gnu@1.1.1': optional: true '@napi-rs/nice-linux-s390x-gnu@1.1.1': optional: true '@napi-rs/nice-linux-x64-gnu@1.1.1': optional: true '@napi-rs/nice-linux-x64-musl@1.1.1': optional: true 
'@napi-rs/nice-openharmony-arm64@1.1.1': optional: true '@napi-rs/nice-win32-arm64-msvc@1.1.1': optional: true '@napi-rs/nice-win32-ia32-msvc@1.1.1': optional: true '@napi-rs/nice-win32-x64-msvc@1.1.1': optional: true '@napi-rs/nice@1.1.1': optionalDependencies: '@napi-rs/nice-android-arm-eabi': 1.1.1 '@napi-rs/nice-android-arm64': 1.1.1 '@napi-rs/nice-darwin-arm64': 1.1.1 '@napi-rs/nice-darwin-x64': 1.1.1 '@napi-rs/nice-freebsd-x64': 1.1.1 '@napi-rs/nice-linux-arm-gnueabihf': 1.1.1 '@napi-rs/nice-linux-arm64-gnu': 1.1.1 '@napi-rs/nice-linux-arm64-musl': 1.1.1 '@napi-rs/nice-linux-ppc64-gnu': 1.1.1 '@napi-rs/nice-linux-riscv64-gnu': 1.1.1 '@napi-rs/nice-linux-s390x-gnu': 1.1.1 '@napi-rs/nice-linux-x64-gnu': 1.1.1 '@napi-rs/nice-linux-x64-musl': 1.1.1 '@napi-rs/nice-openharmony-arm64': 1.1.1 '@napi-rs/nice-win32-arm64-msvc': 1.1.1 '@napi-rs/nice-win32-ia32-msvc': 1.1.1 '@napi-rs/nice-win32-x64-msvc': 1.1.1 optional: true '@napi-rs/wasm-runtime@1.1.1': dependencies: '@emnapi/core': 1.8.1 '@emnapi/runtime': 1.8.1 '@tybys/wasm-util': 0.10.1 optional: true '@ngtools/webpack@21.1.0-rc.0(@angular/compiler-cli@in-existing-linked-by-bazel)(typescript@5.9.3)(webpack@5.104.1(esbuild@0.27.2))': dependencies: '@angular/compiler-cli': link:in-existing-linked-by-bazel typescript: 5.9.3 webpack: 5.104.1(esbuild@0.27.2) '@npmcli/agent@4.0.0': dependencies: agent-base: 7.1.4 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 lru-cache: 11.2.6 socks-proxy-agent: 8.0.5 transitivePeerDependencies: - supports-color '@npmcli/fs@5.0.0': dependencies: semver: 7.7.3 '@npmcli/git@7.0.1': dependencies: '@npmcli/promise-spawn': 9.0.1 ini: 6.0.0 lru-cache: 11.2.6 npm-pick-manifest: 11.0.3 proc-log: 6.1.0 promise-retry: 2.0.1 semver: 7.7.3 which: 6.0.1 '@npmcli/installed-package-contents@4.0.0': dependencies: npm-bundled: 5.0.0 npm-normalize-package-bin: 5.0.0 '@npmcli/node-gyp@5.0.0': {} '@npmcli/package-json@7.0.4': dependencies: '@npmcli/git': 7.0.1 glob: 13.0.2 hosted-git-info: 9.0.2 
json-parse-even-better-errors: 5.0.0 proc-log: 6.1.0 semver: 7.7.3 validate-npm-package-license: 3.0.4 '@npmcli/promise-spawn@9.0.1': dependencies: which: 6.0.1 '@npmcli/redact@4.0.0': {} '@npmcli/run-script@10.0.3': dependencies: '@npmcli/node-gyp': 5.0.0 '@npmcli/package-json': 7.0.4 '@npmcli/promise-spawn': 9.0.1 node-gyp: 12.2.0 proc-log: 6.1.0 which: 6.0.1 transitivePeerDependencies: - supports-color '@oxc-project/types@0.106.0': {} '@parcel/watcher-android-arm64@2.5.6': optional: true '@parcel/watcher-darwin-arm64@2.5.6': optional: true '@parcel/watcher-darwin-x64@2.5.6': optional: true '@parcel/watcher-freebsd-x64@2.5.6': optional: true '@parcel/watcher-linux-arm-glibc@2.5.6': optional: true '@parcel/watcher-linux-arm-musl@2.5.6': optional: true '@parcel/watcher-linux-arm64-glibc@2.5.6': optional: true '@parcel/watcher-linux-arm64-musl@2.5.6': optional: true '@parcel/watcher-linux-x64-glibc@2.5.6': optional: true '@parcel/watcher-linux-x64-musl@2.5.6': optional: true '@parcel/watcher-win32-arm64@2.5.6': optional: true '@parcel/watcher-win32-ia32@2.5.6': optional: true '@parcel/watcher-win32-x64@2.5.6': optional: true '@parcel/watcher@2.5.6': dependencies: detect-libc: 2.1.2 is-glob: 4.0.3 node-addon-api: 7.1.1 picomatch: 4.0.3 optionalDependencies: '@parcel/watcher-android-arm64': 2.5.6 '@parcel/watcher-darwin-arm64': 2.5.6 '@parcel/watcher-darwin-x64': 2.5.6 '@parcel/watcher-freebsd-x64': 2.5.6 '@parcel/watcher-linux-arm-glibc': 2.5.6 '@parcel/watcher-linux-arm-musl': 2.5.6 '@parcel/watcher-linux-arm64-glibc': 2.5.6 '@parcel/watcher-linux-arm64-musl': 2.5.6 '@parcel/watcher-linux-x64-glibc': 2.5.6 '@parcel/watcher-linux-x64-musl': 2.5.6 '@parcel/watcher-win32-arm64': 2.5.6 '@parcel/watcher-win32-ia32': 2.5.6 '@parcel/watcher-win32-x64': 2.5.6 optional: true '@rolldown/binding-android-arm64@1.0.0-beta.58': optional: true '@rolldown/binding-darwin-arm64@1.0.0-beta.58': optional: true '@rolldown/binding-darwin-x64@1.0.0-beta.58': optional: true 
'@rolldown/binding-freebsd-x64@1.0.0-beta.58': optional: true '@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.58': optional: true '@rolldown/binding-linux-arm64-gnu@1.0.0-beta.58': optional: true '@rolldown/binding-linux-arm64-musl@1.0.0-beta.58': optional: true '@rolldown/binding-linux-x64-gnu@1.0.0-beta.58': optional: true '@rolldown/binding-linux-x64-musl@1.0.0-beta.58': optional: true '@rolldown/binding-openharmony-arm64@1.0.0-beta.58': optional: true '@rolldown/binding-wasm32-wasi@1.0.0-beta.58': dependencies: '@napi-rs/wasm-runtime': 1.1.1 optional: true '@rolldown/binding-win32-arm64-msvc@1.0.0-beta.58': optional: true '@rolldown/binding-win32-x64-msvc@1.0.0-beta.58': optional: true '@rolldown/pluginutils@1.0.0-beta.58': {} '@rollup/rollup-android-arm-eabi@4.57.1': optional: true '@rollup/rollup-android-arm64@4.57.1': optional: true '@rollup/rollup-darwin-arm64@4.57.1': optional: true '@rollup/rollup-darwin-x64@4.57.1': optional: true '@rollup/rollup-freebsd-arm64@4.57.1': optional: true '@rollup/rollup-freebsd-x64@4.57.1': optional: true '@rollup/rollup-linux-arm-gnueabihf@4.57.1': optional: true '@rollup/rollup-linux-arm-musleabihf@4.57.1': optional: true '@rollup/rollup-linux-arm64-gnu@4.57.1': optional: true '@rollup/rollup-linux-arm64-musl@4.57.1': optional: true '@rollup/rollup-linux-loong64-gnu@4.57.1': optional: true '@rollup/rollup-linux-loong64-musl@4.57.1': optional: true '@rollup/rollup-linux-ppc64-gnu@4.57.1': optional: true '@rollup/rollup-linux-ppc64-musl@4.57.1': optional: true '@rollup/rollup-linux-riscv64-gnu@4.57.1': optional: true '@rollup/rollup-linux-riscv64-musl@4.57.1': optional: true '@rollup/rollup-linux-s390x-gnu@4.57.1': optional: true '@rollup/rollup-linux-x64-gnu@4.57.1': optional: true '@rollup/rollup-linux-x64-musl@4.57.1': optional: true '@rollup/rollup-openbsd-x64@4.57.1': optional: true '@rollup/rollup-openharmony-arm64@4.57.1': optional: true '@rollup/rollup-win32-arm64-msvc@4.57.1': optional: true 
'@rollup/rollup-win32-ia32-msvc@4.57.1': optional: true '@rollup/rollup-win32-x64-gnu@4.57.1': optional: true '@rollup/rollup-win32-x64-msvc@4.57.1': optional: true '@schematics/angular@21.1.0-rc.0': dependencies: '@angular-devkit/core': 21.1.0-rc.0 '@angular-devkit/schematics': 21.1.0-rc.0 jsonc-parser: 3.3.1 transitivePeerDependencies: - chokidar '@sigstore/bundle@4.0.0': dependencies: '@sigstore/protobuf-specs': 0.5.0 '@sigstore/core@3.1.0': {} '@sigstore/protobuf-specs@0.5.0': {} '@sigstore/sign@4.1.0': dependencies: '@sigstore/bundle': 4.0.0 '@sigstore/core': 3.1.0 '@sigstore/protobuf-specs': 0.5.0 make-fetch-happen: 15.0.3 proc-log: 6.1.0 promise-retry: 2.0.1 transitivePeerDependencies: - supports-color '@sigstore/tuf@4.0.1': dependencies: '@sigstore/protobuf-specs': 0.5.0 tuf-js: 4.1.0 transitivePeerDependencies: - supports-color '@sigstore/verify@3.1.0': dependencies: '@sigstore/bundle': 4.0.0 '@sigstore/core': 3.1.0 '@sigstore/protobuf-specs': 0.5.0 '@tufjs/canonical-json@2.0.0': {} '@tufjs/models@4.1.0': dependencies: '@tufjs/canonical-json': 2.0.0 minimatch: 10.1.2 '@tybys/wasm-util@0.10.1': dependencies: tslib: 2.8.1 optional: true '@types/body-parser@1.19.6': dependencies: '@types/connect': 3.4.38 '@types/node': 20.19.33 '@types/bonjour@3.5.13': dependencies: '@types/node': 20.19.33 '@types/connect-history-api-fallback@1.5.4': dependencies: '@types/express-serve-static-core': 4.19.8 '@types/node': 20.19.33 '@types/connect@3.4.38': dependencies: '@types/node': 20.19.33 '@types/eslint-scope@3.7.7': dependencies: '@types/eslint': 9.6.1 '@types/estree': 1.0.8 '@types/eslint@9.6.1': dependencies: '@types/estree': 1.0.8 '@types/json-schema': 7.0.15 '@types/estree@1.0.8': {} '@types/express-serve-static-core@4.19.8': dependencies: '@types/node': 20.19.33 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 '@types/express@4.17.25': dependencies: '@types/body-parser': 1.19.6 '@types/express-serve-static-core': 4.19.8 '@types/qs': 6.14.0 
'@types/serve-static': 1.15.10 '@types/http-errors@2.0.5': {} '@types/http-proxy@1.17.17': dependencies: '@types/node': 20.19.33 '@types/json-schema@7.0.15': {} '@types/mime@1.3.5': {} '@types/node-forge@1.3.14': dependencies: '@types/node': 20.19.33 '@types/node@20.19.33': dependencies: undici-types: 6.21.0 '@types/qs@6.14.0': {} '@types/range-parser@1.2.7': {} '@types/retry@0.12.2': {} '@types/send@0.17.6': dependencies: '@types/mime': 1.3.5 '@types/node': 20.19.33 '@types/send@1.2.1': dependencies: '@types/node': 20.19.33 '@types/serve-index@1.9.4': dependencies: '@types/express': 4.17.25 '@types/serve-static@1.15.10': dependencies: '@types/http-errors': 2.0.5 '@types/node': 20.19.33 '@types/send': 0.17.6 '@types/sockjs@0.3.36': dependencies: '@types/node': 20.19.33 '@types/ws@8.18.1': dependencies: '@types/node': 20.19.33 '@vitejs/plugin-basic-ssl@2.1.0(vite@7.3.0(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(sass@1.97.1)(terser@5.44.1))': dependencies: vite: 7.3.0(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(sass@1.97.1)(terser@5.44.1) '@webassemblyjs/ast@1.14.1': dependencies: '@webassemblyjs/helper-numbers': 1.13.2 '@webassemblyjs/helper-wasm-bytecode': 1.13.2 '@webassemblyjs/floating-point-hex-parser@1.13.2': {} '@webassemblyjs/helper-api-error@1.13.2': {} '@webassemblyjs/helper-buffer@1.14.1': {} '@webassemblyjs/helper-numbers@1.13.2': dependencies: '@webassemblyjs/floating-point-hex-parser': 1.13.2 '@webassemblyjs/helper-api-error': 1.13.2 '@xtuc/long': 4.2.2 '@webassemblyjs/helper-wasm-bytecode@1.13.2': {} '@webassemblyjs/helper-wasm-section@1.14.1': dependencies: '@webassemblyjs/ast': 1.14.1 '@webassemblyjs/helper-buffer': 1.14.1 '@webassemblyjs/helper-wasm-bytecode': 1.13.2 '@webassemblyjs/wasm-gen': 1.14.1 '@webassemblyjs/ieee754@1.13.2': dependencies: '@xtuc/ieee754': 1.2.0 '@webassemblyjs/leb128@1.13.2': dependencies: '@xtuc/long': 4.2.2 '@webassemblyjs/utf8@1.13.2': {} '@webassemblyjs/wasm-edit@1.14.1': dependencies: '@webassemblyjs/ast': 1.14.1 
'@webassemblyjs/helper-buffer': 1.14.1 '@webassemblyjs/helper-wasm-bytecode': 1.13.2 '@webassemblyjs/helper-wasm-section': 1.14.1 '@webassemblyjs/wasm-gen': 1.14.1 '@webassemblyjs/wasm-opt': 1.14.1 '@webassemblyjs/wasm-parser': 1.14.1 '@webassemblyjs/wast-printer': 1.14.1 '@webassemblyjs/wasm-gen@1.14.1': dependencies: '@webassemblyjs/ast': 1.14.1 '@webassemblyjs/helper-wasm-bytecode': 1.13.2 '@webassemblyjs/ieee754': 1.13.2 '@webassemblyjs/leb128': 1.13.2 '@webassemblyjs/utf8': 1.13.2 '@webassemblyjs/wasm-opt@1.14.1': dependencies: '@webassemblyjs/ast': 1.14.1 '@webassemblyjs/helper-buffer': 1.14.1 '@webassemblyjs/wasm-gen': 1.14.1 '@webassemblyjs/wasm-parser': 1.14.1 '@webassemblyjs/wasm-parser@1.14.1': dependencies: '@webassemblyjs/ast': 1.14.1 '@webassemblyjs/helper-api-error': 1.13.2 '@webassemblyjs/helper-wasm-bytecode': 1.13.2 '@webassemblyjs/ieee754': 1.13.2 '@webassemblyjs/leb128': 1.13.2 '@webassemblyjs/utf8': 1.13.2 '@webassemblyjs/wast-printer@1.14.1': dependencies: '@webassemblyjs/ast': 1.14.1 '@xtuc/long': 4.2.2 '@xtuc/ieee754@1.2.0': {} '@xtuc/long@4.2.2': {} '@yarnpkg/lockfile@1.1.0': {} abbrev@4.0.0: {} accepts@1.3.8: dependencies: mime-types: 2.1.35 negotiator: 0.6.3 accepts@2.0.0: dependencies: mime-types: 3.0.2 negotiator: 1.0.0 acorn-import-phases@1.0.4(acorn@8.15.0): dependencies: acorn: 8.15.0 acorn@8.15.0: {} adjust-sourcemap-loader@4.0.0: dependencies: loader-utils: 2.0.4 regex-parser: 2.3.1 agent-base@7.1.4: {} ajv-formats@2.1.1(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 ajv-formats@3.0.1(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 ajv-keywords@5.1.0(ajv@8.17.1): dependencies: ajv: 8.17.1 fast-deep-equal: 3.1.3 ajv@8.17.1: dependencies: fast-deep-equal: 3.1.3 fast-uri: 3.1.0 json-schema-traverse: 1.0.0 require-from-string: 2.0.2 algoliasearch@5.46.2: dependencies: '@algolia/abtesting': 1.12.2 '@algolia/client-abtesting': 5.46.2 '@algolia/client-analytics': 5.46.2 '@algolia/client-common': 5.46.2 '@algolia/client-insights': 5.46.2 
'@algolia/client-personalization': 5.46.2 '@algolia/client-query-suggestions': 5.46.2 '@algolia/client-search': 5.46.2 '@algolia/ingestion': 1.46.2 '@algolia/monitoring': 1.46.2 '@algolia/recommend': 5.46.2 '@algolia/requester-browser-xhr': 5.46.2 '@algolia/requester-fetch': 5.46.2 '@algolia/requester-node-http': 5.46.2 ansi-colors@4.1.3: {} ansi-escapes@7.3.0: dependencies: environment: 1.1.0 ansi-html-community@0.0.8: {} ansi-regex@5.0.1: {} ansi-regex@6.2.2: {} ansi-styles@4.3.0: dependencies: color-convert: 2.0.1 ansi-styles@6.2.3: {} anymatch@3.1.3: dependencies: normalize-path: 3.0.0 picomatch: 2.3.1 argparse@2.0.1: {} array-flatten@1.1.1: {} autoprefixer@10.4.23(postcss@8.5.6): dependencies: browserslist: 4.28.1 caniuse-lite: 1.0.30001769 fraction.js: 5.3.4 picocolors: 1.1.1 postcss: 8.5.6 postcss-value-parser: 4.2.0 babel-loader@10.0.0(@babel/core@7.28.5)(webpack@5.104.1(esbuild@0.27.2)): dependencies: '@babel/core': 7.28.5 find-up: 5.0.0 webpack: 5.104.1(esbuild@0.27.2) babel-plugin-polyfill-corejs2@0.4.15(@babel/core@7.28.5): dependencies: '@babel/compat-data': 7.29.0 '@babel/core': 7.28.5 '@babel/helper-define-polyfill-provider': 0.6.6(@babel/core@7.28.5) semver: 6.3.1 transitivePeerDependencies: - supports-color babel-plugin-polyfill-corejs3@0.13.0(@babel/core@7.28.5): dependencies: '@babel/core': 7.28.5 '@babel/helper-define-polyfill-provider': 0.6.6(@babel/core@7.28.5) core-js-compat: 3.48.0 transitivePeerDependencies: - supports-color babel-plugin-polyfill-regenerator@0.6.6(@babel/core@7.28.5): dependencies: '@babel/core': 7.28.5 '@babel/helper-define-polyfill-provider': 0.6.6(@babel/core@7.28.5) transitivePeerDependencies: - supports-color baseline-browser-mapping@2.9.19: {} batch@0.6.1: {} beasties@0.3.5: dependencies: css-select: 6.0.0 css-what: 7.0.0 dom-serializer: 2.0.0 domhandler: 5.0.3 htmlparser2: 10.1.0 picocolors: 1.1.1 postcss: 8.5.6 postcss-media-query-parser: 0.2.3 big.js@5.2.2: {} binary-extensions@2.3.0: {} body-parser@1.20.4: 
dependencies: bytes: 3.1.2 content-type: 1.0.5 debug: 2.6.9 depd: 2.0.0 destroy: 1.2.0 http-errors: 2.0.1 iconv-lite: 0.4.24 on-finished: 2.4.1 qs: 6.14.1 raw-body: 2.5.3 type-is: 1.6.18 unpipe: 1.0.0 transitivePeerDependencies: - supports-color body-parser@2.2.2: dependencies: bytes: 3.1.2 content-type: 1.0.5 debug: 4.4.3 http-errors: 2.0.1 iconv-lite: 0.7.2 on-finished: 2.4.1 qs: 6.14.1 raw-body: 3.0.2 type-is: 2.0.1 transitivePeerDependencies: - supports-color bonjour-service@1.3.0: dependencies: fast-deep-equal: 3.1.3 multicast-dns: 7.2.5 boolbase@1.0.0: {} braces@3.0.3: dependencies: fill-range: 7.1.1 browserslist@4.28.1: dependencies: baseline-browser-mapping: 2.9.19 caniuse-lite: 1.0.30001769 electron-to-chromium: 1.5.286 node-releases: 2.0.27 update-browserslist-db: 1.2.3(browserslist@4.28.1) buffer-from@1.1.2: {} bundle-name@4.1.0: dependencies: run-applescript: 7.1.0 bytes@3.1.2: {} cacache@20.0.3: dependencies: '@npmcli/fs': 5.0.0 fs-minipass: 3.0.3 glob: 13.0.2 lru-cache: 11.2.6 minipass: 7.1.2 minipass-collect: 2.0.1 minipass-flush: 1.0.5 minipass-pipeline: 1.2.4 p-map: 7.0.4 ssri: 13.0.1 unique-filename: 5.0.0 call-bind-apply-helpers@1.0.2: dependencies: es-errors: 1.3.0 function-bind: 1.1.2 call-bound@1.0.4: dependencies: call-bind-apply-helpers: 1.0.2 get-intrinsic: 1.3.0 callsites@3.1.0: {} caniuse-lite@1.0.30001769: {} chalk@5.6.2: {} chardet@2.1.1: {} chokidar@3.6.0: dependencies: anymatch: 3.1.3 braces: 3.0.3 glob-parent: 5.1.2 is-binary-path: 2.1.0 is-glob: 4.0.3 normalize-path: 3.0.0 readdirp: 3.6.0 optionalDependencies: fsevents: 2.3.3 chokidar@4.0.3: dependencies: readdirp: 4.1.2 chownr@3.0.0: {} chrome-trace-event@1.0.4: {} cli-cursor@5.0.0: dependencies: restore-cursor: 5.1.0 cli-spinners@3.4.0: {} cli-truncate@5.1.1: dependencies: slice-ansi: 7.1.2 string-width: 8.1.1 cli-width@4.1.0: {} cliui@9.0.1: dependencies: string-width: 7.2.0 strip-ansi: 7.1.2 wrap-ansi: 9.0.2 clone-deep@4.0.1: dependencies: is-plain-object: 2.0.4 kind-of: 6.0.3 
shallow-clone: 3.0.1 color-convert@2.0.1: dependencies: color-name: 1.1.4 color-name@1.1.4: {} colorette@2.0.20: {} commander@2.20.3: {} compressible@2.0.18: dependencies: mime-db: 1.54.0 compression@1.8.1: dependencies: bytes: 3.1.2 compressible: 2.0.18 debug: 2.6.9 negotiator: 0.6.4 on-headers: 1.1.0 safe-buffer: 5.2.1 vary: 1.1.2 transitivePeerDependencies: - supports-color connect-history-api-fallback@2.0.0: {} content-disposition@0.5.4: dependencies: safe-buffer: 5.2.1 content-disposition@1.0.1: {} content-type@1.0.5: {} convert-source-map@1.9.0: {} convert-source-map@2.0.0: {} cookie-signature@1.0.7: {} cookie-signature@1.2.2: {} cookie@0.7.2: {} copy-anything@2.0.6: dependencies: is-what: 3.14.1 copy-webpack-plugin@13.0.1(webpack@5.104.1(esbuild@0.27.2)): dependencies: glob-parent: 6.0.2 normalize-path: 3.0.0 schema-utils: 4.3.3 serialize-javascript: 6.0.2 tinyglobby: 0.2.15 webpack: 5.104.1(esbuild@0.27.2) core-js-compat@3.48.0: dependencies: browserslist: 4.28.1 core-util-is@1.0.3: {} cors@2.8.6: dependencies: object-assign: 4.1.1 vary: 1.1.2 cosmiconfig@9.0.0(typescript@5.9.3): dependencies: env-paths: 2.2.1 import-fresh: 3.3.1 js-yaml: 4.1.1 parse-json: 5.2.0 optionalDependencies: typescript: 5.9.3 cross-spawn@7.0.6: dependencies: path-key: 3.1.1 shebang-command: 2.0.0 which: 2.0.2 css-loader@7.1.2(webpack@5.104.1(esbuild@0.27.2)): dependencies: icss-utils: 5.1.0(postcss@8.5.6) postcss: 8.5.6 postcss-modules-extract-imports: 3.1.0(postcss@8.5.6) postcss-modules-local-by-default: 4.2.0(postcss@8.5.6) postcss-modules-scope: 3.2.1(postcss@8.5.6) postcss-modules-values: 4.0.0(postcss@8.5.6) postcss-value-parser: 4.2.0 semver: 7.7.3 optionalDependencies: webpack: 5.104.1(esbuild@0.27.2) css-select@6.0.0: dependencies: boolbase: 1.0.0 css-what: 7.0.0 domhandler: 5.0.3 domutils: 3.2.2 nth-check: 2.1.1 css-what@7.0.0: {} cssesc@3.0.0: {} debug@2.6.9: dependencies: ms: 2.0.0 debug@4.4.3: dependencies: ms: 2.1.3 default-browser-id@5.0.1: {} default-browser@5.5.0: 
dependencies: bundle-name: 4.1.0 default-browser-id: 5.0.1 define-lazy-prop@3.0.0: {} depd@1.1.2: {} depd@2.0.0: {} destroy@1.2.0: {} detect-libc@2.1.2: optional: true detect-node@2.1.0: {} dns-packet@5.6.1: dependencies: '@leichtgewicht/ip-codec': 2.0.5 dom-serializer@2.0.0: dependencies: domelementtype: 2.3.0 domhandler: 5.0.3 entities: 4.5.0 domelementtype@2.3.0: {} domhandler@5.0.3: dependencies: domelementtype: 2.3.0 domutils@3.2.2: dependencies: dom-serializer: 2.0.0 domelementtype: 2.3.0 domhandler: 5.0.3 dunder-proto@1.0.1: dependencies: call-bind-apply-helpers: 1.0.2 es-errors: 1.3.0 gopd: 1.2.0 ee-first@1.1.1: {} electron-to-chromium@1.5.286: {} emoji-regex@10.6.0: {} emoji-regex@8.0.0: {} emojis-list@3.0.0: {} encodeurl@2.0.0: {} encoding@0.1.13: dependencies: iconv-lite: 0.6.3 optional: true enhanced-resolve@5.19.0: dependencies: graceful-fs: 4.2.11 tapable: 2.3.0 entities@4.5.0: {} entities@6.0.1: {} entities@7.0.1: {} env-paths@2.2.1: {} environment@1.1.0: {} err-code@2.0.3: {} errno@0.1.8: dependencies: prr: 1.0.1 optional: true error-ex@1.3.4: dependencies: is-arrayish: 0.2.1 es-define-property@1.0.1: {} es-errors@1.3.0: {} es-module-lexer@2.0.0: {} es-object-atoms@1.1.1: dependencies: es-errors: 1.3.0 esbuild-wasm@0.27.2: {} esbuild@0.27.2: optionalDependencies: '@esbuild/aix-ppc64': 0.27.2 '@esbuild/android-arm': 0.27.2 '@esbuild/android-arm64': 0.27.2 '@esbuild/android-x64': 0.27.2 '@esbuild/darwin-arm64': 0.27.2 '@esbuild/darwin-x64': 0.27.2 '@esbuild/freebsd-arm64': 0.27.2 '@esbuild/freebsd-x64': 0.27.2 '@esbuild/linux-arm': 0.27.2 '@esbuild/linux-arm64': 0.27.2 '@esbuild/linux-ia32': 0.27.2 '@esbuild/linux-loong64': 0.27.2 '@esbuild/linux-mips64el': 0.27.2 '@esbuild/linux-ppc64': 0.27.2 '@esbuild/linux-riscv64': 0.27.2 '@esbuild/linux-s390x': 0.27.2 '@esbuild/linux-x64': 0.27.2 '@esbuild/netbsd-arm64': 0.27.2 '@esbuild/netbsd-x64': 0.27.2 '@esbuild/openbsd-arm64': 0.27.2 '@esbuild/openbsd-x64': 0.27.2 '@esbuild/openharmony-arm64': 0.27.2 
'@esbuild/sunos-x64': 0.27.2 '@esbuild/win32-arm64': 0.27.2 '@esbuild/win32-ia32': 0.27.2 '@esbuild/win32-x64': 0.27.2 escalade@3.2.0: {} escape-html@1.0.3: {} eslint-scope@5.1.1: dependencies: esrecurse: 4.3.0 estraverse: 4.3.0 esrecurse@4.3.0: dependencies: estraverse: 5.3.0 estraverse@4.3.0: {} estraverse@5.3.0: {} esutils@2.0.3: {} etag@1.8.1: {} eventemitter3@4.0.7: {} eventemitter3@5.0.4: {} events@3.3.0: {} eventsource-parser@3.0.6: {} eventsource@3.0.7: dependencies: eventsource-parser: 3.0.6 exponential-backoff@3.1.3: {} express-rate-limit@7.5.1(express@5.2.1): dependencies: express: 5.2.1 express@4.22.1: dependencies: accepts: 1.3.8 array-flatten: 1.1.1 body-parser: 1.20.4 content-disposition: 0.5.4 content-type: 1.0.5 cookie: 0.7.2 cookie-signature: 1.0.7 debug: 2.6.9 depd: 2.0.0 encodeurl: 2.0.0 escape-html: 1.0.3 etag: 1.8.1 finalhandler: 1.3.2 fresh: 0.5.2 http-errors: 2.0.1 merge-descriptors: 1.0.3 methods: 1.1.2 on-finished: 2.4.1 parseurl: 1.3.3 path-to-regexp: 0.1.12 proxy-addr: 2.0.7 qs: 6.14.1 range-parser: 1.2.1 safe-buffer: 5.2.1 send: 0.19.2 serve-static: 1.16.3 setprototypeof: 1.2.0 statuses: 2.0.2 type-is: 1.6.18 utils-merge: 1.0.1 vary: 1.1.2 transitivePeerDependencies: - supports-color express@5.2.1: dependencies: accepts: 2.0.0 body-parser: 2.2.2 content-disposition: 1.0.1 content-type: 1.0.5 cookie: 0.7.2 cookie-signature: 1.2.2 debug: 4.4.3 depd: 2.0.0 encodeurl: 2.0.0 escape-html: 1.0.3 etag: 1.8.1 finalhandler: 2.1.1 fresh: 2.0.0 http-errors: 2.0.1 merge-descriptors: 2.0.0 mime-types: 3.0.2 on-finished: 2.4.1 once: 1.4.0 parseurl: 1.3.3 proxy-addr: 2.0.7 qs: 6.14.1 range-parser: 1.2.1 router: 2.2.0 send: 1.2.1 serve-static: 2.2.1 statuses: 2.0.2 type-is: 2.0.1 vary: 1.1.2 transitivePeerDependencies: - supports-color fast-deep-equal@3.1.3: {} fast-uri@3.1.0: {} faye-websocket@0.11.4: dependencies: websocket-driver: 0.7.4 fdir@6.5.0(picomatch@4.0.3): optionalDependencies: picomatch: 4.0.3 fill-range@7.1.1: dependencies: to-regex-range: 
5.0.1 finalhandler@1.3.2: dependencies: debug: 2.6.9 encodeurl: 2.0.0 escape-html: 1.0.3 on-finished: 2.4.1 parseurl: 1.3.3 statuses: 2.0.2 unpipe: 1.0.0 transitivePeerDependencies: - supports-color finalhandler@2.1.1: dependencies: debug: 4.4.3 encodeurl: 2.0.0 escape-html: 1.0.3 on-finished: 2.4.1 parseurl: 1.3.3 statuses: 2.0.2 transitivePeerDependencies: - supports-color find-up@5.0.0: dependencies: locate-path: 6.0.0 path-exists: 4.0.0 flat@5.0.2: {} follow-redirects@1.15.11(debug@4.4.3): optionalDependencies: debug: 4.4.3 forwarded@0.2.0: {} fraction.js@5.3.4: {} fresh@0.5.2: {} fresh@2.0.0: {} fs-minipass@3.0.3: dependencies: minipass: 7.1.2 fsevents@2.3.3: optional: true function-bind@1.1.2: {} gensync@1.0.0-beta.2: {} get-caller-file@2.0.5: {} get-east-asian-width@1.4.0: {} get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 es-define-property: 1.0.1 es-errors: 1.3.0 es-object-atoms: 1.1.1 function-bind: 1.1.2 get-proto: 1.0.1 gopd: 1.2.0 has-symbols: 1.1.0 hasown: 2.0.2 math-intrinsics: 1.1.0 get-proto@1.0.1: dependencies: dunder-proto: 1.0.1 es-object-atoms: 1.1.1 glob-parent@5.1.2: dependencies: is-glob: 4.0.3 glob-parent@6.0.2: dependencies: is-glob: 4.0.3 glob-to-regex.js@1.2.0(tslib@2.8.1): dependencies: tslib: 2.8.1 glob-to-regexp@0.4.1: {} glob@13.0.2: dependencies: minimatch: 10.1.2 minipass: 7.1.2 path-scurry: 2.0.1 gopd@1.2.0: {} graceful-fs@4.2.11: {} handle-thing@2.0.1: {} has-flag@4.0.0: {} has-symbols@1.1.0: {} hasown@2.0.2: dependencies: function-bind: 1.1.2 hono@4.11.9: {} hosted-git-info@9.0.2: dependencies: lru-cache: 11.2.6 hpack.js@2.1.6: dependencies: inherits: 2.0.4 obuf: 1.1.2 readable-stream: 2.3.8 wbuf: 1.7.3 htmlparser2@10.1.0: dependencies: domelementtype: 2.3.0 domhandler: 5.0.3 domutils: 3.2.2 entities: 7.0.1 http-cache-semantics@4.2.0: {} http-deceiver@1.2.7: {} http-errors@1.8.1: dependencies: depd: 1.1.2 inherits: 2.0.4 setprototypeof: 1.2.0 statuses: 1.5.0 toidentifier: 1.0.1 http-errors@2.0.1: dependencies: 
depd: 2.0.0 inherits: 2.0.4 setprototypeof: 1.2.0 statuses: 2.0.2 toidentifier: 1.0.1 http-parser-js@0.5.10: {} http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.4 debug: 4.4.3 transitivePeerDependencies: - supports-color http-proxy-middleware@2.0.9(@types/express@4.17.25): dependencies: '@types/http-proxy': 1.17.17 http-proxy: 1.18.1(debug@4.4.3) is-glob: 4.0.3 is-plain-obj: 3.0.0 micromatch: 4.0.8 optionalDependencies: '@types/express': 4.17.25 transitivePeerDependencies: - debug http-proxy-middleware@3.0.5: dependencies: '@types/http-proxy': 1.17.17 debug: 4.4.3 http-proxy: 1.18.1(debug@4.4.3) is-glob: 4.0.3 is-plain-object: 5.0.0 micromatch: 4.0.8 transitivePeerDependencies: - supports-color http-proxy@1.18.1(debug@4.4.3): dependencies: eventemitter3: 4.0.7 follow-redirects: 1.15.11(debug@4.4.3) requires-port: 1.0.0 transitivePeerDependencies: - debug https-proxy-agent@7.0.6: dependencies: agent-base: 7.1.4 debug: 4.4.3 transitivePeerDependencies: - supports-color hyperdyperid@1.2.0: {} iconv-lite@0.4.24: dependencies: safer-buffer: 2.1.2 iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 iconv-lite@0.7.2: dependencies: safer-buffer: 2.1.2 icss-utils@5.1.0(postcss@8.5.6): dependencies: postcss: 8.5.6 ignore-walk@8.0.0: dependencies: minimatch: 10.1.2 image-size@0.5.5: optional: true immutable@5.1.4: {} import-fresh@3.3.1: dependencies: parent-module: 1.0.1 resolve-from: 4.0.0 imurmurhash@0.1.4: {} inherits@2.0.4: {} ini@6.0.0: {} ip-address@10.1.0: {} ipaddr.js@1.9.1: {} ipaddr.js@2.3.0: {} is-arrayish@0.2.1: {} is-binary-path@2.1.0: dependencies: binary-extensions: 2.3.0 is-core-module@2.16.1: dependencies: hasown: 2.0.2 is-docker@3.0.0: {} is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} is-fullwidth-code-point@5.1.0: dependencies: get-east-asian-width: 1.4.0 is-glob@4.0.3: dependencies: is-extglob: 2.1.1 is-in-ssh@1.0.0: {} is-inside-container@1.0.0: dependencies: is-docker: 3.0.0 is-interactive@2.0.0: {} is-network-error@1.3.0: {} 
is-number@7.0.0: {} is-plain-obj@3.0.0: {} is-plain-object@2.0.4: dependencies: isobject: 3.0.1 is-plain-object@5.0.0: {} is-promise@4.0.0: {} is-unicode-supported@2.1.0: {} is-what@3.14.1: {} is-wsl@3.1.0: dependencies: is-inside-container: 1.0.0 isarray@1.0.0: {} isexe@2.0.0: {} isexe@4.0.0: {} isobject@3.0.1: {} istanbul-lib-coverage@3.2.2: {} istanbul-lib-instrument@6.0.3: dependencies: '@babel/core': 7.28.5 '@babel/parser': 7.29.0 '@istanbuljs/schema': 0.1.3 istanbul-lib-coverage: 3.2.2 semver: 7.7.3 transitivePeerDependencies: - supports-color jest-worker@27.5.1: dependencies: '@types/node': 20.19.33 merge-stream: 2.0.0 supports-color: 8.1.1 jiti@2.6.1: {} jose@6.1.3: {} js-tokens@4.0.0: {} js-yaml@4.1.1: dependencies: argparse: 2.0.1 jsesc@3.1.0: {} json-parse-even-better-errors@2.3.1: {} json-parse-even-better-errors@5.0.0: {} json-schema-traverse@1.0.0: {} json-schema-typed@8.0.2: {} json5@2.2.3: {} jsonc-parser@3.3.1: {} jsonparse@1.3.1: {} karma-source-map-support@1.4.0: dependencies: source-map-support: 0.5.21 kind-of@6.0.3: {} launch-editor@2.12.0: dependencies: picocolors: 1.1.1 shell-quote: 1.8.3 less-loader@12.3.0(less@4.4.2)(webpack@5.104.1(esbuild@0.27.2)): dependencies: less: 4.4.2 optionalDependencies: webpack: 5.104.1(esbuild@0.27.2) less@4.4.2: dependencies: copy-anything: 2.0.6 parse-node-version: 1.0.1 tslib: 2.8.1 optionalDependencies: errno: 0.1.8 graceful-fs: 4.2.11 image-size: 0.5.5 make-dir: 2.1.0 mime: 1.6.0 needle: 3.3.1 source-map: 0.6.1 license-webpack-plugin@4.0.2(webpack@5.104.1(esbuild@0.27.2)): dependencies: webpack-sources: 3.3.3 optionalDependencies: webpack: 5.104.1(esbuild@0.27.2) lines-and-columns@1.2.4: {} listr2@9.0.5: dependencies: cli-truncate: 5.1.1 colorette: 2.0.20 eventemitter3: 5.0.4 log-update: 6.1.0 rfdc: 1.4.1 wrap-ansi: 9.0.2 lmdb@3.4.4: dependencies: msgpackr: 1.11.8 node-addon-api: 6.1.0 node-gyp-build-optional-packages: 5.2.2 ordered-binary: 1.6.1 weak-lru-cache: 1.2.2 optionalDependencies: 
'@lmdb/lmdb-darwin-arm64': 3.4.4 '@lmdb/lmdb-darwin-x64': 3.4.4 '@lmdb/lmdb-linux-arm': 3.4.4 '@lmdb/lmdb-linux-arm64': 3.4.4 '@lmdb/lmdb-linux-x64': 3.4.4 '@lmdb/lmdb-win32-arm64': 3.4.4 '@lmdb/lmdb-win32-x64': 3.4.4 optional: true loader-runner@4.3.1: {} loader-utils@2.0.4: dependencies: big.js: 5.2.2 emojis-list: 3.0.0 json5: 2.2.3 loader-utils@3.3.1: {} locate-path@6.0.0: dependencies: p-locate: 5.0.0 lodash.debounce@4.0.8: {} log-symbols@7.0.1: dependencies: is-unicode-supported: 2.1.0 yoctocolors: 2.1.2 log-update@6.1.0: dependencies: ansi-escapes: 7.3.0 cli-cursor: 5.0.0 slice-ansi: 7.1.2 strip-ansi: 7.1.2 wrap-ansi: 9.0.2 lru-cache@11.2.6: {} lru-cache@5.1.1: dependencies: yallist: 3.1.1 magic-string@0.30.21: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 make-dir@2.1.0: dependencies: pify: 4.0.1 semver: 5.7.2 optional: true make-fetch-happen@15.0.3: dependencies: '@npmcli/agent': 4.0.0 cacache: 20.0.3 http-cache-semantics: 4.2.0 minipass: 7.1.2 minipass-fetch: 5.0.1 minipass-flush: 1.0.5 minipass-pipeline: 1.2.4 negotiator: 1.0.0 proc-log: 6.1.0 promise-retry: 2.0.1 ssri: 13.0.1 transitivePeerDependencies: - supports-color math-intrinsics@1.1.0: {} media-typer@0.3.0: {} media-typer@1.1.0: {} memfs@4.56.10(tslib@2.8.1): dependencies: '@jsonjoy.com/fs-core': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-fsa': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-builtins': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-to-fsa': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-node-utils': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-print': 4.56.10(tslib@2.8.1) '@jsonjoy.com/fs-snapshot': 4.56.10(tslib@2.8.1) '@jsonjoy.com/json-pack': 1.21.0(tslib@2.8.1) '@jsonjoy.com/util': 1.9.0(tslib@2.8.1) glob-to-regex.js: 1.2.0(tslib@2.8.1) thingies: 2.5.0(tslib@2.8.1) tree-dump: 1.1.0(tslib@2.8.1) tslib: 2.8.1 merge-descriptors@1.0.3: {} merge-descriptors@2.0.0: {} merge-stream@2.0.0: {} methods@1.1.2: {} micromatch@4.0.8: dependencies: braces: 3.0.3 
picomatch: 2.3.1 mime-db@1.52.0: {} mime-db@1.54.0: {} mime-types@2.1.35: dependencies: mime-db: 1.52.0 mime-types@3.0.2: dependencies: mime-db: 1.54.0 mime@1.6.0: {} mimic-function@5.0.1: {} mini-css-extract-plugin@2.9.4(webpack@5.104.1(esbuild@0.27.2)): dependencies: schema-utils: 4.3.3 tapable: 2.3.0 webpack: 5.104.1(esbuild@0.27.2) minimalistic-assert@1.0.1: {} minimatch@10.1.2: dependencies: '@isaacs/brace-expansion': 5.0.1 minipass-collect@2.0.1: dependencies: minipass: 7.1.2 minipass-fetch@5.0.1: dependencies: minipass: 7.1.2 minipass-sized: 2.0.0 minizlib: 3.1.0 optionalDependencies: encoding: 0.1.13 minipass-flush@1.0.5: dependencies: minipass: 3.3.6 minipass-pipeline@1.2.4: dependencies: minipass: 3.3.6 minipass-sized@2.0.0: dependencies: minipass: 7.1.2 minipass@3.3.6: dependencies: yallist: 4.0.0 minipass@7.1.2: {} minizlib@3.1.0: dependencies: minipass: 7.1.2 mrmime@2.0.1: {} ms@2.0.0: {} ms@2.1.3: {} msgpackr-extract@3.0.3: dependencies: node-gyp-build-optional-packages: 5.2.2 optionalDependencies: '@msgpackr-extract/msgpackr-extract-darwin-arm64': 3.0.3 '@msgpackr-extract/msgpackr-extract-darwin-x64': 3.0.3 '@msgpackr-extract/msgpackr-extract-linux-arm': 3.0.3 '@msgpackr-extract/msgpackr-extract-linux-arm64': 3.0.3 '@msgpackr-extract/msgpackr-extract-linux-x64': 3.0.3 '@msgpackr-extract/msgpackr-extract-win32-x64': 3.0.3 optional: true msgpackr@1.11.8: optionalDependencies: msgpackr-extract: 3.0.3 optional: true multicast-dns@7.2.5: dependencies: dns-packet: 5.6.1 thunky: 1.1.0 mute-stream@2.0.0: {} nanoid@3.3.11: {} needle@3.3.1: dependencies: iconv-lite: 0.6.3 sax: 1.4.4 optional: true negotiator@0.6.3: {} negotiator@0.6.4: {} negotiator@1.0.0: {} neo-async@2.6.2: {} node-addon-api@6.1.0: optional: true node-addon-api@7.1.1: optional: true node-forge@1.3.3: {} node-gyp-build-optional-packages@5.2.2: dependencies: detect-libc: 2.1.2 optional: true node-gyp@12.2.0: dependencies: env-paths: 2.2.1 exponential-backoff: 3.1.3 graceful-fs: 4.2.11 
make-fetch-happen: 15.0.3 nopt: 9.0.0 proc-log: 6.1.0 semver: 7.7.3 tar: 7.5.7 tinyglobby: 0.2.15 which: 6.0.1 transitivePeerDependencies: - supports-color node-releases@2.0.27: {} nopt@9.0.0: dependencies: abbrev: 4.0.0 normalize-path@3.0.0: {} npm-bundled@5.0.0: dependencies: npm-normalize-package-bin: 5.0.0 npm-install-checks@8.0.0: dependencies: semver: 7.7.3 npm-normalize-package-bin@5.0.0: {} npm-package-arg@13.0.2: dependencies: hosted-git-info: 9.0.2 proc-log: 6.1.0 semver: 7.7.3 validate-npm-package-name: 7.0.2 npm-packlist@10.0.3: dependencies: ignore-walk: 8.0.0 proc-log: 6.1.0 npm-pick-manifest@11.0.3: dependencies: npm-install-checks: 8.0.0 npm-normalize-package-bin: 5.0.0 npm-package-arg: 13.0.2 semver: 7.7.3 npm-registry-fetch@19.1.1: dependencies: '@npmcli/redact': 4.0.0 jsonparse: 1.3.1 make-fetch-happen: 15.0.3 minipass: 7.1.2 minipass-fetch: 5.0.1 minizlib: 3.1.0 npm-package-arg: 13.0.2 proc-log: 6.1.0 transitivePeerDependencies: - supports-color nth-check@2.1.1: dependencies: boolbase: 1.0.0 object-assign@4.1.1: {} object-inspect@1.13.4: {} obuf@1.1.2: {} on-finished@2.4.1: dependencies: ee-first: 1.1.1 on-headers@1.1.0: {} once@1.4.0: dependencies: wrappy: 1.0.2 onetime@7.0.0: dependencies: mimic-function: 5.0.1 open@10.2.0: dependencies: default-browser: 5.5.0 define-lazy-prop: 3.0.0 is-inside-container: 1.0.0 wsl-utils: 0.1.0 open@11.0.0: dependencies: default-browser: 5.5.0 define-lazy-prop: 3.0.0 is-in-ssh: 1.0.0 is-inside-container: 1.0.0 powershell-utils: 0.1.0 wsl-utils: 0.3.1 ora@9.0.0: dependencies: chalk: 5.6.2 cli-cursor: 5.0.0 cli-spinners: 3.4.0 is-interactive: 2.0.0 is-unicode-supported: 2.1.0 log-symbols: 7.0.1 stdin-discarder: 0.2.2 string-width: 8.1.1 strip-ansi: 7.1.2 ordered-binary@1.6.1: optional: true p-limit@3.1.0: dependencies: yocto-queue: 0.1.0 p-locate@5.0.0: dependencies: p-limit: 3.1.0 p-map@7.0.4: {} p-retry@6.2.1: dependencies: '@types/retry': 0.12.2 is-network-error: 1.3.0 retry: 0.13.1 pacote@21.0.4: 
dependencies: '@npmcli/git': 7.0.1 '@npmcli/installed-package-contents': 4.0.0 '@npmcli/package-json': 7.0.4 '@npmcli/promise-spawn': 9.0.1 '@npmcli/run-script': 10.0.3 cacache: 20.0.3 fs-minipass: 3.0.3 minipass: 7.1.2 npm-package-arg: 13.0.2 npm-packlist: 10.0.3 npm-pick-manifest: 11.0.3 npm-registry-fetch: 19.1.1 proc-log: 6.1.0 promise-retry: 2.0.1 sigstore: 4.1.0 ssri: 13.0.1 tar: 7.5.7 transitivePeerDependencies: - supports-color parent-module@1.0.1: dependencies: callsites: 3.1.0 parse-json@5.2.0: dependencies: '@babel/code-frame': 7.29.0 error-ex: 1.3.4 json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 parse-node-version@1.0.1: {} parse5-html-rewriting-stream@8.0.0: dependencies: entities: 6.0.1 parse5: 8.0.0 parse5-sax-parser: 8.0.0 parse5-sax-parser@8.0.0: dependencies: parse5: 8.0.0 parse5@8.0.0: dependencies: entities: 6.0.1 parseurl@1.3.3: {} path-exists@4.0.0: {} path-key@3.1.1: {} path-parse@1.0.7: {} path-scurry@2.0.1: dependencies: lru-cache: 11.2.6 minipass: 7.1.2 path-to-regexp@0.1.12: {} path-to-regexp@8.3.0: {} picocolors@1.1.1: {} picomatch@2.3.1: {} picomatch@4.0.3: {} pify@4.0.1: optional: true piscina@5.1.4: optionalDependencies: '@napi-rs/nice': 1.1.1 pkce-challenge@5.0.1: {} postcss-loader@8.2.0(postcss@8.5.6)(typescript@5.9.3)(webpack@5.104.1(esbuild@0.27.2)): dependencies: cosmiconfig: 9.0.0(typescript@5.9.3) jiti: 2.6.1 postcss: 8.5.6 semver: 7.7.3 optionalDependencies: webpack: 5.104.1(esbuild@0.27.2) transitivePeerDependencies: - typescript postcss-media-query-parser@0.2.3: {} postcss-modules-extract-imports@3.1.0(postcss@8.5.6): dependencies: postcss: 8.5.6 postcss-modules-local-by-default@4.2.0(postcss@8.5.6): dependencies: icss-utils: 5.1.0(postcss@8.5.6) postcss: 8.5.6 postcss-selector-parser: 7.1.1 postcss-value-parser: 4.2.0 postcss-modules-scope@3.2.1(postcss@8.5.6): dependencies: postcss: 8.5.6 postcss-selector-parser: 7.1.1 postcss-modules-values@4.0.0(postcss@8.5.6): dependencies: icss-utils: 
5.1.0(postcss@8.5.6) postcss: 8.5.6 postcss-selector-parser@7.1.1: dependencies: cssesc: 3.0.0 util-deprecate: 1.0.2 postcss-value-parser@4.2.0: {} postcss@8.5.6: dependencies: nanoid: 3.3.11 picocolors: 1.1.1 source-map-js: 1.2.1 powershell-utils@0.1.0: {} proc-log@6.1.0: {} process-nextick-args@2.0.1: {} promise-retry@2.0.1: dependencies: err-code: 2.0.3 retry: 0.12.0 proxy-addr@2.0.7: dependencies: forwarded: 0.2.0 ipaddr.js: 1.9.1 prr@1.0.1: optional: true qs@6.14.1: dependencies: side-channel: 1.1.0 randombytes@2.1.0: dependencies: safe-buffer: 5.2.1 range-parser@1.2.1: {} raw-body@2.5.3: dependencies: bytes: 3.1.2 http-errors: 2.0.1 iconv-lite: 0.4.24 unpipe: 1.0.0 raw-body@3.0.2: dependencies: bytes: 3.1.2 http-errors: 2.0.1 iconv-lite: 0.7.2 unpipe: 1.0.0 readable-stream@2.3.8: dependencies: core-util-is: 1.0.3 inherits: 2.0.4 isarray: 1.0.0 process-nextick-args: 2.0.1 safe-buffer: 5.1.2 string_decoder: 1.1.1 util-deprecate: 1.0.2 readable-stream@3.6.2: dependencies: inherits: 2.0.4 string_decoder: 1.3.0 util-deprecate: 1.0.2 readdirp@3.6.0: dependencies: picomatch: 2.3.1 readdirp@4.1.2: {} regenerate-unicode-properties@10.2.2: dependencies: regenerate: 1.4.2 regenerate@1.4.2: {} regex-parser@2.3.1: {} regexpu-core@6.4.0: dependencies: regenerate: 1.4.2 regenerate-unicode-properties: 10.2.2 regjsgen: 0.8.0 regjsparser: 0.13.0 unicode-match-property-ecmascript: 2.0.0 unicode-match-property-value-ecmascript: 2.2.1 regjsgen@0.8.0: {} regjsparser@0.13.0: dependencies: jsesc: 3.1.0 require-from-string@2.0.2: {} requires-port@1.0.0: {} resolve-from@4.0.0: {} resolve-url-loader@5.0.0: dependencies: adjust-sourcemap-loader: 4.0.0 convert-source-map: 1.9.0 loader-utils: 2.0.4 postcss: 8.5.6 source-map: 0.6.1 resolve@1.22.11: dependencies: is-core-module: 2.16.1 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 restore-cursor@5.1.0: dependencies: onetime: 7.0.0 signal-exit: 4.1.0 retry@0.12.0: {} retry@0.13.1: {} rfdc@1.4.1: {} rolldown@1.0.0-beta.58: 
dependencies: '@oxc-project/types': 0.106.0 '@rolldown/pluginutils': 1.0.0-beta.58 optionalDependencies: '@rolldown/binding-android-arm64': 1.0.0-beta.58 '@rolldown/binding-darwin-arm64': 1.0.0-beta.58 '@rolldown/binding-darwin-x64': 1.0.0-beta.58 '@rolldown/binding-freebsd-x64': 1.0.0-beta.58 '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-beta.58 '@rolldown/binding-linux-arm64-gnu': 1.0.0-beta.58 '@rolldown/binding-linux-arm64-musl': 1.0.0-beta.58 '@rolldown/binding-linux-x64-gnu': 1.0.0-beta.58 '@rolldown/binding-linux-x64-musl': 1.0.0-beta.58 '@rolldown/binding-openharmony-arm64': 1.0.0-beta.58 '@rolldown/binding-wasm32-wasi': 1.0.0-beta.58 '@rolldown/binding-win32-arm64-msvc': 1.0.0-beta.58 '@rolldown/binding-win32-x64-msvc': 1.0.0-beta.58 rollup@4.57.1: dependencies: '@types/estree': 1.0.8 optionalDependencies: '@rollup/rollup-android-arm-eabi': 4.57.1 '@rollup/rollup-android-arm64': 4.57.1 '@rollup/rollup-darwin-arm64': 4.57.1 '@rollup/rollup-darwin-x64': 4.57.1 '@rollup/rollup-freebsd-arm64': 4.57.1 '@rollup/rollup-freebsd-x64': 4.57.1 '@rollup/rollup-linux-arm-gnueabihf': 4.57.1 '@rollup/rollup-linux-arm-musleabihf': 4.57.1 '@rollup/rollup-linux-arm64-gnu': 4.57.1 '@rollup/rollup-linux-arm64-musl': 4.57.1 '@rollup/rollup-linux-loong64-gnu': 4.57.1 '@rollup/rollup-linux-loong64-musl': 4.57.1 '@rollup/rollup-linux-ppc64-gnu': 4.57.1 '@rollup/rollup-linux-ppc64-musl': 4.57.1 '@rollup/rollup-linux-riscv64-gnu': 4.57.1 '@rollup/rollup-linux-riscv64-musl': 4.57.1 '@rollup/rollup-linux-s390x-gnu': 4.57.1 '@rollup/rollup-linux-x64-gnu': 4.57.1 '@rollup/rollup-linux-x64-musl': 4.57.1 '@rollup/rollup-openbsd-x64': 4.57.1 '@rollup/rollup-openharmony-arm64': 4.57.1 '@rollup/rollup-win32-arm64-msvc': 4.57.1 '@rollup/rollup-win32-ia32-msvc': 4.57.1 '@rollup/rollup-win32-x64-gnu': 4.57.1 '@rollup/rollup-win32-x64-msvc': 4.57.1 fsevents: 2.3.3 router@2.2.0: dependencies: debug: 4.4.3 depd: 2.0.0 is-promise: 4.0.0 parseurl: 1.3.3 path-to-regexp: 8.3.0 
transitivePeerDependencies: - supports-color run-applescript@7.1.0: {} rxjs@7.8.2: dependencies: tslib: 2.8.1 safe-buffer@5.1.2: {} safe-buffer@5.2.1: {} safer-buffer@2.1.2: {} sass-loader@16.0.6(sass@1.97.1)(webpack@5.104.1(esbuild@0.27.2)): dependencies: neo-async: 2.6.2 optionalDependencies: sass: 1.97.1 webpack: 5.104.1(esbuild@0.27.2) sass@1.97.1: dependencies: chokidar: 4.0.3 immutable: 5.1.4 source-map-js: 1.2.1 optionalDependencies: '@parcel/watcher': 2.5.6 sax@1.4.4: optional: true schema-utils@4.3.3: dependencies: '@types/json-schema': 7.0.15 ajv: 8.17.1 ajv-formats: 2.1.1(ajv@8.17.1) ajv-keywords: 5.1.0(ajv@8.17.1) select-hose@2.0.0: {} selfsigned@2.4.1: dependencies: '@types/node-forge': 1.3.14 node-forge: 1.3.3 semver@5.7.2: optional: true semver@6.3.1: {} semver@7.7.3: {} send@0.19.2: dependencies: debug: 2.6.9 depd: 2.0.0 destroy: 1.2.0 encodeurl: 2.0.0 escape-html: 1.0.3 etag: 1.8.1 fresh: 0.5.2 http-errors: 2.0.1 mime: 1.6.0 ms: 2.1.3 on-finished: 2.4.1 range-parser: 1.2.1 statuses: 2.0.2 transitivePeerDependencies: - supports-color send@1.2.1: dependencies: debug: 4.4.3 encodeurl: 2.0.0 escape-html: 1.0.3 etag: 1.8.1 fresh: 2.0.0 http-errors: 2.0.1 mime-types: 3.0.2 ms: 2.1.3 on-finished: 2.4.1 range-parser: 1.2.1 statuses: 2.0.2 transitivePeerDependencies: - supports-color serialize-javascript@6.0.2: dependencies: randombytes: 2.1.0 serve-index@1.9.2: dependencies: accepts: 1.3.8 batch: 0.6.1 debug: 2.6.9 escape-html: 1.0.3 http-errors: 1.8.1 mime-types: 2.1.35 parseurl: 1.3.3 transitivePeerDependencies: - supports-color serve-static@1.16.3: dependencies: encodeurl: 2.0.0 escape-html: 1.0.3 parseurl: 1.3.3 send: 0.19.2 transitivePeerDependencies: - supports-color serve-static@2.2.1: dependencies: encodeurl: 2.0.0 escape-html: 1.0.3 parseurl: 1.3.3 send: 1.2.1 transitivePeerDependencies: - supports-color setprototypeof@1.2.0: {} shallow-clone@3.0.1: dependencies: kind-of: 6.0.3 shebang-command@2.0.0: dependencies: shebang-regex: 3.0.0 
shebang-regex@3.0.0: {} shell-quote@1.8.3: {} side-channel-list@1.0.0: dependencies: es-errors: 1.3.0 object-inspect: 1.13.4 side-channel-map@1.0.1: dependencies: call-bound: 1.0.4 es-errors: 1.3.0 get-intrinsic: 1.3.0 object-inspect: 1.13.4 side-channel-weakmap@1.0.2: dependencies: call-bound: 1.0.4 es-errors: 1.3.0 get-intrinsic: 1.3.0 object-inspect: 1.13.4 side-channel-map: 1.0.1 side-channel@1.1.0: dependencies: es-errors: 1.3.0 object-inspect: 1.13.4 side-channel-list: 1.0.0 side-channel-map: 1.0.1 side-channel-weakmap: 1.0.2 signal-exit@4.1.0: {} sigstore@4.1.0: dependencies: '@sigstore/bundle': 4.0.0 '@sigstore/core': 3.1.0 '@sigstore/protobuf-specs': 0.5.0 '@sigstore/sign': 4.1.0 '@sigstore/tuf': 4.0.1 '@sigstore/verify': 3.1.0 transitivePeerDependencies: - supports-color slice-ansi@7.1.2: dependencies: ansi-styles: 6.2.3 is-fullwidth-code-point: 5.1.0 smart-buffer@4.2.0: {} sockjs@0.3.24: dependencies: faye-websocket: 0.11.4 uuid: 8.3.2 websocket-driver: 0.7.4 socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.4 debug: 4.4.3 socks: 2.8.7 transitivePeerDependencies: - supports-color socks@2.8.7: dependencies: ip-address: 10.1.0 smart-buffer: 4.2.0 source-map-js@1.2.1: {} source-map-loader@5.0.0(webpack@5.104.1(esbuild@0.27.2)): dependencies: iconv-lite: 0.6.3 source-map-js: 1.2.1 webpack: 5.104.1(esbuild@0.27.2) source-map-support@0.5.21: dependencies: buffer-from: 1.1.2 source-map: 0.6.1 source-map@0.6.1: {} source-map@0.7.6: {} spdx-correct@3.2.0: dependencies: spdx-expression-parse: 3.0.1 spdx-license-ids: 3.0.22 spdx-exceptions@2.5.0: {} spdx-expression-parse@3.0.1: dependencies: spdx-exceptions: 2.5.0 spdx-license-ids: 3.0.22 spdx-license-ids@3.0.22: {} spdy-transport@3.0.0: dependencies: debug: 4.4.3 detect-node: 2.1.0 hpack.js: 2.1.6 obuf: 1.1.2 readable-stream: 3.6.2 wbuf: 1.7.3 transitivePeerDependencies: - supports-color spdy@4.0.2: dependencies: debug: 4.4.3 handle-thing: 2.0.1 http-deceiver: 1.2.7 select-hose: 2.0.0 spdy-transport: 3.0.0 
transitivePeerDependencies: - supports-color ssri@13.0.1: dependencies: minipass: 7.1.2 statuses@1.5.0: {} statuses@2.0.2: {} stdin-discarder@0.2.2: {} string-width@4.2.3: dependencies: emoji-regex: 8.0.0 is-fullwidth-code-point: 3.0.0 strip-ansi: 6.0.1 string-width@7.2.0: dependencies: emoji-regex: 10.6.0 get-east-asian-width: 1.4.0 strip-ansi: 7.1.2 string-width@8.1.1: dependencies: get-east-asian-width: 1.4.0 strip-ansi: 7.1.2 string_decoder@1.1.1: dependencies: safe-buffer: 5.1.2 string_decoder@1.3.0: dependencies: safe-buffer: 5.2.1 strip-ansi@6.0.1: dependencies: ansi-regex: 5.0.1 strip-ansi@7.1.2: dependencies: ansi-regex: 6.2.2 supports-color@8.1.1: dependencies: has-flag: 4.0.0 supports-preserve-symlinks-flag@1.0.0: {} tapable@2.3.0: {} tar@7.5.7: dependencies: '@isaacs/fs-minipass': 4.0.1 chownr: 3.0.0 minipass: 7.1.2 minizlib: 3.1.0 yallist: 5.0.0 terser-webpack-plugin@5.3.16(esbuild@0.27.2)(webpack@5.104.1): dependencies: '@jridgewell/trace-mapping': 0.3.31 jest-worker: 27.5.1 schema-utils: 4.3.3 serialize-javascript: 6.0.2 terser: 5.44.1 webpack: 5.104.1(esbuild@0.27.2) optionalDependencies: esbuild: 0.27.2 terser@5.44.1: dependencies: '@jridgewell/source-map': 0.3.11 acorn: 8.15.0 commander: 2.20.3 source-map-support: 0.5.21 thingies@2.5.0(tslib@2.8.1): dependencies: tslib: 2.8.1 thunky@1.1.0: {} tinyglobby@0.2.15: dependencies: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 to-regex-range@5.0.1: dependencies: is-number: 7.0.0 toidentifier@1.0.1: {} tree-dump@1.1.0(tslib@2.8.1): dependencies: tslib: 2.8.1 tree-kill@1.2.2: {} tslib@2.8.1: {} tuf-js@4.1.0: dependencies: '@tufjs/models': 4.1.0 debug: 4.4.3 make-fetch-happen: 15.0.3 transitivePeerDependencies: - supports-color type-is@1.6.18: dependencies: media-typer: 0.3.0 mime-types: 2.1.35 type-is@2.0.1: dependencies: content-type: 1.0.5 media-typer: 1.1.0 mime-types: 3.0.2 typed-assert@1.0.9: {} typescript@5.9.3: {} undici-types@6.21.0: {} undici@7.18.0: {} 
unicode-canonical-property-names-ecmascript@2.0.1: {} unicode-match-property-ecmascript@2.0.0: dependencies: unicode-canonical-property-names-ecmascript: 2.0.1 unicode-property-aliases-ecmascript: 2.2.0 unicode-match-property-value-ecmascript@2.2.1: {} unicode-property-aliases-ecmascript@2.2.0: {} unique-filename@5.0.0: dependencies: unique-slug: 6.0.0 unique-slug@6.0.0: dependencies: imurmurhash: 0.1.4 unpipe@1.0.0: {} update-browserslist-db@1.2.3(browserslist@4.28.1): dependencies: browserslist: 4.28.1 escalade: 3.2.0 picocolors: 1.1.1 util-deprecate@1.0.2: {} utils-merge@1.0.1: {} uuid@8.3.2: {} validate-npm-package-license@3.0.4: dependencies: spdx-correct: 3.2.0 spdx-expression-parse: 3.0.1 validate-npm-package-name@7.0.2: {} vary@1.1.2: {} vite@7.3.0(@types/node@20.19.33)(jiti@2.6.1)(less@4.4.2)(sass@1.97.1)(terser@5.44.1): dependencies: esbuild: 0.27.2 fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.6 rollup: 4.57.1 tinyglobby: 0.2.15 optionalDependencies: '@types/node': 20.19.33 fsevents: 2.3.3 jiti: 2.6.1 less: 4.4.2 sass: 1.97.1 terser: 5.44.1 watchpack@2.5.0: dependencies: glob-to-regexp: 0.4.1 graceful-fs: 4.2.11 watchpack@2.5.1: dependencies: glob-to-regexp: 0.4.1 graceful-fs: 4.2.11 wbuf@1.7.3: dependencies: minimalistic-assert: 1.0.1 weak-lru-cache@1.2.2: optional: true webpack-dev-middleware@7.4.5(tslib@2.8.1)(webpack@5.104.1): dependencies: colorette: 2.0.20 memfs: 4.56.10(tslib@2.8.1) mime-types: 3.0.2 on-finished: 2.4.1 range-parser: 1.2.1 schema-utils: 4.3.3 optionalDependencies: webpack: 5.104.1(esbuild@0.27.2) transitivePeerDependencies: - tslib webpack-dev-server@5.2.2(tslib@2.8.1)(webpack@5.104.1): dependencies: '@types/bonjour': 3.5.13 '@types/connect-history-api-fallback': 1.5.4 '@types/express': 4.17.25 '@types/express-serve-static-core': 4.19.8 '@types/serve-index': 1.9.4 '@types/serve-static': 1.15.10 '@types/sockjs': 0.3.36 '@types/ws': 8.18.1 ansi-html-community: 0.0.8 bonjour-service: 1.3.0 chokidar: 3.6.0 colorette: 
2.0.20 compression: 1.8.1 connect-history-api-fallback: 2.0.0 express: 4.22.1 graceful-fs: 4.2.11 http-proxy-middleware: 2.0.9(@types/express@4.17.25) ipaddr.js: 2.3.0 launch-editor: 2.12.0 open: 10.2.0 p-retry: 6.2.1 schema-utils: 4.3.3 selfsigned: 2.4.1 serve-index: 1.9.2 sockjs: 0.3.24 spdy: 4.0.2 webpack-dev-middleware: 7.4.5(tslib@2.8.1)(webpack@5.104.1) ws: 8.19.0 optionalDependencies: webpack: 5.104.1(esbuild@0.27.2) transitivePeerDependencies: - bufferutil - debug - supports-color - tslib - utf-8-validate webpack-merge@6.0.1: dependencies: clone-deep: 4.0.1 flat: 5.0.2 wildcard: 2.0.1 webpack-sources@3.3.3: {} webpack-subresource-integrity@5.1.0(webpack@5.104.1(esbuild@0.27.2)): dependencies: typed-assert: 1.0.9 webpack: 5.104.1(esbuild@0.27.2) webpack@5.104.1(esbuild@0.27.2): dependencies: '@types/eslint-scope': 3.7.7 '@types/estree': 1.0.8 '@types/json-schema': 7.0.15 '@webassemblyjs/ast': 1.14.1 '@webassemblyjs/wasm-edit': 1.14.1 '@webassemblyjs/wasm-parser': 1.14.1 acorn: 8.15.0 acorn-import-phases: 1.0.4(acorn@8.15.0) browserslist: 4.28.1 chrome-trace-event: 1.0.4 enhanced-resolve: 5.19.0 es-module-lexer: 2.0.0 eslint-scope: 5.1.1 events: 3.3.0 glob-to-regexp: 0.4.1 graceful-fs: 4.2.11 json-parse-even-better-errors: 2.3.1 loader-runner: 4.3.1 mime-types: 2.1.35 neo-async: 2.6.2 schema-utils: 4.3.3 tapable: 2.3.0 terser-webpack-plugin: 5.3.16(esbuild@0.27.2)(webpack@5.104.1) watchpack: 2.5.1 webpack-sources: 3.3.3 transitivePeerDependencies: - '@swc/core' - esbuild - uglify-js websocket-driver@0.7.4: dependencies: http-parser-js: 0.5.10 safe-buffer: 5.2.1 websocket-extensions: 0.1.4 websocket-extensions@0.1.4: {} which@2.0.2: dependencies: isexe: 2.0.0 which@6.0.1: dependencies: isexe: 4.0.0 wildcard@2.0.1: {} wrap-ansi@6.2.0: dependencies: ansi-styles: 4.3.0 string-width: 4.2.3 strip-ansi: 6.0.1 wrap-ansi@9.0.2: dependencies: ansi-styles: 6.2.3 string-width: 7.2.0 strip-ansi: 7.1.2 wrappy@1.0.2: {} ws@8.19.0: {} wsl-utils@0.1.0: dependencies: is-wsl: 
3.1.0 wsl-utils@0.3.1: dependencies: is-wsl: 3.1.0 powershell-utils: 0.1.0 y18n@5.0.8: {} yallist@3.1.1: {} yallist@4.0.0: {} yallist@5.0.0: {} yargs-parser@22.0.0: {} yargs@18.0.0: dependencies: cliui: 9.0.1 escalade: 3.2.0 get-caller-file: 2.0.5 string-width: 7.2.0 y18n: 5.0.8 yargs-parser: 22.0.0 yocto-queue@0.1.0: {} yoctocolors-cjs@2.1.3: {} yoctocolors@2.1.2: {} zod-to-json-schema@3.25.1(zod@4.3.5): dependencies: zod: 4.3.5 zod@4.3.5: {} zone.js@0.16.0: {}
unknown
github
https://github.com/angular/angular
integration/ng-add-localize/pnpm-lock.yaml
# This class manages all the menu items in the main menu of the editor

import wx
from constants import *
from evhdlr import *
import util
import logging
from preference import Preferences
from wx.lib.pubsub import setupkwargs, pub
import pvscomm


class MainFrameMenu(wx.MenuBar):
    """The class implementing and managing the main menu bar in the application"""

    def __init__(self):
        wx.MenuBar.__init__(self)
        self.plugins = {}
        self.toolbars = {}
        self._recentContexts = {}   # menu-item id -> context path
        self._recentFiles = {}      # menu-item id -> file fullname
        self.addFileMenu()
        self.addEditMenu()
        self.addViewMenu()
        self.addPVSMenu()
        self.addHelpMenu()
        self.setBindings()
        pub.subscribe(self.update, PUB_UPDATEMENUBAR)
        pub.subscribe(self.showPlugin, PUB_SHOWPLUGIN)
        pub.subscribe(self.addPluginToViewMenu, PUB_ADDITEMTOVIEWMENU)
        pub.subscribe(self.prepareRecentContextsSubMenu, PUB_UPDATEPVSCONTEXT)
        pub.subscribe(self.prepareRecentFilesSubMenu, PUB_PREPARERECENTFILESMENU)

    def addFileMenu(self):
        """Adding menu items to File menu"""
        fileMenu = wx.Menu()
        self.newFileMenuItem = fileMenu.Append(wx.ID_NEW, self._makeLabel(LABEL_NEW, "N", True), EMPTY_STRING, wx.ITEM_NORMAL)
        self.openFileMenuItem = fileMenu.Append(wx.ID_OPEN, self._makeLabel(LABEL_OPEN, "O", True), EMPTY_STRING, wx.ITEM_NORMAL)
        self.recentFilesMenu = wx.Menu()
        self.prepareRecentFilesSubMenu()
        fileMenu.AppendMenu(wx.ID_ANY, "Recent Files", self.recentFilesMenu)
        self.saveFileMenuItem = fileMenu.Append(wx.ID_SAVE, self._makeLabel(LABEL_SAVE, "S"), EMPTY_STRING, wx.ITEM_NORMAL)
        self.saveFileAsMenuItem = fileMenu.Append(wx.ID_SAVEAS, self._makeLabel(LABEL_SAVEAS, None, True), EMPTY_STRING, wx.ITEM_NORMAL)
        self.closeFileMenuItem = fileMenu.Append(wx.ID_CLOSE, self._makeLabel(LABEL_CLOSEFILE, "W"), EMPTY_STRING, wx.ITEM_NORMAL)
        fileMenu.AppendSeparator()
        self.quitMenuItem = fileMenu.Append(wx.ID_ANY, self._makeLabel(LABEL_QUIT, "Q"), EMPTY_STRING, wx.ITEM_NORMAL)
        self.Append(fileMenu, LABEL_FILE)

    def addEditMenu(self):
        """Adding menu items to Edit menu"""
        editMenu = wx.Menu()
        self.undoMenuItem = editMenu.Append(wx.ID_UNDO, self._makeLabel(LABEL_UNDO, "U"), EMPTY_STRING, wx.ITEM_NORMAL)
        # BUG FIX: the redo item was created with wx.ID_UNDO, giving two menu
        # items the same stock identifier; it must be wx.ID_REDO.
        self.redoMenuItem = editMenu.Append(wx.ID_REDO, self._makeLabel(LABEL_REDO, SHIFT + "-Z"), EMPTY_STRING, wx.ITEM_NORMAL)
        editMenu.AppendSeparator()
        self.cutMenuItem = editMenu.Append(wx.ID_CUT, self._makeLabel(LABEL_CUT, "X"), EMPTY_STRING, wx.ITEM_NORMAL)
        self.copyMenuItem = editMenu.Append(wx.ID_COPY, self._makeLabel(LABEL_COPY, "C"), EMPTY_STRING, wx.ITEM_NORMAL)
        self.pasteMenuItem = editMenu.Append(wx.ID_PASTE, self._makeLabel(LABEL_PASTE, "V"), EMPTY_STRING, wx.ITEM_NORMAL)
        self.selectAllMenuItem = editMenu.Append(wx.ID_SELECTALL, self._makeLabel(LABEL_SELECTALL, "A"), EMPTY_STRING, wx.ITEM_NORMAL)
        editMenu.AppendSeparator()
        self.findMenuItem = editMenu.Append(wx.ID_FIND, self._makeLabel(LABEL_FIND, "F"), EMPTY_STRING, wx.ITEM_NORMAL)
        self.Append(editMenu, LABEL_EDIT)

    def addViewMenu(self):
        """Adding menu items to View menu"""
        self.viewMenu = wx.Menu()
        self.pluginMenu = wx.Menu()
        self.viewMenu.AppendMenu(wx.ID_ANY, 'Plugins', self.pluginMenu)
        # Add View Menu to the menu bar:
        self.Append(self.viewMenu, LABEL_VIEW)

    def addPVSMenu(self):
        """Adding menu items to PVS menu"""
        pvsMenu = wx.Menu()
        self.changeContextMenuItem = pvsMenu.Append(wx.ID_ANY, self._makeLabel("Change Context", None, True), EMPTY_STRING, wx.ITEM_NORMAL)
        self.recentContextsMenu = wx.Menu()
        self.prepareRecentContextsSubMenu()
        pvsMenu.AppendMenu(wx.ID_ANY, "Recent Contexts", self.recentContextsMenu)
        self.pvsResetMenuItem = pvsMenu.Append(wx.ID_ANY, "Reset PVS", EMPTY_STRING, wx.ITEM_NORMAL)
        pvsMenu.AppendSeparator()
        self.typecheckMenuItem = pvsMenu.Append(wx.ID_ANY, LABEL_TYPECHECK, EMPTY_STRING, wx.ITEM_NORMAL)
        self.pvsDialogMenuItem = pvsMenu.Append(wx.ID_ANY, "PVS Log...", EMPTY_STRING, wx.ITEM_NORMAL)
        self.Append(pvsMenu, PVS_U)

    def addHelpMenu(self):
        """Adding menu items to Help menu"""
        helpMenu = wx.Menu()
        self.helpMenuItem = helpMenu.Append(wx.ID_ANY, self._makeLabel("PVS GUI Help", None, True), EMPTY_STRING, wx.ITEM_NORMAL)
        self.Append(helpMenu, "Help")

    @staticmethod
    def _clearMenu(menu):
        """Remove all the items from a menu.

        Replaces the old idiom of looping FindItemByPosition(0) inside a
        bare 'except:' until it raised — this version stops on the item
        count instead of relying on an exception.
        """
        while menu.GetMenuItemCount() > 0:
            menu.RemoveItem(menu.FindItemByPosition(0))

    def prepareRecentFilesSubMenu(self):
        """Re-populate the 'Recent Files' submenu from the preferences."""
        self._clearMenu(self.recentFilesMenu)
        self._recentFiles = {}
        preferences = Preferences()
        recentFiles = preferences.getRecentFiles()
        logging.debug("Recent Files: %s", recentFiles)
        frame = util.getMainFrame()
        for fullname in recentFiles:
            item = self.recentFilesMenu.Append(wx.ID_ANY, fullname, EMPTY_STRING, wx.ITEM_NORMAL)
            self._recentFiles[item.GetId()] = fullname
            frame.Bind(wx.EVT_MENU, self.onRecentFileSelected, item)

    def prepareRecentContextsSubMenu(self):
        """Re-populate the 'Recent Contexts' submenu from the preferences."""
        self._clearMenu(self.recentContextsMenu)
        self._recentContexts = {}
        preferences = Preferences()
        recentContexts = preferences.getRecentContexts()
        logging.debug("Recent Contexts: %s", recentContexts)
        frame = util.getMainFrame()
        for cxt in recentContexts:
            item = self.recentContextsMenu.Append(wx.ID_ANY, cxt, EMPTY_STRING, wx.ITEM_NORMAL)
            self._recentContexts[item.GetId()] = cxt
            frame.Bind(wx.EVT_MENU, self.onRecentContextSelected, item)

    def onRecentContextSelected(self, event):
        """Switch PVS to the context whose menu item was selected."""
        context = self._recentContexts[event.GetId()]
        pvscomm.PVSCommandManager().changeContext(context)

    def onRecentFileSelected(self, event):
        """Open the recent file whose menu item was selected."""
        fullname = self._recentFiles[event.GetId()]
        pub.sendMessage(PUB_ADDFILE, fullname=fullname)
        # Once re-opened, the file leaves the 'recent' list and the submenu
        # is rebuilt to reflect that.
        Preferences().removeFromRecentFiles(fullname)
        self.prepareRecentFilesSubMenu()

    def addPluginToViewMenu(self, name, callBackFunction):
        """Add a checkable item for the named plugin to the View/Plugins menu."""
        logging.debug("Name: %s", name)
        frame = util.getMainFrame()
        item = self.pluginMenu.Append(wx.ID_ANY, name, EMPTY_STRING, wx.ITEM_CHECK)
        self.plugins[name] = item
        # NOTE(review): PluginManager is not imported explicitly here —
        # presumably provided by one of the star imports above; confirm.
        self.pluginMenu.Check(item.GetId(), PluginManager().shouldPluginBeVisible(name, pvscomm.PVSCommandManager().pvsMode))
        frame.Bind(wx.EVT_MENU, callBackFunction, item)

    def _makeLabel(self, name, shortcut=None, addDots=False):
        """Build a menu label, optionally adding '...' and a Ctrl-<key> accelerator."""
        if addDots:
            name = name + DOTDOTDOT
        return name if shortcut is None else "%s\t%s-%s" % (name, CONTROL, shortcut)

    def setBindings(self):
        """Bind every menu item to its event handler (handlers from evhdlr)."""
        frame = util.getMainFrame()
        frame.Bind(wx.EVT_MENU, onCreateNewFile, self.newFileMenuItem)
        frame.Bind(wx.EVT_MENU, onOpenFile, self.openFileMenuItem)
        frame.Bind(wx.EVT_MENU, onSaveFile, self.saveFileMenuItem)
        frame.Bind(wx.EVT_MENU, onSaveAsFile, self.saveFileAsMenuItem)
        frame.Bind(wx.EVT_MENU, onCloseFile, self.closeFileMenuItem)
        frame.Bind(wx.EVT_MENU, onQuitFrame, self.quitMenuItem)
        frame.Bind(wx.EVT_MENU, onUndo, self.undoMenuItem)
        frame.Bind(wx.EVT_MENU, onRedo, self.redoMenuItem)
        frame.Bind(wx.EVT_MENU, onSelectAll, self.selectAllMenuItem)
        frame.Bind(wx.EVT_MENU, onCutText, self.cutMenuItem)
        frame.Bind(wx.EVT_MENU, onCopyText, self.copyMenuItem)
        frame.Bind(wx.EVT_MENU, onPasteText, self.pasteMenuItem)
        frame.Bind(wx.EVT_MENU, onFindText, self.findMenuItem)
        frame.Bind(wx.EVT_MENU, onChangeContext, self.changeContextMenuItem)
        frame.Bind(wx.EVT_MENU, onTypecheck, self.typecheckMenuItem)
        frame.Bind(wx.EVT_MENU, onResetPVS, self.pvsResetMenuItem)
        frame.Bind(wx.EVT_MENU, onShowPVSCommunicationLog, self.pvsDialogMenuItem)
        frame.Bind(wx.EVT_MENU, onShowHelpFrame, self.helpMenuItem)

    def update(self, parameters):
        """Enable/disable items based on the published menubar parameters."""
        if OPENFILES in parameters:
            value = parameters[OPENFILES] > 0
            self.closeFileMenuItem.Enable(value)
            self.cutMenuItem.Enable(value)
            self.copyMenuItem.Enable(value)
            self.pasteMenuItem.Enable(value)
            self.selectAllMenuItem.Enable(value)
            self.findMenuItem.Enable(value)
            self.undoMenuItem.Enable(value)
            self.redoMenuItem.Enable(value)
        if PVSMODE in parameters:
            pvsMode = parameters[PVSMODE]
            # The per-mode branches are intentionally empty placeholders.
            if pvsMode == PVS_MODE_OFF:
                pass
            elif pvsMode == PVS_MODE_LISP:
                pass
            elif pvsMode == PVS_MODE_PROVER:
                pass
            elif pvsMode == PVS_MODE_UNKNOWN:
                pass
            else:
                logging.error("pvsMode %s is not recognized", pvsMode)

    def showPlugin(self, name, value):
        """Check/uncheck the menu item mirroring a plugin's visibility."""
        if name in self.plugins:
            logging.info("Changing the visibility of %s to %s", name, value)
            item = self.plugins[name]
            item.Check(value)
        else:
            # logging.warn is deprecated; use warning().
            logging.warning("No menu option for plugin %s", name)
unknown
codeparrot/codeparrot-clean
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class PyColormath(PythonPackage):
    """Color math and conversion library."""

    homepage = "https://pypi.python.org/pypi/colormath/2.1.1"
    url = "https://pypi.io/packages/source/c/colormath/colormath-2.1.1.tar.gz"

    # Known release with its md5 checksum.
    version('2.1.1', '10a0fb17e3c24363d0e1a3f2dccaa33b')

    # Build-time tooling only.
    depends_on('py-setuptools', type='build')
    # Needed both to build and at run time.
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-networkx', type=('build', 'run'))
unknown
codeparrot/codeparrot-clean
# vim: set sw=4 expandtab : # # Copyright 2004 Apache Software Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # # Originally developed by Gregory Trubetskoy. # # $Id$ """ This module contains classes to support HTTP State Management Mechanism, also known as Cookies. The classes provide simple ways for creating, parsing and digitally signing cookies, as well as the ability to store simple Python objects in Cookies (using marshalling). The behaviour of the classes is designed to be most useful within mod_python applications. The current state of HTTP State Management standardization is rather unclear. It appears that the de-facto standard is the original Netscape specification, even though already two RFC's have been put out (RFC2109 (1997) and RFC2965 (2000)). The RFC's add a couple of useful features (e.g. using Max-Age instead of Expires, but my limited tests show that Max-Age is ignored by the two browsers tested (IE and Safari). As a result of this, perhaps trying to be RFC-compliant (by automatically providing Max-Age and Version) could be a waste of cookie space... 
""" import time import re import hmac import marshal import base64 # import apache class CookieError(Exception): pass class metaCookie(type): def __new__(cls, clsname, bases, clsdict): _valid_attr = ( "version", "path", "domain", "secure", "comment", "expires", "max_age", # RFC 2965 "commentURL", "discard", "port", # Microsoft Extension "httponly" ) # _valid_attr + property values # (note __slots__ is a new Python feature, it # prevents any other attribute from being set) __slots__ = _valid_attr + ("name", "value", "_value", "_expires", "__data__") clsdict["_valid_attr"] = _valid_attr clsdict["__slots__"] = __slots__ def set_expires(self, value): if type(value) == type(""): # if it's a string, it should be # valid format as per Netscape spec try: t = time.strptime(value, "%a, %d-%b-%Y %H:%M:%S GMT") except ValueError: raise ValueError, "Invalid expires time: %s" % value t = time.mktime(t) else: # otherwise assume it's a number # representing time as from time.time() t = value value = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", time.gmtime(t)) self._expires = "%s" % value def get_expires(self): return self._expires clsdict["expires"] = property(fget=get_expires, fset=set_expires) return type.__new__(cls, clsname, bases, clsdict) class Cookie(object): """ This class implements the basic Cookie functionality. Note that unlike the Python Standard Library Cookie class, this class represents a single cookie (not a list of Morsels). """ __metaclass__ = metaCookie DOWNGRADE = 0 IGNORE = 1 EXCEPTION = 3 def parse(Class, str, **kw): """ Parse a Cookie or Set-Cookie header value, and return a dict of Cookies. Note: the string should NOT include the header name, only the value. """ dict = _parse_cookie(str, Class, **kw) return dict parse = classmethod(parse) def __init__(self, name, value, **kw): """ This constructor takes at least a name and value as the arguments, as well as optionally any of allowed cookie attributes as defined in the existing cookie standards. 
""" self.name, self.value = name, value for k in kw: setattr(self, k.lower(), kw[k]) # subclasses can use this for internal stuff self.__data__ = {} def __str__(self): """ Provides the string representation of the Cookie suitable for sending to the browser. Note that the actual header name will not be part of the string. This method makes no attempt to automatically double-quote strings that contain special characters, even though the RFC's dictate this. This is because doing so seems to confuse most browsers out there. """ result = ["%s=%s" % (self.name, self.value)] for name in self._valid_attr: if hasattr(self, name): if name in ("secure", "discard", "httponly"): result.append(name) else: result.append("%s=%s" % (name, getattr(self, name))) return "; ".join(result) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, str(self)) class SignedCookie(Cookie): """ This is a variation of Cookie that provides automatic cryptographic signing of cookies and verification. It uses the HMAC support in the Python standard library. This ensures that the cookie has not been tamprered with on the client side. Note that this class does not encrypt cookie data, thus it is still plainly visible as part of the cookie. 
""" def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw): dict = _parse_cookie(s, Class, **kw) del_list = [] for k in dict: c = dict[k] try: c.unsign(secret) except CookieError: if mismatch == Cookie.EXCEPTION: raise elif mismatch == Cookie.IGNORE: del_list.append(k) else: # downgrade to Cookie dict[k] = Cookie.parse(Cookie.__str__(c))[k] for k in del_list: del dict[k] return dict parse = classmethod(parse) def __init__(self, name, value, secret=None, **kw): Cookie.__init__(self, name, value, **kw) self.__data__["secret"] = secret def hexdigest(self, str): if not self.__data__["secret"]: raise CookieError, "Cannot sign without a secret" _hmac = hmac.new(self.__data__["secret"], self.name) _hmac.update(str) return _hmac.hexdigest() def __str__(self): result = ["%s=%s%s" % (self.name, self.hexdigest(self.value), self.value)] for name in self._valid_attr: if hasattr(self, name): if name in ("secure", "discard", "httponly"): result.append(name) else: result.append("%s=%s" % (name, getattr(self, name))) return "; ".join(result) def unsign(self, secret): sig, val = self.value[:32], self.value[32:] mac = hmac.new(secret, self.name) mac.update(val) if mac.hexdigest() == sig: self.value = val self.__data__["secret"] = secret else: raise CookieError, "Incorrectly Signed Cookie: %s=%s" % (self.name, self.value) class MarshalCookie(SignedCookie): """ This is a variation of SignedCookie that can store more than just strings. It will automatically marshal the cookie value, therefore any marshallable object can be used as value. The standard library Cookie module provides the ability to pickle data, which is a major security problem. It is believed that unmarshalling (as opposed to unpickling) is safe, yet we still err on the side of caution which is why this class is a subclass of SignedCooke making sure what we are about to unmarshal passes the digital signature test. 
Here is a link to a sugesstion that marshalling is safer than unpickling http://groups.google.com/groups?hl=en&lr=&ie=UTF-8&selm=7xn0hcugmy.fsf%40ruckus.brouhaha.com """ def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw): dict = _parse_cookie(s, Class, **kw) del_list = [] for k in dict: c = dict[k] try: c.unmarshal(secret) except CookieError: if mismatch == Cookie.EXCEPTION: raise elif mismatch == Cookie.IGNORE: del_list.append(k) else: # downgrade to Cookie dict[k] = Cookie.parse(Cookie.__str__(c))[k] for k in del_list: del dict[k] return dict parse = classmethod(parse) def __str__(self): m = base64.encodestring(marshal.dumps(self.value)) # on long cookies, the base64 encoding can contain multiple lines # separated by \n or \r\n m = ''.join(m.split()) result = ["%s=%s%s" % (self.name, self.hexdigest(m), m)] for name in self._valid_attr: if hasattr(self, name): if name in ("secure", "discard", "httponly"): result.append(name) else: result.append("%s=%s" % (name, getattr(self, name))) return "; ".join(result) def unmarshal(self, secret): self.unsign(secret) try: data = base64.decodestring(self.value) except: raise CookieError, "Cannot base64 Decode Cookie: %s=%s" % (self.name, self.value) try: self.value = marshal.loads(data) except (EOFError, ValueError, TypeError): raise CookieError, "Cannot Unmarshal Cookie: %s=%s" % (self.name, self.value) # This is a simplified and in some places corrected # (at least I think it is) pattern from standard lib Cookie.py _cookiePattern = re.compile( r"(?x)" # Verbose pattern r"[,\ ]*" # space/comma (RFC2616 4.2) before attr-val is eaten r"(?P<key>" # Start of group 'key' r"[^;\ =]+" # anything but ';', ' ' or '=' r")" # End of group 'key' r"\ *(=\ *)?" # a space, then may be "=", more space r"(?P<val>" # Start of group 'val' r'"(?:[^\\"]|\\.)*"' # a doublequoted string r"|" # or r"[^;]*" # any word or empty string r")" # End of group 'val' r"\s*;?" 
# probably ending in a semi-colon ) def _parse_cookie(str, Class, names=None): # XXX problem is we should allow duplicate # strings result = {} matchIter = _cookiePattern.finditer(str) for match in matchIter: key, val = match.group("key"), match.group("val") # We just ditch the cookies names which start with a dollar sign since # those are in fact RFC2965 cookies attributes. See bug [#MODPYTHON-3]. if key[0]!='$' and names is None or key in names: result[key] = Class(key, val) return result def add_cookie(req, cookie, value="", **kw): """ Sets a cookie in outgoing headers and adds a cache directive so that caches don't cache the cookie. """ # is this a cookie? if not isinstance(cookie, Cookie): # make a cookie cookie = Cookie(cookie, value, **kw) if not req.headers_out.has_key("Set-Cookie"): req.headers_out.add("Cache-Control", 'no-cache="set-cookie"') req.headers_out.add("Set-Cookie", str(cookie)) def get_cookies(req, Class=Cookie, **kw): """ A shorthand for retrieveing and parsing cookies given a Cookie class. The class must be one of the classes from this module. """ if not req.headers_in.has_key("cookie"): return {} cookies = req.headers_in["cookie"] if type(cookies) == type([]): cookies = '; '.join(cookies) return Class.parse(cookies, **kw) def get_cookie(req, name, Class=Cookie, **kw): cookies = get_cookies(req, Class, names=[name], **kw) if cookies.has_key(name): return cookies[name]
unknown
codeparrot/codeparrot-clean
from django.shortcuts import render

from core.models import *
from core.forms import *


def index(request):
    """Render the home page with the institution banner context."""
    contexto = {
        "faculdade": "FACULDADES PAULISTAS UNIDAS",
        "facul": "FAPALUN",
        "pagina": "HomePage",
    }
    return render(request, "index.html", contexto)


def cadastro(request):
    """Render the registration page."""
    return render(request, "cadastro.html")


def contato(request):
    """Render the contact page; on POST, validate the form and send the email.

    BUG FIX: the original tested 'if request.POST:', which is falsy for a
    POST with an empty body and silently skipped form handling; the correct
    check is the request method.
    """
    if request.method == "POST":
        form = ContatoForm(request.POST)
        if form.is_valid():
            form.envia_email()
    else:
        form = ContatoForm()
    contexto = {
        "form": form,
        "contato_activate": True,
    }
    return render(request, "contato.html", contexto)


def cursos(request):
    """Render the course listing page."""
    contexto = {
        "faculdade": "FACULDADES PAULISTAS UNIDAS",
        "facul": "FAPALUN",
        "pagina": "Cursos",
    }
    return render(request, "cursos.html", contexto)


def login(request):
    """Render the login page."""
    return render(request, "login.html")


def noticias(request):
    """Render the news page."""
    context = {
        "noticias_activate": True,
    }
    return render(request, 'noticias.html', context)


def novos_alunos(request):
    """Render the new-students page."""
    return render(request, "novos_alunos.html")


def entrar(request):
    """Render the sign-in page."""
    context = {
        "entrar_activate": True,
    }
    return render(request, 'entrar.html', context)


def erro(request):
    """Render the error page."""
    context = {
        "entrar_activate": True,
    }
    return render(request, 'erro.html', context)
unknown
codeparrot/codeparrot-clean
<?php

namespace Illuminate\Database\Eloquent;

use OutOfBoundsException;

class MissingAttributeException extends OutOfBoundsException
{
    /**
     * Create a new missing attribute exception instance.
     *
     * @param  \Illuminate\Database\Eloquent\Model  $model
     * @param  string  $key
     */
    public function __construct($model, $key)
    {
        // Compose the message first so the sprintf arguments read clearly.
        $message = sprintf(
            'The attribute [%s] either does not exist or was not retrieved for model [%s].',
            $key,
            get_class($model)
        );

        parent::__construct($message);
    }
}
php
github
https://github.com/laravel/framework
src/Illuminate/Database/Eloquent/MissingAttributeException.php
//===-- ClangdXPC.cpp --------------------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

/// Returns the bundle identifier of the Clangd XPC service.
extern "C" const char *clangd_xpc_get_bundle_identifier() {
  // Exposed with C linkage so the symbol name is stable for dlsym-style
  // lookup by the XPC host.
  static const char *const BundleID = "org.llvm.clangd";
  return BundleID;
}
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clangd/xpc/framework/ClangdXPC.cpp
# Contributing Guidelines

There are two main ways to contribute to the project &mdash; submitting issues and submitting fixes/changes/improvements via pull requests.

## Submitting issues

Both bug reports and feature requests are welcome. Submit issues [here](https://github.com/Kotlin/kotlinx.coroutines/issues).

Questions about usage and general inquiries are better suited for [StackOverflow](https://stackoverflow.com) or the `#coroutines` channel in [KotlinLang Slack](https://surveys.jetbrains.com/s3/kotlin-slack-sign-up). There, questions get answered much quicker than they do in this issue tracker, while also reducing the load on the maintainers.

### Commenting on issues

Describing why you're interested in some specific issue helps us a lot with looking for the best solution. The more experiences, concerns, or suggestions get shared, the better! If you feel that something you know hasn't been said (or even if the issue discussion is too long to actually make sure of it), please do not hesitate to drop a line. If you feel like your case was already described and you have nothing to add, please still let us know about your interest by leaving a smiley (👍)! We use these as indicators of demand. Please note that there is no need to leave a comment in this case, though.

"+1"-style remarks or questions about the progress on an issue do not bring anything new and only create noise, complicating the job of finding insight in the discussion. Please avoid this. We reserve the right to delete such comments.

## Submitting PRs

We love PRs. Submit PRs [here](https://github.com/Kotlin/kotlinx.coroutines/pulls). However, please keep in mind that maintainers will have to support the resulting code of the project, so do familiarize yourself with the following guidelines.

* All development (both new features and bug fixes) is performed in the `develop` branch.
* The `master` branch contains the sources of the most recently released version.
* Base your PRs against the `develop` branch. * The `develop` branch is pushed to the `master` branch during release. * Documentation in markdown files can be updated directly in the `master` branch, unless the documentation is in the source code, and the patch changes line numbers. * If you fix documentation: * After fixing/changing code examples in the [`docs`](docs) folder or updating any references in the markdown files run the [Knit tool](#running-the-knit-tool) and commit the resulting changes as well. The tests will not pass otherwise. * If you plan extensive rewrites/additions to the docs, then please [contact the maintainers](#contacting-maintainers) to coordinate the work in advance. * If you make any code changes: * Follow the [Kotlin Coding Conventions](https://kotlinlang.org/docs/reference/coding-conventions.html). Use 4 spaces for indentation. Do not add extra newlines in function bodies: if you feel that blocks of code should be logically separated, then separate them with a comment instead. * [Build the project](#building) to make sure everything works and passes the tests. * If you fix a bug: * Note that what is a bug for some can be a feature for others. Are you truly fixing a problem? Is there an open issue about it? * Write the test that reproduces the bug. * Fixes without tests are accepted only in exceptional circumstances if it can be shown that writing the corresponding test is too hard or otherwise impractical. * Follow the style of writing tests that is used in this project: name test functions as `testXxx`. Don't use backticks in test names. * If you introduce any new public APIs: * Before setting out to work on a problem, comment on the existing issue or create one, proposing a solution and gathering feedback first before implementing it. PRs that add new API without the corresponding issue with positive feedback about the proposed implementation are very unlikely to be approved or reviewed. 
* All new APIs must come with documentation and tests.
* All new APIs are initially released with the `@ExperimentalCoroutinesApi` annotation and graduate later.
* [Update the public API dumps](#updating-the-public-api-dump) and commit the resulting changes as well. It will not pass the tests otherwise.

## Building

This library is built with Gradle.

* Run `./gradlew build` to build, also running all of the tests.
* Run `./gradlew <module>:check` to test the module you are working with to speed things up during development.
* Run `./gradlew <module>:jvmTest` to perform only the fast JVM tests of a multiplatform module.
* Run `./gradlew <module>:jvmTest -Pstress=true` to run both fast and slow JVM tests.

### Environment requirements

JDK >= 11 referred to by the `JAVA_HOME` environment variable.

### Running the Knit tool

* Use [Knit](https://github.com/Kotlin/kotlinx-knit/blob/main/README.md) for updates to documentation:
  * Run `./gradlew knit` to update the example files, links, tables of content.
  * Commit the updated documents and examples together with other changes.

### Updating the public API dump

* Use the [Binary Compatibility Validator](https://github.com/Kotlin/binary-compatibility-validator/blob/master/README.md) for updates to public API:
  * Run `./gradlew apiDump` to update API index files.
  * Commit the updated API indexes together with other changes.

## Releases

* The full release procedure checklist is [here](RELEASE.md).

## Contacting maintainers

* If something cannot be done, not convenient, or does not work &mdash; submit an [issue](#submitting-issues).
* "How to do something" questions &mdash; [StackOverflow](https://stackoverflow.com).
* Discussions and general inquiries &mdash; use `#coroutines` channel in [KotlinLang Slack](https://kotl.in/slack).
unknown
github
https://github.com/Kotlin/kotlinx.coroutines
CONTRIBUTING.md
"""Regression tests for numpy.core basics (dot, resize, clip, seterr, correlate, ...).

Reconstructed with conventional formatting from a newline-stripped dump.
Fixes applied: Python 2 ``except E, e`` syntax -> ``except E as e``;
``xrange`` -> ``range``; ``TestLikeFuncs.test_empty_like`` now actually
calls ``empty_like`` (it previously duplicated the ``zeros_like`` test).

NOTE(review): this module targets the legacy NumPy test API -- it assumes
``from numpy.testing import *`` still provides ``TestCase``, ``dec`` and
``run_module_suite``; confirm the NumPy version before running.
"""
import sys
from decimal import Decimal

import numpy as np
from numpy.core import *
from numpy.random import rand, randint, randn
from numpy.testing import *
from numpy.core.multiarray import dot as dot_


class Vec:
    """Minimal vector wrapper used to exercise dot() on object arrays."""

    def __init__(self, sequence=None):
        if sequence is None:
            sequence = []
        self.array = array(sequence)

    def __add__(self, other):
        out = Vec()
        out.array = self.array + other.array
        return out

    def __sub__(self, other):
        out = Vec()
        out.array = self.array - other.array
        return out

    def __mul__(self, other):  # with scalar
        out = Vec(self.array.copy())
        out.array *= other
        return out

    def __rmul__(self, other):
        return self * other


class TestDot(TestCase):
    """Compare the Python-level dot() against the C multiarray dot (dot_)."""

    def setUp(self):
        self.A = rand(10, 8)
        self.b1 = rand(8, 1)
        self.b2 = rand(8)
        self.b3 = rand(1, 8)
        self.b4 = rand(10)
        self.N = 14  # decimal places both implementations must agree to

    def test_matmat(self):
        A = self.A
        c1 = dot(A.transpose(), A)
        c2 = dot_(A.transpose(), A)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_matvec(self):
        A, b1 = self.A, self.b1
        c1 = dot(A, b1)
        c2 = dot_(A, b1)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_matvec2(self):
        A, b2 = self.A, self.b2
        c1 = dot(A, b2)
        c2 = dot_(A, b2)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecmat(self):
        A, b4 = self.A, self.b4
        c1 = dot(b4, A)
        c2 = dot_(b4, A)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecmat2(self):
        b3, A = self.b3, self.A
        c1 = dot(b3, A.transpose())
        c2 = dot_(b3, A.transpose())
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecmat3(self):
        A, b4 = self.A, self.b4
        c1 = dot(A.transpose(), b4)
        c2 = dot_(A.transpose(), b4)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecvecouter(self):
        b1, b3 = self.b1, self.b3
        c1 = dot(b1, b3)
        c2 = dot_(b1, b3)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecvecinner(self):
        b1, b3 = self.b1, self.b3
        c1 = dot(b3, b1)
        c2 = dot_(b3, b1)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_columnvect1(self):
        b1 = ones((3, 1))
        b2 = [5.3]
        c1 = dot(b1, b2)
        c2 = dot_(b1, b2)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_columnvect2(self):
        b1 = ones((3, 1)).transpose()
        b2 = [6.2]
        c1 = dot(b2, b1)
        c2 = dot_(b2, b1)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecscalar(self):
        b1 = rand(1, 1)
        b2 = rand(1, 8)
        c1 = dot(b1, b2)
        c2 = dot_(b1, b2)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecscalar2(self):
        b1 = rand(8, 1)
        b2 = rand(1, 1)
        c1 = dot(b1, b2)
        c2 = dot_(b1, b2)
        assert_almost_equal(c1, c2, decimal=self.N)

    def test_all(self):
        # Every combination of 0-d, 1-d and 2-d unit-sized operands.
        dims = [(), (1,), (1, 1)]
        for dim1 in dims:
            for dim2 in dims:
                arg1 = rand(*dim1)
                arg2 = rand(*dim2)
                c1 = dot(arg1, arg2)
                c2 = dot_(arg1, arg2)
                assert (c1.shape == c2.shape)
                assert_almost_equal(c1, c2, decimal=self.N)

    def test_vecobject(self):
        # dot() must give the same result for contiguous and
        # non-contiguous object arrays.
        U_non_cont = transpose([[1., 1.], [1., 2.]])
        U_cont = ascontiguousarray(U_non_cont)
        x = array([Vec([1., 0.]), Vec([0., 1.])])
        zeros = array([Vec([0., 0.]), Vec([0., 0.])])
        zeros_test = dot(U_cont, x) - dot(U_non_cont, x)
        assert_equal(zeros[0].array, zeros_test[0].array)
        assert_equal(zeros[1].array, zeros_test[1].array)


class TestResize(TestCase):
    def test_copies(self):
        A = array([[1, 2], [3, 4]])
        Ar1 = array([[1, 2, 3, 4], [1, 2, 3, 4]])
        assert_equal(resize(A, (2, 4)), Ar1)
        Ar2 = array([[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(resize(A, (4, 2)), Ar2)
        Ar3 = array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
        assert_equal(resize(A, (4, 3)), Ar3)

    def test_zeroresize(self):
        A = array([[1, 2], [3, 4]])
        Ar = resize(A, (0,))
        assert_equal(Ar, array([]))


class TestNonarrayArgs(TestCase):
    # check that non-array arguments to functions wrap them in arrays
    def test_squeeze(self):
        A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
        assert squeeze(A).shape == (3, 3)

    def test_cumproduct(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert all(cumproduct(A) == array([1, 2, 6, 24, 120, 720]))

    def test_size(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert size(A) == 6
        assert size(A, 0) == 2
        assert size(A, 1) == 3

    def test_mean(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert mean(A) == 3.5
        assert all(mean(A, 0) == array([2.5, 3.5, 4.5]))
        assert all(mean(A, 1) == array([2., 5.]))

    def test_std(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(std(A), 1.707825127659933)
        assert_almost_equal(std(A, 0), array([1.5, 1.5, 1.5]))
        assert_almost_equal(std(A, 1), array([0.81649658, 0.81649658]))

    def test_var(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(var(A), 2.9166666666666665)
        assert_almost_equal(var(A, 0), array([2.25, 2.25, 2.25]))
        assert_almost_equal(var(A, 1), array([0.66666667, 0.66666667]))


class TestBoolScalar(TestCase):
    """bool_ scalars must behave like singletons under logical/bitwise ops."""

    def test_logical(self):
        f = False_
        t = True_
        s = "xyz"
        self.assertTrue((t and s) is s)
        self.assertTrue((f and s) is f)

    def test_bitwise_or(self):
        f = False_
        t = True_
        self.assertTrue((t | t) is t)
        self.assertTrue((f | t) is t)
        self.assertTrue((t | f) is t)
        self.assertTrue((f | f) is f)

    def test_bitwise_and(self):
        f = False_
        t = True_
        self.assertTrue((t & t) is t)
        self.assertTrue((f & t) is f)
        self.assertTrue((t & f) is f)
        self.assertTrue((f & f) is f)

    def test_bitwise_xor(self):
        f = False_
        t = True_
        self.assertTrue((t ^ t) is f)
        self.assertTrue((f ^ t) is t)
        self.assertTrue((t ^ f) is t)
        self.assertTrue((f ^ f) is f)


class TestSeterr(TestCase):
    def test_set(self):
        # seterr() returns the *previous* settings; restore them on exit.
        err = seterr()
        try:
            old = seterr(divide='warn')
            self.assertTrue(err == old)
            new = seterr()
            self.assertTrue(new['divide'] == 'warn')
            seterr(over='raise')
            self.assertTrue(geterr()['over'] == 'raise')
            self.assertTrue(new['divide'] == 'warn')
            seterr(**old)
            self.assertTrue(geterr() == old)
        finally:
            seterr(**err)

    def test_divide_err(self):
        err = seterr(divide='raise')
        try:
            try:
                array([1.]) / array([0.])
            except FloatingPointError:
                pass
            else:
                self.fail()
            seterr(divide='ignore')
            array([1.]) / array([0.])
        finally:
            seterr(**err)


class TestFloatExceptions(TestCase):
    def assert_raises_fpe(self, fpeerr, flop, x, y):
        """Assert that flop(x, y) raises a FloatingPointError matching fpeerr."""
        ftype = type(x)
        try:
            flop(x, y)
            assert_(False,
                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
        except FloatingPointError as exc:  # Py3 fix: was "except FloatingPointError, exc"
            assert_(str(exc).find(fpeerr) >= 0,
                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))

    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
        """Check that fpe exception is raised.

        Given a floating operation `flop` and two scalar values, check
        that the operation raises the floating point exception specified
        by `fpeerr`. Tests all variants with 0-d array scalars as well.
        """
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])

    def test_floating_exceptions(self):
        """Test basic arithmetic function errors"""
        oldsettings = np.seterr(all='raise')
        try:
            # Test for all real and complex float types
            for typecode in np.typecodes['AllFloat']:
                ftype = np.obj2sctype(typecode)
                if np.dtype(ftype).kind == 'f':
                    # Get some extreme values for the type
                    fi = np.finfo(ftype)
                    ft_tiny = fi.tiny
                    ft_max = fi.max
                    ft_eps = fi.eps
                    underflow = 'underflow'
                    divbyzero = 'divide by zero'
                else:
                    # 'c', complex, corresponding real dtype
                    rtype = type(ftype(0).real)
                    fi = np.finfo(rtype)
                    ft_tiny = ftype(fi.tiny)
                    ft_max = ftype(fi.max)
                    ft_eps = ftype(fi.eps)
                    # The complex types raise different exceptions
                    underflow = ''
                    divbyzero = ''
                overflow = 'overflow'
                invalid = 'invalid'
                self.assert_raises_fpe(underflow,
                                       lambda a, b: a / b, ft_tiny, ft_max)
                self.assert_raises_fpe(underflow,
                                       lambda a, b: a * b, ft_tiny, ft_tiny)
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a * b, ft_max, ftype(2))
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a / b, ft_max, ftype(0.5))
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a + b, ft_max, ft_max * ft_eps)
                self.assert_raises_fpe(overflow,
                                       lambda a, b: a - b, -ft_max, ft_max * ft_eps)
                self.assert_raises_fpe(divbyzero,
                                       lambda a, b: a / b, ftype(1), ftype(0))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a / b, ftype(0), ftype(0))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a - b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a + b, ftype(np.inf), ftype(-np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b: a * b, ftype(0), ftype(np.inf))
                if sys.platform != 'cli':
                    # Windows CRT library does not raise over/underflow
                    # exceptions on pow.
                    self.assert_raises_fpe(overflow,
                                           np.power, ftype(2), ftype(2 ** fi.nexp))
        finally:
            np.seterr(**oldsettings)


class TestFromiter(TestCase):
    def makegen(self):
        # Py3 fix: was xrange
        for x in range(24):
            yield x ** 2

    def test_types(self):
        ai32 = fromiter(self.makegen(), int32)
        ai64 = fromiter(self.makegen(), int64)
        af = fromiter(self.makegen(), float)
        self.assertTrue(ai32.dtype == dtype(int32))
        self.assertTrue(ai64.dtype == dtype(int64))
        self.assertTrue(af.dtype == dtype(float))

    def test_lengths(self):
        expected = array(list(self.makegen()))
        a = fromiter(self.makegen(), int)
        a20 = fromiter(self.makegen(), int, 20)
        self.assertTrue(len(a) == len(expected))
        self.assertTrue(len(a20) == 20)
        # Requesting more items than the generator yields must raise.
        try:
            fromiter(self.makegen(), int, len(expected) + 10)
        except ValueError:
            pass
        else:
            self.fail()

    def test_values(self):
        expected = array(list(self.makegen()))
        a = fromiter(self.makegen(), int)
        a20 = fromiter(self.makegen(), int, 20)
        self.assertTrue(alltrue(a == expected, axis=0))
        self.assertTrue(alltrue(a20 == expected[:20], axis=0))


class TestIndex(TestCase):
    def test_boolean(self):
        a = rand(3, 5, 8)
        V = rand(5, 8)
        g1 = randint(0, 5, size=15)
        g2 = randint(0, 8, size=15)
        V[g1, g2] = -V[g1, g2]
        assert (array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()


class TestBinaryRepr(TestCase):
    def test_zero(self):
        assert_equal(binary_repr(0), '0')

    def test_large(self):
        assert_equal(binary_repr(10736848), '101000111101010011010000')

    def test_negative(self):
        assert_equal(binary_repr(-1), '-1')
        assert_equal(binary_repr(-1, width=8), '11111111')


class TestBaseRepr(TestCase):
    def test_base3(self):
        assert_equal(base_repr(3 ** 5, 3), '100000')

    def test_positive(self):
        assert_equal(base_repr(12, 10), '12')
        assert_equal(base_repr(12, 10, 4), '000012')
        assert_equal(base_repr(12, 4), '30')
        assert_equal(base_repr(3731624803700888, 36), '10QR0ROFCEW')

    def test_negative(self):
        assert_equal(base_repr(-12, 10), '-12')
        assert_equal(base_repr(-12, 10, 4), '-000012')
        assert_equal(base_repr(-12, 4), '-30')


class TestArrayComparisons(TestCase):
    def test_array_equal(self):
        res = array_equal(array([1, 2]), array([1, 2]))
        assert res
        assert type(res) is bool
        res = array_equal(array([1, 2]), array([1, 2, 3]))
        assert not res
        assert type(res) is bool
        res = array_equal(array([1, 2]), array([3, 4]))
        assert not res
        assert type(res) is bool
        res = array_equal(array([1, 2]), array([1, 3]))
        assert not res
        assert type(res) is bool

    def test_array_equiv(self):
        res = array_equiv(array([1, 2]), array([1, 2]))
        assert res
        assert type(res) is bool
        res = array_equiv(array([1, 2]), array([1, 2, 3]))
        assert not res
        assert type(res) is bool
        res = array_equiv(array([1, 2]), array([3, 4]))
        assert not res
        assert type(res) is bool
        res = array_equiv(array([1, 2]), array([1, 3]))
        assert not res
        assert type(res) is bool
        res = array_equiv(array([1, 1]), array([1]))
        assert res
        assert type(res) is bool
        res = array_equiv(array([1, 1]), array([[1], [1]]))
        assert res
        assert type(res) is bool
        res = array_equiv(array([1, 2]), array([2]))
        assert not res
        assert type(res) is bool
        res = array_equiv(array([1, 2]), array([[1], [2]]))
        assert not res
        assert type(res) is bool
        res = array_equiv(array([1, 2]), array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
        assert not res
        assert type(res) is bool


def assert_array_strict_equal(x, y):
    """assert_array_equal plus identical flags and native-endianness state."""
    assert_array_equal(x, y)
    # Check flags
    assert x.flags == y.flags
    # check endianness
    assert x.dtype.isnative == y.dtype.isnative


class TestClip(TestCase):
    """Compare the fast ndarray.clip against a slow choose-based reference."""

    def setUp(self):
        self.nr = 5
        self.nc = 3

    def fastclip(self, a, m, M, out=None):
        if out is None:
            return a.clip(m, M)
        else:
            return a.clip(m, M, out)

    def clip(self, a, m, M, out=None):
        # use slow-clip
        selector = less(a, m) + 2 * greater(a, M)
        return selector.choose((a, m, M), out=out)

    # Handy functions
    def _generate_data(self, n, m):
        return randn(n, m)

    def _generate_data_complex(self, n, m):
        return randn(n, m) + 1.j * rand(n, m)

    def _generate_flt_data(self, n, m):
        return (randn(n, m)).astype(float32)

    def _neg_byteorder(self, a):
        a = asarray(a)
        if sys.byteorder == 'little':
            a = a.astype(a.dtype.newbyteorder('>'))
        else:
            a = a.astype(a.dtype.newbyteorder('<'))
        return a

    def _generate_non_native_data(self, n, m):
        data = randn(n, m)
        data = self._neg_byteorder(data)
        assert not data.dtype.isnative
        return data

    def _generate_int_data(self, n, m):
        return (10 * rand(n, m)).astype(int64)

    def _generate_int32_data(self, n, m):
        return (10 * rand(n, m)).astype(int32)

    # Now the real test cases
    def test_simple_double(self):
        """Test native double input with scalar min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = 0.1
        M = 0.6
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_simple_int(self):
        """Test native int input with scalar min/max."""
        a = self._generate_int_data(self.nr, self.nc)
        a = a.astype(int)
        m = -2
        M = 4
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_array_double(self):
        """Test native double input with array min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = zeros(a.shape)
        M = m + 0.5
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_simple_nonnative(self):
        """Test non native double input with scalar min/max.
        Test native double input with non native double scalar min/max."""
        a = self._generate_non_native_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_equal(ac, act)
        # Test native double input with non native double scalar min/max.
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = self._neg_byteorder(0.6)
        assert not M.dtype.isnative
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_equal(ac, act)

    def test_simple_complex(self):
        """Test native complex input with native double scalar min/max.
        Test native input with complex double scalar min/max.
        """
        a = 3 * self._generate_data_complex(self.nr, self.nc)
        m = -0.5
        M = 1.
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)
        # Test native input with complex double scalar min/max.
        a = 3 * self._generate_data(self.nr, self.nc)
        m = -0.5 + 1.j
        M = 1. + 2.j
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_clip_non_contig(self):
        """Test clip for non contiguous native input and native scalar min/max."""
        a = self._generate_data(self.nr * 2, self.nc * 3)
        a = a[::2, ::3]
        assert not a.flags['F_CONTIGUOUS']
        assert not a.flags['C_CONTIGUOUS']
        ac = self.fastclip(a, -1.6, 1.7)
        act = self.clip(a, -1.6, 1.7)
        assert_array_strict_equal(ac, act)

    def test_simple_out(self):
        """Test native double input with scalar min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = zeros(a.shape)
        act = zeros(a.shape)
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int32_inout(self):
        """Test native int32 input with double min/max and int32 out."""
        a = self._generate_int32_data(self.nr, self.nc)
        m = float64(0)
        M = float64(2)
        ac = zeros(a.shape, dtype=int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int64_out(self):
        """Test native int32 input with int32 scalar min/max and int64 out."""
        a = self._generate_int32_data(self.nr, self.nc)
        m = int32(-1)
        M = int32(1)
        ac = zeros(a.shape, dtype=int64)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int64_inout(self):
        """Test native in32 input with double array min/max and int32 out."""
        a = self._generate_int32_data(self.nr, self.nc)
        m = zeros(a.shape, float64)
        M = float64(1)
        ac = zeros(a.shape, dtype=int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int32_out(self):
        """Test native double input with scalar min/max and int out."""
        a = self._generate_data(self.nr, self.nc)
        m = -1.0
        M = 2.0
        ac = zeros(a.shape, dtype=int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_inplace_01(self):
        """Test native double input with array min/max in-place."""
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = zeros(a.shape)
        M = 1.0
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_simple_inplace_02(self):
        """Test native double input with scalar min/max in-place."""
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = -0.5
        M = 0.6
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_noncontig_inplace(self):
        """Test non contiguous double input with double scalar min/max in-place."""
        a = self._generate_data(self.nr * 2, self.nc * 3)
        a = a[::2, ::3]
        assert not a.flags['F_CONTIGUOUS']
        assert not a.flags['C_CONTIGUOUS']
        ac = a.copy()
        m = -0.5
        M = 0.6
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_equal(a, ac)

    def test_type_cast_01(self):
        """Test native double input with scalar min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_02(self):
        """Test native int32 input with int32 scalar min/max."""
        a = self._generate_int_data(self.nr, self.nc)
        a = a.astype(int32)
        m = -2
        M = 4
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_03(self):
        """Test native int32 input with float64 scalar min/max."""
        a = self._generate_int32_data(self.nr, self.nc)
        m = -2
        M = 4
        ac = self.fastclip(a, float64(m), float64(M))
        act = self.clip(a, float64(m), float64(M))
        assert_array_strict_equal(ac, act)

    def test_type_cast_04(self):
        """Test native int32 input with float32 scalar min/max."""
        a = self._generate_int32_data(self.nr, self.nc)
        m = float32(-2)
        M = float32(4)
        act = self.fastclip(a, m, M)
        ac = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_05(self):
        """Test native int32 with double arrays min/max."""
        a = self._generate_int_data(self.nr, self.nc)
        m = -0.5
        M = 1.
        ac = self.fastclip(a, m * zeros(a.shape), M)
        act = self.clip(a, m * zeros(a.shape), M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_06(self):
        """Test native with NON native scalar min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = 0.5
        m_s = self._neg_byteorder(m)
        M = 1.
        act = self.clip(a, m_s, M)
        ac = self.fastclip(a, m_s, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_07(self):
        """Test NON native with native array min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = -0.5 * ones(a.shape)
        M = 1.
        a_s = self._neg_byteorder(a)
        assert not a_s.dtype.isnative
        act = a_s.clip(m, M)
        ac = self.fastclip(a_s, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_08(self):
        """Test NON native with native scalar min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 1.
        a_s = self._neg_byteorder(a)
        assert not a_s.dtype.isnative
        ac = self.fastclip(a_s, m, M)
        act = a_s.clip(m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_09(self):
        """Test native with NON native array min/max."""
        a = self._generate_data(self.nr, self.nc)
        m = -0.5 * ones(a.shape)
        M = 1.
        m_s = self._neg_byteorder(m)
        assert not m_s.dtype.isnative
        ac = self.fastclip(a, m_s, M)
        act = self.clip(a, m_s, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_10(self):
        """Test native int32 with float min/max and float out for output argument."""
        a = self._generate_int_data(self.nr, self.nc)
        b = zeros(a.shape, dtype=float32)
        m = float32(-0.5)
        M = float32(1)
        act = self.clip(a, m, M, out=b)
        ac = self.fastclip(a, m, M, out=b)
        assert_array_strict_equal(ac, act)

    def test_type_cast_11(self):
        """Test non native with native scalar, min/max, out non native"""
        a = self._generate_non_native_data(self.nr, self.nc)
        b = a.copy()
        b = b.astype(b.dtype.newbyteorder('>'))
        bt = b.copy()
        m = -0.5
        M = 1.
        self.fastclip(a, m, M, out=b)
        self.clip(a, m, M, out=bt)
        assert_array_strict_equal(b, bt)

    def test_type_cast_12(self):
        """Test native int32 input and min/max and float out"""
        a = self._generate_int_data(self.nr, self.nc)
        b = zeros(a.shape, dtype=float32)
        m = int32(0)
        M = int32(1)
        act = self.clip(a, m, M, out=b)
        ac = self.fastclip(a, m, M, out=b)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_simple(self):
        """Test native double input with scalar min/max"""
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = zeros(a.shape)
        act = zeros(a.shape)
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_simple2(self):
        """Test native int32 input with double min/max and int32 out"""
        a = self._generate_int32_data(self.nr, self.nc)
        m = float64(0)
        M = float64(2)
        ac = zeros(a.shape, dtype=int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_simple_int32(self):
        """Test native int32 input with int32 scalar min/max and int64 out"""
        a = self._generate_int32_data(self.nr, self.nc)
        m = int32(-1)
        M = int32(1)
        ac = zeros(a.shape, dtype=int64)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_array_int32(self):
        """Test native int32 input with double array min/max and int32 out"""
        a = self._generate_int32_data(self.nr, self.nc)
        m = zeros(a.shape, float64)
        M = float64(1)
        ac = zeros(a.shape, dtype=int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_array_outint32(self):
        """Test native double input with scalar min/max and int out"""
        a = self._generate_data(self.nr, self.nc)
        m = -1.0
        M = 2.0
        ac = zeros(a.shape, dtype=int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_inplace_array(self):
        """Test native double input with array min/max"""
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = zeros(a.shape)
        M = 1.0
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_clip_inplace_simple(self):
        """Test native double input with scalar min/max"""
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = -0.5
        M = 0.6
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_clip_func_takes_out(self):
        """Ensure that the clip() function takes an out= argument."""
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = -0.5
        M = 0.6
        a2 = clip(a, m, M, out=a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a2, ac)
        self.assert_(a2 is a)


class test_allclose_inf(TestCase):
    rtol = 1e-5
    atol = 1e-8

    def tst_allclose(self, x, y):
        assert allclose(x, y), "%s and %s not close" % (x, y)

    def tst_not_allclose(self, x, y):
        assert not allclose(x, y), "%s and %s shouldn't be close" % (x, y)

    def test_ip_allclose(self):
        """Parametric test factory."""
        arr = array([100, 1000])
        aran = arange(125).reshape((5, 5, 5))
        atol = self.atol
        rtol = self.rtol
        data = [([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1 + rtol + atol]),
                (arr, arr + arr * rtol),
                (arr, arr + arr * rtol + atol * 2),
                (aran, aran + aran * rtol)]
        for (x, y) in data:
            yield (self.tst_allclose, x, y)

    def test_ip_not_allclose(self):
        """Parametric test factory."""
        aran = arange(125).reshape((5, 5, 5))
        atol = self.atol
        rtol = self.rtol
        data = [([inf, 0], [1, inf]),
                ([inf, 0], [1, 0]),
                ([inf, inf], [1, inf]),
                ([inf, inf], [1, 0]),
                ([-inf, 0], [inf, 0]),
                ([nan, 0], [nan, 0]),
                ([atol * 2], [0]),
                ([1], [1 + rtol + atol * 2]),
                (aran, aran + aran * atol + atol * 2),
                (array([inf, 1]), array([0, inf]))]
        for (x, y) in data:
            yield (self.tst_not_allclose, x, y)

    def test_no_parameter_modification(self):
        # allclose must not mutate its arguments.
        x = array([inf, 1])
        y = array([0, inf])
        allclose(x, y)
        assert_array_equal(x, array([inf, 1]))
        assert_array_equal(y, array([0, inf]))


class TestStdVar(TestCase):
    def setUp(self):
        self.A = array([1, -1, 1, -1])
        self.real_var = 1

    def test_basic(self):
        assert_almost_equal(var(self.A), self.real_var)
        assert_almost_equal(std(self.A) ** 2, self.real_var)

    def test_ddof1(self):
        assert_almost_equal(var(self.A, ddof=1),
                            self.real_var * len(self.A) / float(len(self.A) - 1))
        assert_almost_equal(std(self.A, ddof=1) ** 2,
                            self.real_var * len(self.A) / float(len(self.A) - 1))

    def test_ddof2(self):
        assert_almost_equal(var(self.A, ddof=2),
                            self.real_var * len(self.A) / float(len(self.A) - 2))
        assert_almost_equal(std(self.A, ddof=2) ** 2,
                            self.real_var * len(self.A) / float(len(self.A) - 2))


class TestStdVarComplex(TestCase):
    def test_basic(self):
        A = array([1, 1.j, -1, -1.j])
        real_var = 1
        assert_almost_equal(var(A), real_var)
        assert_almost_equal(std(A) ** 2, real_var)


class TestLikeFuncs(TestCase):
    '''Test zeros_like and empty_like'''

    def setUp(self):
        self.data = [(array([[1, 2, 3], [4, 5, 6]], dtype=int32), (2, 3), int32),
                     (array([[1, 2, 3], [4, 5, 6]], dtype=float32), (2, 3), float32),
                     ]

    def test_zeros_like(self):
        for d, dshape, dtype in self.data:
            dz = zeros_like(d)
            assert dz.shape == dshape
            assert dz.dtype.type == dtype
            assert all(abs(dz) == 0)

    def test_empty_like(self):
        # Bug fix: this test previously called zeros_like, duplicating the
        # test above and leaving empty_like untested. Only shape and dtype
        # are checked -- the contents of an empty array are unspecified.
        for d, dshape, dtype in self.data:
            dz = empty_like(d)
            assert dz.shape == dshape
            assert dz.dtype.type == dtype


class _TestCorrelate(TestCase):
    def _setup(self, dt):
        self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
        self.y = np.array([-1, -2, -3], dtype=dt)
        self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
        self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)

    def test_float(self):
        self._setup(np.float)
        z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z2)

    def test_object(self):
        self._setup(Decimal)
        z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, self.z2)


class TestCorrelate(_TestCorrelate):
    old_behavior = True

    def _setup(self, dt):
        # correlate uses an unconventional definition so that correlate(a, b)
        # == correlate(b, a), so force the corresponding outputs to be the same
        # as well
        _TestCorrelate._setup(self, dt)
        self.z2 = self.z1

    @dec.deprecated()
    @dec.skipif(sys.platform == 'cli',
                "nose decorators don't work on IronPython yet")
    def test_complex(self):
        x = np.array([1, 2, 3, 4 + 1j], dtype=np.complex)
        y = np.array([-1, -2j, 3 + 1j], dtype=np.complex)
        r_z = np.array([3 + 1j, 6, 8 - 1j, 9 + 1j, -1 - 8j, -4 - 1j], dtype=np.complex)
        z = np.correlate(x, y, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, r_z)

    @dec.deprecated()
    @dec.skipif(sys.platform == 'cli',
                "nose decorators don't work on IronPython yet")
    def test_float(self):
        _TestCorrelate.test_float(self)

    @dec.deprecated()
    @dec.skipif(sys.platform == 'cli',
                "nose decorators don't work on IronPython yet")
    def test_object(self):
        _TestCorrelate.test_object(self)


class TestCorrelateNew(_TestCorrelate):
    old_behavior = False

    def test_complex(self):
        x = np.array([1, 2, 3, 4 + 1j], dtype=np.complex)
        y = np.array([-1, -2j, 3 + 1j], dtype=np.complex)
        r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=np.complex)
        #z = np.acorrelate(x, y, 'full')
        #assert_array_almost_equal(z, r_z)
        r_z = r_z[::-1].conjugate()
        z = np.correlate(y, x, 'full', old_behavior=self.old_behavior)
        assert_array_almost_equal(z, r_z)


class TestArgwhere:
    def test_2D(self):
        x = np.arange(6).reshape((2, 3))
        assert_array_equal(np.argwhere(x > 1),
                           [[0, 2],
                            [1, 0],
                            [1, 1],
                            [1, 2]])

    def test_list(self):
        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])


class TestStringFunction:
    def test_set_string_function(self):
        a = np.array([1])
        np.set_string_function(lambda x: "FOO", repr=True)
        assert_equal(repr(a), "FOO")
        np.set_string_function(None, repr=True)
        assert_equal(repr(a), "array([1])")
        np.set_string_function(lambda x: "FOO", repr=False)
        assert_equal(str(a), "FOO")
        np.set_string_function(None, repr=False)
        assert_equal(str(a), "[1]")


if __name__ == "__main__":
    run_module_suite()
unknown
codeparrot/codeparrot-clean
import os
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.figure import SubplotParams
from matplotlib.ticker import MaxNLocator
from os.path import isfile
import tempfile
from functions.science import linspace

__all__ = ['smithchart']

from math import floor, log10, pi
import math
import cmath

inf = float('inf')


class smithchart(object):
    """Smith-chart renderer built on matplotlib.

    Collects complex-valued traces via plot()/scatter() and renders them
    inside the unit circle on top of the classic background grid of
    constant-resistance circles and constant-reactance arcs, which is drawn
    once at construction time by draw_cadran().
    """

    def __init__(self, **kwargs):
        # Rendering options, all optional keyword arguments.
        self.dpi = kwargs.get('dpi', 100)
        self._caption = kwargs.get('caption', '')
        self.figsize = kwargs.get('figsize', (8, 6))
        self._xlabel = kwargs.get('xlabel', '')
        self._ylabel = kwargs.get('ylabel', '')
        self.fontsize = kwargs.get('fontsize', 19)
        self._labels = list()   # legend labels already seen (deduplication)
        self._plots = list()    # queued traces: (x, y, kwargs) tuples
        self.filename = kwargs.get('filename', 'image.png')
        self.PY_GRAPH_DIR = os.environ.get('PY_GRAPH_DIR', '')
        # Queue the chart background before any user data is added.
        self.draw_cadran()

    def xlabel(self, label):
        """Set the x-axis label (stored; not currently rendered)."""
        self._xlabel = label

    def ylabel(self, label):
        """Set the y-axis label (stored; not currently rendered)."""
        self._ylabel = label

    def caption(self, caption):
        """Set the caption returned alongside the filename by savefig()."""
        self._caption = caption

    def draw_cadran(self):
        """Queue the Smith-chart background grid (arcs, circles and ticks).

        Each grid element maps impedance coordinates (r + j*s) onto the
        reflection-coefficient plane via gamma = (z - 1) / (z + 1), i.e.
        x = (r^2 - 1 + s^2) / d, y = 2 s / d with d = (r + 1)^2 + s^2.
        """
        grain = 500.  # angular resolution of the arcs

        # Quarter circles at constant reactance s (French: "quart de
        # cercles a S constant"), clipped to the unit circle.
        Teta = linspace(0., pi/2, step=pi/grain/2.)
        S = [5., 2., 1., 0.5, 0.2, -0.2, -0.5, -1., -2., -5, 0.]
        for s in S:
            data = []
            R = np.tan(Teta)
            for r in R:
                d = (r+1.)**2 + s**2
                x = ((r*r-1.) + s*s)/d
                y = 2*s/d
                pt = complex(x, y)
                if abs(pt) < 1:
                    data.append(pt)
            self.plot(np.array(data), color='grey', linestyle=':', linewidth=1)

        # Chart outline: circles at constant resistance r ("cercles a r
        # constant"); r == 0 is the unit circle itself, drawn solid.
        Teta = linspace(-pi/2., pi/2., step=pi/grain/2.)
        S = np.tan(Teta)
        R = [0.1, .3, 0.6, 1., 2., 3., 10., 0.]
        for r in R:
            data = []
            for s in S:
                d = s**2 + (r+1.)**2
                x = (s**2 + (r**2-1.))/d
                y = 2.*(s/d)
                data.append(complex(x, y))
            if r == 0.:
                self.plot(np.array(data), color='black')
            else:
                self.plot(np.array(data), color='grey', linestyle=':',
                          linewidth=1)

        # Tick marks along the real axis at standard resistance values.
        s = 0.0
        R = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4,
             1.6, 1.8, 2.0, 3., 4., 5., 10, 20]
        for r in R:
            data = []
            d = s**2 + (r+1.)**2
            x = (s**2 + (r**2-1.))/d
            y = 2.*(s/d)
            data.append(complex(x, y+0.01))
            data.append(complex(x, y-0.01))
            self.plot(np.array(data), color='black', linestyle='-',
                      linewidth=1.5)

        # Horizontal diameter of the chart.
        self.plot(np.array([complex(-1, 0), complex(1, 0)]), color='black',
                  linestyle='-', linewidth=1.5)

        # Tick marks around the rim at standard reactance values, drawn
        # radially just inside the unit circle.
        S = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4,
             1.6, 1.8, 2.0, 3., 4., 5., 10, 20]
        S += [-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1,
              -1.2, -1.4, -1.6, -1.8, -2.0, -3., -4., -5., -10, -20]
        for s in S:
            data = []
            r = 0
            d = (r+1.)**2 + s**2
            x = ((r*r-1.) + s*s)/d
            y = 2*s/d
            pt = complex(x, y)
            m, phi = cmath.polar(pt)
            pt = cmath.rect(m*1.03, phi)
            x, y = pt.real, pt.imag
            pt1 = cmath.rect(m-0.02, phi)
            pt2 = cmath.rect(m, phi)
            data = [pt1, pt2]
            self.plot(np.array(data), color='black', linestyle='-',
                      linewidth=1.5)

    def annotate(self, plt):
        """Write the resistance/reactance tick labels onto *plt*.

        Called from savefig() once the traces have been plotted, so the
        labels sit on top of everything else.
        """
        # Resistance labels just above the real axis.
        R = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4,
             1.6, 1.8, 2.0, 3., 4., 5., 10, 20]
        for r in R:
            data = []
            s = 0.0
            d = s**2 + (r+1.)**2
            x = (s**2 + (r**2-1.))/d
            y = 2.*(s/d)
            data.append(complex(x, y+0.01))
            data.append(complex(x, y-0.01))
            plt.annotate(str(r), xy=(x, y+0.07), size=10, rotation=90,
                         va="center", ha="center",)

        # Positive reactance labels just outside the rim, rotated to face
        # the chart center.
        S = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4,
             1.6, 1.8, 2.0, 3., 4., 5., 10, 20]
        for s in S:
            data = []
            r = 0
            d = (r+1.)**2 + s**2
            x = ((r*r-1.) + s*s)/d
            y = 2*s/d
            pt = complex(x, y)
            m, phi = cmath.polar(pt)
            m = m*1.04
            pt = cmath.rect(m, phi)
            x, y = pt.real, pt.imag
            plt.annotate(str(s), xy=(x, y), size=10, va="center", ha="center",
                         rotation=phi*180/pi-90)

        # Negative reactance labels, slightly further out and with the
        # opposite rotation so they read upright on the lower half.
        S = [-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1,
             -1.2, -1.4, -1.6, -1.8, -2.0, -3., -4., -5., -10, -20]
        for s in S:
            data = []
            r = 0
            d = (r+1.)**2 + s**2
            x = ((r*r-1.) + s*s)/d
            y = 2*s/d
            pt = complex(x, y)
            m, phi = cmath.polar(pt)
            m = m*1.05
            pt = cmath.rect(m, phi)
            x, y = pt.real, pt.imag
            plt.annotate(str(s), xy=(x, y), size=10, va="center", ha="center",
                         rotation=phi*180/pi+90)

    def plot(self, c, **kwargs):
        """Queue a complex trace *c* (array of complex) for rendering.

        A given legend label is kept only on its first occurrence so the
        legend is not cluttered with duplicates.
        """
        if 'linewidth' not in kwargs:
            kwargs['linewidth'] = 2
        if 'label' in kwargs:
            if kwargs['label'] in self._labels:
                del kwargs['label']
            else:
                self._labels.append(kwargs['label'])
        self._plots.append([c.real, c.imag, kwargs])

    def scatter(self, c, **kwargs):
        """Queue the points of *c* as unconnected square markers."""
        markeredgecolor = kwargs.pop('color', 'r')
        markersize = kwargs.pop('size', 6)
        properties = {'marker': 's',
                      'markersize': markersize,
                      'linewidth': 0,          # no connecting line
                      'markerfacecolor': 'none',
                      'markeredgecolor': markeredgecolor,
                      'markeredgewidth': 2
                      }
        properties.update(**kwargs)
        self.plot(c, **properties)

    def savefig(self, filename=None, dpi=100, force=True):
        """Render the queued traces to *filename* and return (path, caption).

        :param filename: output path; a temporary .png in PY_GRAPH_DIR is
            generated when None.
        :param dpi: output resolution.
        :param force: re-render even if the file already exists.
        """
        if filename is None:
            # NOTE(review): tempfile.mktemp is deprecated and race-prone;
            # kept because callers expect a bare path they can open later.
            filename = tempfile.mktemp(dir=self.PY_GRAPH_DIR, suffix='.png')
        self.filename = filename
        self.dpi = dpi

        # Generation of the image: configure a borderless, tick-free figure.
        plt.rc('font', family='sans-serif', size=self.fontsize)
        plt.rc('figure', figsize=(8, 6))
        plt.rc('figure', dpi=self.dpi)
        plt.rc('figure.subplot', left=0.00, bottom=0.00, right=1.0, top=1.0,
               wspace=0.001, hspace=0.1)
        plt.rc('lines', markersize=6)
        plt.rc('axes', labelsize=self.fontsize)
        plt.rc('axes', color_cycle=('red', 'blue', 'green', 'black', 'grey',
                                    'yellow'))
        plt.rc('axes', grid=False)
        plt.rc('axes', linewidth=0)
        plt.rc('xtick.major', size=8)    # major tick size in points
        plt.rc('xtick.minor', size=5)    # minor tick size in points
        plt.rc('xtick.major', width=0)   # major tick width in points
        plt.rc('xtick.minor', width=0)   # minor tick width in points
        plt.rc('xtick.major', pad=4)     # distance to major tick label
        plt.rc('xtick', color='k')       # color of the tick labels
        plt.rc('xtick', labelsize=0)     # fontsize of the tick labels
        plt.rc('xtick', direction='in')  # direction: in, out, or inout
        plt.rc('ytick.major', size=1)
        plt.rc('ytick.minor', size=1)
        plt.rc('ytick.major', width=0)
        plt.rc('ytick.minor', width=0)
        plt.rc('ytick', labelsize=0)

        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.ax.set_xlim(-1.15, 1.15)
        self.ax.set_ylim(-1.15, 1.15)
        plt.axes().set_aspect('equal', 'datalim')
        plt.axis('off')
        self.ax.set_axis_off()
        ax_r = plt.gca()

        # Replay the queued traces; a legend is only drawn when at least one
        # trace carries a label.
        legend = False
        for plti in self._plots:
            if len(plti) == 3:
                (x, y, parameters) = plti
                plt.plot(x, y, **parameters)
            elif len(plti) == 4:
                (x, y, linespec, parameters) = plti
                plt.plot(x, y, linespec, **parameters)
            if 'label' in parameters:
                legend = True

        if len(self._plots) > 0 and (force or not(isfile(self.filename))):
            if legend:
                plt.legend(loc=0, prop={'size': self.fontsize})
                # transparent legend
                leg = self.ax.legend(loc='best', fancybox=False)
                leg.get_frame().set_alpha(0.5)
            self.annotate(plt)
            plt.draw()
            plt.savefig(self.filename, dpi=self.dpi)
        plt.close(self.fig)
        return self.filename, self._caption


if __name__ == '__main__':
    from numpy import array

    # Demo: plot a measured S11 trace both as a line and as markers.
    plot1 = smithchart(xlabel='s11')
    s = array([
        complex(-0.577832859, -0.631478424),
        complex(-0.872221469, 0.175553879),
        complex(-0.27989901, 0.848322599),
        complex(0.625836677, 0.630661307),
        complex(0.833655352, -0.25903236),
        complex(0.200238299, -0.876183465),
        complex(0.091123769, -0.706343188),
        complex(0.511222482, -0.249041717),
        complex(0.385652964, 0.223033934),
        complex(-0.045832001, 0.354777424),
        complex(-0.245491847, 0.136919746),
        complex(-0.193731962, -0.091411262),
        complex(-0.151810832, 0.097273845),
        complex(0.007344177, 0.147523939),
        complex(0.107016177, 0.034567346),
        complex(0.057517023, -0.062991385),
        complex(-0.029108675, -0.061496518),
        complex(0.002598262, -0.004237322)
    ])
    plot1.plot(s, label='model')
    plot1.scatter(s, label='meas.')
    plot1.savefig('toto.jpg')
unknown
codeparrot/codeparrot-clean
import os

from fabric.context_managers import settings
from fabric.state import env
from fabric.utils import indent, warn

from refabric.api import run, info
from refabric.context_managers import sudo, silent

from .base import BaseProvider
from ..project import *
from ... import debian
from ... import uwsgi
from ...app import blueprint


class UWSGIProvider(BaseProvider):
    """Deploys a project's web and worker processes as uWSGI vassals."""

    def install(self):
        """
        Install system wide uWSGI and upstart service.
        """
        uwsgi.setup()

    def get_config_path(self):
        """
        Get or create uWSGI project vassals home dir.

        :return: Remote config path
        """
        destination = uwsgi.blueprint.get('emperor')

        if destination and '*' in destination:
            # Destination can not be wildcard based
            warn('uWsgi emperor vassals dir contains wildcard, skipping')
            destination = None

        if not destination:
            # Join config path and make sure that it ends with a slash
            destination = os.path.join(project_home(), 'uwsgi.d', '')

        with sudo('root'):
            # Ensure destination exists
            debian.mkdir(destination)

        return destination

    def get_context(self):
        """
        Build jinja context for web.ini vassal.

        :return: context
        """
        context = super(UWSGIProvider, self).get_context()

        # Memory optimized options; blueprint settings may cap the detected
        # hardware values (cores, GB of RAM).
        cpu_count = blueprint.get('web.max_cores', debian.nproc())
        total_memory = int(round(debian.total_memory() / 1024.0 / 1024.0 / 1024.0))
        total_memory = blueprint.get('web.max_memory', default=total_memory)
        workers = blueprint.get('web.workers',
                                default=uwsgi.get_worker_count(cpu_count))
        gevent = blueprint.get('web.gevent', default=0)

        info('Generating uWSGI conf based on {} core(s), '
             '{} GB memory and {} worker(s)',
             cpu_count, total_memory, workers)

        # TODO: Handle different loop engines (gevent)
        context.update({
            'cpu_affinity': uwsgi.get_cpu_affinity(cpu_count, workers),
            'workers': workers,
            'max_requests': int(uwsgi.get_max_requests(total_memory)),
            'reload_on_as': int(uwsgi.get_reload_on_as(total_memory)),
            'reload_on_rss': int(uwsgi.get_reload_on_rss(total_memory)),
            'limit_as': int(uwsgi.get_limit_as(total_memory)),
            'gevent': gevent,
        })

        # Override context defaults with blueprint settings
        context.update(blueprint.get('web'))

        return context

    def configure_web(self):
        """
        Render and upload web.ini vassal to <project>.ini.

        :return: Updated vassals
        """
        destination = self.get_config_path()
        context = self.get_context()

        ini = self.get_web_vassal()
        template = os.path.join('uwsgi', ini)
        default_templates = uwsgi.blueprint.get_default_template_root()

        with settings(template_dirs=[default_templates]):
            # Check if a specific web vassal have been created or use the default
            if template not in blueprint.get_template_loader().list_templates():
                # Upload default web vassal
                info(indent('...using default web vassal'))
                template = os.path.join('uwsgi', 'default', 'web.ini')

            uploads = blueprint.upload(template,
                                       os.path.join(destination, ini),
                                       context=context)
            if uploads:
                self.updates.extend(uploads)

        # Upload remaining (local) vassals
        user_vassals = blueprint.upload('uwsgi/', destination,
                                        context=context)  # TODO: skip subdirs
        if user_vassals:
            self.updates.extend(user_vassals)

        return self.updates

    def configure_worker(self):
        """
        Render and upload worker vassal(s) to projects uWSGI home dir.

        :return: Updated vassals
        """
        # TODO: destination could be global (uwsgi.emperor setting) and
        # therefore contain same vassal names (celery.ini)
        destination = self.get_config_path()
        context = super(UWSGIProvider, self).get_context()
        context.update({
            'workers': blueprint.get('worker.workers', debian.nproc()),
            'queues': blueprint.get('worker.queues'),
        })

        # Override context defaults with blueprint settings
        context.update(blueprint.get('worker'))

        # Upload vassals
        for vassal in self.list_worker_vassals():
            template = os.path.join('uwsgi', 'default', vassal)
            default_templates = uwsgi.blueprint.get_default_template_root()
            with settings(template_dirs=[default_templates]):
                uploads = blueprint.upload(template, destination,
                                           context=context)
                # Guard against empty/None upload results, consistent with
                # configure_web() above.
                if uploads:
                    self.updates.extend(uploads)

        return self.updates

    def get_web_vassal(self):
        """
        Return file name for web vassal

        :return: [project_name].ini
        """
        # TODO: Maybe check if uwsgi actually is a web provider
        return '{}.ini'.format(self.project)

    def list_worker_vassals(self):
        """
        List all valid worker vassals for current host

        :return: Set of vassal.ini file names
        """
        vassals = set()

        if blueprint.get('worker.provider') != 'uwsgi':
            return vassals

        vassals.add('celery.ini')

        # Filter vassal extensions by host
        extensions = blueprint.get('worker.extensions')
        if isinstance(extensions, list):
            # Filter of bad values
            extensions = [extension for extension in extensions if extension]
            for extension in extensions:
                vassals.add('{}.ini'.format(extension))
        elif isinstance(extensions, dict):
            # Dict form maps extension -> host; '*' matches every host.
            for extension, extension_host in extensions.items():
                if extension_host in ('*', env.host_string):
                    vassals.add('{}.ini'.format(extension))

        return vassals

    def list_vassals(self):
        """
        List all valid vassals for current host

        :return: Set of vassal.ini file names
        """
        vassals = self.list_worker_vassals()
        vassals.add(self.get_web_vassal())
        return vassals

    def reload(self, vassals=None):
        """
        Touch reload specified vassals

        :param vassals: Vassals to reload (defaults to all valid vassals)
        """
        for vassal_ini in vassals or self.list_vassals():
            vassal_ini_path = os.path.join(self.get_config_path(), vassal_ini)
            uwsgi.reload(vassal_ini_path)
unknown
codeparrot/codeparrot-clean
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import babel.dates
import email.utils

import jinja2

from pyramid.threadlocal import get_current_request


def _inject_request_locale(ctx, kwargs):
    """Default the 'locale' keyword to the current request's locale.

    The request is taken from the template context when available, falling
    back to pyramid's thread-local request. An explicitly passed locale is
    left untouched.
    """
    request = ctx.get("request") or get_current_request()
    kwargs.setdefault("locale", request.locale)


@jinja2.contextfilter
def format_date(ctx, *args, **kwargs):
    """Jinja filter: render a date via Babel, localized to the request."""
    _inject_request_locale(ctx, kwargs)
    return babel.dates.format_date(*args, **kwargs)


@jinja2.contextfilter
def format_datetime(ctx, *args, **kwargs):
    """Jinja filter: render a datetime via Babel, localized to the request."""
    _inject_request_locale(ctx, kwargs)
    return babel.dates.format_datetime(*args, **kwargs)


@jinja2.contextfilter
def format_rfc822_datetime(ctx, dt, *args, **kwargs):
    """Jinja filter: render *dt* as an RFC 822 date string in GMT."""
    return email.utils.formatdate(dt.timestamp(), usegmt=True)
unknown
codeparrot/codeparrot-clean
import { __String, addEmitHelpers, addRange, AllDecorators, append, Bundle, canHaveDecorators, chainBundle, childIsDecorated, ClassDeclaration, ClassElement, ClassExpression, ClassLikeDeclaration, classOrConstructorParameterIsDecorated, ConstructorDeclaration, Debug, Decorator, elideNodes, EmitFlags, EmitHint, EnumMember, Expression, filter, flatMap, GetAccessorDeclaration, getAllDecoratorsOfClass, getAllDecoratorsOfClassElement, getEmitScriptTarget, getOriginalNodeId, groupBy, hasAccessorModifier, hasSyntacticModifier, Identifier, idText, isBindingName, isBlock, isCallToHelper, isClassElement, isClassStaticBlockDeclaration, isComputedPropertyName, isDecorator, isExportOrDefaultModifier, isExpression, isGeneratedIdentifier, isHeritageClause, isIdentifier, isModifier, isModifierLike, isParameter, isPrivateIdentifier, isPropertyDeclaration, isPropertyName, isSimpleInlineableExpression, isStatic, map, MethodDeclaration, ModifierFlags, moveRangePastModifiers, Node, NodeArray, NodeCheckFlags, NodeFlags, nodeOrChildIsDecorated, ParameterDeclaration, PropertyDeclaration, ScriptTarget, SetAccessorDeclaration, setCommentRange, setEmitFlags, setOriginalNode, setSourceMapRange, setTextRange, singleOrMany, some, SourceFile, Statement, SyntaxKind, TransformationContext, TransformFlags, visitEachChild, visitNode, visitNodes, VisitResult, } from "../_namespaces/ts.js"; /** @internal */ export function transformLegacyDecorators(context: TransformationContext): (x: SourceFile | Bundle) => SourceFile | Bundle { const { factory, getEmitHelperFactory: emitHelpers, hoistVariableDeclaration, } = context; const resolver = context.getEmitResolver(); const compilerOptions = context.getCompilerOptions(); const languageVersion = getEmitScriptTarget(compilerOptions); // Save the previous transformation hooks. const previousOnSubstituteNode = context.onSubstituteNode; // Set new transformation hooks. 
context.onSubstituteNode = onSubstituteNode; /** * A map that keeps track of aliases created for classes with decorators to avoid issues * with the double-binding behavior of classes. */ let classAliases: Identifier[]; return chainBundle(context, transformSourceFile); function transformSourceFile(node: SourceFile) { const visited = visitEachChild(node, visitor, context); addEmitHelpers(visited, context.readEmitHelpers()); return visited; } function modifierVisitor(node: Node): VisitResult<Node | undefined> { return isDecorator(node) ? undefined : node; } function visitor(node: Node): VisitResult<Node | undefined> { if (!(node.transformFlags & TransformFlags.ContainsDecorators)) { return node; } switch (node.kind) { case SyntaxKind.Decorator: // Decorators are elided. They will be emitted as part of `visitClassDeclaration`. return undefined; case SyntaxKind.ClassDeclaration: return visitClassDeclaration(node as ClassDeclaration); case SyntaxKind.ClassExpression: return visitClassExpression(node as ClassExpression); case SyntaxKind.Constructor: return visitConstructorDeclaration(node as ConstructorDeclaration); case SyntaxKind.MethodDeclaration: return visitMethodDeclaration(node as MethodDeclaration); case SyntaxKind.SetAccessor: return visitSetAccessorDeclaration(node as SetAccessorDeclaration); case SyntaxKind.GetAccessor: return visitGetAccessorDeclaration(node as GetAccessorDeclaration); case SyntaxKind.PropertyDeclaration: return visitPropertyDeclaration(node as PropertyDeclaration); case SyntaxKind.Parameter: return visitParameterDeclaration(node as ParameterDeclaration); default: return visitEachChild(node, visitor, context); } } function visitClassDeclaration(node: ClassDeclaration): VisitResult<Statement> { if (!(classOrConstructorParameterIsDecorated(/*useLegacyDecorators*/ true, node) || childIsDecorated(/*useLegacyDecorators*/ true, node))) { return visitEachChild(node, visitor, context); } const statements = 
classOrConstructorParameterIsDecorated(/*useLegacyDecorators*/ true, node) ? transformClassDeclarationWithClassDecorators(node, node.name) : transformClassDeclarationWithoutClassDecorators(node, node.name); return singleOrMany(statements); } function decoratorContainsPrivateIdentifierInExpression(decorator: Decorator) { return !!(decorator.transformFlags & TransformFlags.ContainsPrivateIdentifierInExpression); } function parameterDecoratorsContainPrivateIdentifierInExpression(parameterDecorators: readonly Decorator[] | undefined) { return some(parameterDecorators, decoratorContainsPrivateIdentifierInExpression); } function hasClassElementWithDecoratorContainingPrivateIdentifierInExpression(node: ClassDeclaration) { for (const member of node.members) { if (!canHaveDecorators(member)) continue; const allDecorators = getAllDecoratorsOfClassElement(member, node, /*useLegacyDecorators*/ true); if (some(allDecorators?.decorators, decoratorContainsPrivateIdentifierInExpression)) return true; if (some(allDecorators?.parameters, parameterDecoratorsContainPrivateIdentifierInExpression)) return true; } return false; } function transformDecoratorsOfClassElements(node: ClassDeclaration, members: NodeArray<ClassElement>) { let decorationStatements: Statement[] | undefined = []; addClassElementDecorationStatements(decorationStatements, node, /*isStatic*/ false); addClassElementDecorationStatements(decorationStatements, node, /*isStatic*/ true); if (hasClassElementWithDecoratorContainingPrivateIdentifierInExpression(node)) { members = setTextRange( factory.createNodeArray([ ...members, factory.createClassStaticBlockDeclaration( factory.createBlock(decorationStatements, /*multiLine*/ true), ), ]), members, ); decorationStatements = undefined; } return { decorationStatements, members }; } /** * Transforms a non-decorated class declaration. * * @param node A ClassDeclaration node. * @param name The name of the class. 
*/ function transformClassDeclarationWithoutClassDecorators(node: ClassDeclaration, name: Identifier | undefined) { // ${modifiers} class ${name} ${heritageClauses} { // ${members} // } const modifiers = visitNodes(node.modifiers, modifierVisitor, isModifier); const heritageClauses = visitNodes(node.heritageClauses, visitor, isHeritageClause); let members = visitNodes(node.members, visitor, isClassElement); let decorationStatements: Statement[] | undefined = []; ({ members, decorationStatements } = transformDecoratorsOfClassElements(node, members)); const updated = factory.updateClassDeclaration( node, modifiers, name, /*typeParameters*/ undefined, heritageClauses, members, ); return addRange([updated], decorationStatements); } /** * Transforms a decorated class declaration and appends the resulting statements. If * the class requires an alias to avoid issues with double-binding, the alias is returned. */ function transformClassDeclarationWithClassDecorators(node: ClassDeclaration, name: Identifier | undefined) { // When we emit an ES6 class that has a class decorator, we must tailor the // emit to certain specific cases. 
// // In the simplest case, we emit the class declaration as a let declaration, and // evaluate decorators after the close of the class body: // // [Example 1] // --------------------------------------------------------------------- // TypeScript | Javascript // --------------------------------------------------------------------- // @dec | let C = class C { // class C { | } // } | C = __decorate([dec], C); // --------------------------------------------------------------------- // @dec | let C = class C { // export class C { | } // } | C = __decorate([dec], C); // | export { C }; // --------------------------------------------------------------------- // // If a class declaration contains a reference to itself *inside* of the class body, // this introduces two bindings to the class: One outside of the class body, and one // inside of the class body. If we apply decorators as in [Example 1] above, there // is the possibility that the decorator `dec` will return a new value for the // constructor, which would result in the binding inside of the class no longer // pointing to the same reference as the binding outside of the class. 
// // As a result, we must instead rewrite all references to the class *inside* of the // class body to instead point to a local temporary alias for the class: // // [Example 2] // --------------------------------------------------------------------- // TypeScript | Javascript // --------------------------------------------------------------------- // @dec | let C = C_1 = class C { // class C { | static x() { return C_1.y; } // static x() { return C.y; } | } // static y = 1; | C.y = 1; // } | C = C_1 = __decorate([dec], C); // | var C_1; // --------------------------------------------------------------------- // @dec | let C = class C { // export class C { | static x() { return C_1.y; } // static x() { return C.y; } | } // static y = 1; | C.y = 1; // } | C = C_1 = __decorate([dec], C); // | export { C }; // | var C_1; // --------------------------------------------------------------------- // // If a class declaration is the default export of a module, we instead emit // the export after the decorated declaration: // // [Example 3] // --------------------------------------------------------------------- // TypeScript | Javascript // --------------------------------------------------------------------- // @dec | let default_1 = class { // export default class { | } // } | default_1 = __decorate([dec], default_1); // | export default default_1; // --------------------------------------------------------------------- // @dec | let C = class C { // export default class C { | } // } | C = __decorate([dec], C); // | export default C; // --------------------------------------------------------------------- // // If the class declaration is the default export and a reference to itself // inside of the class body, we must emit both an alias for the class *and* // move the export after the declaration: // // [Example 4] // --------------------------------------------------------------------- // TypeScript | Javascript // 
--------------------------------------------------------------------- // @dec | let C = class C { // export default class C { | static x() { return C_1.y; } // static x() { return C.y; } | } // static y = 1; | C.y = 1; // } | C = C_1 = __decorate([dec], C); // | export default C; // | var C_1; // --------------------------------------------------------------------- // const isExport = hasSyntacticModifier(node, ModifierFlags.Export); const isDefault = hasSyntacticModifier(node, ModifierFlags.Default); const modifiers = visitNodes(node.modifiers, node => isExportOrDefaultModifier(node) || isDecorator(node) ? undefined : node, isModifierLike); const location = moveRangePastModifiers(node); const classAlias = getClassAliasIfNeeded(node); // When we transform to ES5/3 this will be moved inside an IIFE and should reference the name // without any block-scoped variable collision handling const declName = languageVersion < ScriptTarget.ES2015 ? factory.getInternalName(node, /*allowComments*/ false, /*allowSourceMaps*/ true) : factory.getLocalName(node, /*allowComments*/ false, /*allowSourceMaps*/ true); // ... = class ${name} ${heritageClauses} { // ${members} // } const heritageClauses = visitNodes(node.heritageClauses, visitor, isHeritageClause); let members = visitNodes(node.members, visitor, isClassElement); let decorationStatements: Statement[] | undefined = []; ({ members, decorationStatements } = transformDecoratorsOfClassElements(node, members)); // If we're emitting to ES2022 or later then we need to reassign the class alias before // static initializers are evaluated. 
const assignClassAliasInStaticBlock = languageVersion >= ScriptTarget.ES2022 && !!classAlias && some(members, member => isPropertyDeclaration(member) && hasSyntacticModifier(member, ModifierFlags.Static) || isClassStaticBlockDeclaration(member)); if (assignClassAliasInStaticBlock) { members = setTextRange( factory.createNodeArray([ factory.createClassStaticBlockDeclaration( factory.createBlock([ factory.createExpressionStatement( factory.createAssignment(classAlias, factory.createThis()), ), ]), ), ...members, ]), members, ); } const classExpression = factory.createClassExpression( modifiers, name && isGeneratedIdentifier(name) ? undefined : name, /*typeParameters*/ undefined, heritageClauses, members, ); setOriginalNode(classExpression, node); setTextRange(classExpression, location); // let ${name} = ${classExpression} where name is either declaredName if the class doesn't contain self-reference // or decoratedClassAlias if the class contain self-reference. const varInitializer = classAlias && !assignClassAliasInStaticBlock ? 
factory.createAssignment(classAlias, classExpression) : classExpression; const varDecl = factory.createVariableDeclaration(declName, /*exclamationToken*/ undefined, /*type*/ undefined, varInitializer); setOriginalNode(varDecl, node); const varDeclList = factory.createVariableDeclarationList([varDecl], NodeFlags.Let); const varStatement = factory.createVariableStatement(/*modifiers*/ undefined, varDeclList); setOriginalNode(varStatement, node); setTextRange(varStatement, location); setCommentRange(varStatement, node); const statements: Statement[] = [varStatement]; addRange(statements, decorationStatements); addConstructorDecorationStatement(statements, node); if (isExport) { if (isDefault) { const exportStatement = factory.createExportDefault(declName); statements.push(exportStatement); } else { const exportStatement = factory.createExternalModuleExport(factory.getDeclarationName(node)); statements.push(exportStatement); } } return statements; } function visitClassExpression(node: ClassExpression) { // Legacy decorators were not supported on class expressions return factory.updateClassExpression( node, visitNodes(node.modifiers, modifierVisitor, isModifier), node.name, /*typeParameters*/ undefined, visitNodes(node.heritageClauses, visitor, isHeritageClause), visitNodes(node.members, visitor, isClassElement), ); } function visitConstructorDeclaration(node: ConstructorDeclaration) { return factory.updateConstructorDeclaration( node, visitNodes(node.modifiers, modifierVisitor, isModifier), visitNodes(node.parameters, visitor, isParameter), visitNode(node.body, visitor, isBlock), ); } function finishClassElement(updated: ClassElement, original: ClassElement) { if (updated !== original) { // While we emit the source map for the node after skipping decorators and modifiers, // we need to emit the comments for the original range. 
setCommentRange(updated, original); setSourceMapRange(updated, moveRangePastModifiers(original)); } return updated; } function visitMethodDeclaration(node: MethodDeclaration) { return finishClassElement( factory.updateMethodDeclaration( node, visitNodes(node.modifiers, modifierVisitor, isModifier), node.asteriskToken, Debug.checkDefined(visitNode(node.name, visitor, isPropertyName)), /*questionToken*/ undefined, /*typeParameters*/ undefined, visitNodes(node.parameters, visitor, isParameter), /*type*/ undefined, visitNode(node.body, visitor, isBlock), ), node, ); } function visitGetAccessorDeclaration(node: GetAccessorDeclaration) { return finishClassElement( factory.updateGetAccessorDeclaration( node, visitNodes(node.modifiers, modifierVisitor, isModifier), Debug.checkDefined(visitNode(node.name, visitor, isPropertyName)), visitNodes(node.parameters, visitor, isParameter), /*type*/ undefined, visitNode(node.body, visitor, isBlock), ), node, ); } function visitSetAccessorDeclaration(node: SetAccessorDeclaration) { return finishClassElement( factory.updateSetAccessorDeclaration( node, visitNodes(node.modifiers, modifierVisitor, isModifier), Debug.checkDefined(visitNode(node.name, visitor, isPropertyName)), visitNodes(node.parameters, visitor, isParameter), visitNode(node.body, visitor, isBlock), ), node, ); } function visitPropertyDeclaration(node: PropertyDeclaration) { if (node.flags & NodeFlags.Ambient || hasSyntacticModifier(node, ModifierFlags.Ambient)) { return undefined; } return finishClassElement( factory.updatePropertyDeclaration( node, visitNodes(node.modifiers, modifierVisitor, isModifier), Debug.checkDefined(visitNode(node.name, visitor, isPropertyName)), /*questionOrExclamationToken*/ undefined, /*type*/ undefined, visitNode(node.initializer, visitor, isExpression), ), node, ); } function visitParameterDeclaration(node: ParameterDeclaration) { const updated = factory.updateParameterDeclaration( node, elideNodes(factory, node.modifiers), 
node.dotDotDotToken, Debug.checkDefined(visitNode(node.name, visitor, isBindingName)), /*questionToken*/ undefined, /*type*/ undefined, visitNode(node.initializer, visitor, isExpression), ); if (updated !== node) { // While we emit the source map for the node after skipping decorators and modifiers, // we need to emit the comments for the original range. setCommentRange(updated, node); setTextRange(updated, moveRangePastModifiers(node)); setSourceMapRange(updated, moveRangePastModifiers(node)); setEmitFlags(updated.name, EmitFlags.NoTrailingSourceMap); } return updated; } function isSyntheticMetadataDecorator(node: Decorator) { return isCallToHelper(node.expression, "___metadata" as __String); } /** * Transforms all of the decorators for a declaration into an array of expressions. * * @param allDecorators An object containing all of the decorators for the declaration. */ function transformAllDecoratorsOfDeclaration(allDecorators: AllDecorators | undefined) { if (!allDecorators) { return undefined; } // ensure that metadata decorators are last const { false: decorators, true: metadata } = groupBy(allDecorators.decorators, isSyntheticMetadataDecorator); const decoratorExpressions: Expression[] = []; addRange(decoratorExpressions, map(decorators, transformDecorator)); addRange(decoratorExpressions, flatMap(allDecorators.parameters, transformDecoratorsOfParameter)); addRange(decoratorExpressions, map(metadata, transformDecorator)); return decoratorExpressions; } /** * Generates statements used to apply decorators to either the static or instance members * of a class. * * @param node The class node. * @param isStatic A value indicating whether to generate statements for static or * instance members. 
*/ function addClassElementDecorationStatements(statements: Statement[], node: ClassDeclaration, isStatic: boolean) { addRange(statements, map(generateClassElementDecorationExpressions(node, isStatic), expr => factory.createExpressionStatement(expr))); } /** * Determines whether a class member is either a static or an instance member of a class * that is decorated, or has parameters that are decorated. * * @param member The class member. */ function isDecoratedClassElement(member: ClassElement, isStaticElement: boolean, parent: ClassLikeDeclaration) { return nodeOrChildIsDecorated(/*useLegacyDecorators*/ true, member, parent) && isStaticElement === isStatic(member); } /** * Gets either the static or instance members of a class that are decorated, or have * parameters that are decorated. * * @param node The class containing the member. * @param isStatic A value indicating whether to retrieve static or instance members of * the class. */ function getDecoratedClassElements(node: ClassExpression | ClassDeclaration, isStatic: boolean): readonly ClassElement[] { return filter(node.members, m => isDecoratedClassElement(m, isStatic, node)); } /** * Generates expressions used to apply decorators to either the static or instance members * of a class. * * @param node The class node. * @param isStatic A value indicating whether to generate expressions for static or * instance members. */ function generateClassElementDecorationExpressions(node: ClassExpression | ClassDeclaration, isStatic: boolean) { const members = getDecoratedClassElements(node, isStatic); let expressions: Expression[] | undefined; for (const member of members) { expressions = append(expressions, generateClassElementDecorationExpression(node, member)); } return expressions; } /** * Generates an expression used to evaluate class element decorators at runtime. * * @param node The class node that contains the member. * @param member The class member. 
*/ function generateClassElementDecorationExpression(node: ClassExpression | ClassDeclaration, member: ClassElement) { const allDecorators = getAllDecoratorsOfClassElement(member, node, /*useLegacyDecorators*/ true); const decoratorExpressions = transformAllDecoratorsOfDeclaration(allDecorators); if (!decoratorExpressions) { return undefined; } // Emit the call to __decorate. Given the following: // // class C { // @dec method(@dec2 x) {} // @dec get accessor() {} // @dec prop; // } // // The emit for a method is: // // __decorate([ // dec, // __param(0, dec2), // __metadata("design:type", Function), // __metadata("design:paramtypes", [Object]), // __metadata("design:returntype", void 0) // ], C.prototype, "method", null); // // The emit for an accessor is: // // __decorate([ // dec // ], C.prototype, "accessor", null); // // The emit for a property is: // // __decorate([ // dec // ], C.prototype, "prop"); // const prefix = getClassMemberPrefix(node, member); const memberName = getExpressionForPropertyName(member, /*generateNameForComputedPropertyName*/ !hasSyntacticModifier(member, ModifierFlags.Ambient)); const descriptor = isPropertyDeclaration(member) && !hasAccessorModifier(member) // We emit `void 0` here to indicate to `__decorate` that it can invoke `Object.defineProperty` directly, but that it // should not invoke `Object.getOwnPropertyDescriptor`. ? factory.createVoidZero() // We emit `null` here to indicate to `__decorate` that it can invoke `Object.getOwnPropertyDescriptor` directly. // We have this extra argument here so that we can inject an explicit property descriptor at a later date. : factory.createNull(); const helper = emitHelpers().createDecorateHelper( decoratorExpressions, prefix, memberName, descriptor, ); setEmitFlags(helper, EmitFlags.NoComments); setSourceMapRange(helper, moveRangePastModifiers(member)); return helper; } /** * Generates a __decorate helper call for a class constructor. * * @param node The class node. 
*/ function addConstructorDecorationStatement(statements: Statement[], node: ClassDeclaration) { const expression = generateConstructorDecorationExpression(node); if (expression) { statements.push(setOriginalNode(factory.createExpressionStatement(expression), node)); } } /** * Generates a __decorate helper call for a class constructor. * * @param node The class node. */ function generateConstructorDecorationExpression(node: ClassExpression | ClassDeclaration) { const allDecorators = getAllDecoratorsOfClass(node, /*useLegacyDecorators*/ true); const decoratorExpressions = transformAllDecoratorsOfDeclaration(allDecorators); if (!decoratorExpressions) { return undefined; } const classAlias = classAliases && classAliases[getOriginalNodeId(node)]; // When we transform to ES5/3 this will be moved inside an IIFE and should reference the name // without any block-scoped variable collision handling const localName = languageVersion < ScriptTarget.ES2015 ? factory.getInternalName(node, /*allowComments*/ false, /*allowSourceMaps*/ true) : factory.getDeclarationName(node, /*allowComments*/ false, /*allowSourceMaps*/ true); const decorate = emitHelpers().createDecorateHelper(decoratorExpressions, localName); const expression = factory.createAssignment(localName, classAlias ? factory.createAssignment(classAlias, decorate) : decorate); setEmitFlags(expression, EmitFlags.NoComments); setSourceMapRange(expression, moveRangePastModifiers(node)); return expression; } /** * Transforms a decorator into an expression. * * @param decorator The decorator node. */ function transformDecorator(decorator: Decorator) { return Debug.checkDefined(visitNode(decorator.expression, visitor, isExpression)); } /** * Transforms the decorators of a parameter. * * @param decorators The decorators for the parameter at the provided offset. * @param parameterOffset The offset of the parameter. 
*/ function transformDecoratorsOfParameter(decorators: readonly Decorator[] | undefined, parameterOffset: number) { let expressions: Expression[] | undefined; if (decorators) { expressions = []; for (const decorator of decorators) { const helper = emitHelpers().createParamHelper( transformDecorator(decorator), parameterOffset, ); setTextRange(helper, decorator.expression); setEmitFlags(helper, EmitFlags.NoComments); expressions.push(helper); } } return expressions; } /** * Gets an expression that represents a property name (for decorated properties or enums). * For a computed property, a name is generated for the node. * * @param member The member whose name should be converted into an expression. */ function getExpressionForPropertyName(member: ClassElement | EnumMember, generateNameForComputedPropertyName: boolean): Expression { const name = member.name!; if (isPrivateIdentifier(name)) { return factory.createIdentifier(""); } else if (isComputedPropertyName(name)) { return generateNameForComputedPropertyName && !isSimpleInlineableExpression(name.expression) ? factory.getGeneratedNameForNode(name) : name.expression; } else if (isIdentifier(name)) { return factory.createStringLiteral(idText(name)); } else { return factory.cloneNode(name); } } function enableSubstitutionForClassAliases() { if (!classAliases) { // We need to enable substitutions for identifiers. This allows us to // substitute class names inside of a class declaration. context.enableSubstitution(SyntaxKind.Identifier); // Keep track of class aliases. classAliases = []; } } /** * Gets a local alias for a class declaration if it is a decorated class with an internal * reference to the static side of the class. This is necessary to avoid issues with * double-binding semantics for the class name. 
*/ function getClassAliasIfNeeded(node: ClassDeclaration) { if (resolver.hasNodeCheckFlag(node, NodeCheckFlags.ContainsConstructorReference)) { enableSubstitutionForClassAliases(); const classAlias = factory.createUniqueName(node.name && !isGeneratedIdentifier(node.name) ? idText(node.name) : "default"); classAliases[getOriginalNodeId(node)] = classAlias; hoistVariableDeclaration(classAlias); return classAlias; } } function getClassPrototype(node: ClassExpression | ClassDeclaration) { return factory.createPropertyAccessExpression(factory.getDeclarationName(node), "prototype"); } function getClassMemberPrefix(node: ClassExpression | ClassDeclaration, member: ClassElement) { return isStatic(member) ? factory.getDeclarationName(node) : getClassPrototype(node); } /** * Hooks node substitutions. * * @param hint A hint as to the intended usage of the node. * @param node The node to substitute. */ function onSubstituteNode(hint: EmitHint, node: Node) { node = previousOnSubstituteNode(hint, node); if (hint === EmitHint.Expression) { return substituteExpression(node as Expression); } return node; } function substituteExpression(node: Expression) { switch (node.kind) { case SyntaxKind.Identifier: return substituteExpressionIdentifier(node as Identifier); } return node; } function substituteExpressionIdentifier(node: Identifier): Expression { return trySubstituteClassAlias(node) ?? node; } function trySubstituteClassAlias(node: Identifier): Expression | undefined { if (classAliases) { if (resolver.hasNodeCheckFlag(node, NodeCheckFlags.ConstructorReference)) { // Due to the emit for class decorators, any reference to the class from inside of the class body // must instead be rewritten to point to a temporary variable to avoid issues with the double-bind // behavior of class names in ES6. // Also, when emitting statics for class expressions, we must substitute a class alias for // constructor references in static property initializers. 
const declaration = resolver.getReferencedValueDeclaration(node); if (declaration) { const classAlias = classAliases[declaration.id!]; // TODO: GH#18217 if (classAlias) { const clone = factory.cloneNode(classAlias); setSourceMapRange(clone, node); setCommentRange(clone, node); return clone; } } } } return undefined; } }
typescript
github
https://github.com/microsoft/TypeScript
src/compiler/transformers/legacyDecorators.ts
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
	"fmt"

	"github.com/spf13/pflag"

	"k8s.io/kubernetes/cmd/kube-controller-manager/names"
	endpointslicemirroringconfig "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config"
)

// Inclusive bounds accepted for the corresponding command-line flags below.
const (
	mirroringMinConcurrentServiceEndpointSyncs = 1
	mirroringMaxConcurrentServiceEndpointSyncs = 50
	mirroringMinMaxEndpointsPerSubset          = 1
	mirroringMaxMaxEndpointsPerSubset          = 1000
)

// EndpointSliceMirroringControllerOptions holds the
// EndpointSliceMirroringController options.
// It embeds the controller configuration so that flag values are written
// directly into the configuration struct.
type EndpointSliceMirroringControllerOptions struct {
	*endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration
}

// AddFlags adds flags related to EndpointSliceMirroringController for
// controller manager to the specified FlagSet.
// Each flag defaults to the value currently held by the embedded
// configuration. A nil receiver is a no-op so callers need not guard.
func (o *EndpointSliceMirroringControllerOptions) AddFlags(fs *pflag.FlagSet) {
	if o == nil {
		return
	}

	fs.Int32Var(&o.MirroringConcurrentServiceEndpointSyncs, "mirroring-concurrent-service-endpoint-syncs", o.MirroringConcurrentServiceEndpointSyncs, fmt.Sprintf("The number of service endpoint syncing operations that will be done concurrently by the %s. Larger number = faster endpoint slice updating, but more CPU (and network) load. Defaults to 5.", names.EndpointSliceMirroringController))
	fs.Int32Var(&o.MirroringMaxEndpointsPerSubset, "mirroring-max-endpoints-per-subset", o.MirroringMaxEndpointsPerSubset, fmt.Sprintf("The maximum number of endpoints that will be added to an EndpointSlice by the %s. More endpoints per slice will result in less endpoint slices, but larger resources. Defaults to 100.", names.EndpointSliceMirroringController))
	fs.DurationVar(&o.MirroringEndpointUpdatesBatchPeriod.Duration, "mirroring-endpointslice-updates-batch-period", o.MirroringEndpointUpdatesBatchPeriod.Duration, fmt.Sprintf("The length of EndpointSlice updates batching period for %s. Processing of EndpointSlice changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of EndpointSlice updates. Larger number = higher endpoint programming latency, but lower number of endpoints revision generated", names.EndpointSliceMirroringController))
}

// ApplyTo fills up EndpointSliceMirroringController config with options.
// A nil receiver leaves cfg untouched and returns nil.
func (o *EndpointSliceMirroringControllerOptions) ApplyTo(cfg *endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration) error {
	if o == nil {
		return nil
	}

	cfg.MirroringConcurrentServiceEndpointSyncs = o.MirroringConcurrentServiceEndpointSyncs
	cfg.MirroringMaxEndpointsPerSubset = o.MirroringMaxEndpointsPerSubset
	cfg.MirroringEndpointUpdatesBatchPeriod = o.MirroringEndpointUpdatesBatchPeriod

	return nil
}

// Validate checks validation of EndpointSliceMirroringControllerOptions.
func (o *EndpointSliceMirroringControllerOptions) Validate() []error { if o == nil { return nil } errs := []error{} if o.MirroringConcurrentServiceEndpointSyncs < mirroringMinConcurrentServiceEndpointSyncs { errs = append(errs, fmt.Errorf("mirroring-concurrent-service-endpoint-syncs must not be less than %d, but got %d", mirroringMinConcurrentServiceEndpointSyncs, o.MirroringConcurrentServiceEndpointSyncs)) } else if o.MirroringConcurrentServiceEndpointSyncs > mirroringMaxConcurrentServiceEndpointSyncs { errs = append(errs, fmt.Errorf("mirroring-concurrent-service-endpoint-syncs must not be more than %d, but got %d", mirroringMaxConcurrentServiceEndpointSyncs, o.MirroringConcurrentServiceEndpointSyncs)) } if o.MirroringMaxEndpointsPerSubset < mirroringMinMaxEndpointsPerSubset { errs = append(errs, fmt.Errorf("mirroring-max-endpoints-per-subset must not be less than %d, but got %d", mirroringMinMaxEndpointsPerSubset, o.MirroringMaxEndpointsPerSubset)) } else if o.MirroringMaxEndpointsPerSubset > mirroringMaxMaxEndpointsPerSubset { errs = append(errs, fmt.Errorf("mirroring-max-endpoints-per-subset must not be more than %d, but got %d", mirroringMaxMaxEndpointsPerSubset, o.MirroringMaxEndpointsPerSubset)) } return errs }
go
github
https://github.com/kubernetes/kubernetes
cmd/kube-controller-manager/app/options/endpointslicemirroringcontroller.go
"""Default-value and property-set tests for bokeh annotation models."""
from __future__ import absolute_import

from itertools import chain

from bokeh.models.annotations import (
    Legend, Arrow, BoxAnnotation, Span, LabelSet, Label, Title
)
from bokeh.models import ColumnDataSource, ArrowHead
from bokeh.core.enums import (
    NamedColor as Color, LineJoin, LineCap, FontStyle, TextAlign
)

# Property-name groups shared by the checks below.
FILL = ["fill_color", "fill_alpha"]
LINE = ["line_color", "line_width", "line_alpha", "line_join", "line_cap", "line_dash", "line_dash_offset"]
TEXT = ["text_font", "text_font_size", "text_font_style", "text_color", "text_alpha", "text_align", "text_baseline"]
ANGLE = ["angle", "angle_units"]
PROPS = ["name", "tags"]

def prefix(prefix, props):
    """Return *props* with *prefix* prepended to each name."""
    return [prefix + p for p in props]

def check_props(annotation, *props):
    """Assert that *annotation* exposes exactly PROPS plus the given groups."""
    expected = set(chain(PROPS, *props))
    found = set(annotation.properties())
    missing = expected.difference(found)
    extra = found.difference(expected)
    # Truthiness test instead of `len(...) == 0` (empty sets are falsy).
    assert not missing, "Properties missing: {0}".format(", ".join(sorted(missing)))
    assert not extra, "Extra properties: {0}".format(", ".join(sorted(extra)))

def check_fill(annotation, prefix="", fill_color='#ffffff', fill_alpha=1.0):
    """Assert the (optionally prefixed) fill properties match the defaults."""
    assert getattr(annotation, prefix + "fill_color") == fill_color
    assert getattr(annotation, prefix + "fill_alpha") == fill_alpha

def check_line(annotation, prefix="", line_color=Color.black, line_width=1.0, line_alpha=1.0):
    """Assert the (optionally prefixed) line properties match the defaults."""
    assert getattr(annotation, prefix + "line_color") == line_color
    assert getattr(annotation, prefix + "line_width") == line_width
    assert getattr(annotation, prefix + "line_alpha") == line_alpha
    assert getattr(annotation, prefix + "line_join") == LineJoin.miter
    assert getattr(annotation, prefix + "line_cap") == LineCap.butt
    assert getattr(annotation, prefix + "line_dash") == []
    assert getattr(annotation, prefix + "line_dash_offset") == 0

def check_text(annotation, prefix="", font_size='12pt', baseline='bottom', font_style='normal'):
    """Assert the (optionally prefixed) text properties match the defaults."""
    assert getattr(annotation, prefix + "text_font") == "helvetica"
    assert getattr(annotation, prefix + "text_font_size") == {"value": font_size}
    assert getattr(annotation, prefix + "text_font_style") == font_style
    assert getattr(annotation, prefix + "text_color") == "#444444"
    assert getattr(annotation, prefix + "text_alpha") == 1.0
    assert getattr(annotation, prefix + "text_align") == TextAlign.left
    assert getattr(annotation, prefix + "text_baseline") == baseline

def test_Legend():
    legend = Legend()
    assert legend.plot is None
    assert legend.location == 'top_right'
    assert legend.label_standoff == 15
    assert legend.label_height == 20
    assert legend.label_width == 50
    assert legend.glyph_height == 20
    assert legend.glyph_width == 20
    assert legend.legend_padding == 10
    assert legend.legend_spacing == 3
    assert legend.legends == []
    yield check_line, legend, "border_"
    yield check_text, legend, "label_", "10pt", "middle"
    yield check_fill, legend, "background_"
    yield (check_props, legend, [
        "plot",
        "location",
        "orientation",
        "label_standoff",
        "label_height",
        "label_width",
        "glyph_height",
        "glyph_width",
        "legend_padding",
        "legend_spacing",
        "legends",
        "level"],
        prefix('label_', TEXT),
        prefix('border_', LINE),
        prefix('background_', FILL))

def test_Arrow():
    arrow = Arrow()
    assert arrow.plot is None
    assert arrow.x_start is None
    assert arrow.y_start is None
    assert arrow.start_units == 'data'
    assert arrow.start is None
    assert arrow.x_end is None
    assert arrow.y_end is None
    assert arrow.end_units == 'data'
    assert isinstance(arrow.end, ArrowHead)
    assert isinstance(arrow.source, ColumnDataSource)
    assert arrow.source.data == {}
    assert arrow.x_range_name == "default"
    assert arrow.y_range_name == "default"
    yield check_line, arrow
    yield (check_props, arrow, [
        "plot",
        "level",
        "x_start",
        "y_start",
        "start_units",
        "start",
        "x_end",
        "y_end",
        "end_units",
        "end",
        "source",
        "x_range_name",
        "y_range_name"],
        LINE)

def test_BoxAnnotation():
    box = BoxAnnotation()
    assert box.plot is None
    # `is None` (identity) rather than `== None` (PEP 8 / E711).
    assert box.left is None
    assert box.left_units == 'data'
    assert box.right is None
    assert box.right_units == 'data'
    assert box.bottom is None
    assert box.bottom_units == 'data'
    assert box.top is None
    assert box.top_units == 'data'
    assert box.x_range_name == 'default'
    assert box.y_range_name == 'default'
    assert box.level == 'annotation'
    yield check_line, box, "", '#cccccc', 1, 0.3
    yield check_fill, box, "", "#fff9ba", 0.4
    yield (check_props, box, [
        "render_mode",
        "plot",
        "left",
        "left_units",
        "right",
        "right_units",
        "bottom",
        "bottom_units",
        "top",
        "top_units",
        "x_range_name",
        "y_range_name",
        "level",
    ], LINE, FILL)

def test_Label():
    label = Label()
    assert label.plot is None
    assert label.level == 'annotation'
    assert label.x is None
    assert label.y is None
    assert label.x_units == 'data'
    assert label.y_units == 'data'
    assert label.text is None
    assert label.angle == 0
    assert label.angle_units == 'rad'
    assert label.x_offset == 0
    assert label.y_offset == 0
    assert label.render_mode == 'canvas'
    assert label.x_range_name == 'default'
    assert label.y_range_name == 'default'
    yield check_text, label
    yield check_fill, label, "background_", None, 1.0
    yield check_line, label, "border_", None, 1.0, 1.0
    yield (check_props, label, [
        "plot",
        "level",
        "x",
        "y",
        "x_units",
        "y_units",
        "text",
        "angle",
        "angle_units",
        "x_offset",
        "y_offset",
        "render_mode",
        "x_range_name",
        "y_range_name"],
        TEXT,
        prefix('border_', LINE),
        prefix('background_', FILL))

def test_LabelSet():
    label_set = LabelSet()
    assert label_set.plot is None
    assert label_set.level == 'annotation'
    assert label_set.x is None
    assert label_set.y is None
    assert label_set.x_units == 'data'
    assert label_set.y_units == 'data'
    assert label_set.text == 'text'
    assert label_set.angle == 0
    assert label_set.angle_units == 'rad'
    assert label_set.x_offset == 0
    assert label_set.y_offset == 0
    assert label_set.render_mode == 'canvas'
    assert label_set.x_range_name == 'default'
    assert label_set.y_range_name == 'default'
    assert isinstance(label_set.source, ColumnDataSource)
    assert label_set.source.data == {}
    yield check_text, label_set
    yield check_fill, label_set, "background_", None, 1.0
    yield check_line, label_set, "border_", None, 1.0, 1.0
    yield (check_props, label_set, [
        "plot",
        "level",
        "x",
        "y",
        "x_units",
        "y_units",
        "text",
        "angle",
        "angle_units",
        "x_offset",
        "y_offset",
        "render_mode",
        "x_range_name",
        "y_range_name",
        "source"],
        TEXT,
        ANGLE,
        prefix('border_', LINE),
        prefix('background_', FILL))

def test_Span():
    line = Span()
    assert line.plot is None
    assert line.location is None
    assert line.location_units == 'data'
    assert line.dimension == 'width'
    assert line.x_range_name == 'default'
    assert line.y_range_name == 'default'
    assert line.level == 'annotation'
    assert line.render_mode == 'canvas'
    yield check_line, line, "", 'black', 1.0
    yield (check_props, line, [
        "plot",
        "location",
        "location_units",
        "dimension",
        "x_range_name",
        "y_range_name",
        "level",
        "render_mode"
    ], LINE)

def test_Title():
    title = Title()
    assert title.plot is None
    assert title.level == 'annotation'
    assert title.text is None
    assert title.title_align == 'center'
    assert title.title_padding == 0
    assert title.text_font == 'helvetica'
    assert title.text_font_size == {'value': '12pt'}
    assert title.text_font_style == 'normal'
    assert title.text_color == '#444444'
    assert title.text_alpha == 1.0
    yield check_fill, title, "background_", None, 1.0
    yield check_line, title, "border_", None, 1.0, 1.0
    yield (check_props, title, [
        "plot",
        "level",
        "text",
        "title_align",
        "title_padding",
        "text_font",
        "text_font_size",
        "text_font_style",
        "text_color",
        "text_alpha",
        "render_mode"],
        prefix('border_', LINE),
        prefix('background_', FILL))
unknown
codeparrot/codeparrot-clean
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from contextlib import contextmanager import glance_store import mock from mock import patch import webob.exc from glance.api.v1 import upload_utils from glance.common import exception from glance.common import store_utils from glance.common import utils import glance.registry.client.v1.api as registry from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils class TestUploadUtils(base.StoreClearingUnitTest): def setUp(self): super(TestUploadUtils, self).setUp() self.config(verbose=True, debug=True) def tearDown(self): super(TestUploadUtils, self).tearDown() def test_initiate_delete(self): req = unit_test_utils.get_fake_request() location = {"url": "file://foo/bar", "metadata": {}, "status": "active"} id = unit_test_utils.UUID1 with patch.object(store_utils, "safe_delete_from_backend") as mock_store_utils: upload_utils.initiate_deletion(req, location, id) mock_store_utils.assert_called_once_with(req.context, id, location) def test_initiate_delete_with_delayed_delete(self): self.config(delayed_delete=True) req = unit_test_utils.get_fake_request() location = {"url": "file://foo/bar", "metadata": {}, "status": "active"} id = unit_test_utils.UUID1 with patch.object(store_utils, "schedule_delayed_delete_from_backend", return_value=True) as mock_store_utils: upload_utils.initiate_deletion(req, location, id) 
mock_store_utils.assert_called_once_with(req.context, id, location) def test_safe_kill(self): req = unit_test_utils.get_fake_request() id = unit_test_utils.UUID1 with patch.object(registry, "update_image_metadata") as mock_registry: upload_utils.safe_kill(req, id, 'saving') mock_registry.assert_called_once_with(req.context, id, {'status': 'killed'}, from_state='saving') def test_safe_kill_with_error(self): req = unit_test_utils.get_fake_request() id = unit_test_utils.UUID1 with patch.object(registry, "update_image_metadata", side_effect=Exception()) as mock_registry: upload_utils.safe_kill(req, id, 'saving') mock_registry.assert_called_once_with(req.context, id, {'status': 'killed'}, from_state='saving') @contextmanager def _get_store_and_notifier(self, image_size=10, ext_update_data=None, ret_checksum="checksum", exc_class=None): location = "file://foo/bar" checksum = "checksum" size = 10 update_data = {'checksum': checksum} if ext_update_data is not None: update_data.update(ext_update_data) image_meta = {'id': unit_test_utils.UUID1, 'size': image_size} image_data = "blah" store = mock.MagicMock() notifier = mock.MagicMock() if exc_class is not None: store.add.side_effect = exc_class else: store.add.return_value = (location, size, ret_checksum, {}) yield (location, checksum, image_meta, image_data, store, notifier, update_data) store.add.assert_called_once_with(image_meta['id'], mock.ANY, image_meta['size'], context=mock.ANY) def test_upload_data_to_store(self): # 'user_storage_quota' is not set def store_add(image_id, data, size, **kwargs): # Check if 'data' is instance of 'CooperativeReader' when # 'user_storage_quota' is disabled. 
self.assertIsInstance(data, utils.CooperativeReader) return location, 10, "checksum", {} req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ext_update_data={'size': 10}, exc_class=store_add) as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) with patch.object(registry, 'update_image_metadata', return_value=ret) as mock_update_image_metadata: actual_meta, location_data = upload_utils.upload_data_to_store( req, image_meta, image_data, store, notifier) self.assertEqual(location, location_data['url']) self.assertEqual(image_meta.update(update_data), actual_meta) mock_update_image_metadata.assert_called_once_with( req.context, image_meta['id'], update_data, from_state='saving') def test_upload_data_to_store_user_storage_quota_enabled(self): # Enable user_storage_quota self.config(user_storage_quota='100B') def store_add(image_id, data, size, **kwargs): # Check if 'data' is instance of 'LimitingReader' when # 'user_storage_quota' is enabled. 
self.assertIsInstance(data, utils.LimitingReader) return location, 10, "checksum", {} req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ext_update_data={'size': 10}, exc_class=store_add) as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) # mock 'check_quota' mock_check_quota = patch('glance.api.common.check_quota', return_value=100) mock_check_quota.start() self.addCleanup(mock_check_quota.stop) with patch.object(registry, 'update_image_metadata', return_value=ret) as mock_update_image_metadata: actual_meta, location_data = upload_utils.upload_data_to_store( req, image_meta, image_data, store, notifier) self.assertEqual(location, location_data['url']) self.assertEqual(image_meta.update(update_data), actual_meta) mock_update_image_metadata.assert_called_once_with( req.context, image_meta['id'], update_data, from_state='saving') # 'check_quota' is called two times check_quota_call_count = ( mock_check_quota.target.check_quota.call_count) self.assertEqual(2, check_quota_call_count) def test_upload_data_to_store_mismatch_size(self): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( image_size=11) as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) with patch.object(registry, 'update_image_metadata', return_value=ret) as mock_update_image_metadata: self.assertRaises(webob.exc.HTTPBadRequest, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) mock_update_image_metadata.assert_called_with( req.context, image_meta['id'], {'status': 'killed'}, from_state='saving') def test_upload_data_to_store_mismatch_checksum(self): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ret_checksum='fake') as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) with patch.object(registry, 
"update_image_metadata", return_value=ret) as mock_update_image_metadata: self.assertRaises(webob.exc.HTTPBadRequest, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) mock_update_image_metadata.assert_called_with( req.context, image_meta['id'], {'status': 'killed'}, from_state='saving') def _test_upload_data_to_store_exception(self, exc_class, expected_class): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( exc_class=exc_class) as (location, checksum, image_meta, image_data, store, notifier, update_data): with patch.object(upload_utils, 'safe_kill') as mock_safe_kill: self.assertRaises(expected_class, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) mock_safe_kill.assert_called_once_with( req, image_meta['id'], 'saving') def _test_upload_data_to_store_exception_with_notify(self, exc_class, expected_class, image_killed=True): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( exc_class=exc_class) as (location, checksum, image_meta, image_data, store, notifier, update_data): with patch.object(upload_utils, 'safe_kill') as mock_safe_kill: self.assertRaises(expected_class, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) if image_killed: mock_safe_kill.assert_called_with(req, image_meta['id'], 'saving') def test_upload_data_to_store_raises_store_disabled(self): """Test StoreDisabled exception is raised while uploading data""" self._test_upload_data_to_store_exception_with_notify( glance_store.StoreAddDisabled, webob.exc.HTTPGone, image_killed=True) def test_upload_data_to_store_duplicate(self): """See note in glance.api.v1.upload_utils on why we don't want image to be deleted in this case. 
""" self._test_upload_data_to_store_exception_with_notify( exception.Duplicate, webob.exc.HTTPConflict, image_killed=False) def test_upload_data_to_store_forbidden(self): self._test_upload_data_to_store_exception_with_notify( exception.Forbidden, webob.exc.HTTPForbidden) def test_upload_data_to_store_storage_full(self): self._test_upload_data_to_store_exception_with_notify( glance_store.StorageFull, webob.exc.HTTPRequestEntityTooLarge) def test_upload_data_to_store_storage_write_denied(self): self._test_upload_data_to_store_exception_with_notify( glance_store.StorageWriteDenied, webob.exc.HTTPServiceUnavailable) def test_upload_data_to_store_size_limit_exceeded(self): self._test_upload_data_to_store_exception_with_notify( exception.ImageSizeLimitExceeded, webob.exc.HTTPRequestEntityTooLarge) def test_upload_data_to_store_http_error(self): self._test_upload_data_to_store_exception_with_notify( webob.exc.HTTPError, webob.exc.HTTPError) def test_upload_data_to_store_client_disconnect(self): self._test_upload_data_to_store_exception( ValueError, webob.exc.HTTPBadRequest) def test_upload_data_to_store_client_disconnect_ioerror(self): self._test_upload_data_to_store_exception( IOError, webob.exc.HTTPBadRequest) def test_upload_data_to_store_exception(self): self._test_upload_data_to_store_exception_with_notify( Exception, webob.exc.HTTPInternalServerError) def test_upload_data_to_store_not_found_after_upload(self): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ext_update_data={'size': 10}) as (location, checksum, image_meta, image_data, store, notifier, update_data): exc = exception.ImageNotFound with patch.object(registry, 'update_image_metadata', side_effect=exc) as mock_update_image_metadata: with patch.object(upload_utils, "initiate_deletion") as mock_initiate_del: with patch.object(upload_utils, "safe_kill") as mock_safe_kill: self.assertRaises(webob.exc.HTTPPreconditionFailed, upload_utils.upload_data_to_store, req, image_meta, 
image_data, store, notifier) mock_update_image_metadata.assert_called_once_with( req.context, image_meta['id'], update_data, from_state='saving') mock_initiate_del.assert_called_once_with( req, {'url': location, 'status': 'active', 'metadata': {}}, image_meta['id']) mock_safe_kill.assert_called_once_with( req, image_meta['id'], 'saving')
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp import tools from openerp.osv import fields,osv class hr_holidays_remaining_leaves_user(osv.osv): _name = "hr.holidays.remaining.leaves.user" _description = "Total holidays by type" _auto = False _columns = { 'name': fields.char('Employee'), 'no_of_leaves': fields.integer('Remaining leaves'), 'user_id': fields.many2one('res.users', 'User'), 'leave_type': fields.char('Leave Type'), } def init(self, cr): tools.drop_view_if_exists(cr, 'hr_holidays_remaining_leaves_user') cr.execute(""" CREATE or REPLACE view hr_holidays_remaining_leaves_user as ( SELECT min(hrs.id) as id, rr.name as name, sum(hrs.number_of_days) as no_of_leaves, rr.user_id as user_id, hhs.name as leave_type FROM hr_holidays as hrs, hr_employee as hre, resource_resource as rr,hr_holidays_status as hhs WHERE hrs.employee_id = hre.id and hre.resource_id = rr.id and hhs.id = hrs.holiday_status_id GROUP BY rr.name,rr.user_id,hhs.name ) """)
unknown
codeparrot/codeparrot-clean
'''Thread-safe version of Tkinter. Copyright (c) 2009, Allen B. Taylor This module is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser Public License for more details. You should have received a copy of the GNU Lesser Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Usage: import mtTkinter as Tkinter # Use "Tkinter." as usual. or from mtTkinter import * # Use Tkinter module definitions as usual. This module modifies the original Tkinter module in memory, making all functionality thread-safe. It does this by wrapping the Tk class' tk instance with an object that diverts calls through an event queue when the call is issued from a thread other than the thread in which the Tk instance was created. The events are processed in the creation thread via an 'after' event. The modified Tk class accepts two additional keyword parameters on its __init__ method: mtDebug: 0 = No debug output (default) 1 = Minimal debug output ... 9 = Full debug output mtCheckPeriod: Amount of time in milliseconds (default 100) between checks for out-of-thread events when things are otherwise idle. Decreasing this value can improve GUI responsiveness, but at the expense of consuming more CPU cycles. Note that, because it modifies the original Tkinter module (in memory), other modules that use Tkinter (e.g., Pmw) reap the benefits automagically as long as mtTkinter is imported at some point before extra threads are created. Author: Allen B. Taylor, a.b.taylor@gmail.com ''' from Tkinter import * import threading import Queue class _Tk(object): """ Wrapper for underlying attribute tk of class Tk. 
""" def __init__(self, tk, mtDebug = 0, mtCheckPeriod = 10): self._tk = tk # Create the incoming event queue. self._eventQueue = Queue.Queue(1) # Identify the thread from which this object is being created so we can # tell later whether an event is coming from another thread. self._creationThread = threading.currentThread() # Store remaining values. self._debug = mtDebug self._checkPeriod = mtCheckPeriod def __getattr__(self, name): # Divert attribute accesses to a wrapper around the underlying tk # object. return _TkAttr(self, getattr(self._tk, name)) class _TkAttr(object): """ Thread-safe callable attribute wrapper. """ def __init__(self, tk, attr): self._tk = tk self._attr = attr def __call__(self, *args, **kwargs): """ Thread-safe method invocation. Diverts out-of-thread calls through the event queue. Forwards all other method calls to the underlying tk object directly. """ # Check if we're in the creation thread. if threading.currentThread() == self._tk._creationThread: # We're in the creation thread; just call the event directly. if self._tk._debug >= 8 or \ self._tk._debug >= 3 and self._attr.__name__ == 'call' and \ len(args) >= 1 and args[0] == 'after': print 'Calling event directly:', \ self._attr.__name__, args, kwargs return self._attr(*args, **kwargs) else: # We're in a different thread than the creation thread; enqueue # the event, and then wait for the response. responseQueue = Queue.Queue(1) if self._tk._debug >= 1: print 'Marshalling event:', self._attr.__name__, args, kwargs self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue)) isException, response = responseQueue.get() # Handle the response, whether it's a normal return value or # an exception. if isException: exType, exValue, exTb = response raise exType, exValue, exTb else: return response # Define a hook for class Tk's __init__ method. 
def _Tk__init__(self, *args, **kwargs): # We support some new keyword arguments that the original __init__ method # doesn't expect, so separate those out before doing anything else. new_kwnames = ('mtCheckPeriod', 'mtDebug') new_kwargs = {} for name, value in kwargs.items(): if name in new_kwnames: new_kwargs[name] = value del kwargs[name] # Call the original __init__ method, creating the internal tk member. self.__original__init__mtTkinter(*args, **kwargs) # Replace the internal tk member with a wrapper that handles calls from # other threads. self.tk = _Tk(self.tk, **new_kwargs) # Set up the first event to check for out-of-thread events. self.after_idle(_CheckEvents, self) # Replace Tk's original __init__ with the hook. Tk.__original__init__mtTkinter = Tk.__init__ Tk.__init__ = _Tk__init__ def _CheckEvents(tk): "Event checker event." used = False try: # Process all enqueued events, then exit. while True: try: # Get an event request from the queue. method, args, kwargs, responseQueue = \ tk.tk._eventQueue.get_nowait() except: # No more events to process. break else: # Call the event with the given arguments, and then return # the result back to the caller via the response queue. used = True if tk.tk._debug >= 2: print 'Calling event from main thread:', \ method.__name__, args, kwargs try: responseQueue.put((False, method(*args, **kwargs))) except SystemExit, ex: raise SystemExit, ex except Exception, ex: # Calling the event caused an exception; return the # exception back to the caller so that it can be raised # in the caller's thread. from sys import exc_info exType, exValue, exTb = exc_info() responseQueue.put((True, (exType, exValue, exTb))) finally: # Schedule to check again. If we just processed an event, check # immediately; if we didn't, check later. if used: tk.after_idle(_CheckEvents, tk) else: tk.after(tk.tk._checkPeriod, _CheckEvents, tk) # Test thread entry point. 
def _testThread(root): text = "This is Tcl/Tk version %s" % TclVersion if TclVersion >= 8.1: try: text = text + unicode("\nThis should be a cedilla: \347", "iso-8859-1") except NameError: pass # no unicode support try: if root.globalgetvar('tcl_platform(threaded)'): text = text + "\nTcl is built with thread support" else: raise RuntimeError except: text = text + "\nTcl is NOT built with thread support" text = text + "\nmtTkinter works with or without Tcl thread support" label = Label(root, text=text) label.pack() button = Button(root, text="Click me!", command=lambda root=root: root.button.configure( text="[%s]" % root.button['text'])) button.pack() root.button = button quit = Button(root, text="QUIT", command=root.destroy) quit.pack() # The following three commands are needed so the window pops # up on top on Windows... root.iconify() root.update() root.deiconify() # Simulate button presses... button.invoke() root.after(1000, _pressOk, root, button) # Test button continuous press event. def _pressOk(root, button): button.invoke() try: root.after(1000, _pressOk, root, button) except: pass # Likely we're exiting # Test. Mostly borrowed from the Tkinter module, but the important bits moved # into a separate thread. if __name__ == '__main__': import threading root = Tk(mtDebug = 1) thread = threading.Thread(target = _testThread, args=(root,)) thread.start() root.mainloop() thread.join()
unknown
codeparrot/codeparrot-clean
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import dns.exception import dns.rdata import dns.tokenizer class HINFO(dns.rdata.Rdata): """HINFO record @ivar cpu: the CPU type @type cpu: string @ivar os: the OS type @type os: string @see: RFC 1035""" __slots__ = ['cpu', 'os'] def __init__(self, rdclass, rdtype, cpu, os): super(HINFO, self).__init__(rdclass, rdtype) self.cpu = cpu self.os = os def to_text(self, origin=None, relativize=True, **kw): return '"%s" "%s"' % (dns.rdata._escapify(self.cpu), dns.rdata._escapify(self.os)) def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): cpu = tok.get_string() os = tok.get_string() tok.get_eol() return cls(rdclass, rdtype, cpu, os) from_text = classmethod(from_text) def to_wire(self, file, compress = None, origin = None): l = len(self.cpu) assert l < 256 byte = chr(l) file.write(byte) file.write(self.cpu) l = len(self.os) assert l < 256 byte = chr(l) file.write(byte) file.write(self.os) def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None): l = ord(wire[current]) current += 1 rdlen -= 1 if l > rdlen: raise dns.exception.FormError cpu = wire[current : current + l] current += l rdlen -= l l = ord(wire[current]) current += 1 rdlen -= 1 if l != rdlen: raise 
dns.exception.FormError os = wire[current : current + l] return cls(rdclass, rdtype, cpu, os) from_wire = classmethod(from_wire) def _cmp(self, other): v = cmp(self.cpu, other.cpu) if v == 0: v = cmp(self.os, other.os) return v
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys import unittest from devil.android import device_errors from devil.android import md5sum from pylib import constants sys.path.append( os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pymock')) import mock # pylint: disable=import-error TEST_OUT_DIR = os.path.join('test', 'out', 'directory') HOST_MD5_EXECUTABLE = os.path.join(TEST_OUT_DIR, 'md5sum_bin_host') class Md5SumTest(unittest.TestCase): def setUp(self): self._patchers = [ mock.patch('pylib.constants.GetOutDirectory', new=mock.Mock(return_value=TEST_OUT_DIR)), mock.patch('os.path.exists', new=mock.Mock(return_value=True)), ] for p in self._patchers: p.start() def tearDown(self): for p in self._patchers: p.stop() def testCalculateHostMd5Sums_singlePath(self): test_path = '/test/host/file.dat' mock_get_cmd_output = mock.Mock( return_value='0123456789abcdeffedcba9876543210 /test/host/file.dat') with mock.patch('devil.utils.cmd_helper.GetCmdOutput', new=mock_get_cmd_output): out = md5sum.CalculateHostMd5Sums(test_path) self.assertEquals(1, len(out)) self.assertTrue('/test/host/file.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/test/host/file.dat']) mock_get_cmd_output.assert_called_once_with( [HOST_MD5_EXECUTABLE, '/test/host/file.dat']) def testCalculateHostMd5Sums_list(self): test_paths = ['/test/host/file0.dat', '/test/host/file1.dat'] mock_get_cmd_output = mock.Mock( return_value='0123456789abcdeffedcba9876543210 /test/host/file0.dat\n' '123456789abcdef00fedcba987654321 /test/host/file1.dat\n') with mock.patch('devil.utils.cmd_helper.GetCmdOutput', new=mock_get_cmd_output): out = md5sum.CalculateHostMd5Sums(test_paths) self.assertEquals(2, len(out)) self.assertTrue('/test/host/file0.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', 
out['/test/host/file0.dat']) self.assertTrue('/test/host/file1.dat' in out) self.assertEquals('123456789abcdef00fedcba987654321', out['/test/host/file1.dat']) mock_get_cmd_output.assert_called_once_with( [HOST_MD5_EXECUTABLE, '/test/host/file0.dat', '/test/host/file1.dat']) def testCalculateHostMd5Sums_generator(self): test_paths = ('/test/host/' + p for p in ['file0.dat', 'file1.dat']) mock_get_cmd_output = mock.Mock( return_value='0123456789abcdeffedcba9876543210 /test/host/file0.dat\n' '123456789abcdef00fedcba987654321 /test/host/file1.dat\n') with mock.patch('devil.utils.cmd_helper.GetCmdOutput', new=mock_get_cmd_output): out = md5sum.CalculateHostMd5Sums(test_paths) self.assertEquals(2, len(out)) self.assertTrue('/test/host/file0.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/test/host/file0.dat']) self.assertTrue('/test/host/file1.dat' in out) self.assertEquals('123456789abcdef00fedcba987654321', out['/test/host/file1.dat']) mock_get_cmd_output.assert_called_once_with( [HOST_MD5_EXECUTABLE, '/test/host/file0.dat', '/test/host/file1.dat']) def testCalculateDeviceMd5Sums_noPaths(self): device = mock.NonCallableMock() device.RunShellCommand = mock.Mock(side_effect=Exception()) out = md5sum.CalculateDeviceMd5Sums([], device) self.assertEquals(0, len(out)) def testCalculateDeviceMd5Sums_singlePath(self): test_path = '/storage/emulated/legacy/test/file.dat' device = mock.NonCallableMock() device_md5sum_output = [ '0123456789abcdeffedcba9876543210 ' '/storage/emulated/legacy/test/file.dat', ] device.RunShellCommand = mock.Mock(return_value=device_md5sum_output) with mock.patch('os.path.getsize', return_value=1337): out = md5sum.CalculateDeviceMd5Sums(test_path, device) self.assertEquals(1, len(out)) self.assertTrue('/storage/emulated/legacy/test/file.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/storage/emulated/legacy/test/file.dat']) self.assertEquals(1, len(device.RunShellCommand.call_args_list)) def 
testCalculateDeviceMd5Sums_list(self): test_path = ['/storage/emulated/legacy/test/file0.dat', '/storage/emulated/legacy/test/file1.dat'] device = mock.NonCallableMock() device_md5sum_output = [ '0123456789abcdeffedcba9876543210 ' '/storage/emulated/legacy/test/file0.dat', '123456789abcdef00fedcba987654321 ' '/storage/emulated/legacy/test/file1.dat', ] device.RunShellCommand = mock.Mock(return_value=device_md5sum_output) with mock.patch('os.path.getsize', return_value=1337): out = md5sum.CalculateDeviceMd5Sums(test_path, device) self.assertEquals(2, len(out)) self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/storage/emulated/legacy/test/file0.dat']) self.assertTrue('/storage/emulated/legacy/test/file1.dat' in out) self.assertEquals('123456789abcdef00fedcba987654321', out['/storage/emulated/legacy/test/file1.dat']) self.assertEquals(1, len(device.RunShellCommand.call_args_list)) def testCalculateDeviceMd5Sums_generator(self): test_path = ('/storage/emulated/legacy/test/file%d.dat' % n for n in xrange(0, 2)) device = mock.NonCallableMock() device_md5sum_output = [ '0123456789abcdeffedcba9876543210 ' '/storage/emulated/legacy/test/file0.dat', '123456789abcdef00fedcba987654321 ' '/storage/emulated/legacy/test/file1.dat', ] device.RunShellCommand = mock.Mock(return_value=device_md5sum_output) with mock.patch('os.path.getsize', return_value=1337): out = md5sum.CalculateDeviceMd5Sums(test_path, device) self.assertEquals(2, len(out)) self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/storage/emulated/legacy/test/file0.dat']) self.assertTrue('/storage/emulated/legacy/test/file1.dat' in out) self.assertEquals('123456789abcdef00fedcba987654321', out['/storage/emulated/legacy/test/file1.dat']) self.assertEquals(1, len(device.RunShellCommand.call_args_list)) def testCalculateDeviceMd5Sums_singlePath_linkerWarning(self): # See 
crbug/479966 test_path = '/storage/emulated/legacy/test/file.dat' device = mock.NonCallableMock() device_md5sum_output = [ 'WARNING: linker: /data/local/tmp/md5sum/md5sum_bin: ' 'unused DT entry: type 0x1d arg 0x15db', 'THIS_IS_NOT_A_VALID_CHECKSUM_ZZZ some random text', '0123456789abcdeffedcba9876543210 ' '/storage/emulated/legacy/test/file.dat', ] device.RunShellCommand = mock.Mock(return_value=device_md5sum_output) with mock.patch('os.path.getsize', return_value=1337): out = md5sum.CalculateDeviceMd5Sums(test_path, device) self.assertEquals(1, len(out)) self.assertTrue('/storage/emulated/legacy/test/file.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/storage/emulated/legacy/test/file.dat']) self.assertEquals(1, len(device.RunShellCommand.call_args_list)) def testCalculateDeviceMd5Sums_list_fileMissing(self): test_path = ['/storage/emulated/legacy/test/file0.dat', '/storage/emulated/legacy/test/file1.dat'] device = mock.NonCallableMock() device_md5sum_output = [ '0123456789abcdeffedcba9876543210 ' '/storage/emulated/legacy/test/file0.dat', '[0819/203513:ERROR:md5sum.cc(25)] Could not open file asdf', '', ] device.RunShellCommand = mock.Mock(return_value=device_md5sum_output) with mock.patch('os.path.getsize', return_value=1337): out = md5sum.CalculateDeviceMd5Sums(test_path, device) self.assertEquals(1, len(out)) self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/storage/emulated/legacy/test/file0.dat']) self.assertEquals(1, len(device.RunShellCommand.call_args_list)) def testCalculateDeviceMd5Sums_requiresBinary(self): test_path = '/storage/emulated/legacy/test/file.dat' device = mock.NonCallableMock() device.adb = mock.NonCallableMock() device.adb.Push = mock.Mock() device_md5sum_output = [ 'WARNING: linker: /data/local/tmp/md5sum/md5sum_bin: ' 'unused DT entry: type 0x1d arg 0x15db', 'THIS_IS_NOT_A_VALID_CHECKSUM_ZZZ some random text', 
'0123456789abcdeffedcba9876543210 ' '/storage/emulated/legacy/test/file.dat', ] error = device_errors.AdbShellCommandFailedError('cmd', 'out', 2) device.RunShellCommand = mock.Mock( side_effect=(error, '', device_md5sum_output)) with mock.patch('os.path.getsize', return_value=1337): out = md5sum.CalculateDeviceMd5Sums(test_path, device) self.assertEquals(1, len(out)) self.assertTrue('/storage/emulated/legacy/test/file.dat' in out) self.assertEquals('0123456789abcdeffedcba9876543210', out['/storage/emulated/legacy/test/file.dat']) self.assertEquals(3, len(device.RunShellCommand.call_args_list)) device.adb.Push.assert_called_once_with( 'test/out/directory/md5sum_dist', '/data/local/tmp/md5sum') if __name__ == '__main__': unittest.main(verbosity=2)
unknown
codeparrot/codeparrot-clean
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.cells import opts as cells_opts from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception from nova.i18n import _LE from nova.objects import base from nova.objects import fields from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Converted network_info to store the model. # Version 1.2: Added new() and update_cells kwarg to save(). # Version 1.3: Added delete() # Version 1.4: String attributes updated to support unicode # Version 1.5: Actually set the deleted, created_at, updated_at, and # deleted_at attributes VERSION = '1.5' fields = { 'instance_uuid': fields.UUIDField(), 'network_info': fields.Field(fields.NetworkModel(), nullable=True), } @staticmethod def _from_db_object(context, info_cache, db_obj): for field in info_cache.fields: info_cache[field] = db_obj[field] info_cache.obj_reset_changes() info_cache._context = context return info_cache @classmethod def new(cls, context, instance_uuid): """Create an InfoCache object that can be used to create the DB entry for the first time. When save()ing this object, the info_cache_update() DB call will properly handle creating it if it doesn't exist already. 
""" info_cache = cls() info_cache.instance_uuid = instance_uuid info_cache.network_info = None info_cache._context = context # Leave the fields dirty return info_cache @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_obj = db.instance_info_cache_get(context, instance_uuid) if not db_obj: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance_uuid) return cls._from_db_object(context, cls(context), db_obj) @staticmethod def _info_cache_cells_update(ctxt, info_cache): cell_type = cells_opts.get_cell_type() if cell_type != 'compute': return cells_api = cells_rpcapi.CellsAPI() try: cells_api.instance_info_cache_update_at_top(ctxt, info_cache) except Exception: LOG.exception(_LE("Failed to notify cells of instance info " "cache update")) @base.remotable def save(self, context, update_cells=True): if 'network_info' in self.obj_what_changed(): nw_info_json = self.fields['network_info'].to_primitive( self, 'network_info', self.network_info) rv = db.instance_info_cache_update(context, self.instance_uuid, {'network_info': nw_info_json}) if update_cells and rv: self._info_cache_cells_update(context, rv) self.obj_reset_changes() @base.remotable def delete(self, context): db.instance_info_cache_delete(context, self.instance_uuid) @base.remotable def refresh(self, context): current = self.__class__.get_by_instance_uuid(context, self.instance_uuid) current._context = None for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] self.obj_reset_changes()
unknown
codeparrot/codeparrot-clean
import os
import sys
from datetime import datetime

from api.bluemix_vision_recognition import VisionRecognizer
from api.echonest import Echonest


def read_file(path):
    """Read a UTF-8 text file and return its lines.

    Line-separator characters are stripped from both ends of every line
    (mirrors the original ``strip(os.linesep)`` behavior).
    """
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    return [ln.strip(os.linesep) for ln in lines]


def write_file(path, rows, separator="\t"):
    """Write *rows* to *path* as UTF-8, one row per line.

    A row that is a list or tuple is joined with *separator*; any other row
    is written as-is. The file is opened in binary mode so the explicit
    ``os.linesep`` is not translated a second time by text mode.
    """
    with open(path, "wb") as outfile:
        for row in rows:
            if isinstance(row, (list, tuple)):
                line = separator.join(row) + os.linesep
            else:
                line = row + os.linesep
            outfile.write(line.encode("utf-8"))


def rekognize(image_url):
    """Label *image_url* via the vision-recognition API.

    Returns a dict mapping label name -> label score; empty when the
    service response contains no "images" entry.
    """
    result = VisionRecognizer().recognize(image_url)
    tag_and_score = {}
    if "images" in result:
        for lb in result["images"][0]["labels"]:
            tag_and_score[lb["label_name"]] = lb["label_score"]
    return tag_and_score


def rekognize_all(line):
    """Recognize every (mood, url) pair and write a mood/tag-score matrix.

    *line* is a sequence of rows whose first two items are a mood name and
    an image URL. Each image is labeled via :func:`rekognize`; failures are
    reported and skipped (best effort). The result is written to
    ``../data/photo_to_mood_<timestamp>.txt`` with columns
    ``mood, url, <sorted tag names>``; the mood column holds the index of
    the mood in ``Echonest.MOOD`` (or -1 when unknown).
    """
    tags = []
    data = []
    for i, ln in enumerate(line):
        mood = ln[0]
        url = ln[1]
        try:
            r = rekognize(url)
            for k in r:
                if k not in tags:
                    tags.append(k)
            data.append((mood, url, r))
            print("{0} success.".format(i))
        except Exception as ex:
            # Deliberate best-effort: report the failure and keep going.
            print("{0}: {1}".format(i, ex))
    tags.sort()

    header = ["mood", "url"] + tags
    lines = [header]
    for mood, url, tag_scores in data:
        mood_index = -1 if mood not in Echonest.MOOD else Echonest.MOOD.index(mood)
        row = [str(mood_index), url]
        for t in tags:
            # Missing tags score 0 so every row has a full column set.
            row.append(str(tag_scores.get(t, 0)))
        lines.append(row)

    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    path = os.path.join(os.path.dirname(__file__),
                        "../data/photo_to_mood_{0}.txt".format(timestamp))
    write_file(path, lines)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        path = os.path.join(os.path.dirname(__file__), sys.argv[1])
        inputs = [ln.split("\t")[:2] for ln in read_file(path)]
        rekognize_all(inputs)
    else:
        print("you have to set mood/image_url file.")
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true # :markup: markdown module TestUnit module Generators class InstallGenerator < ::Rails::Generators::Base source_root File.expand_path("templates", __dir__) def create_test_files template "fixtures.yml", "test/fixtures/action_text/rich_texts.yml" end end end end
ruby
github
https://github.com/rails/rails
actiontext/lib/rails/generators/test_unit/install_generator.rb
{ "html": { "type": "Fragment", "start": 45, "end": 56, "children": [ { "type": "Text", "start": 43, "end": 45, "raw": "\n\n", "data": "\n\n" }, { "type": "Element", "start": 45, "end": 56, "name": "div", "attributes": [], "children": [] } ] }, "instance": { "type": "Script", "start": 0, "end": 43, "context": "default", "content": { "type": "Program", "start": 8, "end": 34, "loc": { "start": { "line": 1, "column": 0 }, "end": { "line": 3, "column": 9 } }, "body": [], "sourceType": "module", "trailingComments": [ { "type": "Line", "value": " TODO write some code", "start": 10, "end": 33 } ] } } }
json
github
https://github.com/sveltejs/svelte
packages/svelte/tests/parser-legacy/samples/script-comment-only/output.json
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for session_bundle.gc.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.session_bundle import gc from tensorflow.python.framework import test_util from tensorflow.python.platform import gfile from tensorflow.python.platform import test class GcTest(test_util.TensorFlowTestCase): def testLargestExportVersions(self): paths = [gc.Path("/foo", 8), gc.Path("/foo", 9), gc.Path("/foo", 10)] newest = gc.largest_export_versions(2) n = newest(paths) self.assertEquals(n, [gc.Path("/foo", 9), gc.Path("/foo", 10)]) def testLargestExportVersionsDoesNotDeleteZeroFolder(self): paths = [gc.Path("/foo", 0), gc.Path("/foo", 3)] newest = gc.largest_export_versions(2) n = newest(paths) self.assertEquals(n, [gc.Path("/foo", 0), gc.Path("/foo", 3)]) def testModExportVersion(self): paths = [ gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 9) ] mod = gc.mod_export_version(2) self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 6)]) mod = gc.mod_export_version(3) self.assertEquals(mod(paths), [gc.Path("/foo", 6), gc.Path("/foo", 9)]) def testOneOfEveryNExportVersions(self): paths = [ gc.Path("/foo", 
0), gc.Path("/foo", 1), gc.Path("/foo", 3), gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 7), gc.Path("/foo", 8), gc.Path("/foo", 33) ] one_of = gc.one_of_every_n_export_versions(3) self.assertEquals( one_of(paths), [ gc.Path("/foo", 3), gc.Path("/foo", 6), gc.Path("/foo", 8), gc.Path("/foo", 33) ]) def testOneOfEveryNExportVersionsZero(self): # Zero is a special case since it gets rolled into the first interval. # Test that here. paths = [gc.Path("/foo", 0), gc.Path("/foo", 4), gc.Path("/foo", 5)] one_of = gc.one_of_every_n_export_versions(3) self.assertEquals(one_of(paths), [gc.Path("/foo", 0), gc.Path("/foo", 5)]) def testUnion(self): paths = [] for i in xrange(10): paths.append(gc.Path("/foo", i)) f = gc.union(gc.largest_export_versions(3), gc.mod_export_version(3)) self.assertEquals( f(paths), [ gc.Path("/foo", 0), gc.Path("/foo", 3), gc.Path("/foo", 6), gc.Path("/foo", 7), gc.Path("/foo", 8), gc.Path("/foo", 9) ]) def testNegation(self): paths = [ gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 9) ] mod = gc.negation(gc.mod_export_version(2)) self.assertEquals(mod(paths), [gc.Path("/foo", 5), gc.Path("/foo", 9)]) mod = gc.negation(gc.mod_export_version(3)) self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 5)]) def testPathsWithParse(self): base_dir = os.path.join(test.get_temp_dir(), "paths_parse") self.assertFalse(gfile.Exists(base_dir)) for p in xrange(3): gfile.MakeDirs(os.path.join(base_dir, "%d" % p)) # add a base_directory to ignore gfile.MakeDirs(os.path.join(base_dir, "ignore")) # create a simple parser that pulls the export_version from the directory. 
def parser(path): match = re.match("^" + base_dir + "/(\\d+)$", path.path) if not match: return None return path._replace(export_version=int(match.group(1))) self.assertEquals( gc.get_paths( base_dir, parser=parser), [ gc.Path(os.path.join(base_dir, "0"), 0), gc.Path(os.path.join(base_dir, "1"), 1), gc.Path(os.path.join(base_dir, "2"), 2) ]) if __name__ == "__main__": test.main()
unknown
codeparrot/codeparrot-clean
# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import uuid import mock from oslo_serialization import jsonutils import six from jacket import context from jacket.compute import exception from jacket.objects import compute from jacket.objects.compute import base as base_obj from jacket.objects.compute import fields from jacket.compute.pci import stats from jacket.compute import test from jacket.compute.virt import hardware as hw class InstanceInfoTests(test.NoDBTestCase): def test_instance_info_default(self): ii = hw.InstanceInfo() self.assertIsNone(ii.state) self.assertIsNone(ii.id) self.assertEqual(0, ii.max_mem_kb) self.assertEqual(0, ii.mem_kb) self.assertEqual(0, ii.num_cpu) self.assertEqual(0, ii.cpu_time_ns) def test_instance_info(self): ii = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') self.assertEqual('fake-state', ii.state) self.assertEqual('fake-id', ii.id) self.assertEqual(1, ii.max_mem_kb) self.assertEqual(2, ii.mem_kb) self.assertEqual(3, ii.num_cpu) self.assertEqual(4, ii.cpu_time_ns) def test_instance_infoi_equals(self): ii1 = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') ii2 = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') ii3 = hw.InstanceInfo(state='fake-estat', max_mem_kb=11, mem_kb=22, num_cpu=33, cpu_time_ns=44, id='fake-di') self.assertEqual(ii1, 
ii2) self.assertNotEqual(ii1, ii3) class CpuSetTestCase(test.NoDBTestCase): def test_get_vcpu_pin_set(self): self.flags(vcpu_pin_set="1-3,5,^2") cpuset_ids = hw.get_vcpu_pin_set() self.assertEqual(set([1, 3, 5]), cpuset_ids) def test_parse_cpu_spec_none_returns_none(self): self.flags(vcpu_pin_set=None) cpuset_ids = hw.get_vcpu_pin_set() self.assertIsNone(cpuset_ids) def test_parse_cpu_spec_valid_syntax_works(self): cpuset_ids = hw.parse_cpu_spec("1") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1,2") self.assertEqual(set([1, 2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,") self.assertEqual(set([1, 2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-1") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3") self.assertEqual(set([1, 2, 3]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1,^2") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-2, ^1") self.assertEqual(set([2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-3,5,^2") self.assertEqual(set([1, 3, 5]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5") self.assertEqual(set([1, 3, 5]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1") self.assertEqual(set([]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("^0-1") self.assertEqual(set([]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("0-3,^1-2") self.assertEqual(set([0, 3]), cpuset_ids) def test_parse_cpu_spec_invalid_syntax_raises(self): self.assertRaises(exception.Invalid, hw.parse_cpu_spec, " -1-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3-,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^2^") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^2-") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "--13,^^5,^2") self.assertRaises(exception.Invalid, 
hw.parse_cpu_spec, "a-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-a,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,b,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^c") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "3 - 1, 5 , ^ 2 ") def test_format_cpu_spec(self): cpus = set([]) spec = hw.format_cpu_spec(cpus) self.assertEqual("", spec) cpus = [] spec = hw.format_cpu_spec(cpus) self.assertEqual("", spec) cpus = set([1, 3]) spec = hw.format_cpu_spec(cpus) self.assertEqual("1,3", spec) cpus = [1, 3] spec = hw.format_cpu_spec(cpus) self.assertEqual("1,3", spec) cpus = set([1, 2, 4, 6]) spec = hw.format_cpu_spec(cpus) self.assertEqual("1-2,4,6", spec) cpus = [1, 2, 4, 6] spec = hw.format_cpu_spec(cpus) self.assertEqual("1-2,4,6", spec) cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]) spec = hw.format_cpu_spec(cpus) self.assertEqual("10-11,13-16,19-20,40,42,48", spec) cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48] spec = hw.format_cpu_spec(cpus) self.assertEqual("10-11,13-16,19-20,40,42,48", spec) cpus = set([1, 2, 4, 6]) spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("1,2,4,6", spec) cpus = [1, 2, 4, 6] spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("1,2,4,6", spec) cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]) spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec) cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48] spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec) class VCPUTopologyTest(test.NoDBTestCase): def test_validate_config(self): testdata = [ { # Flavor sets preferred topology only "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", }), "image": { "properties": {} }, "expect": ( 8, 2, 1, 65536, 65536, 65536 ) }, { # Image 
topology overrides flavor "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_max_threads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": ( 4, 2, 2, 65536, 65536, 2, ) }, { # Partial image topology overrides flavor "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_sockets": "2", } }, "expect": ( 2, -1, -1, 65536, 65536, 65536, ) }, { # Restrict use of threads "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_threads": "2", }), "image": { "properties": { "hw_cpu_max_threads": "1", } }, "expect": ( -1, -1, -1, 65536, 65536, 1, ) }, { # Force use of at least two sockets "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": {} }, "expect": ( -1, -1, -1, 65536, 8, 1 ) }, { # Image limits reduce flavor "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "4", } }, "expect": ( -1, -1, -1, 65536, 4, 1 ) }, { # Image limits kill flavor preferred "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "4", } }, "expect": ( -1, -1, -1, 65536, 4, 65536 ) }, { # Image limits cannot exceed flavor "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "16", } }, "expect": exception.ImageVCPULimitsRangeExceeded, }, { # Image preferred cannot exceed flavor "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": 
"8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_cores": "16", } }, "expect": exception.ImageVCPUTopologyRangeExceeded, }, ] for topo_test in testdata: image_meta = compute.ImageMeta.from_dict(topo_test["image"]) if type(topo_test["expect"]) == tuple: (preferred, maximum) = hw._get_cpu_topology_constraints( topo_test["flavor"], image_meta) self.assertEqual(topo_test["expect"][0], preferred.sockets) self.assertEqual(topo_test["expect"][1], preferred.cores) self.assertEqual(topo_test["expect"][2], preferred.threads) self.assertEqual(topo_test["expect"][3], maximum.sockets) self.assertEqual(topo_test["expect"][4], maximum.cores) self.assertEqual(topo_test["expect"][5], maximum.threads) else: self.assertRaises(topo_test["expect"], hw._get_cpu_topology_constraints, topo_test["flavor"], image_meta) def test_possible_topologies(self): testdata = [ { "allow_threads": True, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], [4, 1, 2], [2, 2, 2], [1, 4, 2], ] }, { "allow_threads": False, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1024, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], [4, 1, 2], [2, 2, 2], [1, 4, 2], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 1, 2], ] }, { "allow_threads": True, "vcpus": 7, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [7, 1, 1], [1, 7, 1], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 2, "maxcores": 1, "maxthreads": 1, "expect": exception.ImageVCPULimitsRangeImpossible, }, { "allow_threads": False, "vcpus": 8, "maxsockets": 2, "maxcores": 1, "maxthreads": 4, "expect": exception.ImageVCPULimitsRangeImpossible, }, ] for topo_test in testdata: if type(topo_test["expect"]) == list: actual 
= [] for topology in hw._get_possible_cpu_topologies( topo_test["vcpus"], compute.VirtCPUTopology( sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]): actual.append([topology.sockets, topology.cores, topology.threads]) self.assertEqual(topo_test["expect"], actual) else: self.assertRaises(topo_test["expect"], hw._get_possible_cpu_topologies, topo_test["vcpus"], compute.VirtCPUTopology( sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]) def test_sorting_topologies(self): testdata = [ { "allow_threads": True, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "sockets": 4, "cores": 2, "threads": 1, "expect": [ [4, 2, 1], # score = 2 [8, 1, 1], # score = 1 [2, 4, 1], # score = 1 [1, 8, 1], # score = 1 [4, 1, 2], # score = 1 [2, 2, 2], # score = 1 [1, 4, 2], # score = 1 ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1024, "maxthreads": 2, "sockets": -1, "cores": 4, "threads": -1, "expect": [ [2, 4, 1], # score = 1 [1, 4, 2], # score = 1 [8, 1, 1], # score = 0 [4, 2, 1], # score = 0 [1, 8, 1], # score = 0 [4, 1, 2], # score = 0 [2, 2, 2], # score = 0 ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "sockets": -1, "cores": -1, "threads": 2, "expect": [ [4, 1, 2], # score = 1 [8, 1, 1], # score = 0 ] }, { "allow_threads": False, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "sockets": -1, "cores": -1, "threads": 2, "expect": [ [8, 1, 1], # score = 0 ] }, ] for topo_test in testdata: actual = [] possible = hw._get_possible_cpu_topologies( topo_test["vcpus"], compute.VirtCPUTopology(sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]) tops = hw._sort_possible_cpu_topologies( possible, compute.VirtCPUTopology(sockets=topo_test["sockets"], cores=topo_test["cores"], 
threads=topo_test["threads"])) for topology in tops: actual.append([topology.sockets, topology.cores, topology.threads]) self.assertEqual(topo_test["expect"], actual) def test_best_config(self): testdata = [ { # Flavor sets preferred topology only "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" }), "image": { "properties": {} }, "expect": [8, 2, 1], }, { # Image topology overrides flavor "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_maxthreads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": [4, 2, 2], }, { # Image topology overrides flavor "allow_threads": False, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_maxthreads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": [8, 2, 1], }, { # Partial image topology overrides flavor "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" }), "image": { "properties": { "hw_cpu_sockets": "2" } }, "expect": [2, 8, 1], }, { # Restrict use of threads "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_threads": "1" }), "image": { "properties": {} }, "expect": [16, 1, 1] }, { # Force use of at least two sockets "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": {} }, "expect": [16, 1, 1] }, { # Image limits reduce flavor "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ 
"hw:cpu_max_sockets": "8", "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_sockets": 4, } }, "expect": [4, 4, 1] }, { # Image limits kill flavor preferred "allow_threads": True, "flavor": compute.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": 4, } }, "expect": [16, 1, 1] }, { # NUMA needs threads, only cores requested by flavor "allow_threads": True, "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_cores": "2", }), "image": { "properties": { "hw_cpu_max_cores": 2, } }, "numa_topology": compute.InstanceNUMATopology( cells=[ compute.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_topology=compute.VirtCPUTopology( sockets=1, cores=1, threads=2)), compute.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024)]), "expect": [1, 2, 2] }, { # NUMA needs threads, but more than requested by flavor - the # least amount of threads wins "allow_threads": True, "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_threads": "2", }), "image": { "properties": {} }, "numa_topology": compute.InstanceNUMATopology( cells=[ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=compute.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 2] }, { # NUMA needs threads, but more than limit in flavor - the # least amount of threads which divides into the vcpu # count wins. So with desired 4, max of 3, and # vcpu count of 4, we should get 2 threads. 
"allow_threads": True, "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_max_sockets": "5", "hw:cpu_max_cores": "2", "hw:cpu_max_threads": "3", }), "image": { "properties": {} }, "numa_topology": compute.InstanceNUMATopology( cells=[ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=compute.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 2] }, { # NUMA needs threads, but thread count does not # divide into flavor vcpu count, so we must # reduce thread count to closest divisor "allow_threads": True, "flavor": compute.Flavor(vcpus=6, memory_mb=2048, extra_specs={ }), "image": { "properties": {} }, "numa_topology": compute.InstanceNUMATopology( cells=[ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=compute.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 3] }, { # NUMA needs different number of threads per cell - the least # amount of threads wins "allow_threads": True, "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={}), "image": { "properties": {} }, "numa_topology": compute.InstanceNUMATopology( cells=[ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_topology=compute.VirtCPUTopology( sockets=1, cores=2, threads=2)), compute.InstanceNUMACell( id=1, cpuset=set([4, 5, 6, 7]), memory=1024, cpu_topology=compute.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [4, 1, 2] }, ] for topo_test in testdata: image_meta = compute.ImageMeta.from_dict(topo_test["image"]) topology = hw._get_desirable_cpu_topologies( topo_test["flavor"], image_meta, topo_test["allow_threads"], topo_test.get("numa_topology"))[0] self.assertEqual(topo_test["expect"][0], topology.sockets) self.assertEqual(topo_test["expect"][1], topology.cores) self.assertEqual(topo_test["expect"][2], topology.threads) class NUMATopologyTest(test.NoDBTestCase): def test_topology_constraints(self): testdata = [ { "flavor": 
compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ }), "image": { }, "expect": None, }, { "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2 }), "image": { }, "expect": compute.InstanceNUMATopology(cells= [ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), compute.InstanceNUMACell( id=1, cpuset=set([4, 5, 6, 7]), memory=1024), ]), }, { "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:mem_page_size": 2048 }), "image": { }, "expect": compute.InstanceNUMATopology(cells=[ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048, pagesize=2048) ]), }, { # vcpus is not a multiple of nodes, so it # is an error to not provide cpu/mem mapping "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 3 }), "image": { }, "expect": exception.ImageNUMATopologyAsymmetric, }, { "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 3, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "4,6", "hw:numa_mem.1": "512", "hw:numa_cpus.2": "5,7", "hw:numa_mem.2": "512", }), "image": { }, "expect": compute.InstanceNUMATopology(cells= [ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), compute.InstanceNUMACell( id=1, cpuset=set([4, 6]), memory=512), compute.InstanceNUMACell( id=2, cpuset=set([5, 7]), memory=512) ]), }, { "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ }), "image": { "properties": { "hw_numa_nodes": 3, "hw_numa_cpus.0": "0-3", "hw_numa_mem.0": "1024", "hw_numa_cpus.1": "4,6", "hw_numa_mem.1": "512", "hw_numa_cpus.2": "5,7", "hw_numa_mem.2": "512", }, }, "expect": compute.InstanceNUMATopology(cells= [ compute.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), compute.InstanceNUMACell( id=1, cpuset=set([4, 6]), memory=512), compute.InstanceNUMACell( id=2, cpuset=set([5, 7]), memory=512) ]), }, { # Request a CPU that is out of range # wrt vCPU count "flavor": 
compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 1, "hw:numa_cpus.0": "0-16", "hw:numa_mem.0": "2048", }), "image": { }, "expect": exception.ImageNUMATopologyCPUOutOfRange, }, { # Request the same CPU in two nodes "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-7", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "0-7", "hw:numa_mem.1": "1024", }), "image": { }, "expect": exception.ImageNUMATopologyCPUDuplicates, }, { # Request with some CPUs not assigned "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-2", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "3-4", "hw:numa_mem.1": "1024", }), "image": { }, "expect": exception.ImageNUMATopologyCPUsUnassigned, }, { # Request too little memory vs flavor total "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "512", "hw:numa_cpus.1": "4-7", "hw:numa_mem.1": "512", }), "image": { }, "expect": exception.ImageNUMATopologyMemoryOutOfRange, }, { # Request too much memory vs flavor total "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "1576", "hw:numa_cpus.1": "4-7", "hw:numa_mem.1": "1576", }), "image": { }, "expect": exception.ImageNUMATopologyMemoryOutOfRange, }, { # Request missing mem.0 "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.1": "1576", }), "image": { }, "expect": exception.ImageNUMATopologyIncomplete, }, { # Request missing cpu.0 "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_mem.0": "1576", "hw:numa_cpus.1": "4-7", }), "image": { }, "expect": exception.ImageNUMATopologyIncomplete, }, { # Image attempts to override flavor "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, }), "image": { 
                "properties": {"hw_numa_nodes": 4}},
             "expect": exception.ImageNUMATopologyForbidden,
            },
            {
                # NUMA + CPU pinning requested in the flavor
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                }),
                "image": {},
                "expect": compute.InstanceNUMATopology(cells=[
                    compute.InstanceNUMACell(
                        id=0, cpuset=set([0, 1]), memory=1024,
                        cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                    compute.InstanceNUMACell(
                        id=1, cpuset=set([2, 3]), memory=1024,
                        cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # no NUMA + CPU pinning requested in the flavor
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                }),
                "image": {},
                "expect": compute.InstanceNUMATopology(cells=[
                    compute.InstanceNUMACell(
                        id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                        cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # NUMA + CPU pinning requested in the image
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 2
                }),
                "image": {"properties": {
                    "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED}},
                "expect": compute.InstanceNUMATopology(cells=[
                    compute.InstanceNUMACell(
                        id=0, cpuset=set([0, 1]), memory=1024,
                        cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                    compute.InstanceNUMACell(
                        id=1, cpuset=set([2, 3]), memory=1024,
                        cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # no NUMA + CPU pinning requested in the image
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={}),
                "image": {"properties": {
                    "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED}},
                "expect": compute.InstanceNUMATopology(cells=[
                    compute.InstanceNUMACell(
                        id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                        cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # Invalid CPU pinning override
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy":
                            fields.CPUAllocationPolicy.DEDICATED}},
                "expect": exception.ImageCPUPinningForbidden,
            },
            {
                # Invalid CPU pinning policy with realtime
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED,
                    "hw:cpu_realtime": "yes",
                }),
                "image": {"properties": {}},
                "expect": exception.RealtimeConfigurationInvalid,
            },
            {
                # Invalid CPU thread pinning override
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.ISOLATE,
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                        "hw_cpu_thread_policy":
                            fields.CPUThreadAllocationPolicy.REQUIRE,
                    }},
                "expect": exception.ImageCPUThreadPolicyForbidden,
            },
            {
                # Invalid CPU pinning policy with CPU thread pinning
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.ISOLATE,
                }),
                "image": {"properties": {}},
                "expect": exception.CPUThreadPolicyConfigurationInvalid,
            },
            {
                # Invalid vCPUs mask with realtime
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048, extra_specs={
                    "hw:cpu_policy": "dedicated",
                    "hw:cpu_realtime": "yes",
                }),
                "image": {"properties": {}},
                "expect": exception.RealtimeMaskNotFoundOrInvalid,
            },
        ]

        # Drive numa_get_constraints() through every scenario above.  An
        # "expect" of None means "no topology requested", a class means
        # "this exception must be raised", and a topology object means
        # "the computed constraint must match cell for cell".
        for testitem in testdata:
            image_meta = compute.ImageMeta.from_dict(testitem["image"])
            if testitem["expect"] is None:
                topology = hw.numa_get_constraints(
                    testitem["flavor"], image_meta)
                self.assertIsNone(topology)
            # True when "expect" is an exception class (its metaclass is
            # exactly `type`), i.e. the scenario must raise.
            elif type(testitem["expect"]) == type:
                self.assertRaises(testitem["expect"],
                                  hw.numa_get_constraints,
                                  testitem["flavor"], image_meta)
            else:
                topology = hw.numa_get_constraints(
                    testitem["flavor"], image_meta)
                self.assertIsNotNone(topology)
                self.assertEqual(len(testitem["expect"].cells),
                                 len(topology.cells))
                # Compare the fields the scenarios actually specify.
                for i in range(len(topology.cells)):
                    self.assertEqual(testitem["expect"].cells[i].id,
                                     topology.cells[i].id)
                    self.assertEqual(testitem["expect"].cells[i].cpuset,
                                     topology.cells[i].cpuset)
                    self.assertEqual(testitem["expect"].cells[i].memory,
                                     topology.cells[i].memory)
                    self.assertEqual(testitem["expect"].cells[i].pagesize,
                                     topology.cells[i].pagesize)
                    self.assertEqual(testitem["expect"].cells[i].cpu_pinning,
                                     topology.cells[i].cpu_pinning)

    def test_host_usage_contiguous(self):
        """Usage accounting on a host whose NUMA cell ids are contiguous."""
        hpages0_4K = compute.NUMAPagesTopology(size_kb=4, total=256, used=0)
        hpages0_2M = compute.NUMAPagesTopology(size_kb=2048, total=0, used=1)
        hpages1_4K = compute.NUMAPagesTopology(size_kb=4, total=128, used=2)
        hpages1_2M = compute.NUMAPagesTopology(size_kb=2048, total=0, used=3)
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=0, memory_usage=0,
                             mempages=[hpages0_4K, hpages0_2M],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
                             cpu_usage=0, memory_usage=0,
                             mempages=[hpages1_4K, hpages1_2M],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=2, cpuset=set([5, 7]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
            compute.InstanceNUMACell(id=1, cpuset=set([4]), memory=256),
        ])
        instance2 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
            compute.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256),
        ])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1, instance2])
        self.assertEqual(len(hosttopo), len(hostusage))
        # Cell 0: 3 + 2 pinned vCPUs, 256 + 256 MB from the two instances.
        self.assertIsInstance(hostusage.cells[0], compute.NUMACell)
        self.assertEqual(hosttopo.cells[0].cpuset,
                         hostusage.cells[0].cpuset)
        self.assertEqual(hosttopo.cells[0].memory,
                         hostusage.cells[0].memory)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[0].mempages, [
            hpages0_4K, hpages0_2M])
        self.assertIsInstance(hostusage.cells[1], compute.NUMACell)
        self.assertEqual(hosttopo.cells[1].cpuset,
                         hostusage.cells[1].cpuset)
        self.assertEqual(hosttopo.cells[1].memory,
                         hostusage.cells[1].memory)
        self.assertEqual(hostusage.cells[1].cpu_usage, 3)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].mempages, [
            hpages1_4K, hpages1_2M])
        # The page-pool objects themselves keep their initial counters —
        # usage accounting must not mutate them.
        self.assertEqual(256, hpages0_4K.total)
        self.assertEqual(0, hpages0_4K.used)
        self.assertEqual(0, hpages0_2M.total)
        self.assertEqual(1, hpages0_2M.used)
        # Cell 2 hosted no instance cells, so it stays untouched.
        self.assertIsInstance(hostusage.cells[2], compute.NUMACell)
        self.assertEqual(hosttopo.cells[2].cpuset,
                         hostusage.cells[2].cpuset)
        self.assertEqual(hosttopo.cells[2].memory,
                         hostusage.cells[2].memory)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)
        self.assertEqual(128, hpages1_4K.total)
        self.assertEqual(2, hpages1_4K.used)
        self.assertEqual(0, hpages1_2M.total)
        self.assertEqual(3, hpages1_2M.used)

    def test_host_usage_sparse(self):
        """Usage accounting when host NUMA cell ids are non-contiguous.

        Instance cells must be matched to host cells by id (0, 5, 6 here),
        not by list position.
        """
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=5, cpuset=set([4, 6]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=6, cpuset=set([5, 7]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
            compute.InstanceNUMACell(id=6, cpuset=set([4]), memory=256),
        ])
        instance2 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256,
                                     cpu_usage=0, memory_usage=0,
                                     mempages=[]),
            compute.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256,
                                     cpu_usage=0, memory_usage=0,
                                     mempages=[]),
        ])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1, instance2])
        self.assertEqual(len(hosttopo), len(hostusage))
        self.assertIsInstance(hostusage.cells[0], compute.NUMACell)
        self.assertEqual(hosttopo.cells[0].id,
                         hostusage.cells[0].id)
        self.assertEqual(hosttopo.cells[0].cpuset,
                         hostusage.cells[0].cpuset)
        self.assertEqual(hosttopo.cells[0].memory,
                         hostusage.cells[0].memory)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertIsInstance(hostusage.cells[1], compute.NUMACell)
        self.assertEqual(hosttopo.cells[1].id,
                         hostusage.cells[1].id)
        self.assertEqual(hosttopo.cells[1].cpuset,
                         hostusage.cells[1].cpuset)
        self.assertEqual(hosttopo.cells[1].memory,
                         hostusage.cells[1].memory)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 256)
        self.assertIsInstance(hostusage.cells[2], compute.NUMACell)
        self.assertEqual(hosttopo.cells[2].cpuset,
                         hostusage.cells[2].cpuset)
        self.assertEqual(hosttopo.cells[2].memory,
                         hostusage.cells[2].memory)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)

    def test_host_usage_culmulative_with_free(self):
        """Usage adds on top of pre-existing usage; free=True subtracts it."""
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=2, memory_usage=512, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
                             cpu_usage=1, memory_usage=512, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=2, cpuset=set([5, 7]), memory=256,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512),
            compute.InstanceNUMACell(id=1, cpuset=set([3]), memory=256),
            compute.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1])
        self.assertIsInstance(hostusage.cells[0], compute.NUMACell)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 1024)
        self.assertIsInstance(hostusage.cells[1], compute.NUMACell)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 768)
        self.assertIsInstance(hostusage.cells[2], compute.NUMACell)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)
        # Test freeing of resources
        hostusage = hw.numa_usage_from_instances(
            hostusage, [instance1], free=True)
        self.assertEqual(hostusage.cells[0].cpu_usage, 2)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].cpu_usage, 1)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)

    def test_topo_usage_none(self):
        """None host topology and empty/None instance lists are tolerated."""
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
            compute.InstanceNUMACell(id=2, cpuset=set([2]), memory=256),
        ])
        # No host topology -> no usage to report.
        hostusage = hw.numa_usage_from_instances(
            None, [instance1])
        self.assertIsNone(hostusage)
        # An empty instance list leaves usage at zero.
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [])
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)
        # As does passing None for the instance list.
        hostusage = hw.numa_usage_from_instances(
            hosttopo, None)
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)

    def assertNUMACellMatches(self, expected_cell, got_cell):
        """Assert two NUMA cells agree on their identifying fields."""
        attrs = ('cpuset', 'memory', 'id')
        # NOTE(review): expected_cell is a cell, so this isinstance check
        # against NUMATopology looks like it can never fire — confirm.
        if isinstance(expected_cell, compute.NUMATopology):
            attrs += ('cpu_usage', 'memory_usage')

        for attr in attrs:
            self.assertEqual(getattr(expected_cell, attr),
                             getattr(got_cell, attr))

    def test_json(self):
        """A topology survives a round trip through its JSON form."""
        expected = compute.NUMATopology(
            cells=[
                compute.NUMACell(id=1, cpuset=set([1, 2]), memory=1024,
                                 cpu_usage=0, memory_usage=0, mempages=[],
                                 siblings=[], pinned_cpus=set([])),
                compute.NUMACell(id=2, cpuset=set([3, 4]), memory=1024,
                                 cpu_usage=0, memory_usage=0, mempages=[],
                                 siblings=[], pinned_cpus=set([]))])
        got = compute.NUMATopology.obj_from_db_obj(expected._to_json())

        for exp_cell, got_cell in zip(expected.cells, got.cells):
            self.assertNUMACellMatches(exp_cell, got_cell)


class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase):
    """Tests for fitting a single instance cell into a single host cell."""

    def test_fit_instance_cell_success_no_limit(self):
        """An exactly-fitting cell is placed and adopts the host cell id."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=0,
                                     memory_usage=0,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(host_cell, instance_cell)
        self.assertIsInstance(fitted_cell, compute.InstanceNUMACell)
        self.assertEqual(host_cell.id, fitted_cell.id)

    def test_fit_instance_cell_success_w_limit(self):
        """A fully-used host cell still fits with 2x overcommit limits."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=2,
                                     memory_usage=1024,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        limit_cell = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsInstance(fitted_cell, compute.InstanceNUMACell)
        self.assertEqual(host_cell.id, fitted_cell.id)

    def test_fit_instance_cell_self_overcommit(self):
        """A single instance cell may not exceed the host cell by itself."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=0,
                                     memory_usage=0,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        limit_cell = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3]), memory=4096)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)

    def test_fit_instance_cell_fail_w_limit(self):
        """Exceeding the overcommit limit (RAM or CPU) rejects the fit."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=2,
                                     memory_usage=1024,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=4096)
        limit_cell = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)

        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3, 4, 5]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)


class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
    """Tests for fitting whole instance topologies onto a host."""

    def setUp(self):
        super(VirtNUMAHostTopologyTestCase, self).setUp()

        # Two host cells, each already at 100% usage; the 2x overcommit
        # limits below leave room for one more 2048 MB / 2 vCPU cell each.
        self.host = compute.NUMATopology(
            cells=[
                compute.NUMACell(id=1, cpuset=set([1, 2]), memory=2048,
                                 cpu_usage=2, memory_usage=2048,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([])),
                compute.NUMACell(id=2, cpuset=set([3, 4]), memory=2048,
                                 cpu_usage=2, memory_usage=2048,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([]))])

        self.limits = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)

        self.instance1 = compute.InstanceNUMATopology(
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([1, 2]), memory=2048)])
        self.instance2 = compute.InstanceNUMATopology(
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([1, 2, 3, 4]), memory=1024)])
        self.instance3 = compute.InstanceNUMATopology(
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([1, 2]), memory=1024)])

    def test_get_fitting_success_no_limits(self):
        """Without limits, successive instances keep fitting."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1)
        self.assertIsInstance(fitted_instance1, compute.InstanceNUMATopology)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance3)
        self.assertIsInstance(fitted_instance2, compute.InstanceNUMATopology)

    def test_get_fitting_success_limits(self):
        """Within the overcommit limits the first host cell is chosen."""
        fitted_instance = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted_instance, compute.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance.cells[0].id)

    def test_get_fitting_fails_no_limits(self):
        """An instance cell too big for any host cell does not fit."""
        fitted_instance = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted_instance)

    def test_get_fitting_culmulative_fails_limits(self):
        """Accumulated usage from a prior fit blocks the next instance."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(fitted_instance1, compute.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance1.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted_instance2)

    def test_get_fitting_culmulative_success_limits(self):
        """After filling cell 1, a smaller instance lands on cell 2."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(fitted_instance1, compute.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance1.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted_instance2, compute.InstanceNUMATopology)
        self.assertEqual(2, fitted_instance2.cells[0].id)

    def test_get_fitting_pci_success(self):
        """PCI requests that the stats object can satisfy allow the fit."""
        pci_request = compute.InstancePCIRequest(count=1,
            spec=[{'vendor_id': '8086'}])
        pci_reqs = [pci_request]
        pci_stats = stats.PciDeviceStats()
        with mock.patch.object(stats.PciDeviceStats,
                'support_requests', return_value=True):
            fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
                                                        self.instance1,
                                                        pci_requests=pci_reqs,
                                                        pci_stats=pci_stats)
            self.assertIsInstance(fitted_instance1,
                                  compute.InstanceNUMATopology)

    def test_get_fitting_pci_fail(self):
        """Unsatisfiable PCI requests veto an otherwise-valid fit."""
        pci_request = compute.InstancePCIRequest(count=1,
            spec=[{'vendor_id': '8086'}])
        pci_reqs = [pci_request]
        pci_stats = stats.PciDeviceStats()
        with mock.patch.object(stats.PciDeviceStats,
                'support_requests', return_value=False):
            fitted_instance1 = hw.numa_fit_instance_to_host(
                self.host,
                self.instance1,
                pci_requests=pci_reqs,
                pci_stats=pci_stats)
            self.assertIsNone(fitted_instance1)


class NumberOfSerialPortsTest(test.NoDBTestCase):
    """Tests for resolving the serial port count from flavor and image."""

    def test_flavor(self):
        flavor = compute.Flavor(vcpus=8, memory_mb=2048,
                                extra_specs={"hw:serial_port_count": 3})
        image_meta = compute.ImageMeta.from_dict({})
        num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
        self.assertEqual(3, num_ports)

    def test_image_meta(self):
        flavor = compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={})
        image_meta = compute.ImageMeta.from_dict(
            {"properties": {"hw_serial_port_count": 2}})
        num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
        self.assertEqual(2, num_ports)

    def test_flavor_invalid_value(self):
        # A non-numeric flavor value must be rejected.
        flavor = compute.Flavor(vcpus=8, memory_mb=2048,
                                extra_specs={"hw:serial_port_count": 'foo'})
        image_meta = compute.ImageMeta.from_dict({})
        self.assertRaises(exception.ImageSerialPortNumberInvalid,
                          hw.get_number_of_serial_ports,
                          flavor, image_meta)

    def test_image_meta_smaller_than_flavor(self):
        # The image may lower the flavor's count.
        flavor = compute.Flavor(vcpus=8, memory_mb=2048,
                                extra_specs={"hw:serial_port_count": 3})
        image_meta = compute.ImageMeta.from_dict(
            {"properties": {"hw_serial_port_count": 2}})
        num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
        self.assertEqual(2, num_ports)

    def test_flavor_smaller_than_image_meta(self):
        # ...but may not raise it above the flavor's count.
        flavor = compute.Flavor(vcpus=8, memory_mb=2048,
                                extra_specs={"hw:serial_port_count": 3})
        image_meta = compute.ImageMeta.from_dict(
            {"properties": {"hw_serial_port_count": 4}})
        self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
                          hw.get_number_of_serial_ports,
                          flavor, image_meta)


class HelperMethodsTestCase(test.NoDBTestCase):
    """Tests for get_host_numa_usage_from_instance() input/output forms."""

    def setUp(self):
        super(HelperMethodsTestCase, self).setUp()
        self.hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        self.instancetopo = compute.InstanceNUMATopology(
            instance_uuid='fake-uuid',
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]), memory=256, pagesize=2048,
                    cpu_pinning={0: 0, 1: 1}),
                compute.InstanceNUMACell(
                    id=1, cpuset=set([2]), memory=256, pagesize=2048,
                    cpu_pinning={2: 3}),
            ])
        self.context = context.RequestContext('fake-user',
                                              'fake-project')

    def _check_usage(self, host_usage):
        # Shared expectation: the instance topology above consumes 2 CPUs /
        # 256 MB on cell 0 and 1 CPU / 256 MB on cell 1.
        self.assertEqual(2, host_usage.cells[0].cpu_usage)
        self.assertEqual(256, host_usage.cells[0].memory_usage)
        self.assertEqual(1, host_usage.cells[1].cpu_usage)
        self.assertEqual(256, host_usage.cells[1].memory_usage)

    def test_dicts_json(self):
        """JSON host + JSON instance -> JSON result."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo._to_json()}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_dicts_instance_json(self):
        """Object host + JSON instance -> object result."""
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology': self.instancetopo._to_json()}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, compute.NUMATopology)
        self._check_usage(res)

    def test_dicts_instance_json_old(self):
        """Object host + legacy dict-serialized instance topology."""
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology': jsonutils.dumps(
            self.instancetopo._to_dict())}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, compute.NUMATopology)
        self._check_usage(res)

    def test_dicts_host_json(self):
        """JSON host + object instance -> JSON result."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_dicts_host_json_old(self):
        """Legacy dict-serialized host topology + object instance."""
        host = {'numa_topology': jsonutils.dumps(
            self.hosttopo._to_dict())}
        instance = {'numa_topology': self.instancetopo}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_object_host_instance_json(self):
        """ComputeNode host object + JSON instance topology."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo._to_json()}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_object_host_instance(self):
        """ComputeNode host object + instance topology object."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_instance_with_fetch(self):
        """A bare instance dict triggers a topology fetch by uuid."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = {'uuid': fake_uuid}

        with mock.patch.object(compute.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)

    def test_object_instance_with_load(self):
        """An Instance object without topology loads it lazily."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = compute.Instance(context=self.context, uuid=fake_uuid)

        with mock.patch.object(compute.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)

    def test_instance_serialized_by_build_request_spec(self):
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = compute.Instance(context=self.context, id=1,
                                    uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
        # We can remove this test once we no longer use that method.
        instance_raw = jsonutils.to_primitive(
            base_obj.obj_to_primitive(instance))
        res = hw.get_host_numa_usage_from_instance(host, instance_raw)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_attr_host(self):
        """A host exposing numa_topology only as an attribute also works."""
        class Host(object):
            def __init__(obj):
                obj.numa_topology = self.hosttopo._to_json()

        host = Host()
        instance = {'numa_topology': self.instancetopo._to_json()}

        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))

    def test_never_serialize_result(self):
        """never_serialize_result=True keeps the result an object."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}

        res = hw.get_host_numa_usage_from_instance(host, instance,
                                                   never_serialize_result=True)
        self.assertIsInstance(res, compute.NUMATopology)
        self._check_usage(res)

    def test_dict_numa_topology_to_obj(self):
        """A primitive-serialized instance round-trips its NUMA topology."""
        fake_uuid = str(uuid.uuid4())
        instance = compute.Instance(context=self.context, id=1,
                                    uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        instance_dict = base_obj.obj_to_primitive(instance)
        instance_numa_topo = hw.instance_topology_from_instance(instance_dict)
        for expected_cell, actual_cell in zip(
                self.instancetopo.cells, instance_numa_topo.cells):
            for k in expected_cell.fields:
                self.assertEqual(getattr(expected_cell, k),
                                 getattr(actual_cell, k))


class VirtMemoryPagesTestCase(test.NoDBTestCase):
    """Tests for memory page size constraints and accounting."""

    def test_cell_instance_pagesize(self):
        """An instance cell stores the page size it was built with."""
        cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)

        self.assertEqual(0, cell.id)
        self.assertEqual(set([0]), cell.cpuset)
        self.assertEqual(1024, cell.memory)
        self.assertEqual(2048, cell.pagesize)

    def test_numa_pagesize_usage_from_cell(self):
        """512 MB of 2M pages consumes 256 pages from the host pool."""
        instcell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=512, pagesize=2048)
        hostcell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, cpu_usage=0, memory_usage=0,
            mempages=[compute.NUMAPagesTopology(
                size_kb=2048, total=512, used=0)],
            siblings=[], pinned_cpus=set([]))

        topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1)
        self.assertEqual(2048, topo[0].size_kb)
        self.assertEqual(512, topo[0].total)
        self.assertEqual(256, topo[0].used)

    def _test_get_requested_mempages_pagesize(self, spec=None, props=None):
        # Helper: resolve the page size constraint from flavor extra specs
        # and image properties.
        flavor = compute.Flavor(vcpus=16, memory_mb=2048,
                                extra_specs=spec or {})
        image_meta = compute.ImageMeta.from_dict({"properties": props or {}})
        return hw._numa_get_pagesize_constraints(flavor, image_meta)

    def test_get_requested_mempages_pagesize_from_flavor_swipe(self):
        # Symbolic sizes map to their MEMPAGES_* constants.
        self.assertEqual(
            hw.MEMPAGES_SMALL,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "small"}))

        self.assertEqual(
            hw.MEMPAGES_LARGE,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"}))

        self.assertEqual(
            hw.MEMPAGES_ANY,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"}))

    def test_get_requested_mempages_pagesize_from_flavor_specific(self):
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048"}))

    def test_get_requested_mempages_pagesize_from_flavor_invalid(self):
        # Non-numeric and negative sizes are rejected.
        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "foo"})

        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "-42"})

    def test_get_requested_mempages_pagesizes_from_flavor_suffix_sweep(self):
        # Unit suffixes are normalized to KB.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048KB"}))

        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2MB"}))

        self.assertEqual(
            1048576,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "1GB"}))

    def test_get_requested_mempages_pagesize_from_image_flavor_any(self):
        # Flavor "any" lets the image pick the concrete size.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"},
                props={"hw_mem_page_size": "2048"}))

    def test_get_requested_mempages_pagesize_from_image_flavor_large(self):
        # Flavor "large" is compatible with a concrete large size.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"},
                props={"hw_mem_page_size": "2048"}))

    def test_get_requested_mempages_pagesize_from_image_forbidden(self):
        # The image may not override flavor "small" with a large size.
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "small"},
            {"hw_mem_page_size": "2048"})

    def test_get_requested_mempages_pagesize_from_image_forbidden2(self):
        # Nor request a size when the flavor requested none at all.
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {}, {"hw_mem_page_size": "2048"})

    def test_cell_accepts_request_wipe(self):
        """A small-pages-only host satisfies SMALL/ANY but not LARGE."""
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=262144, used=0),
            ],
            siblings=[], pinned_cpus=set([]))

        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024,
            pagesize=hw.MEMPAGES_SMALL)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))

        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024,
            pagesize=hw.MEMPAGES_ANY)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))

        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024,
            pagesize=hw.MEMPAGES_LARGE)
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))

    def test_cell_accepts_request_large_pass(self):
        """MEMPAGES_LARGE resolves to the host's 2M pool."""
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024,
            pagesize=hw.MEMPAGES_LARGE)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))

        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))

    def test_cell_accepts_request_custom_pass(self):
        """An explicit 2048 KB request matches the host's 2M pool."""
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))

        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))

    def test_cell_accepts_request_remainder_memory(self):
        # Test memory can't be divided with no rem by mempage's size_kb
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024 + 1, pagesize=2048)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))

    def test_cell_accepts_request_host_mempages(self):
        # Test pagesize not in host's mempages
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=4096)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertRaises(exception.MemoryPageSizeNotSupported,
                          hw._numa_cell_supports_pagesize_request,
                          host_cell, inst_cell)


class _CPUPinningTestCaseBase(object):
    """Shared assertion helpers for the CPU pinning test cases."""

    def assertEqualTopology(self, expected, got):
        # Compare only the (sockets, cores, threads) shape of a topology.
        for attr in ('sockets', 'cores', 'threads'):
            self.assertEqual(getattr(expected, attr), getattr(got, attr),
                             "Mismatch on %s" % attr)

    def assertInstanceCellPinned(self, instance_cell, cell_ids=None):
        # A pinned cell must exist, land on an expected host cell id, and
        # have exactly one host CPU per instance CPU.
        default_cell_id = 0

        self.assertIsNotNone(instance_cell)
        if cell_ids is None:
            self.assertEqual(default_cell_id, instance_cell.id)
        else:
            self.assertIn(instance_cell.id, cell_ids)

        self.assertEqual(len(instance_cell.cpuset),
                         len(instance_cell.cpu_pinning))

    def assertPinningPreferThreads(self, instance_cell, host_cell):
        """Make sure we are preferring threads.

        We do this by assessing that at least 2 CPUs went to the same core
        if that was even possible to begin with.
        """
        max_free_siblings = max(map(len, host_cell.free_siblings))
        if len(instance_cell) > 1 and max_free_siblings > 1:
            cpu_to_sib = {}
            for sib in host_cell.free_siblings:
                for cpu in sib:
                    cpu_to_sib[cpu] = tuple(sorted(sib))
            pins_per_sib = collections.defaultdict(int)
            for inst_p, host_p in instance_cell.cpu_pinning.items():
                pins_per_sib[cpu_to_sib[host_p]] += 1
            self.assertTrue(max(pins_per_sib.values()) > 1,
                            "Seems threads were not preferred by the pinning "
                            "logic.")


class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
    """Tests for pinning one instance cell onto one host cell."""

    def test_get_pinning_inst_too_large_cpu(self):
        """More instance vCPUs than host CPUs -> no pinning."""
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_inst_too_large_mem(self):
        """Not enough free memory on the host cell -> no pinning."""
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=1024,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_inst_not_avail(self):
        """Already-pinned host CPUs are not available for reuse."""
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([0]),
                                    siblings=[], mempages=[])
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_no_sibling_fits_empty(self):
        """No sibling info: CPUs map one to one on an empty cell."""
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=3, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 3)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_no_sibling_fits_w_usage(self):
        """Pinning skips host CPUs that are already pinned."""
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([1]),
                                    mempages=[], siblings=[])
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=1024)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_pinning = {0: 0, 1: 2, 2: 3}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_instance_siblings_fits(self):
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=4, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
        """Host thread siblings produce a cores x threads topology."""
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self):
        host_pin = compute.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096,
            memory_usage=0,
            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
            mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=4, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 8)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
        host_pin = compute.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096,
            memory_usage=0,
            pinned_cpus=set([1, 2, 5, 6]),
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[])
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {0: 0, 1: 3, 2: 4, 3: 7}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_host_siblings_fit_single_core(self):
        """A whole 4-thread core absorbs a 4-vCPU cell as 1 core."""
        host_pin = compute.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=1, threads=4)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_host_siblings_fit(self):
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

    def test_get_pinning_require_policy_too_few_siblings(self):
        """REQUIRE thread policy fails when too few free siblings remain."""
        host_pin = compute.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1, 2]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_require_policy_fits(self):
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)

    def test_get_pinning_require_policy_fits_w_usage(self):
        host_pin = compute.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)

    def test_get_pinning_host_siblings_instance_odd_fit(self):
        """An odd vCPU count falls back to a threads=1 topology."""
        host_pin = compute.NUMACell(id=0,
                                    cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3]),
                                              set([4, 5]), set([6, 7])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=5, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)

    def test_get_pinning_host_siblings_instance_fit_optimize_threads(self):
        host_pin = compute.NUMACell(id=0,
                                    cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4, 5]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=3, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit_w_usage(self): host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([0, 2, 5])) inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=3, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self): host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([0, 2, 5, 6])) inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=4, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_large_instance_odd_fit(self): host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), memory=4096, memory_usage=0, siblings=[set([0, 8]), set([1, 9]), set([2, 10]), set([3, 11]), set([4, 12]), set([5, 13]), set([6, 14]), set([7, 15])], mempages=[], pinned_cpus=set([])) inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]), memory=2048) 
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) self.assertPinningPreferThreads(inst_pin, host_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=5, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_too_few_fully_free_cores(self): host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([1])) inst_pin = compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_isolate_policy_no_fully_free_cores(self): host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([1, 2])) inst_pin = compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_isolate_policy_fits(self): host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_fits_ht_host(self): host_pin = compute.NUMACell(id=0, 
cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_fits_w_usage(self): host_pin = compute.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase): def test_host_numa_fit_instance_to_host_single_cell(self): host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), compute.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))] ) inst_topo = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 1)) def test_host_numa_fit_instance_to_host_single_cell_w_usage(self): 
host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1]), pinned_cpus=set([0]), memory=2048, memory_usage=0, siblings=[], mempages=[]), compute.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(1,)) def test_host_numa_fit_instance_to_host_single_cell_fail(self): host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1]), memory=2048, pinned_cpus=set([0]), memory_usage=0, siblings=[], mempages=[]), compute.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, pinned_cpus=set([2]), memory_usage=0, siblings=[], mempages=[])]) inst_topo = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_host_numa_fit_instance_to_host_fit(self): host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), compute.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 1)) def test_host_numa_fit_instance_to_host_barely_fit(self): 
host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, pinned_cpus=set([0]), siblings=[], mempages=[], memory_usage=0), compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([4, 5, 6])), compute.NUMACell(id=2, cpuset=set([8, 9, 10, 11]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([10, 11]))]) inst_topo = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), compute.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 2)) def test_host_numa_fit_instance_to_host_fail_capacity(self): host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([0])), compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([4, 5, 6]))]) inst_topo = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), compute.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_host_numa_fit_instance_to_host_fail_topology(self): host_topo = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = compute.InstanceNUMATopology( 
cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), compute.InstanceNUMACell( cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), compute.InstanceNUMACell( cpuset=set([4, 5]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_cpu_pinning_usage_from_instances(self): host_pin = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = compute.InstanceNUMATopology( cells = [compute.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 1, 1: 2}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) host_pin = hw.numa_usage_from_instances( host_pin, [inst_pin_1, inst_pin_2]) self.assertEqual(set([0, 1, 2, 3]), host_pin.cells[0].pinned_cpus) def test_cpu_pinning_usage_from_instances_free(self): host_pin = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([0, 1, 3]))]) inst_pin_1 = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0]), memory=1024, cpu_pinning={0: 1}, id=0, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=1024, id=0, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) host_pin = hw.numa_usage_from_instances( host_pin, [inst_pin_1, inst_pin_2], free=True) self.assertEqual(set(), host_pin.cells[0].pinned_cpus) def test_host_usage_from_instances_fail(self): 
host_pin = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = compute.InstanceNUMATopology( cells = [compute.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 0, 1: 2}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) self.assertRaises(exception.CPUPinningInvalid, hw.numa_usage_from_instances, host_pin, [inst_pin_1, inst_pin_2]) def test_host_usage_from_instances_isolate(self): host_pin = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[set([0, 2]), set([1, 3])], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 1}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE )]) new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1]) self.assertEqual(host_pin.cells[0].cpuset, new_cell.cells[0].pinned_cpus) self.assertEqual(new_cell.cells[0].cpu_usage, 4) def test_host_usage_from_instances_isolate_free(self): host_pin = compute.NUMATopology( cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=4, memory_usage=0, siblings=[set([0, 2]), set([1, 3])], mempages=[], pinned_cpus=set([0, 1, 2, 3]))]) inst_pin_1 = compute.InstanceNUMATopology( cells=[compute.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 1}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE )]) new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1], free=True) self.assertEqual(set([]), 
new_cell.cells[0].pinned_cpus) self.assertEqual(new_cell.cells[0].cpu_usage, 0) class CPURealtimeTestCase(test.NoDBTestCase): def test_success_flavor(self): flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^1"}} image = compute.ImageMeta.from_dict({}) rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image) self.assertEqual(set([0, 2]), rt) self.assertEqual(set([1]), em) def test_success_image(self): flavor = {"extra_specs": {}} image = compute.ImageMeta.from_dict( {"properties": {"hw_cpu_realtime_mask": "^0-1"}}) rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image) self.assertEqual(set([2]), rt) self.assertEqual(set([0, 1]), em) def test_no_mask_configured(self): flavor = {"extra_specs": {}} image = compute.ImageMeta.from_dict({"properties": {}}) self.assertRaises( exception.RealtimeMaskNotFoundOrInvalid, hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image) def test_mask_badly_configured(self): flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^0-2"}} image = compute.ImageMeta.from_dict({"properties": {}}) self.assertRaises( exception.RealtimeMaskNotFoundOrInvalid, hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)
unknown
codeparrot/codeparrot-clean
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1_test import ( "reflect" "testing" apiv1 "k8s.io/api/core/v1" "k8s.io/api/scheduling/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api/legacyscheme" // ensure types are installed _ "k8s.io/kubernetes/pkg/apis/scheduling/install" ) func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { codec := legacyscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion) data, err := runtime.Encode(codec, obj) if err != nil { t.Errorf("%v\n %#v", err, obj) return nil } obj2, err := runtime.Decode(codec, data) if err != nil { t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) return nil } obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) err = legacyscheme.Scheme.Convert(obj2, obj3, nil) if err != nil { t.Errorf("%v\nSource: %#v", err, obj2) return nil } return obj3 } func TestSetDefaultPreempting(t *testing.T) { priorityClass := &v1beta1.PriorityClass{} output := roundTrip(t, runtime.Object(priorityClass)).(*v1beta1.PriorityClass) if output.PreemptionPolicy == nil || *output.PreemptionPolicy != apiv1.PreemptLowerPriority { t.Errorf("Expected PriorityClass.Preempting value: %+v\ngot: %+v\n", apiv1.PreemptLowerPriority, output.PreemptionPolicy) } }
go
github
https://github.com/kubernetes/kubernetes
pkg/apis/scheduling/v1beta1/defaults_test.go
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use std::io; use std::path::{Path, PathBuf}; use std::process::Command; /// Helper for generic catch-all errors. type Result = std::result::Result<(), Box<dyn std::error::Error>>; /// <https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes--1300-1699-> #[cfg(windows)] const ERROR_PRIVILEGE_NOT_HELD: i32 = 1314; /// Represents a successfully created symlink. enum Symlink { /// Path to the created symlink Created(PathBuf), /// A symlink that failed due to missing permissions (Windows). #[allow(dead_code)] Privilege, } /// Compile the test binary, run it, and compare it with expected output. /// /// Failing to create a symlink due to permissions issues is also a success /// for the purpose of this runner. fn symlink_runner(create_symlinks: impl Fn(&Path) -> io::Result<Symlink>) -> Result { let mut compiled_binary = PathBuf::from(env!("OUT_DIR")).join("../../../restart"); if cfg!(windows) { compiled_binary.set_extension("exe"); } println!("{compiled_binary:?}"); // set up all the temporary file paths let temp = tempfile::TempDir::new()?; let bin = temp.path().canonicalize()?.join("restart.exe"); // copy the built restart test binary to our temporary directory std::fs::copy(compiled_binary, &bin)?; if let Symlink::Created(link) = create_symlinks(&bin)? 
{ // run the command from the symlink, so that we can test if restart resolves it correctly let mut cmd = Command::new(link); // add the restart parameter so that the invocation will call tauri::process::restart cmd.arg("restart"); let output = cmd.output()?; // run `TempDir` destructors to prevent resource leaking if the assertion fails drop(temp); if output.status.success() { // gather the output into a string let stdout = String::from_utf8_lossy(&output.stdout); // we expect the output to be the bin path, twice assert_eq!(stdout, format!("{bin}\n{bin}\n", bin = bin.display())); } else if cfg!(all( target_os = "macos", not(feature = "process-relaunch-dangerous-allow-symlink-macos") )) { // we expect this to fail on macOS without the dangerous symlink flag set let stderr = String::from_utf8_lossy(&output.stderr); // make sure it's the error that we expect assert!(stderr.contains( "StartingBinary found current_exe() that contains a symlink on a non-allowed platform" )); } else { // we didn't expect the program to fail in this configuration, just panic panic!("restart integration test runner failed for unknown reason"); } } Ok(()) } /// Cross-platform way to create a symlink /// /// Symlinks that failed to create due to permissions issues (like on Windows) /// are also seen as successful for the purpose of this testing suite. fn create_symlink(original: &Path, link: PathBuf) -> io::Result<Symlink> { #[cfg(unix)] return std::os::unix::fs::symlink(original, &link).map(|()| Symlink::Created(link)); #[cfg(windows)] return match std::os::windows::fs::symlink_file(original, &link) { Ok(()) => Ok(Symlink::Created(link)), Err(e) => match e.raw_os_error() { Some(ERROR_PRIVILEGE_NOT_HELD) => Ok(Symlink::Privilege), _ => Err(e), }, }; } /// Only use 1 test to prevent cargo from waiting on itself. /// /// While not ideal, this is fine because they use the same solution for both cases. 
#[test]
fn restart_symlinks() -> Result {
  // Case 1: the binary is reached through a single symlink.
  symlink_runner(|bin| {
    let mut link = bin.to_owned();
    link.set_file_name("symlink");
    link.set_extension("exe");
    create_symlink(bin, link)
  })?;

  // Case 2: the binary is reached through a chain of two symlinks.
  symlink_runner(|bin| {
    let mut link1 = bin.to_owned();
    link1.set_file_name("symlink1");
    link1.set_extension("exe");
    create_symlink(bin, link1.clone())?;

    let mut link2 = bin.to_owned();
    link2.set_file_name("symlink2");
    link2.set_extension("exe");
    create_symlink(&link1, link2)
  })
}
rust
github
https://github.com/tauri-apps/tauri
crates/tests/restart/tests/restart.rs
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerInterceptor; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.GroupProtocol; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.clients.consumer.SubscriptionPattern; import 
org.apache.kafka.clients.consumer.internals.events.AllTopicsMetadataEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; import org.apache.kafka.clients.consumer.internals.events.AsyncCommitEvent; import org.apache.kafka.clients.consumer.internals.events.AsyncPollEvent; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.events.CheckAndUpdatePositionsEvent; import org.apache.kafka.clients.consumer.internals.events.CommitEvent; import org.apache.kafka.clients.consumer.internals.events.CommitOnCloseEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.CreateFetchRequestsEvent; import org.apache.kafka.clients.consumer.internals.events.CurrentLagEvent; import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.clients.consumer.internals.events.EventProcessor; import org.apache.kafka.clients.consumer.internals.events.FetchCommittedOffsetsEvent; import org.apache.kafka.clients.consumer.internals.events.LeaveGroupOnCloseEvent; import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; import 
org.apache.kafka.clients.consumer.internals.events.PausePartitionsEvent; import org.apache.kafka.clients.consumer.internals.events.ResetOffsetEvent; import org.apache.kafka.clients.consumer.internals.events.ResumePartitionsEvent; import org.apache.kafka.clients.consumer.internals.events.SeekUnvalidatedEvent; import org.apache.kafka.clients.consumer.internals.events.StopFindCoordinatorOnCloseEvent; import org.apache.kafka.clients.consumer.internals.events.StreamsOnAllTasksLostCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.StreamsOnAllTasksLostCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksAssignedCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksAssignedCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksRevokedCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksRevokedCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent; import org.apache.kafka.clients.consumer.internals.events.TopicPatternSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.TopicRe2JPatternSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.TopicSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.UnsubscribeEvent; import org.apache.kafka.clients.consumer.internals.events.UpdatePatternSubscriptionEvent; import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.RebalanceCallbackMetricsManager; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import 
org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetryUtils; import org.apache.kafka.common.utils.AppInfoParser; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.slf4j.Logger; import org.slf4j.event.Level; import java.net.InetSocketAddress; import java.time.Duration; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.BlockingQueue; import 
java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; import java.util.regex.Pattern; import java.util.stream.Collectors; import static java.util.Objects.requireNonNull; import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_JMX_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.configuredConsumerInterceptors; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createFetchMetricsManager; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createLogContext; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createSubscriptionState; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.common.utils.Utils.closeQuietly; import static org.apache.kafka.common.utils.Utils.isBlank; import static org.apache.kafka.common.utils.Utils.swallow; /** * This {@link Consumer} implementation uses an {@link ApplicationEventHandler event handler} to process * {@link ApplicationEvent application events} so that the network I/O can be processed in a dedicated * {@link 
ConsumerNetworkThread network thread}. Visit
 * <a href="https://cwiki.apache.org/confluence/display/KAFKA/Consumer+threading+refactor+design">this document</a>
 * for implementation detail.
 *
 * <p/>
 *
 * <em>Note:</em> this {@link Consumer} implementation is part of the revised consumer group protocol from KIP-848.
 * This class should not be invoked directly; users should instead create a {@link KafkaConsumer} as before.
 * This consumer implements the new consumer group protocol and is intended to be the default in coming releases.
 */
public class AsyncKafkaConsumer<K, V> implements ConsumerDelegate<K, V> {

    // Sentinel stored in currentThread when no application thread is currently inside the consumer.
    private static final long NO_CURRENT_THREAD = -1L;

    /**
     * An {@link org.apache.kafka.clients.consumer.internals.events.EventProcessor} that is created and executes in the
     * application thread for the purpose of processing {@link BackgroundEvent background events} generated by the
     * {@link ConsumerNetworkThread network thread}.
     * Those events are generally of two types:
     *
     * <ul>
     *     <li>Errors that occur in the network thread that need to be propagated to the application thread</li>
     *     <li>{@link ConsumerRebalanceListener} callbacks that are to be executed on the application thread</li>
     * </ul>
     */
    private class BackgroundEventProcessor implements EventProcessor<BackgroundEvent> {

        /**
         * Dispatches a background event to the matching handler based on its type.
         *
         * @throws IllegalArgumentException if the event type is not one this processor handles
         */
        @Override
        public void process(final BackgroundEvent event) {
            switch (event.type()) {
                case ERROR:
                    process((ErrorEvent) event);
                    break;
                case CONSUMER_REBALANCE_LISTENER_CALLBACK_NEEDED:
                    process((ConsumerRebalanceListenerCallbackNeededEvent) event);
                    break;
                case STREAMS_ON_TASKS_REVOKED_CALLBACK_NEEDED:
                    processStreamsOnTasksRevokedCallbackNeededEvent((StreamsOnTasksRevokedCallbackNeededEvent) event);
                    break;
                case STREAMS_ON_TASKS_ASSIGNED_CALLBACK_NEEDED:
                    processStreamsOnTasksAssignedCallbackNeededEvent((StreamsOnTasksAssignedCallbackNeededEvent) event);
                    break;
                case STREAMS_ON_ALL_TASKS_LOST_CALLBACK_NEEDED:
                    processStreamsOnAllTasksLostCallbackNeededEvent((StreamsOnAllTasksLostCallbackNeededEvent) event);
                    break;
                default:
                    throw new IllegalArgumentException("Background event type " + event.type() + " was not expected");
            }
        }

        // An error raised in the network thread is rethrown here, i.e. on the application thread.
        private void process(final ErrorEvent event) {
            throw event.error();
        }

        // Runs the user's ConsumerRebalanceListener callback, reports completion back to the network
        // thread via the application event queue, and rethrows any error the callback produced.
        private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) {
            ConsumerRebalanceListenerCallbackCompletedEvent invokedEvent = invokeRebalanceCallbacks(
                rebalanceListenerInvoker,
                event.methodName(),
                event.partitions(),
                event.future()
            );
            applicationEventHandler.add(invokedEvent);
            if (invokedEvent.error().isPresent()) {
                throw invokedEvent.error().get();
            }
        }

        // The three processStreams* methods below follow the same pattern: invoke the Streams
        // rebalance callback, enqueue the "completed" event for the background thread, then
        // rethrow any wrapped callback error on the application thread.
        private void processStreamsOnTasksRevokedCallbackNeededEvent(final StreamsOnTasksRevokedCallbackNeededEvent event) {
            StreamsOnTasksRevokedCallbackCompletedEvent invokedEvent = invokeOnTasksRevokedCallback(event.activeTasksToRevoke(), event.future());
            applicationEventHandler.add(invokedEvent);
            if (invokedEvent.error().isPresent()) {
                throw invokedEvent.error().get();
            }
        }

        private void processStreamsOnTasksAssignedCallbackNeededEvent(final StreamsOnTasksAssignedCallbackNeededEvent event) {
            StreamsOnTasksAssignedCallbackCompletedEvent invokedEvent = invokeOnTasksAssignedCallback(event.assignment(), event.future());
            applicationEventHandler.add(invokedEvent);
            if (invokedEvent.error().isPresent()) {
                throw invokedEvent.error().get();
            }
        }

        private void processStreamsOnAllTasksLostCallbackNeededEvent(final StreamsOnAllTasksLostCallbackNeededEvent event) {
            StreamsOnAllTasksLostCallbackCompletedEvent invokedEvent = invokeOnAllTasksLostCallback(event.future());
            applicationEventHandler.add(invokedEvent);
            if (invokedEvent.error().isPresent()) {
                throw invokedEvent.error().get();
            }
        }

        // Invokes the tasks-revoked callback; a non-null exception from the callback is wrapped as a
        // KafkaException and carried on the completed event rather than thrown directly.
        private StreamsOnTasksRevokedCallbackCompletedEvent invokeOnTasksRevokedCallback(final Set<StreamsRebalanceData.TaskId> activeTasksToRevoke,
                                                                                         final CompletableFuture<Void> future) {
            final Optional<Exception> exceptionFromCallback =
                Optional.ofNullable(streamsRebalanceListenerInvoker().invokeTasksRevoked(activeTasksToRevoke));
            final Optional<KafkaException> error =
                exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "Task revocation callback throws an error"));
            return new StreamsOnTasksRevokedCallbackCompletedEvent(future, error);
        }

        private StreamsOnTasksAssignedCallbackCompletedEvent invokeOnTasksAssignedCallback(final StreamsRebalanceData.Assignment assignment,
                                                                                           final CompletableFuture<Void> future) {
            final Optional<Exception> exceptionFromCallback =
                Optional.ofNullable(streamsRebalanceListenerInvoker().invokeTasksAssigned(assignment));
            final Optional<KafkaException> error =
                exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "Task assignment callback throws an error"));
            return new StreamsOnTasksAssignedCallbackCompletedEvent(future, error);
        }

        private StreamsOnAllTasksLostCallbackCompletedEvent invokeOnAllTasksLostCallback(final CompletableFuture<Void> future) {
            final Optional<Exception> exceptionFromCallback =
                Optional.ofNullable(streamsRebalanceListenerInvoker().invokeAllTasksLost());
            final Optional<KafkaException> error =
                exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "All tasks lost callback throws an error"));
            return new StreamsOnAllTasksLostCallbackCompletedEvent(future, error);
        }

        // Unwraps the optional invoker; the Streams events above are only expected when the consumer
        // was constructed with StreamsRebalanceData, hence the IllegalStateException otherwise.
        private StreamsRebalanceListenerInvoker streamsRebalanceListenerInvoker() {
            return streamsRebalanceListenerInvoker.orElseThrow(
                () -> new IllegalStateException("Background event processor was not created to be used with Streams " +
                    "rebalance protocol events"));
        }
    }

    private final ApplicationEventHandler applicationEventHandler;
    private final Time time;
    // Written from the background thread via the MemberStateListener; read by groupMetadata().
    private final AtomicReference<Optional<ConsumerGroupMetadata>> groupMetadata = new AtomicReference<>(Optional.empty());
    private final AsyncConsumerMetrics asyncConsumerMetrics;
    private final KafkaConsumerMetrics kafkaConsumerMetrics;
    private Logger log;
    private final String clientId;
    // Queue through which the network thread hands BackgroundEvents to the application thread.
    private final BlockingQueue<BackgroundEvent> backgroundEventQueue;
    private final BackgroundEventHandler backgroundEventHandler;
    private final BackgroundEventProcessor backgroundEventProcessor;
    private final CompletableEventReaper backgroundEventReaper;
    private final Deserializers<K, V> deserializers;

    /**
     * A thread-safe {@link FetchBuffer fetch buffer} for the results that are populated in the
     * {@link ConsumerNetworkThread network thread} when the results are available. Because of the interaction
     * of the fetch buffer in the application thread and the network I/O thread, this is shared between the
     * two threads and is thus designed to be thread-safe.
     */
    private final FetchBuffer fetchBuffer;
    private final FetchCollector<K, V> fetchCollector;
    private final ConsumerInterceptors<K, V> interceptors;
    private final IsolationLevel isolationLevel;
    private final SubscriptionState subscriptions;

    /**
     * This is a snapshot of the partitions assigned to this consumer. HOWEVER, this is only populated and used in
     * the case where this consumer is in a consumer group. Self-assigned partitions do not appear here.
     */
    private final AtomicReference<Set<TopicPartition>> groupAssignmentSnapshot = new AtomicReference<>(Collections.emptySet());
    private final ConsumerMetadata metadata;
    private final Metrics metrics;
    private final long retryBackoffMs;
    private final int requestTimeoutMs;
    private final Duration defaultApiTimeoutMs;
    private final boolean autoCommitEnabled;
    private volatile boolean closed = false;
    // Init value is needed to avoid NPE in case of exception raised in the constructor
    private Optional<ClientTelemetryReporter> clientTelemetryReporter = Optional.empty();
    private final PositionsValidator positionsValidator;
    // The single in-flight AsyncPollEvent, if any; lifecycle managed by checkInflightPoll().
    private AsyncPollEvent inflightPoll;
    private final WakeupTrigger wakeupTrigger = new WakeupTrigger();
    private final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker;
    private final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker;
    // Present only when constructed with StreamsRebalanceData (Streams rebalance protocol).
    private final Optional<StreamsRebalanceListenerInvoker> streamsRebalanceListenerInvoker;

    // Last triggered async commit future. Used to wait until all previous async commits are completed.
    // We only need to keep track of the last one, since they are guaranteed to complete in order.
    private CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> lastPendingAsyncCommit = null;

    // currentThread holds the threadId of the current thread accessing the AsyncKafkaConsumer
    // and is used to prevent multithreaded access
    private final AtomicLong currentThread = new AtomicLong(NO_CURRENT_THREAD);
    // Reentrancy counter paired with currentThread for acquire()/release() bookkeeping.
    private final AtomicInteger refCount = new AtomicInteger(0);

    // Receives membership updates from the background thread and publishes them into the
    // thread-safe groupMetadata / groupAssignmentSnapshot references.
    private final MemberStateListener memberStateListener = new MemberStateListener() {
        @Override
        public void onMemberEpochUpdated(Optional<Integer> memberEpoch, String memberId) {
            updateGroupMetadata(memberEpoch, memberId);
        }

        @Override
        public void onGroupAssignmentUpdated(Set<TopicPartition> partitions) {
            setGroupAssignmentSnapshot(partitions);
        }
    };

    /**
     * Public entry point: delegates to the main constructor with production defaults
     * (system clock, real event handler/reaper/collector/metadata factories, fresh queue).
     */
    public AsyncKafkaConsumer(final ConsumerConfig config,
                              final Deserializer<K> keyDeserializer,
                              final Deserializer<V> valueDeserializer,
                              final Optional<StreamsRebalanceData> streamsRebalanceData) {
        this(
            config,
            keyDeserializer,
            valueDeserializer,
            Time.SYSTEM,
            ApplicationEventHandler::new,
            CompletableEventReaper::new,
            FetchCollector::new,
            ConsumerMetadata::new,
            new LinkedBlockingQueue<>(),
            streamsRebalanceData
        );
    }

    // Visible for testing
    @SuppressWarnings({"this-escape"})
    AsyncKafkaConsumer(final ConsumerConfig config,
                       final Deserializer<K> keyDeserializer,
                       final Deserializer<V> valueDeserializer,
                       final Time time,
                       final ApplicationEventHandlerFactory applicationEventHandlerFactory,
                       final CompletableEventReaperFactory backgroundEventReaperFactory,
                       final FetchCollectorFactory<K, V> fetchCollectorFactory,
                       final ConsumerMetadataFactory metadataFactory,
                       final LinkedBlockingQueue<BackgroundEvent> backgroundEventQueue,
                       final Optional<StreamsRebalanceData> streamsRebalanceData) {
        try {
            GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig(
                config,
                GroupRebalanceConfig.ProtocolType.CONSUMER
            );
            this.clientId = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
            this.autoCommitEnabled = config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
            LogContext logContext = createLogContext(config, groupRebalanceConfig);
            this.backgroundEventQueue = backgroundEventQueue;
            this.log = logContext.logger(getClass());
            log.debug("Initializing the Kafka consumer");
            this.defaultApiTimeoutMs = Duration.ofMillis(config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
            this.time = time;
            List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config);
            this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config);
            this.clientTelemetryReporter.ifPresent(reporters::add);
            this.metrics = createMetrics(config, time, reporters);
            this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP);
            this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics);
            this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);
            this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG);

            List<ConsumerInterceptor<K, V>> interceptorList = configuredConsumerInterceptors(config);
            this.interceptors = new ConsumerInterceptors<>(interceptorList, metrics);
            this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics);
            this.subscriptions = createSubscriptionState(config, logContext);
            ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners(metrics.reporters(),
                interceptorList,
                Arrays.asList(deserializers.keyDeserializer(), deserializers.valueDeserializer()));
            this.metadata = metadataFactory.build(config, subscriptions, logContext, clusterResourceListeners);
            final List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config);
            metadata.bootstrap(addresses);

            FetchMetricsManager fetchMetricsManager = createFetchMetricsManager(metrics);
            FetchConfig fetchConfig = new FetchConfig(config);
            this.isolationLevel = fetchConfig.isolationLevel;

            ApiVersions apiVersions = new ApiVersions();
            final BlockingQueue<ApplicationEvent> applicationEventQueue = new LinkedBlockingQueue<>();
            this.backgroundEventHandler = new BackgroundEventHandler(
                backgroundEventQueue,
                time,
                asyncConsumerMetrics
            );

            // This FetchBuffer is shared between the application and network threads.
            this.fetchBuffer = new FetchBuffer(logContext);
            this.positionsValidator = new PositionsValidator(logContext, time, subscriptions, metadata);
            final Supplier<NetworkClientDelegate> networkClientDelegateSupplier = NetworkClientDelegate.supplier(time,
                logContext,
                metadata,
                config,
                apiVersions,
                metrics,
                fetchMetricsManager.throttleTimeSensor(),
                clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null),
                backgroundEventHandler,
                false,
                asyncConsumerMetrics
            );
            this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors);
            this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig));
            final Supplier<RequestManagers> requestManagersSupplier = RequestManagers.supplier(time,
                logContext,
                backgroundEventHandler,
                metadata,
                subscriptions,
                fetchBuffer,
                config,
                groupRebalanceConfig,
                apiVersions,
                fetchMetricsManager,
                networkClientDelegateSupplier,
                clientTelemetryReporter,
                metrics,
                offsetCommitCallbackInvoker,
                memberStateListener,
                streamsRebalanceData,
                positionsValidator
            );
            final Supplier<ApplicationEventProcessor> applicationEventProcessorSupplier = ApplicationEventProcessor.supplier(logContext,
                metadata,
                subscriptions,
                requestManagersSupplier
            );
            this.applicationEventHandler = applicationEventHandlerFactory.build(
                logContext,
                time,
                config.getInt(CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_CONFIG),
                applicationEventQueue,
                new CompletableEventReaper(logContext),
                applicationEventProcessorSupplier,
                networkClientDelegateSupplier,
                requestManagersSupplier,
                asyncConsumerMetrics
            );
            this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker(
                logContext,
                subscriptions,
                time,
                new RebalanceCallbackMetricsManager(metrics)
            );
            this.streamsRebalanceListenerInvoker = streamsRebalanceData.map(s -> new StreamsRebalanceListenerInvoker(logContext, s));
            this.backgroundEventProcessor = new BackgroundEventProcessor();
            this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext);

            // The FetchCollector is only used on the application thread.
            this.fetchCollector = fetchCollectorFactory.build(logContext,
                metadata,
                subscriptions,
                fetchConfig,
                deserializers,
                fetchMetricsManager,
                time);

            if (groupMetadata.get().isPresent() &&
                GroupProtocol.of(config.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)) == GroupProtocol.CONSUMER) {
                config.ignore(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG); // Used by background thread
            }
            config.logUnused();
            AppInfoParser.registerAppInfo(CONSUMER_JMX_PREFIX, clientId, metrics, time.milliseconds());
            log.debug("Kafka consumer initialized");
        } catch (Throwable t) {
            // call close methods if internal objects are already constructed; this is to prevent resource leak. see KAFKA-2121
            // we do not need to call `close` at all when `log` is null, which means no internal objects were initialized.
            if (this.log != null) {
                close(Duration.ZERO, CloseOptions.GroupMembershipOperation.LEAVE_GROUP, true);
            }
            // now propagate the exception
            throw new KafkaException("Failed to construct kafka consumer", t);
        }
    }

    // Visible for testing
    AsyncKafkaConsumer(LogContext logContext,
                       String clientId,
                       Deserializers<K, V> deserializers,
                       FetchBuffer fetchBuffer,
                       FetchCollector<K, V> fetchCollector,
                       ConsumerInterceptors<K, V> interceptors,
                       Time time,
                       ApplicationEventHandler applicationEventHandler,
                       BlockingQueue<BackgroundEvent> backgroundEventQueue,
                       CompletableEventReaper backgroundEventReaper,
                       ConsumerRebalanceListenerInvoker rebalanceListenerInvoker,
                       Metrics metrics,
                       SubscriptionState subscriptions,
                       ConsumerMetadata metadata,
                       long retryBackoffMs,
                       int requestTimeoutMs,
                       int defaultApiTimeoutMs,
                       String groupId,
                       boolean autoCommitEnabled,
                       PositionsValidator positionsValidator) {
        this.log = logContext.logger(getClass());
        this.subscriptions = subscriptions;
        this.clientId = clientId;
        this.fetchBuffer = fetchBuffer;
        this.fetchCollector = fetchCollector;
        this.isolationLevel = IsolationLevel.READ_UNCOMMITTED;
        this.interceptors = Objects.requireNonNull(interceptors);
        this.time = time;
        this.backgroundEventQueue = backgroundEventQueue;
        this.rebalanceListenerInvoker = rebalanceListenerInvoker;
        this.streamsRebalanceListenerInvoker = Optional.empty();
        this.backgroundEventProcessor = new BackgroundEventProcessor();
        this.backgroundEventReaper = backgroundEventReaper;
        this.metrics = metrics;
        this.groupMetadata.set(initializeGroupMetadata(groupId, Optional.empty()));
        this.metadata = metadata;
        this.retryBackoffMs = retryBackoffMs;
        this.requestTimeoutMs = requestTimeoutMs;
        this.defaultApiTimeoutMs = Duration.ofMillis(defaultApiTimeoutMs);
        this.deserializers = deserializers;
        this.applicationEventHandler = applicationEventHandler;
        this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics);
        this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP);
        this.clientTelemetryReporter = Optional.empty();
        this.autoCommitEnabled = autoCommitEnabled;
        this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors);
        this.backgroundEventHandler = new BackgroundEventHandler(
            backgroundEventQueue,
            time,
            asyncConsumerMetrics
        );
        this.positionsValidator = positionsValidator;
    }

    // Constructor used when a pre-built KafkaClient is supplied (e.g. a mock client in tests);
    // wires the real background machinery on top of the given client.
    AsyncKafkaConsumer(LogContext logContext,
                       Time time,
                       ConsumerConfig config,
                       Deserializer<K> keyDeserializer,
                       Deserializer<V> valueDeserializer,
                       KafkaClient client,
                       SubscriptionState subscriptions,
                       ConsumerMetadata metadata) {
        this.log = logContext.logger(getClass());
        this.subscriptions = subscriptions;
        this.clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG);
        this.autoCommitEnabled = config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
        this.fetchBuffer = new FetchBuffer(logContext);
        this.isolationLevel = IsolationLevel.READ_UNCOMMITTED;
        this.time = time;
        this.metrics = new Metrics(time);
        this.interceptors = new ConsumerInterceptors<>(Collections.emptyList(), metrics);
        this.metadata = metadata;
        this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);
        this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG);
        this.defaultApiTimeoutMs = Duration.ofMillis(config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
        this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics);
        this.clientTelemetryReporter = Optional.empty();

        ConsumerMetrics metricsRegistry = new ConsumerMetrics();
        FetchMetricsManager fetchMetricsManager = new FetchMetricsManager(metrics, metricsRegistry.fetcherMetrics);
        this.fetchCollector = new FetchCollector<>(logContext,
            metadata,
            subscriptions,
            new FetchConfig(config),
            deserializers,
            fetchMetricsManager,
            time);
        this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP);
        this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics);

        GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig(
            config,
            GroupRebalanceConfig.ProtocolType.CONSUMER
        );

        this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig));

        BlockingQueue<ApplicationEvent> applicationEventQueue = new LinkedBlockingQueue<>();
        this.backgroundEventQueue = new LinkedBlockingQueue<>();
        this.backgroundEventHandler = new BackgroundEventHandler(
            backgroundEventQueue,
            time,
            asyncConsumerMetrics
        );
        this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker(
            logContext,
            subscriptions,
            time,
            new RebalanceCallbackMetricsManager(metrics)
        );
        ApiVersions apiVersions = new ApiVersions();
        this.positionsValidator = new PositionsValidator(logContext, time, subscriptions, metadata);
        Supplier<NetworkClientDelegate> networkClientDelegateSupplier = NetworkClientDelegate.supplier(
            time,
            config,
            logContext,
            client,
            metadata,
            backgroundEventHandler,
            false,
            asyncConsumerMetrics
        );
        this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors);
        Supplier<RequestManagers> requestManagersSupplier = RequestManagers.supplier(
            time,
            logContext,
            backgroundEventHandler,
            metadata,
            subscriptions,
            fetchBuffer,
            config,
            groupRebalanceConfig,
            apiVersions,
            fetchMetricsManager,
            networkClientDelegateSupplier,
            clientTelemetryReporter,
            metrics,
            offsetCommitCallbackInvoker,
            memberStateListener,
            Optional.empty(),
            positionsValidator
        );
        Supplier<ApplicationEventProcessor> applicationEventProcessorSupplier = ApplicationEventProcessor.supplier(
            logContext,
            metadata,
            subscriptions,
            requestManagersSupplier
        );
        this.applicationEventHandler = new ApplicationEventHandler(logContext,
            time,
            config.getInt(CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_CONFIG),
            applicationEventQueue,
            new CompletableEventReaper(logContext),
            applicationEventProcessorSupplier,
            networkClientDelegateSupplier,
            requestManagersSupplier,
            asyncConsumerMetrics);
        this.streamsRebalanceListenerInvoker = Optional.empty();
        this.backgroundEventProcessor = new BackgroundEventProcessor();
        this.backgroundEventReaper = new CompletableEventReaper(logContext);
    }

    // auxiliary interface for testing
    interface ApplicationEventHandlerFactory {

        ApplicationEventHandler build(
            final LogContext logContext,
            final Time time,
            final int initializationTimeoutMs,
            final BlockingQueue<ApplicationEvent> applicationEventQueue,
            final CompletableEventReaper applicationEventReaper,
            final Supplier<ApplicationEventProcessor> applicationEventProcessorSupplier,
            final Supplier<NetworkClientDelegate> networkClientDelegateSupplier,
            final Supplier<RequestManagers> requestManagersSupplier,
            final AsyncConsumerMetrics asyncConsumerMetrics
        );
    }

    // auxiliary interface for testing
    interface CompletableEventReaperFactory {

        CompletableEventReaper build(final LogContext logContext);
    }

    // auxiliary interface for testing
    interface FetchCollectorFactory<K, V> {

        FetchCollector<K, V> build(
            final LogContext logContext,
            final ConsumerMetadata metadata,
            final SubscriptionState subscriptions,
            final FetchConfig fetchConfig,
            final Deserializers<K, V> deserializers,
            final FetchMetricsManager metricsManager,
            final Time time
        );
    }

    // auxiliary interface for testing
    interface ConsumerMetadataFactory {

        ConsumerMetadata build(
            final ConsumerConfig config,
            final SubscriptionState subscriptions,
            final LogContext logContext,
            final ClusterResourceListeners clusterResourceListeners
        );
    }

    // Resolves group metadata from the rebalance config; when the consumer is NOT in a group,
    // marks the group-only config options as intentionally unused so they are not logged as such.
    private Optional<ConsumerGroupMetadata> initializeGroupMetadata(final ConsumerConfig config,
                                                                    final GroupRebalanceConfig groupRebalanceConfig) {
        final Optional<ConsumerGroupMetadata> groupMetadata = initializeGroupMetadata(
            groupRebalanceConfig.groupId,
            groupRebalanceConfig.groupInstanceId
        );
        if (groupMetadata.isEmpty()) {
            config.ignore(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG);
            config.ignore(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED);
        }
        return groupMetadata;
    }

    // Returns empty when no group.id is configured; rejects an empty-string group.id.
    private Optional<ConsumerGroupMetadata> initializeGroupMetadata(final String groupId,
                                                                    final Optional<String> groupInstanceId) {
        if (groupId != null) {
            if (groupId.isEmpty()) {
                throw new
                InvalidGroupIdException("The configured " + ConsumerConfig.GROUP_ID_CONFIG + " should not be an empty string or whitespace.");
            } else {
                return Optional.of(initializeConsumerGroupMetadata(groupId, groupInstanceId));
            }
        }
        return Optional.empty();
    }

    // Builds the initial metadata with unknown generation/member ids; they are filled in later
    // by updateGroupMetadata() as the background thread reports membership changes.
    @SuppressWarnings("removal")
    private ConsumerGroupMetadata initializeConsumerGroupMetadata(final String groupId,
                                                                  final Optional<String> groupInstanceId) {
        return new ConsumerGroupMetadata(
            groupId,
            JoinGroupRequest.UNKNOWN_GENERATION_ID,
            JoinGroupRequest.UNKNOWN_MEMBER_ID,
            groupInstanceId
        );
    }

    // Atomically replaces the published group metadata with the new epoch and member id,
    // preserving group id and group instance id. No-op when memberEpoch is empty.
    @SuppressWarnings("removal")
    private void updateGroupMetadata(final Optional<Integer> memberEpoch, final String memberId) {
        memberEpoch.ifPresent(epoch ->
            groupMetadata.updateAndGet(
                oldGroupMetadataOptional -> oldGroupMetadataOptional.map(
                    oldGroupMetadata -> new ConsumerGroupMetadata(
                        oldGroupMetadata.groupId(),
                        memberEpoch.orElse(oldGroupMetadata.generationId()),
                        memberId,
                        oldGroupMetadata.groupInstanceId()
                    )
                )
            )
        );
    }

    // Publishes an immutable snapshot of the group-assigned partitions for cross-thread reads.
    void setGroupAssignmentSnapshot(final Set<TopicPartition> partitions) {
        groupAssignmentSnapshot.set(Collections.unmodifiableSet(partitions));
    }

    @Override
    public void registerMetricForSubscription(KafkaMetric metric) {
        if (!metrics().containsKey(metric.metricName())) {
            clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric));
        } else {
            log.debug("Skipping registration for metric {}. Existing consumer metrics cannot be overwritten.", metric.metricName());
        }
    }

    @Override
    public void unregisterMetricFromSubscription(KafkaMetric metric) {
        if (!metrics().containsKey(metric.metricName())) {
            clientTelemetryReporter.ifPresent(reporter -> reporter.metricRemoval(metric));
        } else {
            log.debug("Skipping unregistration for metric {}. Existing consumer metrics cannot be removed.", metric.metricName());
        }
    }

    /**
     * poll implementation using {@link ApplicationEventHandler}.
     *  1. Poll for background events. If there's a fetch response event, process the record and return it. If it is
     *     another type of event, process it.
     *  2. Send fetches if needed.
     * If the timeout expires, return an empty ConsumerRecord.
     *
     * @param timeout timeout of the poll loop
     * @return ConsumerRecord.  It can be empty if time timeout expires.
     *
     * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.RecordTooLargeException if the fetched record is larger than the maximum
     *             allowable size
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
     * @throws java.lang.IllegalStateException if the consumer is not subscribed to any topics or manually assigned any
     *             partitions to consume from or an unexpected error occurred
     * @throws org.apache.kafka.clients.consumer.OffsetOutOfRangeException if the fetch position of the consumer is
     *             out of range and no offset reset policy is configured.
     * @throws org.apache.kafka.common.errors.TopicAuthorizationException if the consumer is not authorized to read
     *             from a partition
     * @throws org.apache.kafka.common.errors.SerializationException if the fetched records cannot be deserialized
     * @throws org.apache.kafka.common.errors.UnsupportedAssignorException if the `group.remote.assignor` configuration
     *             is set to an assignor that is not available on the broker.
     */
    @Override
    public ConsumerRecords<K, V> poll(final Duration timeout) {
        Timer timer = time.timer(timeout);

        acquireAndEnsureOpen();
        try {
            kafkaConsumerMetrics.recordPollStart(timer.currentTimeMs());

            if (subscriptions.hasNoSubscriptionOrUserAssignment()) {
                throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions");
            }

            // This distinguishes the first pass of the inner do/while loop from subsequent passes for the
            // inflight poll event logic.
            boolean firstPass = true;

            do {
                // We must not allow wake-ups between polling for fetches and returning the records.
                // If the polled fetches are not empty the consumed position has already been updated in the polling
                // of the fetches. A wakeup between returned fetches and returning records would lead to never
                // returning the records in the fetches. Thus, we trigger a possible wake-up before we poll fetches.
                wakeupTrigger.maybeTriggerWakeup();

                checkInflightPoll(timer, firstPass);
                firstPass = false;
                final Fetch<K, V> fetch = pollForFetches(timer);
                if (!fetch.isEmpty()) {
                    // before returning the fetched records, we can send off the next round of fetches
                    // and avoid block waiting for their responses to enable pipelining while the user
                    // is handling the fetched records.
                    //
                    // NOTE: since the consumed position has already been updated, we must not allow
                    // wakeups or any other errors to be triggered prior to returning the fetched records.
                    sendPrefetches(timer);

                    if (fetch.records().isEmpty()) {
                        log.trace("Returning empty records from `poll()` "
                            + "since the consumer's position has advanced for at least one topic partition");
                    }

                    return interceptors.onConsume(new ConsumerRecords<>(fetch.records(), fetch.nextOffsets()));
                }
                // We will wait for retryBackoffMs
            } while (timer.notExpired());

            return ConsumerRecords.empty();
        } finally {
            kafkaConsumerMetrics.recordPollEnd(timer.currentTimeMs());
            release();
        }
    }

    /**
     * {@code checkInflightPoll()} manages the lifetime of the {@link AsyncPollEvent} processing. If it is
     * called when no event is currently processing, it will start a new event processing asynchronously. A check
     * is made during each invocation to see if the <em>inflight</em> event has completed. If it has, it will be
     * processed accordingly.
     */
    private void checkInflightPoll(Timer timer, boolean firstPass) {
        if (firstPass && inflightPoll != null) {
            // Handle the case where there's a remaining inflight poll from the *previous* invocation
            // of AsyncKafkaConsumer.poll().
            maybeClearPreviousInflightPoll();
        }

        boolean newlySubmittedEvent = false;

        if (inflightPoll == null) {
            inflightPoll = new AsyncPollEvent(calculateDeadlineMs(timer), time.milliseconds());
            newlySubmittedEvent = true;
            log.trace("Inflight event {} submitted", inflightPoll);
            applicationEventHandler.add(inflightPoll);
        }

        try {
            // Note: this is calling user-supplied code, so make sure that any errors thrown here are caught and
            // the inflight event is cleared.
            offsetCommitCallbackInvoker.executeCallbacks();
            processBackgroundEvents();
        } catch (Throwable t) {
            // If an exception was thrown during execution of offset commit callbacks or background events,
            // bubble it up to the user but make sure to clear out the inflight request because the error effectively
            // renders it complete.
            log.trace("Inflight event {} failed due to {}, clearing", inflightPoll, String.valueOf(t));
            inflightPoll = null;
            throw ConsumerUtils.maybeWrapAsKafkaException(t);
        } finally {
            timer.update();
        }

        if (inflightPoll != null) {
            maybeClearCurrentInflightPoll(newlySubmittedEvent);
        }
    }

    // Disposition for an inflight event carried over from a previous poll() call: throw its error,
    // clear it if it completed without filling the fetch buffer, or keep it if the buffer has data.
    private void maybeClearPreviousInflightPoll() {
        if (inflightPoll.isComplete()) {
            Optional<KafkaException> errorOpt = inflightPoll.error();

            if (errorOpt.isPresent()) {
                // If the previous inflight event is complete, check if it resulted in an error. If there was
                // an error, throw it without delay.
                KafkaException error = errorOpt.get();
                log.trace("Previous inflight event {} completed with an error ({}), clearing", inflightPoll, error);
                inflightPoll = null;
                throw error;
            } else {
                // Successful case...
                if (fetchBuffer.isEmpty()) {
                    // If it completed without error, but without populating the fetch buffer, clear the event
                    // so that a new event will be enqueued below.
                    log.trace("Previous inflight event {} completed without filling the buffer, clearing", inflightPoll);
                    inflightPoll = null;
                } else {
                    // However, if the event completed, and it populated the buffer, *don't* create a new event.
                    // This is to prevent an edge case of starvation when poll() is called with a timeout of 0.
                    // If a new event was created on *every* poll, each time the event would have to complete the
                    // validate positions stage before the data in the fetch buffer is used. Because there is
                    // no blocking, and effectively a 0 wait, the data in the fetch buffer is continuously ignored
                    // leading to no data ever being returned from poll().
                    log.trace("Previous inflight event {} completed and filled the buffer, not clearing", inflightPoll);
                }
            }
        } else if (inflightPoll.isExpired(time) && inflightPoll.isValidatePositionsComplete()) {
            // The inflight event validated positions, but it has expired.
            log.trace("Previous inflight event {} expired without completing, clearing", inflightPoll);
            inflightPoll = null;
        }
    }

    // Disposition for the inflight event within the current poll() pass: throw its error if it failed,
    // clear it on clean completion, or (unless just submitted) clear it once expired post-validation.
    private void maybeClearCurrentInflightPoll(boolean newlySubmittedEvent) {
        if (inflightPoll.isComplete()) {
            Optional<KafkaException> errorOpt = inflightPoll.error();

            if (errorOpt.isPresent()) {
                // If the inflight event completed with an error, throw it without delay.
                KafkaException error = errorOpt.get();
                log.trace("Inflight event {} completed with an error ({}), clearing", inflightPoll, error);
                inflightPoll = null;
                throw error;
            } else {
                log.trace("Inflight event {} completed without error, clearing", inflightPoll);
                inflightPoll = null;
            }
        } else if (!newlySubmittedEvent) {
            if (inflightPoll.isExpired(time) && inflightPoll.isValidatePositionsComplete()) {
                // The inflight event validated positions, but it has expired.
                log.trace("Inflight event {} expired without completing, clearing", inflightPoll);
                inflightPoll = null;
            }
        }
    }

    /**
     * Commit offsets returned on the last {@link #poll(Duration) poll()} for all the subscribed list of topics and
     * partitions.
     */
    @Override
    public void commitSync() {
        commitSync(defaultApiTimeoutMs);
    }

    /**
     * This method sends a commit event to the EventHandler and return.
 */
@Override
public void commitAsync() {
    commitAsync(null);
}

@Override
public void commitAsync(OffsetCommitCallback callback) {
    commitAsync(Optional.empty(), callback);
}

@Override
public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
    // Defensive copy so later caller-side mutation of the map cannot affect the in-flight commit.
    commitAsync(Optional.of(new HashMap<>(offsets)), callback);
}

/**
 * Shared implementation for the {@code commitAsync} overloads: submits an {@link AsyncCommitEvent}
 * and enqueues the interceptor/user-callback invocations to run on completion.
 *
 * @param offsets  offsets to commit, or {@link Optional#empty()} to commit all consumed positions
 * @param callback user callback to invoke on completion; may be {@code null}
 */
private void commitAsync(Optional<Map<TopicPartition, OffsetAndMetadata>> offsets, OffsetCommitCallback callback) {
    acquireAndEnsureOpen();
    try {
        AsyncCommitEvent asyncCommitEvent = new AsyncCommitEvent(offsets);
        lastPendingAsyncCommit = commit(asyncCommitEvent).whenComplete((committedOffsets, throwable) -> {
            if (throwable == null) {
                offsetCommitCallbackInvoker.enqueueInterceptorInvocation(committedOffsets);
            }

            if (callback == null) {
                // No user callback: just surface failures in the log.
                if (throwable != null) {
                    log.error("Offset commit with offsets {} failed", committedOffsets, throwable);
                }
                return;
            }

            offsetCommitCallbackInvoker.enqueueUserCallbackInvocation(callback, committedOffsets, (Exception) throwable);
        });
    } finally {
        release();
    }
}

/**
 * Submits the given commit event to the background thread and returns the future tracking its result.
 * Blocks until the offsets to commit have been determined (see inline note), not until the commit completes.
 */
private CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> commit(final CommitEvent commitEvent) {
    throwIfGroupIdNotDefined();
    offsetCommitCallbackInvoker.executeCallbacks();

    if (commitEvent.offsets().isPresent() && commitEvent.offsets().get().isEmpty()) {
        // Nothing to commit — complete immediately without touching the background thread.
        return CompletableFuture.completedFuture(null);
    }

    applicationEventHandler.add(commitEvent);

    // This blocks until the background thread retrieves allConsumed positions to commit if none were explicitly specified.
    // This operation will ensure that the offsets to commit are not affected by fetches which may start after this
    ConsumerUtils.getResult(commitEvent.offsetsReady(), defaultApiTimeoutMs.toMillis());
    return commitEvent.future();
}

@Override
public void seek(TopicPartition partition, long offset) {
    if (offset < 0)
        throw new IllegalArgumentException("seek offset must not be a negative number");

    acquireAndEnsureOpen();
    try {
        log.info("Seeking to offset {} for partition {}", offset, partition);
        SeekUnvalidatedEvent seekUnvalidatedEventEvent = new SeekUnvalidatedEvent(
            defaultApiTimeoutDeadlineMs(),
            partition,
            offset,
            Optional.empty()
        );
        applicationEventHandler.addAndGet(seekUnvalidatedEventEvent);
    } finally {
        release();
    }
}

@Override
public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) {
    long offset = offsetAndMetadata.offset();
    if (offset < 0) {
        throw new IllegalArgumentException("seek offset must not be a negative number");
    }

    acquireAndEnsureOpen();
    try {
        if (offsetAndMetadata.leaderEpoch().isPresent()) {
            log.info("Seeking to offset {} for partition {} with epoch {}",
                offset, partition, offsetAndMetadata.leaderEpoch().get());
        } else {
            log.info("Seeking to offset {} for partition {}", offset, partition);
        }

        applicationEventHandler.addAndGet(new SeekUnvalidatedEvent(
            defaultApiTimeoutDeadlineMs(),
            partition,
            offsetAndMetadata.offset(),
            offsetAndMetadata.leaderEpoch()
        ));
    } finally {
        release();
    }
}

@Override
public void seekToBeginning(Collection<TopicPartition> partitions) {
    seek(partitions, AutoOffsetResetStrategy.EARLIEST);
}

@Override
public void seekToEnd(Collection<TopicPartition> partitions) {
    seek(partitions, AutoOffsetResetStrategy.LATEST);
}

/**
 * Shared implementation for {@link #seekToBeginning} / {@link #seekToEnd}: asks the background
 * thread to reset the given partitions' offsets using the supplied strategy.
 */
private void seek(Collection<TopicPartition> partitions, AutoOffsetResetStrategy offsetResetStrategy) {
    if (partitions == null)
        throw new IllegalArgumentException("Partitions collection cannot be null");

    acquireAndEnsureOpen();
    try {
        applicationEventHandler.addAndGet(new ResetOffsetEvent(
            partitions,
            offsetResetStrategy,
            defaultApiTimeoutDeadlineMs())
        );
    } finally {
        release();
    }
}

@Override
public long position(TopicPartition partition) {
    return position(partition, defaultApiTimeoutMs);
}

@Override
public long position(TopicPartition partition, Duration timeout) {
    acquireAndEnsureOpen();
    try {
        if (!subscriptions.isAssigned(partition))
            throw new IllegalStateException("You can only check the position for partitions assigned to this consumer.");

        Timer timer = time.timer(timeout);
        // Loop until a valid position is established (committed offset fetch or reset) or the timer expires.
        do {
            SubscriptionState.FetchPosition position = subscriptions.validPosition(partition);
            if (position != null)
                return position.offset;

            updateFetchPositions(timer);
            timer.update();
            wakeupTrigger.maybeTriggerWakeup();
        } while (timer.notExpired());

        throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before the position "
            + "for partition " + partition + " could be determined");
    } finally {
        release();
    }
}

@Override
public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
    return committed(partitions, defaultApiTimeoutMs);
}

@Override
public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions,
                                                        final Duration timeout) {
    acquireAndEnsureOpen();
    long start = time.nanoseconds();
    try {
        throwIfGroupIdNotDefined();
        if (partitions.isEmpty()) {
            return Collections.emptyMap();
        }

        final FetchCommittedOffsetsEvent event = new FetchCommittedOffsetsEvent(
            partitions,
            calculateDeadlineMs(time, timeout));
        // Register the future with the wakeup trigger so a concurrent wakeup() can interrupt the wait.
        wakeupTrigger.setActiveTask(event.future());
        try {
            return applicationEventHandler.addAndGet(event);
        } catch (TimeoutException e) {
            throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before the last "
                + "committed offset for partitions " + partitions + " could be determined. Try tuning "
                + ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG + " larger to relax the threshold.");
        } finally {
            wakeupTrigger.clearTask();
        }
    } finally {
        kafkaConsumerMetrics.recordCommitted(time.nanoseconds() - start);
        release();
    }
}

/**
 * Throws {@link InvalidGroupIdException} if no group ID is configured, since group management and
 * offset commit APIs require one.
 */
private void throwIfGroupIdNotDefined() {
    if (groupMetadata.get().isEmpty()) {
        throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must "
            + "provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.");
    }
}

@Override
public Map<MetricName, ? extends Metric> metrics() {
    return Collections.unmodifiableMap(metrics.metrics());
}

@Override
public List<PartitionInfo> partitionsFor(String topic) {
    return partitionsFor(topic, defaultApiTimeoutMs);
}

@Override
public List<PartitionInfo> partitionsFor(String topic, Duration timeout) {
    acquireAndEnsureOpen();
    try {
        // Serve from cached metadata when possible; only go to the background thread on a miss.
        Cluster cluster = this.metadata.fetch();
        List<PartitionInfo> parts = cluster.partitionsForTopic(topic);
        if (!parts.isEmpty())
            return parts;

        if (timeout.toMillis() == 0L) {
            throw new TimeoutException();
        }

        final TopicMetadataEvent topicMetadataEvent = new TopicMetadataEvent(topic, calculateDeadlineMs(time, timeout));
        wakeupTrigger.setActiveTask(topicMetadataEvent.future());
        try {
            Map<String, List<PartitionInfo>> topicMetadata =
                applicationEventHandler.addAndGet(topicMetadataEvent);

            return topicMetadata.getOrDefault(topic, Collections.emptyList());
        } finally {
            wakeupTrigger.clearTask();
        }
    } finally {
        release();
    }
}

@Override
public Map<String, List<PartitionInfo>> listTopics() {
    return listTopics(defaultApiTimeoutMs);
}

@Override
public Map<String, List<PartitionInfo>> listTopics(Duration timeout) {
    acquireAndEnsureOpen();
    try {
        if (timeout.toMillis() == 0L) {
            throw new TimeoutException();
        }

        final AllTopicsMetadataEvent topicMetadataEvent = new AllTopicsMetadataEvent(calculateDeadlineMs(time, timeout));
        wakeupTrigger.setActiveTask(topicMetadataEvent.future());
        try {
            return applicationEventHandler.addAndGet(topicMetadataEvent);
        } finally {
            wakeupTrigger.clearTask();
        }
    } finally {
        release();
    }
}

@Override
public Set<TopicPartition> paused() {
    acquireAndEnsureOpen();
    try {
        return Collections.unmodifiableSet(subscriptions.pausedPartitions());
    } finally {
        release();
    }
}

@Override
public void pause(Collection<TopicPartition> partitions) {
    acquireAndEnsureOpen();
    try {
        Objects.requireNonNull(partitions, "The partitions to pause must be nonnull");
        if (!partitions.isEmpty())
            applicationEventHandler.addAndGet(new PausePartitionsEvent(partitions, defaultApiTimeoutDeadlineMs()));
    } finally {
        release();
    }
}

@Override
public void resume(Collection<TopicPartition> partitions) {
    acquireAndEnsureOpen();
    try {
        Objects.requireNonNull(partitions, "The partitions to resume must be nonnull");
        if (!partitions.isEmpty())
            applicationEventHandler.addAndGet(new ResumePartitionsEvent(partitions, defaultApiTimeoutDeadlineMs()));
    } finally {
        release();
    }
}

@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
    return offsetsForTimes(timestampsToSearch, defaultApiTimeoutMs);
}

@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch,
                                                               Duration timeout) {
    acquireAndEnsureOpen();
    try {
        // Keeping same argument validation error thrown by the current consumer implementation
        // to avoid API level changes.
        requireNonNull(timestampsToSearch, "Timestamps to search cannot be null");
        for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
            // Exclude the earliest and latest offset here so the timestamp in the returned
            // OffsetAndTimestamp is always positive.
            if (entry.getValue() < 0)
                throw new IllegalArgumentException("The target time for partition " + entry.getKey() + " is "
                    + entry.getValue() + ". The target time cannot be negative.");
        }

        if (timestampsToSearch.isEmpty()) {
            return Collections.emptyMap();
        }
        ListOffsetsEvent listOffsetsEvent = new ListOffsetsEvent(
            timestampsToSearch,
            calculateDeadlineMs(time, timeout),
            true);

        // If timeout is set to zero return empty immediately; otherwise try to get the results
        // and throw timeout exception if it cannot complete in time.
        if (timeout.toMillis() == 0L) {
            applicationEventHandler.add(listOffsetsEvent);
            return listOffsetsEvent.emptyResults();
        }

        try {
            Map<TopicPartition, OffsetAndTimestampInternal> offsets =
                applicationEventHandler.addAndGet(listOffsetsEvent);
            Map<TopicPartition, OffsetAndTimestamp> results = new HashMap<>(offsets.size());
            offsets.forEach((k, v) -> results.put(k, v != null ? v.buildOffsetAndTimestamp() : null));
            return results;
        } catch (TimeoutException e) {
            throw new TimeoutException("Failed to get offsets by times in "
                + timeout.toMillis() + "ms");
        }
    } finally {
        release();
    }
}

@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
    return beginningOffsets(partitions, defaultApiTimeoutMs);
}

@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Duration timeout) {
    return beginningOrEndOffset(partitions, ListOffsetsRequest.EARLIEST_TIMESTAMP, timeout);
}

@Override
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
    return endOffsets(partitions, defaultApiTimeoutMs);
}

@Override
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Duration timeout) {
    return beginningOrEndOffset(partitions, ListOffsetsRequest.LATEST_TIMESTAMP, timeout);
}

/**
 * Shared implementation for {@link #beginningOffsets} / {@link #endOffsets}: lists offsets for all
 * given partitions at the supplied sentinel timestamp.
 */
private Map<TopicPartition, Long> beginningOrEndOffset(Collection<TopicPartition> partitions,
                                                       long timestamp,
                                                       Duration timeout) {
    acquireAndEnsureOpen();
    try {
        // Keeping same argument validation error thrown by the current consumer implementation
        // to avoid API level changes.
        requireNonNull(partitions, "Partitions cannot be null");

        if (partitions.isEmpty()) {
            return Collections.emptyMap();
        }
        Map<TopicPartition, Long> timestampToSearch = partitions
            .stream()
            .collect(Collectors.toMap(Function.identity(), tp -> timestamp));
        ListOffsetsEvent listOffsetsEvent = new ListOffsetsEvent(
            timestampToSearch,
            calculateDeadlineMs(time, timeout),
            false);

        // If timeout is set to zero return empty immediately; otherwise try to get the results
        // and throw timeout exception if it cannot complete in time.
        if (timeout.isZero()) {
            applicationEventHandler.add(listOffsetsEvent);
            // It is used to align with classic consumer.
            // When the "timeout == 0", the classic consumer will return an empty map.
            // Therefore, the AsyncKafkaConsumer needs to be consistent with it.
            return new HashMap<>();
        }
        Map<TopicPartition, OffsetAndTimestampInternal> offsetAndTimestampMap;
        try {
            offsetAndTimestampMap = applicationEventHandler.addAndGet(listOffsetsEvent);
            return offsetAndTimestampMap.entrySet()
                .stream()
                .collect(Collectors.toMap(
                    Map.Entry::getKey,
                    entry -> entry.getValue().offset()));
        } catch (TimeoutException e) {
            throw new TimeoutException("Failed to get offsets by times in "
                + timeout.toMillis() + "ms");
        }
    } finally {
        release();
    }
}

@Override
public OptionalLong currentLag(TopicPartition topicPartition) {
    acquireAndEnsureOpen();
    try {
        return applicationEventHandler.addAndGet(new CurrentLagEvent(
            topicPartition,
            isolationLevel,
            defaultApiTimeoutDeadlineMs()
        ));
    } finally {
        release();
    }
}

@Override
public ConsumerGroupMetadata groupMetadata() {
    acquireAndEnsureOpen();
    try {
        throwIfGroupIdNotDefined();
        return groupMetadata.get().get();
    } finally {
        release();
    }
}

@Override
public void enforceRebalance() {
    log.warn("Operation not supported in new consumer group protocol");
}

@Override
public void enforceRebalance(String reason) {
    log.warn("Operation not supported in new consumer group protocol");
}

@Override
public void close() {
    close(CloseOptions.timeout(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)));
}

@Deprecated
@Override
public void close(Duration timeout) {
    close(CloseOptions.timeout(timeout));
}

@Override
public void close(CloseOptions option) {
    Duration timeout = option.timeout().orElseGet(() -> Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS));
    if (timeout.toMillis() < 0)
        throw new IllegalArgumentException("The timeout cannot be negative.");
    acquire();
    try {
        if (!closed) {
            // need to close before setting the flag since the close function
            // itself may trigger rebalance callback that needs the consumer to be open still
            close(timeout, option.groupMembershipOperation(), false);
        }
    } finally {
        closed = true;
        release();
    }
}

/**
 * Please keep these tenets in mind for the implementation of the {@link AsyncKafkaConsumer}’s
 * {@link #close(Duration)} method. In the future, these tenets may be made officially part of the top-level
 * {@link KafkaConsumer#close(Duration)} API, but for now they remain here.
 *
 * <ol>
 *     <li>
 *         The execution of the {@link ConsumerRebalanceListener} callback (if applicable) must be performed on
 *         the application thread to ensure it does not interfere with the network I/O on the background thread.
 *     </li>
 *     <li>
 *         The {@link ConsumerRebalanceListener} callback execution must complete before an attempt to leave
 *         the consumer group is performed. In this context, “complete” does not necessarily imply
 *         <em>success</em>; execution is “complete” even if the execution <em>fails</em> with an error.
 *     </li>
 *     <li>
 *         Any error thrown during the {@link ConsumerRebalanceListener} callback execution will be caught to
 *         ensure it does not prevent execution of the remaining {@link #close()} logic.
 *     </li>
 *     <li>
 *         The application thread will be blocked during the entire duration of the execution of the
 *         {@link ConsumerRebalanceListener}. The consumer does not employ a mechanism to short-circuit the
 *         callback execution, so execution is not bound by the timeout in {@link #close(Duration)}.
 *     </li>
 *     <li>
 *         A given {@link ConsumerRebalanceListener} implementation may be affected by the application thread's
 *         interrupt state. If the callback implementation performs any blocking operations, it may result in
 *         an error. An implementation may choose to preemptively check the thread's interrupt flag via
 *         {@link Thread#isInterrupted()} or {@link Thread#interrupted()} and alter its behavior.
 *     </li>
 *     <li>
 *         If the application thread was interrupted <em>prior</em> to the execution of the
 *         {@link ConsumerRebalanceListener} callback, the thread's interrupt state will be preserved for the
 *         {@link ConsumerRebalanceListener} execution.
 *     </li>
 *     <li>
 *         If the application thread was interrupted <em>prior</em> to the execution of the
 *         {@link ConsumerRebalanceListener} callback <em>but</em> the callback cleared out the interrupt state,
 *         the {@link #close()} method will not make any effort to restore the application thread's interrupt
 *         state for the remainder of the execution of {@link #close()}.
 *     </li>
 *     <li>
 *         Leaving the consumer group is achieved by issuing a ‘leave group‘ network request. The consumer will
 *         attempt to leave the group on a “best-case” basis. There is no stated guarantee that the consumer will
 *         have successfully left the group before the {@link #close()} method completes processing.
 *     </li>
 *     <li>
 *         The consumer will attempt to leave the group regardless of the timeout elapsing or the application
 *         thread receiving an {@link InterruptException} or {@link InterruptedException}.
 *     </li>
 *     <li>
 *         The application thread will wait for confirmation that the consumer left the group until one of the
 *         following occurs:
 *
 *         <ol>
 *             <li>Confirmation that the ’leave group‘ response was received from the group coordinator</li>
 *             <li>The timeout provided by the user elapses</li>
 *             <li>An {@link InterruptException} or {@link InterruptedException} is thrown</li>
 *         </ol>
 *     </li>
 * </ol>
 */
private void close(Duration timeout, CloseOptions.GroupMembershipOperation membershipOperation, boolean swallowException) {
    log.trace("Closing the Kafka consumer");
    AtomicReference<Throwable> firstException = new AtomicReference<>();

    // We are already closing with a timeout, don't allow wake-ups from here on.
    wakeupTrigger.disableWakeups();

    final Timer closeTimer = createTimerForCloseRequests(timeout);
    clientTelemetryReporter.ifPresent(ClientTelemetryReporter::initiateClose);
    closeTimer.update();
    // Prepare shutting down the network thread
    // Prior to closing the network thread, we need to make sure the following operations happen in the right
    // sequence...
    swallow(log, Level.ERROR, "Failed to auto-commit offsets",
        () -> autoCommitOnClose(closeTimer), firstException);
    swallow(log, Level.ERROR, "Failed to stop finding coordinator",
        this::stopFindCoordinatorOnClose, firstException);
    swallow(log, Level.ERROR, "Failed to run rebalance callbacks",
        this::runRebalanceCallbacksOnClose, firstException);
    swallow(log, Level.ERROR, "Failed to leave group while closing consumer",
        () -> leaveGroupOnClose(closeTimer, membershipOperation), firstException);
    swallow(log, Level.ERROR, "Failed invoking asynchronous commit callbacks while closing consumer",
        () -> awaitPendingAsyncCommitsAndExecuteCommitCallbacks(closeTimer, false), firstException);
    if (applicationEventHandler != null)
        closeQuietly(() -> applicationEventHandler.close(Duration.ofMillis(closeTimer.remainingMs())),
            "Failed shutting down network thread", firstException);
    closeTimer.update();

    // close() can be called from inside one of the constructors. In that case, it's possible that neither
    // the reaper nor the background event queue were constructed, so check them first to avoid NPE.
    if (backgroundEventReaper != null && backgroundEventQueue != null)
        backgroundEventReaper.reap(backgroundEventQueue);

    closeQuietly(interceptors, "consumer interceptors", firstException);
    closeQuietly(kafkaConsumerMetrics, "kafka consumer metrics", firstException);
    closeQuietly(asyncConsumerMetrics, "async consumer metrics", firstException);
    closeQuietly(metrics, "consumer metrics", firstException);
    closeQuietly(deserializers, "consumer deserializers", firstException);
    clientTelemetryReporter.ifPresent(reporter -> closeQuietly(reporter, "async consumer telemetry reporter", firstException));

    AppInfoParser.unregisterAppInfo(CONSUMER_JMX_PREFIX, clientId, metrics);
    log.debug("Kafka consumer has been closed");

    // Re-throw the first error encountered (unless the caller asked for errors to be swallowed).
    Throwable exception = firstException.get();
    if (exception != null && !swallowException) {
        if (exception instanceof InterruptException) {
            throw (InterruptException) exception;
        }
        throw new KafkaException("Failed to close kafka consumer", exception);
    }
}

/**
 * Creates the timer bounding all close-time requests, capped at the request timeout.
 */
private Timer createTimerForCloseRequests(Duration timeout) {
    // this.time could be null if an exception occurs in constructor prior to setting the this.time field
    final Time time = (this.time == null) ? Time.SYSTEM : this.time;
    return time.timer(Math.min(timeout.toMillis(), requestTimeoutMs));
}

/**
 * On close, commits all consumed offsets synchronously (if auto-commit is enabled) and notifies
 * the background thread that commit-related resources can be torn down.
 */
private void autoCommitOnClose(final Timer timer) {
    if (groupMetadata.get().isEmpty() || applicationEventHandler == null)
        return;

    if (autoCommitEnabled)
        commitSyncAllConsumed(timer);

    applicationEventHandler.add(new CommitOnCloseEvent());
}

/**
 * On close, invokes the appropriate rebalance listener callbacks (revoked when still in the group,
 * lost otherwise) for either the Streams or the regular consumer listener.
 */
private void runRebalanceCallbacksOnClose() {
    if (groupMetadata.get().isEmpty())
        return;

    int memberEpoch = groupMetadata.get().get().generationId();

    Exception error = null;

    if (streamsRebalanceListenerInvoker != null && streamsRebalanceListenerInvoker.isPresent()) {
        if (memberEpoch > 0) {
            error = streamsRebalanceListenerInvoker.get().invokeAllTasksRevoked();
        } else {
            error = streamsRebalanceListenerInvoker.get().invokeAllTasksLost();
        }
    } else if (rebalanceListenerInvoker != null) {
        Set<TopicPartition> assignedPartitions = groupAssignmentSnapshot.get();

        if (assignedPartitions.isEmpty())
            // Nothing to revoke.
            return;

        SortedSet<TopicPartition> droppedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR);
        droppedPartitions.addAll(assignedPartitions);

        if (memberEpoch > 0) {
            error = rebalanceListenerInvoker.invokePartitionsRevoked(droppedPartitions);
        } else {
            error = rebalanceListenerInvoker.invokePartitionsLost(droppedPartitions);
        }
    }

    if (error != null)
        throw ConsumerUtils.maybeWrapAsKafkaException(error);
}

/**
 * On close, issues a best-effort 'leave group' request and waits for it up to the timer's deadline.
 * A timeout is logged but does not abort the close sequence.
 */
private void leaveGroupOnClose(final Timer timer, final CloseOptions.GroupMembershipOperation membershipOperation) {
    if (groupMetadata.get().isEmpty() || applicationEventHandler == null)
        return;

    log.debug("Leaving the consumer group during consumer close");
    try {
        applicationEventHandler.addAndGet(new LeaveGroupOnCloseEvent(calculateDeadlineMs(timer), membershipOperation));
        log.info("Completed leaving the group");
    } catch (TimeoutException e) {
        log.warn("Consumer attempted to leave the group but couldn't "
            + "complete it within {} ms. It will proceed to close.", timer.timeoutMs());
    } finally {
        timer.update();
    }
}

/**
 * On close, tells the background thread to stop any in-progress coordinator lookup.
 */
private void stopFindCoordinatorOnClose() {
    if (groupMetadata.get().isEmpty() || applicationEventHandler == null)
        return;
    log.debug("Stop finding coordinator during consumer close");
    applicationEventHandler.add(new StopFindCoordinatorOnCloseEvent());
}

// Visible for testing
void commitSyncAllConsumed(final Timer timer) {
    log.debug("Sending synchronous auto-commit on closing");
    try {
        commitSync(Duration.ofMillis(timer.remainingMs()));
    } catch (Exception e) {
        // consistent with async auto-commit failures, we do not propagate the exception
        log.warn("Synchronous auto-commit failed", e);
    }
    timer.update();
}

@Override
public void wakeup() {
    wakeupTrigger.wakeup();
}

/**
 * This method sends a commit event to the EventHandler and waits for
 * the event to finish.
 *
 * @param timeout max wait time for the blocking operation.
 */
@Override
public void commitSync(final Duration timeout) {
    commitSync(Optional.empty(), timeout);
}

@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) {
    commitSync(Optional.of(new HashMap<>(offsets)), defaultApiTimeoutMs);
}

@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, Duration timeout) {
    commitSync(Optional.of(new HashMap<>(offsets)), timeout);
}

/**
 * Shared implementation for the {@code commitSync} overloads: waits for any pending async commit
 * first, then blocks on the sync commit's future and invokes the commit interceptors on success.
 */
private void commitSync(Optional<Map<TopicPartition, OffsetAndMetadata>> offsets, Duration timeout) {
    acquireAndEnsureOpen();
    long commitStart = time.nanoseconds();
    try {
        SyncCommitEvent syncCommitEvent = new SyncCommitEvent(offsets, calculateDeadlineMs(time, timeout));
        CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> commitFuture = commit(syncCommitEvent);

        Timer requestTimer = time.timer(timeout.toMillis());
        awaitPendingAsyncCommitsAndExecuteCommitCallbacks(requestTimer, true);

        wakeupTrigger.setActiveTask(commitFuture);
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = ConsumerUtils.getResult(commitFuture, requestTimer);
        interceptors.onCommit(committedOffsets);
    } finally {
        wakeupTrigger.clearTask();
        kafkaConsumerMetrics.recordCommitSync(time.nanoseconds() - commitStart);
        release();
    }
}

/**
 * Waits for the last pending async commit (if any) to finish, then runs any queued commit
 * callbacks on the application thread.
 *
 * @param enableWakeup whether a concurrent {@link #wakeup()} may interrupt the wait
 */
private void awaitPendingAsyncCommitsAndExecuteCommitCallbacks(Timer timer, boolean enableWakeup) {
    if (lastPendingAsyncCommit == null || offsetCommitCallbackInvoker == null) {
        return;
    }

    try {
        final CompletableFuture<Void> futureToAwait = new CompletableFuture<>();
        // We don't want the wake-up trigger to complete our pending async commit future,
        // so create new future here. Any errors in the pending async commit will be handled
        // by the async commit future / the commit callback - here, we just want to wait for it to complete.
        lastPendingAsyncCommit.whenComplete((v, t) -> futureToAwait.complete(null));
        if (enableWakeup) {
            wakeupTrigger.setActiveTask(futureToAwait);
        }
        ConsumerUtils.getResult(futureToAwait, timer);
        lastPendingAsyncCommit = null;
    } finally {
        if (enableWakeup) {
            wakeupTrigger.clearTask();
        }
        timer.update();
    }
    offsetCommitCallbackInvoker.executeCallbacks();
}

@Override
public Uuid clientInstanceId(Duration timeout) {
    if (clientTelemetryReporter.isEmpty()) {
        throw new IllegalStateException("Telemetry is not enabled. Set config `" + ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`.");
    }

    return ClientTelemetryUtils.fetchClientInstanceId(clientTelemetryReporter.get(), timeout);
}

@Override
public Set<TopicPartition> assignment() {
    acquireAndEnsureOpen();
    try {
        return Collections.unmodifiableSet(subscriptions.assignedPartitions());
    } finally {
        release();
    }
}

/**
 * Get the current subscription, or an empty set if no such call has
 * been made.
 * @return The set of topics currently subscribed to
 */
@Override
public Set<String> subscription() {
    acquireAndEnsureOpen();
    try {
        return Collections.unmodifiableSet(subscriptions.subscription());
    } finally {
        release();
    }
}

@Override
public void assign(Collection<TopicPartition> partitions) {
    acquireAndEnsureOpen();
    try {
        if (partitions == null) {
            throw new IllegalArgumentException("Topic partitions collection to assign to cannot be null");
        }

        if (partitions.isEmpty()) {
            // An empty assignment is treated as a full unsubscribe.
            unsubscribe();
            return;
        }

        for (TopicPartition tp : partitions) {
            String topic = (tp != null) ? tp.topic() : null;
            if (isBlank(topic))
                throw new IllegalArgumentException("Topic partitions to assign to cannot have null or empty topic");
        }

        // Clear the buffered data which are not a part of newly assigned topics
        final Set<TopicPartition> currentTopicPartitions = new HashSet<>();

        for (TopicPartition tp : subscriptions.assignedPartitions()) {
            if (partitions.contains(tp))
                currentTopicPartitions.add(tp);
        }

        fetchBuffer.retainAll(currentTopicPartitions);

        // assignment change event will trigger autocommit if it is configured and the group id is specified. This is
        // to make sure offsets of topic partitions the consumer is unsubscribing from are committed since there will
        // be no following rebalance.
        //
        // See the ApplicationEventProcessor.process() method that handles this event for more detail.
        applicationEventHandler.addAndGet(new AssignmentChangeEvent(
            time.milliseconds(),
            defaultApiTimeoutDeadlineMs(),
            partitions
        ));
    } finally {
        release();
    }
}

@Override
public void unsubscribe() {
    acquireAndEnsureOpen();
    try {
        fetchBuffer.retainAll(Collections.emptySet());
        Timer timer = time.timer(defaultApiTimeoutMs);
        UnsubscribeEvent unsubscribeEvent = new UnsubscribeEvent(calculateDeadlineMs(timer));
        applicationEventHandler.add(unsubscribeEvent);
        log.info("Unsubscribing all topics or patterns and assigned partitions {}",
            subscriptions.assignedPartitions());

        try {
            // If users have fatal error, they will get some exceptions in the background queue.
            // When running unsubscribe, these exceptions should be ignored, or users can't unsubscribe successfully.
            processBackgroundEvents(unsubscribeEvent.future(), timer,
                e -> (e instanceof GroupAuthorizationException || e instanceof TopicAuthorizationException));
            log.info("Unsubscribed all topics or patterns and assigned partitions");
        } catch (TimeoutException e) {
            log.error("Failed while waiting for the unsubscribe event to complete");
        }
        resetGroupMetadata();
    } catch (Exception e) {
        log.error("Unsubscribe failed", e);
        throw e;
    } finally {
        release();
    }
}

/**
 * Resets the cached group metadata back to its initial (pre-join) state, preserving the group ID
 * and group instance ID.
 */
private void resetGroupMetadata() {
    groupMetadata.updateAndGet(
        oldGroupMetadataOptional -> oldGroupMetadataOptional
            .map(oldGroupMetadata -> initializeConsumerGroupMetadata(
                oldGroupMetadata.groupId(),
                oldGroupMetadata.groupInstanceId()
            ))
    );
}

// Visible for testing
WakeupTrigger wakeupTrigger() {
    return wakeupTrigger;
}

/**
 * Returns any already-fetched records, or waits (bounded by the timer and the backoff heuristics
 * below) for the fetch buffer to be populated by the background thread.
 */
private Fetch<K, V> pollForFetches(Timer timer) {
    long pollTimeout = isCommittedOffsetsManagementEnabled() ?
        Math.min(applicationEventHandler.maximumTimeToWait(), timer.remainingMs()) :
        timer.remainingMs();

    // if data is available already, return it immediately
    final Fetch<K, V> fetch = collectFetch();

    if (!fetch.isEmpty()) {
        return fetch;
    }

    // With the non-blocking poll design, it's possible that at this point the background thread is
    // concurrently working to update positions. Therefore, a _copy_ of the current assignment is retrieved
    // and iterated looking for any partitions with invalid positions. This is done to avoid being stuck
    // in poll for an unnecessarily long amount of time if we are missing some positions since the offset
    // lookup may be backing off after a failure.
    if (pollTimeout > retryBackoffMs) {
        Set<TopicPartition> partitions = subscriptions.assignedPartitions();

        if (partitions.isEmpty()) {
            // If there aren't any assigned partitions, this could mean that this consumer's group membership
            // has not been established or assignments have been removed and not yet reassigned. In either case,
            // reduce the poll time for the fetch buffer wait.
            pollTimeout = retryBackoffMs;
        } else {
            for (TopicPartition tp : partitions) {
                if (!subscriptions.hasValidPosition(tp)) {
                    pollTimeout = retryBackoffMs;
                    break;
                }
            }
        }
    }

    log.trace("Polling for fetches with timeout {}", pollTimeout);

    Timer pollTimer = time.timer(pollTimeout);
    wakeupTrigger.setFetchAction(fetchBuffer);

    // Wait a bit for some fetched data to arrive, as there may not be anything immediately available. Note the
    // use of a shorter, dedicated "pollTimer" here which updates "timer" so that calling method (poll) will
    // correctly handle the overall timeout.
    try {
        fetchBuffer.awaitWakeup(pollTimer);
    } catch (InterruptException e) {
        log.trace("Interrupt during fetch", e);
        throw e;
    } finally {
        timer.update(pollTimer.currentTimeMs());
        wakeupTrigger.clearTask();
    }

    return collectFetch();
}

/**
 * Perform the "{@link FetchCollector#collectFetch(FetchBuffer) fetch collection}" step by reading raw data out
 * of the {@link #fetchBuffer}, converting it to a well-formed {@link CompletedFetch}, validating that it and
 * the internal {@link SubscriptionState state} are correct, and then converting it all into a {@link Fetch}
 * for returning.
 */
private Fetch<K, V> collectFetch() {
    // With the non-blocking async poll, it's critical that the application thread wait until the background
    // thread has completed the stage of validating positions. This prevents a race condition where both
    // threads may attempt to update the SubscriptionState.position() for a given partition. So if the background
    // thread has not completed that stage for the inflight event, don't attempt to collect data from the fetch
    // buffer. If the inflight event was nulled out by checkInflightPoll(), that implies that it is safe to
    // attempt to collect data from the fetch buffer.
    if (positionsValidator.canSkipUpdateFetchPositions()) {
        return fetchCollector.collectFetch(fetchBuffer);
    }

    if (inflightPoll != null && !inflightPoll.isValidatePositionsComplete()) {
        return Fetch.empty();
    }

    return fetchCollector.collectFetch(fetchBuffer);
}

/**
 * Set the fetch position to the committed position (if there is one)
 * or reset it using the offset reset policy the user has configured.
 *
 * @return true iff the operation completed without timing out
 * @throws AuthenticationException       If authentication fails. See the exception for more details
 * @throws NoOffsetForPartitionException If no offset is stored for a given partition and no offset reset policy is
 *                                       defined
 */
private boolean updateFetchPositions(final Timer timer) {
    try {
        CheckAndUpdatePositionsEvent checkAndUpdatePositionsEvent = new CheckAndUpdatePositionsEvent(calculateDeadlineMs(timer));
        wakeupTrigger.setActiveTask(checkAndUpdatePositionsEvent.future());
        applicationEventHandler.addAndGet(checkAndUpdatePositionsEvent);
    } catch (TimeoutException e) {
        return false;
    } finally {
        wakeupTrigger.clearTask();
    }
    return true;
}

/**
 * Indicates if the consumer is using the Kafka-based offset management strategy,
 * according to config {@link CommonClientConfigs#GROUP_ID_CONFIG}
 */
private boolean isCommittedOffsetsManagementEnabled() {
    return groupMetadata.get().isPresent();
}

/**
 * This method signals the background thread to {@link CreateFetchRequestsEvent create fetch requests} for the
 * pre-fetch case, i.e. right before {@link #poll(Duration)} exits. In the pre-fetch case, the application thread
 * will not wait for confirmation of the request creation before continuing.
 *
 * <p/>
 *
 * At the point this method is called, {@link KafkaConsumer#poll(Duration)} has data ready to return to the user,
 * which means the consumed position was already updated. In order to prevent potential gaps in records, this
 * method is designed to suppress all exceptions.
 *
 * @param timer Provides an upper bound for the event and its {@link CompletableFuture future}
 */
private void sendPrefetches(Timer timer) {
    try {
        applicationEventHandler.add(new CreateFetchRequestsEvent(calculateDeadlineMs(timer)));
    } catch (Throwable t) {
        // Any unexpected errors will be logged for troubleshooting, but not thrown.
        log.warn("An unexpected error occurred while pre-fetching data in Consumer.poll(), but was suppressed", t);
    }
}

@Override
public boolean updateAssignmentMetadataIfNeeded(Timer timer) {
    offsetCommitCallbackInvoker.executeCallbacks();
    if (subscriptions.hasPatternSubscription()) {
        try {
            applicationEventHandler.addAndGet(new UpdatePatternSubscriptionEvent(calculateDeadlineMs(timer)));
        } catch (TimeoutException e) {
            return false;
        } finally {
            timer.update();
        }
    }
    processBackgroundEvents();

    return updateFetchPositions(timer);
}

@Override
public void subscribe(Collection<String> topics) {
    subscribeInternal(topics, Optional.empty());
}

@Override
public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
    if (listener == null)
        throw new IllegalArgumentException("RebalanceListener cannot be null");

    subscribeInternal(topics, Optional.of(listener));
}

public void subscribe(Collection<String> topics, StreamsRebalanceListener streamsRebalanceListener) {
    streamsRebalanceListenerInvoker
        .orElseThrow(() -> new IllegalStateException("Consumer was not created to be used with Streams rebalance protocol events"))
        .setRebalanceListener(streamsRebalanceListener);
    subscribeInternal(topics, Optional.empty());
}

@Override
public void subscribe(Pattern pattern) {
    subscribeInternal(pattern, Optional.empty());
}

@Override
public void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener listener) {
    if (listener == null)
        throw new IllegalArgumentException("RebalanceListener cannot be null");
    subscribeToRegex(pattern, Optional.of(listener));
}

@Override
public void subscribe(SubscriptionPattern pattern) {
    subscribeToRegex(pattern, Optional.empty());
}

@Override
public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) {
    if (listener == null)
        throw new IllegalArgumentException("RebalanceListener cannot be null");

    subscribeInternal(pattern, Optional.of(listener));
}

/**
 * Acquire the light lock and ensure that the consumer hasn't been closed.
* * @throws IllegalStateException If the consumer has been closed */ private void acquireAndEnsureOpen() { acquire(); if (this.closed) { release(); throw new IllegalStateException("This consumer has already been closed."); } } /** * Acquire the light lock protecting this consumer from multithreaded access. Instead of blocking * when the lock is not available, however, we just throw an exception (since multithreaded usage is not * supported). * * @throws ConcurrentModificationException if another thread already has the lock */ private void acquire() { final Thread thread = Thread.currentThread(); final long threadId = thread.getId(); if (threadId != currentThread.get() && !currentThread.compareAndSet(NO_CURRENT_THREAD, threadId)) throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access. " + "currentThread(name: " + thread.getName() + ", id: " + threadId + ")" + " otherThread(id: " + currentThread.get() + ")" ); refCount.incrementAndGet(); } /** * Release the light lock protecting the consumer from multithreaded access. */ private void release() { if (refCount.decrementAndGet() == 0) currentThread.set(NO_CURRENT_THREAD); } private void subscribeInternal(Pattern pattern, Optional<ConsumerRebalanceListener> listener) { acquireAndEnsureOpen(); try { throwIfGroupIdNotDefined(); if (pattern == null || pattern.toString().isEmpty()) throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ? "null" : "empty")); log.info("Subscribed to pattern: '{}'", pattern); applicationEventHandler.addAndGet(new TopicPatternSubscriptionChangeEvent( pattern, listener, defaultApiTimeoutDeadlineMs() )); } finally { release(); } } /** * Subscribe to the RE2/J pattern. This will generate an event to update the pattern in the * subscription state, so it's included in the next heartbeat request sent to the broker. * No validation of the pattern is performed by the client (other than null/empty checks). 
*/ private void subscribeToRegex(SubscriptionPattern pattern, Optional<ConsumerRebalanceListener> listener) { acquireAndEnsureOpen(); try { throwIfGroupIdNotDefined(); throwIfSubscriptionPatternIsInvalid(pattern); log.info("Subscribing to regular expression {}", pattern); applicationEventHandler.addAndGet(new TopicRe2JPatternSubscriptionChangeEvent( pattern, listener, calculateDeadlineMs(time.timer(defaultApiTimeoutMs)))); } finally { release(); } } private void throwIfSubscriptionPatternIsInvalid(SubscriptionPattern subscriptionPattern) { if (subscriptionPattern == null) { throw new IllegalArgumentException("Topic pattern to subscribe to cannot be null"); } if (subscriptionPattern.pattern().isEmpty()) { throw new IllegalArgumentException("Topic pattern to subscribe to cannot be empty"); } } private void subscribeInternal(Collection<String> topics, Optional<ConsumerRebalanceListener> listener) { acquireAndEnsureOpen(); try { throwIfGroupIdNotDefined(); if (topics == null) throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); if (topics.isEmpty()) { // treat subscribing to empty topic list as the same as unsubscribing unsubscribe(); } else { for (String topic : topics) { if (isBlank(topic)) throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic"); } // Clear the buffered data which are not a part of newly assigned topics final Set<TopicPartition> currentTopicPartitions = new HashSet<>(); for (TopicPartition tp : subscriptions.assignedPartitions()) { if (topics.contains(tp.topic())) currentTopicPartitions.add(tp); } fetchBuffer.retainAll(currentTopicPartitions); log.info("Subscribed to topic(s): {}", String.join(", ", topics)); applicationEventHandler.addAndGet(new TopicSubscriptionChangeEvent( new HashSet<>(topics), listener, defaultApiTimeoutDeadlineMs() )); } } finally { release(); } } /** * Process the events-if any-that were produced by the {@link ConsumerNetworkThread network 
thread}. * It is possible that {@link ErrorEvent an error} * could occur when processing the events. In such cases, the processor will take a reference to the first * error, continue to process the remaining events, and then throw the first error that occurred. * * Visible for testing. */ boolean processBackgroundEvents() { AtomicReference<KafkaException> firstError = new AtomicReference<>(); List<BackgroundEvent> events = backgroundEventHandler.drainEvents(); if (!events.isEmpty()) { long startMs = time.milliseconds(); for (BackgroundEvent event : events) { asyncConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); try { if (event instanceof CompletableEvent) backgroundEventReaper.add((CompletableEvent<?>) event); backgroundEventProcessor.process(event); } catch (Throwable t) { KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); if (!firstError.compareAndSet(null, e)) log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); } } asyncConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); } backgroundEventReaper.reap(time.milliseconds()); if (firstError.get() != null) throw firstError.get(); return !events.isEmpty(); } /** * This method can be used by cases where the caller has an event that needs to both block for completion but * also process background events. For some events, in order to fully process the associated logic, the * {@link ConsumerNetworkThread background thread} needs assistance from the application thread to complete. * If the application thread simply blocked on the event after submitting it, the processing would deadlock. 
* The logic herein is basically a loop that performs two tasks in each iteration: * * <ol> * <li>Process background events, if any</li> * <li><em>Briefly</em> wait for {@link CompletableApplicationEvent an event} to complete</li> * </ol> * * <p/> * * Each iteration gives the application thread an opportunity to process background events, which may be * necessary to complete the overall processing. * * <p/> * * As an example, take {@link #unsubscribe()}. To start unsubscribing, the application thread enqueues an * {@link UnsubscribeEvent} on the application event queue. That event will eventually trigger the * rebalancing logic in the background thread. Critically, as part of this rebalancing work, the * {@link ConsumerRebalanceListener#onPartitionsRevoked(Collection)} callback needs to be invoked for any * partitions the consumer owns. However, * this callback must be executed on the application thread. To achieve this, the background thread enqueues a * {@link ConsumerRebalanceListenerCallbackNeededEvent} on its background event queue. That event queue is * periodically queried by the application thread to see if there's work to be done. When the application thread * sees {@link ConsumerRebalanceListenerCallbackNeededEvent}, it is processed, and then a * {@link ConsumerRebalanceListenerCallbackCompletedEvent} is then enqueued by the application thread on the * application event queue. Moments later, the background thread will see that event, process it, and continue * execution of the rebalancing logic. The rebalancing logic cannot complete until the * {@link ConsumerRebalanceListener} callback is performed. * * @param future Event that contains a {@link CompletableFuture}; it is on this future that the * application thread will wait for completion * @param timer Overall timer that bounds how long to wait for the event to complete * @param ignoreErrorEventException Predicate to ignore background errors. 
* Any exceptions found while processing background events that match the predicate won't be propagated. * @return {@code true} if the event completed within the timeout, {@code false} otherwise */ // Visible for testing <T> T processBackgroundEvents(Future<T> future, Timer timer, Predicate<Exception> ignoreErrorEventException) { do { boolean hadEvents = false; try { hadEvents = processBackgroundEvents(); } catch (Exception e) { if (!ignoreErrorEventException.test(e)) throw e; } try { if (future.isDone()) { // If the event is done (either successfully or otherwise), go ahead and attempt to return // without waiting. We use the ConsumerUtils.getResult() method here to handle the conversion // of the exception types. return ConsumerUtils.getResult(future); } else if (!hadEvents) { // If the above processing yielded no events, then let's sit tight for a bit to allow the // background thread to either finish the task, or populate the background event // queue with things to process in our next loop. Timer pollInterval = time.timer(100L); return ConsumerUtils.getResult(future, pollInterval); } } catch (TimeoutException swallow) { // Ignore this as we will retry the event until the timeout expires. 
} finally { timer.update(); } } while (timer.notExpired()); throw new TimeoutException("Operation timed out before completion"); } static ConsumerRebalanceListenerCallbackCompletedEvent invokeRebalanceCallbacks(ConsumerRebalanceListenerInvoker rebalanceListenerInvoker, ConsumerRebalanceListenerMethodName methodName, SortedSet<TopicPartition> partitions, CompletableFuture<Void> future) { Exception e; try { switch (methodName) { case ON_PARTITIONS_REVOKED: e = rebalanceListenerInvoker.invokePartitionsRevoked(partitions); break; case ON_PARTITIONS_ASSIGNED: e = rebalanceListenerInvoker.invokePartitionsAssigned(partitions); break; case ON_PARTITIONS_LOST: e = rebalanceListenerInvoker.invokePartitionsLost(partitions); break; default: throw new IllegalArgumentException("The method " + methodName.fullyQualifiedMethodName() + " to invoke was not expected"); } } catch (WakeupException | InterruptException ex) { e = ex; } final Optional<KafkaException> error; if (e != null) error = Optional.of(ConsumerUtils.maybeWrapAsKafkaException(e, "User rebalance callback throws an error")); else error = Optional.empty(); return new ConsumerRebalanceListenerCallbackCompletedEvent(methodName, future, error); } @Override public String clientId() { return clientId; } @Override public Metrics metricsRegistry() { return metrics; } @Override public KafkaConsumerMetrics kafkaConsumerMetrics() { return kafkaConsumerMetrics; } AsyncConsumerMetrics asyncConsumerMetrics() { return asyncConsumerMetrics; } // Visible for testing SubscriptionState subscriptions() { return subscriptions; } private long defaultApiTimeoutDeadlineMs() { return calculateDeadlineMs(time, defaultApiTimeoutMs); } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
"""
Implement the game board

This module defines the stone colors `empty`, `black` and `white`

"""

import numpy as np

# the colors: black/white are +1/-1 so a line of five equal stones can be
# detected by summing the line and checking ``abs(sum) == 5``
empty = 0
black = +1
white = -1


class InvalidMoveError(Exception):
    """Raised when a stone is placed on a taken field or out of turn."""
    pass


class Board(object):
    """
    Gomoku game board of the desired size (`height`, `width`).

    Can access and place stones as ``self[y,x]``, where ``y`` denotes the
    vertical and ``x`` the horizontal index. Check if attempted moves are
    valid.

    Coordinate system::

        -------------> x
        |
        |
        |
        |
        V y

    """
    def __init__(self, height, width):
        self.height = int(height)
        self.width = int(width)
        self.shape = (self.height, self.width)
        # 'int8' suffices: a field only ever holds -1, 0 or +1
        self.board = np.zeros(self.shape, dtype='int8')
        self.reset()

    def reset(self):
        """Clear the board and restart the game (white moves first)."""
        self.board[:] = empty
        self.moves_left = self.height * self.width
        self.in_turn = white
        if hasattr(self, 'lastmove'):
            del self.lastmove
        self.log = []

    def __getitem__(self, key):
        return self.board[key]

    def __setitem__(self, key, value):
        """
        Place the stone `value` at position `key`.

        A move is only legal if it places the color that is in turn onto an
        empty field; anything else raises :class:`InvalidMoveError`.
        """
        if value == self.in_turn and self[key] == empty:
            self.in_turn = - self.in_turn
            assert self.moves_left > 0
            self.moves_left -= 1
            self.board[key] = value
            self.lastmove = key
            self.log.append(key)
        else:  # invalid move; report the most specific reason
            if self[key] != empty:
                raise InvalidMoveError('Position %s is already taken' % ((key),))
            elif self.in_turn == black:
                raise InvalidMoveError('Black is in turn')
            elif self.in_turn == white:
                raise InvalidMoveError('White is in turn')
            else:
                raise RuntimeError('FATAL ERROR!')

    def full(self):
        "Return bool that indicates if the board has no empty fields left"
        # Fixed: the original returned the verbose ``if/else`` equivalent and
        # its docstring stated the opposite of the actual behavior.
        return self.moves_left == 0

    # Shared docstring for the ``get_*`` line extractors below. It is attached
    # to each function right after its definition; the original code assigned
    # a *local* ``__doc__`` inside each function, which has no effect on the
    # function's documentation.
    get_line_functions_docstring = """
    Return an array from the position passed via `x` and `y` of length
    `length` and a list of the coordinates of that line.

    .. note::

        The returned array is a fresh copy of the board values, NOT a view.

    :param y, x:

        The indices of the left hand position to start the line. The
        direction is defined by the function name.

    """

    def get_column(self, y, x, length=5):
        line = np.empty(length, dtype='int8')
        for i in range(length):
            line[i] = self[y + i, x]
        return line, [(y + i, x) for i in range(length)]
    get_column.__doc__ = get_line_functions_docstring

    def get_row(self, y, x, length=5):
        line = np.empty(length, dtype='int8')
        for i in range(length):
            line[i] = self[y, x + i]
        return line, [(y, x + i) for i in range(length)]
    get_row.__doc__ = get_line_functions_docstring

    def get_diagonal_upleft_to_lowright(self, y, x, length=5):
        line = np.empty(length, dtype='int8')
        for i in range(length):
            line[i] = self[y + i, x + i]
        return line, [(y + i, x + i) for i in range(length)]
    get_diagonal_upleft_to_lowright.__doc__ = get_line_functions_docstring

    def get_diagonal_lowleft_to_upright(self, y, x, length=5):
        line = np.empty(length, dtype='int8')
        # ``y - i`` would silently wrap around with numpy's negative
        # indexing, so reject starting points that leave the board upwards.
        if y < length - 1:
            raise IndexError
        for i in range(length):
            line[i] = self[y - i, x + i]
        return line, [(y - i, x + i) for i in range(length)]
    get_diagonal_lowleft_to_upright.__doc__ = get_line_functions_docstring

    def winner(self):
        """
        Return the winner and the positions of the five in a line or
        ``(None, [])``.

        .. note::

            If there are multiple lines of five, the first line that is
            found will be designated as winner. Therefore you should check
            for winner after EVERY move.

        """
        getters = (self.get_row, self.get_column,
                   self.get_diagonal_lowleft_to_upright,
                   self.get_diagonal_upleft_to_lowright)
        for i in range(self.height):
            for j in range(self.width):
                for getter_function in getters:
                    try:
                        line, positions = getter_function(i, j)
                    except IndexError:
                        # the line would leave the board from this start point
                        continue
                    # |sum| == 5 can only happen for five equal nonempty stones
                    if abs(line.sum()) == 5:
                        return line[0], positions
        return None, []
unknown
codeparrot/codeparrot-clean
"""Deprecated re-export shim for ArangoDB graph utilities.

``ArangoGraph`` and ``get_arangodb_client`` now live in
``langchain_community``; this module forwards attribute access there via a
dynamic importer that raises the appropriate deprecation warnings.
"""

from typing import TYPE_CHECKING, Any

from langchain_classic._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checkers; at runtime the lookup below
    # resolves these names lazily.
    from langchain_community.graphs import ArangoGraph
    from langchain_community.graphs.arangodb_graph import get_arangodb_client

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each re-exported name to the module it actually lives in now.
DEPRECATED_LOOKUP = {
    "ArangoGraph": "langchain_community.graphs",
    "get_arangodb_client": "langchain_community.graphs.arangodb_graph",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): called only when `name` is not
    found normally; delegates to the deprecation-aware importer.
    """
    return _import_attribute(name)


__all__ = [
    "ArangoGraph",
    "get_arangodb_client",
]
python
github
https://github.com/langchain-ai/langchain
libs/langchain/langchain_classic/graphs/arangodb_graph.py
############################################################################# ## ## Copyright (c) 2014 Riverbank Computing Limited <info@riverbankcomputing.com> ## ## This file is part of PyQt. ## ## This file may be used under the terms of the GNU General Public ## License versions 2.0 or 3.0 as published by the Free Software ## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3 ## included in the packaging of this file. Alternatively you may (at ## your option) use any later version of the GNU General Public ## License if such license has been publicly approved by Riverbank ## Computing Limited (or its successors, if any) and the KDE Free Qt ## Foundation. In addition, as a special exception, Riverbank gives you ## certain additional rights. These rights are described in the Riverbank ## GPL Exception version 1.1, which can be found in the file ## GPL_EXCEPTION.txt in this package. ## ## If you are unsure which license is appropriate for your use, please ## contact the sales department at sales@riverbankcomputing.com. ## ## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ## ############################################################################# from PyQt4.uic.exceptions import NoSuchWidgetError def invoke(driver): """ Invoke the given command line driver. Return the exit status to be passed back to the parent process. """ exit_status = 1 try: exit_status = driver.invoke() except IOError, e: driver.on_IOError(e) except SyntaxError, e: driver.on_SyntaxError(e) except NoSuchWidgetError, e: driver.on_NoSuchWidgetError(e) except Exception, e: driver.on_Exception(e) return exit_status
unknown
codeparrot/codeparrot-clean
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" viewBox="0 0 12 12" fill="none" {{ $attributes }}> <g clip-path="url(#clip0_14732_6079)"> <path d="M4.25 4.25012V1.25012H10.75V7.75012H7.75M7.75 4.25012H1.25V10.7501H7.75V4.25012Z" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round"/> </g> <defs> <clipPath id="clip0_14732_6079"> <rect width="12" height="12" /> </clipPath> </defs> </svg>
php
github
https://github.com/laravel/framework
src/Illuminate/Foundation/resources/exceptions/renderer/components/icons/copy.blade.php
#!/usr/bin/env python
"""Command-line wrapper around NCBI ELink via the local ``eutils`` client.

Reads input UIDs from a list/file/XML/JSON or from a previously saved
history, issues one ELink request per query key, and either prints the
result to stdout or (when a history was supplied) writes one output file
per query key into a ``downloads/`` directory.
"""
import argparse
import json
import logging
import os

import eutils


logging.basicConfig(level=logging.INFO)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='EFetch', epilog='')
    parser.add_argument('db', help='Database to use, sometimes "none" (e.g. *check)')
    parser.add_argument('dbfrom', help='Database containing input UIDs')
    parser.add_argument('cmd', choices=['neighbor', 'neighbor_score',
                                        'neighbor_history', 'acheck',
                                        'ncheck', 'lcheck', 'llinks',
                                        'llinkslib', 'prlinks'],
                        help='ELink command mode')
    parser.add_argument('--version', action='version', version=eutils.Client.getVersion(),
                        help='Version (reports Biopython version)')

    parser.add_argument('--user_email', help="User email")
    parser.add_argument('--admin_email', help="Admin email")

    # ID Sources
    parser.add_argument('--id_xml', help='list of ids in an xml file as returned by esearch or elink')
    parser.add_argument('--id_json', help='list of ids in a json file as returned by esearch or elink')
    parser.add_argument('--id_list', help='list of ids')
    parser.add_argument('--id', help='Comma separated individual IDs')
    parser.add_argument('--history_file', help='Fetch results from previous query')
    parser.add_argument('--history_xml', help='Fetch results from previous query')

    # Optional
    parser.add_argument('--linkname', help='Restrict results to a specific link source')
    parser.add_argument('--retmode', choices=['xml', 'json', 'uilist'], help='Output format')

    # TODO: dates, linkname, term, holding
    # neighbor or neighbor_history and dbfrom is pubmed
    # parser.add_argument('--datetype', help='Date type')
    # parser.add_argument('--reldate', help='In past N days')
    # parser.add_argument('--mindate', help='Minimum date')
    # parser.add_argument('--maxdate', help='maximum date')

    # Output
    args = parser.parse_args()

    c = eutils.Client(history_file=args.history_file,
                      user_email=args.user_email,
                      admin_email=args.admin_email)

    # Base request parameters shared by every ELink call below.
    payload = {
        'dbfrom': args.dbfrom,
        'cmd': args.cmd,
    }

    # DB can be 'none' in a few cases.
    if args.db != "none":
        payload['db'] = args.db

    if args.linkname is not None:
        payload['linkname'] = args.linkname

    results = []   # raw ELink responses, one per query key
    qkeys = []     # query key paired with each entry of ``results``
    if args.history_file is not None or args.history_xml is not None:
        payload['retmode'] = args.retmode
        if args.history_file is not None:
            input_histories = c.get_histories()
        else:
            input_histories = c.extract_histories_from_xml_file(args.history_xml)

        for hist in input_histories:
            qkeys += [hist['query_key']]
            # Bug fix: the original aliased ``tmp_payload = payload``, so each
            # history's keys were merged into the shared payload and leaked
            # into every subsequent iteration. Use a shallow copy so each
            # request is built independently from the base payload.
            tmp_payload = dict(payload)
            tmp_payload.update(hist)
            results += [c.link(**tmp_payload)]
    else:
        # There is no uilist retmode on the server side; fetch XML and
        # convert it to a UI list locally below.
        if args.retmode == "uilist":
            payload['retmode'] = 'xml'
        else:
            payload['retmode'] = args.retmode

        merged_ids = c.parse_ids(args.id_list, args.id, args.history_file,
                                 args.id_xml, args.id_json)
        payload['id'] = ','.join(merged_ids)
        qkeys += [1]
        results += [c.link(**payload)]

    # There could be multiple sets of results if a history was supplied
    if args.history_file is not None or args.history_xml is not None:
        # Multiple result sets can be returned
        # Create a directory for the output files
        current_directory = os.getcwd()
        final_directory = os.path.join(current_directory, r'downloads')
        if not os.path.exists(final_directory):
            os.makedirs(final_directory)

        logging.info("Writing files:")
        count = 0
        if args.retmode == 'uilist':
            # When rettype is uilist, convert to text format (which elink does not do)
            for result in results:
                qkey = qkeys[count]
                count += 1
                ids = c.xmlstring2UIlist(result)
                file_path = os.path.join('downloads', '%s-querykey%s.tabular' % (args.db, qkey))
                # Bug fix: log the file name that is actually written.
                logging.info('%s-querykey%s.tabular' % (args.db, qkey))
                with open(file_path, 'w') as handle:
                    for uid in ids:  # renamed from ``id`` (shadowed builtin)
                        handle.write(uid)
                        handle.write(os.linesep)
        elif args.retmode == 'json':
            for result in results:
                qkey = qkeys[count]
                count += 1
                file_path = os.path.join('downloads', '%s-querykey%s.json' % (args.db, qkey))
                # Bug fix: log the file name that is actually written
                # (was '%s-link%s.json' % (args.db, count)).
                logging.info('%s-querykey%s.json' % (args.db, qkey))
                with open(file_path, 'w') as handle:
                    json_data = c.jsonstring2jsondata(result)
                    handle.write(json.dumps(json_data, indent=4))
        else:
            for result in results:
                qkey = qkeys[count]
                count += 1
                file_path = os.path.join('downloads', '%s-querykey%s.xml' % (args.db, qkey))
                # Bug fix: log the file name that is actually written
                # (was '%s-link%s.xml' % (args.db, count)).
                logging.info('%s-querykey%s.xml' % (args.db, qkey))
                with open(file_path, 'w') as handle:
                    handle.write(result)
    else:
        # Single result set: print to stdout.
        # When rettype is uilist, convert to text format (which elink does not do)
        if args.retmode == 'uilist':
            ids = c.xmlstring2UIlist(results[0])
            for uid in ids:
                print(uid)
        elif args.retmode == 'json':
            json_data = c.jsonstring2jsondata(results[0])
            print(json.dumps(json_data, indent=4))
        else:
            print(results[0])
unknown
codeparrot/codeparrot-clean
__author__ = 'binary'
#coding=utf-8
# Note:
# All code below needs improvement!
#
# Python 2 module (uses ``print`` statements and ``raw_input``).
# Hides a message in a video+image pair: for every message character it
# records, in an image, the position of a matching byte inside the video's
# JPEG frames; extraction reverses the process.
import sys
import shutil
import UYRUtils
try:
    import stepic
except ImportError:
    print "Hide Module: Sorry module python-stepic not found"
try:
    from PIL import Image
except ImportError:
    print "Hide Module: Sorry module python-pil not found"
try:
    import cv2
except ImportError:
    print "Hide Module: Sorry cv2 module not found"
try:
    import cv2.cv as cv
except ImportError:
    print "Hide Module: Sorry module cv2.cv not found"

# Module-level accumulator shared between foundChar() calls; one entry per
# probe: (position, ",") on a hit, (-1, ",") on a miss.
positionList = []


###########################################################################################
# Function to search for char in frame
###########################################################################################
def foundChar(msgC, Fnum):
    """Search frame ``tmp/frame<Fnum>.jpg`` for a byte equal to ord(msgC).

    On a hit, append (Fnum + offset, ",") to ``positionList`` and return
    True; on a miss append (-1, ",") and return False.

    NOTE(review): the stored value mixes the frame number with the byte
    offset (``Fnum + k``) — presumably extractMSG() undoes this by
    subtracting the frame counter; confirm the two stay in sync.
    """
    k = 0
    imageFile = "tmp/frame%d.jpg" % Fnum
    fh = UYRUtils.hOpenImgBinary(imageFile)
    data = UYRUtils.readImgContent(fh)
    Num = ord(msgC)
    hexNum = hex(Num)[2:]  # unused; kept for reference
    for ch in data:
        # make a hex byte
        byt = "%02X" % ord(ch)  # unused; kept for reference
        if ord(ch) == Num:
            loc = k  # unused; kept for reference
            positionList.append((Fnum + k, ","))
            # should remove byte
            return True
        k = k+1
    positionList.append((-1, ","))  # if the frame doesn't contain the identical value, add -1 to skip it
    #print positionList
    return False


#######################################################################################################
# Message Hiding Function                                                                             #
######################################################################################################
def hideMessage(secretMessage, path2Video, path2Image, msgLength=0):
    """Hide ``secretMessage`` using the frames of ``path2Video``.

    The character positions are steganographically embedded (stepic) into a
    PNG copy of ``path2Image``; returns the PNG file name.
    Prompts interactively and may call sys.exit() if the video is too short.
    """
    # Read Video and check if video frame number is enough
    enoughSize = True
    while enoughSize:
        # extracting frames
        vidcap = cv2.VideoCapture(path2Video)
        #print ("success")
        frameNumbers = vidcap.get(cv.CV_CAP_PROP_FRAME_COUNT)
        #print ( frameNumbers )
        fps = vidcap.get(cv.CV_CAP_PROP_FPS)
        #print ( fps )
        videoLength = frameNumbers / fps  # seconds
        #print ( videoLength )
        count = 0
        if videoLength < (msgLength / (fps*2)):
            print ("Sorry!!! not enough space to embed the message, try another video (more time)")
            n = raw_input('press n to try another video or any other key to exit:')
            if n != "n":
                sys.exit()
        else:
            # extracting frames
            enoughSize = False
            success, image = vidcap.read()
            while success:
                # NOTE(review): reading again before imwrite skips the first
                # frame and may write a stale/empty image on the last
                # iteration — verify against extractMSG(), which does the
                # same and therefore stays consistent.
                success, image = vidcap.read()
                cv2.imwrite("tmp/frame%d.jpg" % count, image)  # save frame as JPEG file
                count += 1
    #split msg char, search and create positions for chars
    msgList = list(secretMessage)
    c = 0
    found = False
    #infinite loop here (never terminates if a character occurs in no frame)
    for msgChar in msgList:
        while not(found):
            found = foundChar(msgChar, c)
            c = (c+1) % count  # wrap around the extracted frames
        found = False
    # Write positions list to a text file
    tmpFile = open("tmp/tmpFile.txt", "w")
    tmpFile.write('\n'.join('%s %s' % x for x in positionList))
    tmpFile.close()
    # read from file and save in string
    dataEm = ""
    for line in open("tmp/tmpFile.txt"):
        words = line.strip().split(',')
        dataEm = dataEm + " " + words[0]
    dataEm = dataEm + "\n"
    pathList = path2Image.split("/")
    imgNameJPG = pathList[-1]
    imgName = imgNameJPG.split(".")
    #print "this is the data to be hidden: \n", dataEm
    carrier = Image.open(path2Image)
    stegoIM2 = stepic.encode(carrier, dataEm)
    #stegoIM2.save('./charmap.png', 'PNG')
    imgNamePNG = imgName[0]+".png"
    stegoIM2.save(imgNamePNG, 'PNG')
    #print "[+] {0:<50} [+]".format("./charmap.png is the character map (from fun)")
    return imgNamePNG


######################################################################################################
# Message Extracting Function
######################################################################################################
def extractMSG(path2Image, path2Video):
    """Recover the hidden message from the stego image + the same video.

    Decodes the position list from ``path2Image`` (stepic), re-extracts the
    video frames, then reads one byte per non-(-1) position; returns the
    reassembled message string.
    """
    # Extract from picture
    im = Image.open(path2Image)
    s = stepic.decode(im)
    tmpFile = open("tmp/PositionsList.txt", "w")
    tmpFile.write(s)
    tmpFile.close()
    #Read Video file frames
    absPath = path2Video
    vidcap = cv2.VideoCapture(absPath)
    count = 0
    success, image = vidcap.read()
    while success:
        success, image = vidcap.read()
        cv2.imwrite("tmp/frame%d.jpg" % count, image)  # save frame as JPEG file
        count += 1
    # extract message from frames
    fNum = 0
    secMSG = ""
    for line in open("tmp/PositionsList.txt"):
        word = int(line.strip())
        #print "The word is now: ", word
        if word != -1:
            # Undo foundChar()'s ``Fnum + k`` encoding to get the byte offset.
            cPos = word - fNum
            imgName = "tmp/frame%d.jpg" % fNum
            try:
                imageFile = imgName
                data = open(imageFile, "rb").read()
            except IOError:
                print "Image file %s not found" % imageFile
                raise SystemExit
            msgOrd = data[cPos]
            secMSG = secMSG + msgOrd
        # Advance the frame counter for hits AND misses: hiding probed one
        # frame per position-list entry.
        fNum = (fNum + 1) % count
    #shutil.rmtree('tmp')
    return secMSG
unknown
codeparrot/codeparrot-clean
- Path: /name/add - Path Regex: #PATH_REGEX# - Host: localhost - Host Regex: #HOST_REGEX# - Scheme: http|https - Method: PUT|POST - Class: Symfony\Bundle\FrameworkBundle\Tests\Console\Descriptor\RouteStub - Defaults: NONE - Requirements: NO CUSTOM - Options: - `compiler_class`: Symfony\Component\Routing\RouteCompiler - `opt1`: val1 - `opt2`: val2 - Condition: context.getMethod() in ['GET', 'HEAD', 'POST']
unknown
github
https://github.com/symfony/symfony
src/Symfony/Bundle/FrameworkBundle/Tests/Fixtures/Descriptor/route_2.md
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) %YAML 1.2 --- $id: http://devicetree.org/schemas/input/ti,nspire-keypad.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: TI-NSPIRE Keypad maintainers: - Andrew Davis <afd@ti.com> allOf: - $ref: input.yaml# - $ref: matrix-keymap.yaml# properties: compatible: enum: - ti,nspire-keypad reg: maxItems: 1 interrupts: maxItems: 1 clocks: maxItems: 1 scan-interval: $ref: /schemas/types.yaml#/definitions/uint32 description: How often to scan in us. Based on a APB speed of 33MHz, the maximum and minimum delay time is ~2000us and ~500us respectively row-delay: $ref: /schemas/types.yaml#/definitions/uint32 description: How long to wait between scanning each row in us. active-low: description: Specify that the keypad is active low. required: - compatible - reg - interrupts - clocks - scan-interval - row-delay - linux,keymap unevaluatedProperties: false examples: - | #include <dt-bindings/input/input.h> keypad@900e0000 { compatible = "ti,nspire-keypad"; reg = <0x900e0000 0x1000>; interrupts = <16>; clocks = <&apb_pclk>; scan-interval = <1000>; row-delay = <200>; linux,keymap = < MATRIX_KEY(0, 0, KEY_ENTER) MATRIX_KEY(0, 1, KEY_ENTER) MATRIX_KEY(0, 4, KEY_SPACE) MATRIX_KEY(0, 5, KEY_Z) MATRIX_KEY(0, 6, KEY_Y) MATRIX_KEY(0, 7, KEY_0) >; };
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/input/ti,nspire-keypad.yaml
import os
import sys
import csv
import json
import pprint
from collections import OrderedDict

# Write to MongoDB
# atomizer = MongoAtomizer('knoedler')
# atomizer.make_mongo_writer()
# atomizer.atomize('data/input.csv')


class MongoAtomizer:
    """Read rows from a CSV file and emit one structured document per row.

    By default documents are written to a MongoDB collection; ``atomize``
    also accepts any ``writer(doc, doc_id)`` callable, which makes the
    CSV-to-document step usable (and testable) without a database.
    """

    def __init__(self, collection, database='dev_database',
                 address='mongodb://localhost:27017/'):
        self.collection = collection
        self.database = database
        self.address = address
        self.writer = self.make_mongo_writer()

    def make_mongo_writer(self):
        """Return a callable that inserts a document into the configured
        MongoDB collection, skipping documents whose ``_id`` already exists.
        """
        # Imported lazily so the module loads (and atomize() with a custom
        # writer works) even when pymongo is not installed.
        from pymongo import MongoClient
        client = MongoClient(self.address)
        db = client[self.database]
        db_collection = db[self.collection]

        def mongo_writer(doc, doc_id):
            # Does not insert duplicate doc IDs
            if not db_collection.find_one({'_id': doc_id}):
                doc['_id'] = doc_id
                db_collection.insert_one(doc)
        return mongo_writer

    def atomize(self, input_path, writer=None, id_field='star_record_no'):
        """Convert each CSV row into a document and hand it to *writer*.

        Args:
            input_path: path to a UTF-8 CSV file with a header row.
            writer: optional ``writer(doc, doc_id)`` callable; defaults to
                the MongoDB writer built in ``__init__``.
            id_field: normalized (lower-case, underscored) column name whose
                value is used as the document id. Previously hard-coded to
                ``'star_record_no'``; kept as the default for compatibility.

        Raises:
            FileNotFoundError: if *input_path* does not exist.
        """
        if not writer:
            writer = self.writer
        if not os.path.exists(input_path):
            raise FileNotFoundError(input_path)
        with open(input_path, encoding='utf-8') as csvfile:
            csvreader = csv.DictReader(csvfile)

            # Generate standardized keys for each csv field, e.g.
            # "Star Record No" -> "star_record_no".
            field_keys = OrderedDict()
            for field in csvreader.fieldnames:
                field_keys[field] = field.lower().replace(' ', '_')

            # Turn csv rows into documents; raw cells go under 'source',
            # the other sections start empty and are filled in later stages.
            pp = pprint.PrettyPrinter(indent=4)
            for row in csvreader:
                doc = OrderedDict()
                doc['flags'] = {}
                doc['log'] = []
                doc['derived'] = OrderedDict()
                doc['source'] = OrderedDict()
                for field, key in field_keys.items():
                    doc['source'][key] = row[field]
                writer(doc, doc['source'][id_field])
                pp.pprint(doc)
unknown
codeparrot/codeparrot-clean
############################################################################# ## ## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies). ## Contact: http://www.qt-project.org/legal ## ## This file is part of Qt Creator. ## ## Commercial License Usage ## Licensees holding valid commercial Qt licenses may use this file in ## accordance with the commercial license agreement provided with the ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and Digia. For licensing terms and ## conditions see http://www.qt.io/licensing. For further information ## use the contact form at http://www.qt.io/contact-us. ## ## GNU Lesser General Public License Usage ## Alternatively, this file may be used under the terms of the GNU Lesser ## General Public License version 2.1 or version 3 as published by the Free ## Software Foundation and appearing in the file LICENSE.LGPLv21 and ## LICENSE.LGPLv3 included in the packaging of this file. Please review the ## following information to ensure the GNU Lesser General Public License ## requirements will be met: https://www.gnu.org/licenses/lgpl.html and # http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ## ## In addition, as a special exception, Digia gives you certain additional ## rights. These rights are described in the Digia Qt LGPL Exception ## version 1.1, included in the file LGPL_EXCEPTION.txt in this package. 
## ############################################################################# source("../../shared/qtcreator.py") def ensureSaveBeforeBuildChecked(shouldBeChecked): invokeMenuItem("Tools", "Options...") waitForObjectItem(":Options_QListView", "Build & Run") clickItem(":Options_QListView", "Build & Run", 14, 15, 0, Qt.LeftButton) clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "General") if test.compare(waitForObject(":Build and Run.Save all files before build_QCheckBox").checked, shouldBeChecked, "'Save all files before build' should be %s" % str(shouldBeChecked)): clickButton(waitForObject(":Options.Cancel_QPushButton")) else: ensureChecked(":Build and Run.Save all files before build_QCheckBox", shouldBeChecked) clickButton(waitForObject(":Options.OK_QPushButton")) def main(): startApplication("qtcreator" + SettingsPath) if not startedWithoutPluginError(): return ensureSaveBeforeBuildChecked(False) # create qt quick application createNewQtQuickApplication(tempDir(), "SampleApp", qtQuickVersion="2.2") for expectDialog in [True, False]: files = ["SampleApp.SampleApp\\.pro", "SampleApp.deployment.deployment\\.pri", "SampleApp.Sources.main\\.cpp", "SampleApp.Resources.qml\.qrc./.main\\.qml"] for i, file in enumerate(files): if not openDocument(file): test.fatal("Could not open file '%s'" % simpleFileName(file)) continue test.log("Changing file '%s'" % simpleFileName(file)) typeLines(getEditorForFileSuffix(file, True), "") # try to compile clickButton(waitForObject(":*Qt Creator.Build Project_Core::Internal::FancyToolButton")) try: ensureChecked(":Save Changes.Always save files before build_QCheckBox", i == len(files) - 1, 5000) # At the last iteration, check the box clickButton(waitForObject(":Save Changes.Save All_QPushButton")) test.verify(expectDialog, "The 'Save Changes' dialog was shown.") except: test.verify(not expectDialog, "The 'Save Changes' dialog was not shown.") waitForCompile() ensureSaveBeforeBuildChecked(True) invokeMenuItem("File", "Exit")
unknown
codeparrot/codeparrot-clean
import { test } from '../../test';

// Runtime test config: when the harness invokes `test`, clear the
// component's `flag` property.
export default test({
	test: function ({ component }) {
		component.flag = false;
	}
});
javascript
github
https://github.com/sveltejs/svelte
packages/svelte/tests/runtime-legacy/samples/await-in-dynamic-component/_config.js
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/**
 * Utility methods for strategies which use reflection to access methods without requiring them at compile-time.
 */
class ReflectiveStrategy {

    /**
     * Invoke {@code method} on {@code obj}, unwrapping reflective wrapper exceptions.
     * A {@link RuntimeException} thrown by the target propagates as-is; any other
     * cause is wrapped in a {@link RuntimeException}; {@link IllegalAccessException}
     * becomes {@link UnsupportedOperationException}.
     */
    static Object invoke(Method method, Object obj, Object... args) {
        // Delegate to invokeChecked: with RuntimeException as the "checked"
        // type, its unwrapping rules are exactly the ones invoke needs.
        // (Previously this logic was duplicated verbatim here.)
        return invokeChecked(method, RuntimeException.class, obj, args);
    }

    /**
     * Same as {@link #invoke(Method, Object, Object...)}, but re-throws a cause of
     * type {@code ex} as that checked exception instead of wrapping it.
     *
     * @param ex the checked exception type the target method may throw
     */
    static <T extends Exception> Object invokeChecked(Method method, Class<T> ex, Object obj, Object... args) throws T {
        try {
            return method.invoke(obj, args);
        } catch (IllegalAccessException e) {
            throw new UnsupportedOperationException(e);
        } catch (InvocationTargetException e) {
            Throwable cause = e.getCause();
            if (ex.isInstance(cause)) {
                throw ex.cast(cause);
            } else if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            } else {
                throw new RuntimeException(cause);
            }
        }
    }

    /**
     * Interface to allow mocking out classloading infrastructure. This is used to test reflective
     * operations.
     */
    interface Loader {
        Class<?> loadClass(String className) throws ClassNotFoundException;

        static Loader forName() {
            return className -> Class.forName(className, true, Loader.class.getClassLoader());
        }
    }
}
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/internals/ReflectiveStrategy.java
# -*- coding: utf-8 -*- ########################################################################## # # # Eddy: a graphical editor for the specification of Graphol ontologies # # Copyright (C) 2015 Daniele Pantaleone <danielepantaleone@me.com> # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # # ##################### ##################### # # # # Graphol is developed by members of the DASI-lab group of the # # Dipartimento di Ingegneria Informatica, Automatica e Gestionale # # A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it # # # # - Domenico Lembo <lembo@dis.uniroma1.it> # # - Valerio Santarelli <santarelli@dis.uniroma1.it> # # - Domenico Fabio Savo <savo@dis.uniroma1.it> # # - Daniele Pantaleone <pantaleone@dis.uniroma1.it> # # - Marco Console <console@dis.uniroma1.it> # # # ##########################################################################
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

store_version = 1  # Needed for dynamic plugin loading

__license__ = 'GPL 3'
__copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>'
__docformat__ = 'restructuredtext en'

import random
import re
import urllib2
from contextlib import closing

from lxml import etree

from PyQt4.Qt import QUrl

from calibre import browser, url_slash_cleaner, prints
from calibre.ebooks.chardet import xml_to_unicode
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog


class LitResStore(BasicStoreConfig, StorePlugin):
    """calibre store plugin for the Russian ebook shop litres.ru."""

    shop_url = u'http://www.litres.ru'
    # Example detail URL: http://robot.litres.ru/pages/biblio_book/?art=174405

    def open(self, parent=None, detail_item=None, external=False):
        # Open either the shop front page or a specific book's detail page,
        # in an external browser or the embedded store dialog.
        aff_id = u'?' + _get_affiliate_id()
        url = self.shop_url + aff_id
        detail_url = None
        if detail_item:
            # http://www.litres.ru/pages/biblio_book/?art=157074
            detail_url = self.shop_url + u'/pages/biblio_book/' + aff_id +\
                u'&art=' + urllib2.quote(detail_item)

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
        else:
            d = WebStoreDialog(self.gui, url, parent, detail_url)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        """Generator yielding up to *max_results* SearchResult objects for
        *query*, parsed from the litres.ru robot XML catalogue."""
        search_url = u'http://robot.litres.ru/pages/catalit_browser/?checkpoint=2000-01-02&'\
            'search=%s&limit=0,%s'
        search_url = search_url % (urllib2.quote(query), max_results)

        counter = max_results
        br = browser()
        br.addheaders.append(['Accept-Encoding', 'gzip'])
        with closing(br.open(search_url, timeout=timeout)) as r:
            # Server replies gzip-compressed; decompress in place.
            ungzipResponse(r, br)
            raw = xml_to_unicode(r.read(), strip_encoding_pats=True, assume_utf8=True)[0]
            parser = etree.XMLParser(recover=True, no_network=True)
            doc = etree.fromstring(raw, parser=parser)
            for data in doc.xpath('//*[local-name() = "fb2-book"]'):
                if counter <= 0:
                    break
                counter -= 1
                try:
                    sRes = self.create_search_result(data)
                except Exception as e:
                    # A malformed record should not abort the whole search.
                    prints('ERROR: cannot parse search result #%s: %s' % (max_results - counter + 1, e))
                    continue
                yield sRes

    def get_details(self, search_result, timeout=60):
        # All details are already filled in by create_search_result().
        pass

    def create_search_result(self, data):
        """Build a SearchResult from one <fb2-book> XML element."""
        xp_template = 'normalize-space(@{0})'
        sRes = SearchResult()
        sRes.drm = SearchResult.DRM_UNLOCKED
        sRes.detail_item = data.xpath(xp_template.format('hub_id'))
        sRes.title = data.xpath('string(.//title-info/book-title/text()|.//publish-info/book-name/text())')
        # aut = concat('.//title-info/author/first-name', ' ')
        authors = data.xpath('.//title-info/author/first-name/text()|'\
                             './/title-info/author/middle-name/text()|'\
                             './/title-info/author/last-name/text()')
        sRes.author = u' '.join(map(unicode, authors))
        sRes.price = data.xpath(xp_template.format('price'))
        # cover vs cover_preview
        sRes.cover_url = data.xpath(xp_template.format('cover_preview'))
        sRes.price = format_price_in_RUR(sRes.price)

        types = data.xpath('//fb2-book//files/file/@type')
        fmt_set = _parse_ebook_formats(' '.join(types))
        sRes.formats = ', '.join(fmt_set)
        return sRes


def format_price_in_RUR(price):
    '''
    Try to format price according ru locale: '12 212,34 руб.'

    @param price: price in format like 25.99
    @return: formatted price if possible otherwise original value
    @rtype: unicode
    '''
    if price and re.match("^\d*?\.\d*?$", price):
        try:
            # Format with thousands separators, then swap to Russian
            # conventions: space as group separator, comma as decimal mark.
            price = u'{:,.2F} руб.'.format(float(price))
            price = price.replace(',', ' ').replace('.', ',', 1)
        except:
            pass
    return price


def ungzipResponse(r, b):
    # Decompress a gzip-encoded mechanize response in place and push it
    # back into the browser object.
    headers = r.info()
    if headers['Content-Encoding'] == 'gzip':
        import gzip
        gz = gzip.GzipFile(fileobj=r, mode='rb')
        data = gz.read()
        gz.close()
        # headers["Content-type"] = "text/html; charset=utf-8"
        r.set_data(data)
        b.set_response(r)


def _get_affiliate_id():
    aff_id = u'3623565'
    # Use Kovid's affiliate id 30% of the time.
    if random.randint(1, 10) in (1, 2, 3):
        aff_id = u'4084465'
    return u'lfrom=' + aff_id


def _parse_ebook_formats(formatsStr):
    '''
    Creates a set with displayable names of the formats

    :param formatsStr: string with comma separated book formats as it provided by ozon.ru
    :return: a set with displayable book formats
    '''
    formatsUnstruct = formatsStr.lower()
    formats = set()
    if 'fb2' in formatsUnstruct:
        formats.add('FB2')
    if 'html' in formatsUnstruct:
        formats.add('HTML')
    if 'txt' in formatsUnstruct:
        formats.add('TXT')
    if 'rtf' in formatsUnstruct:
        formats.add('RTF')
    if 'pdf' in formatsUnstruct:
        formats.add('PDF')
    if 'prc' in formatsUnstruct:
        formats.add('PRC')
    # NOTE(review): 'lit' adds 'PRC', not 'LIT' — looks like a copy/paste
    # slip from the branch above; confirm intended behaviour before changing.
    if 'lit' in formatsUnstruct:
        formats.add('PRC')
    if 'epub' in formatsUnstruct:
        formats.add('ePub')
    if 'rb' in formatsUnstruct:
        formats.add('RB')
    if 'isilo3' in formatsUnstruct:
        formats.add('ISILO3')
    if 'lrf' in formatsUnstruct:
        formats.add('LRF')
    if 'jar' in formatsUnstruct:
        formats.add('JAR')
    return formats
unknown
codeparrot/codeparrot-clean
package daemon

import (
	"context"
	"errors"
	"fmt"
	"maps"
	"runtime"
	"time"

	containertypes "github.com/moby/moby/api/types/container"
	networktypes "github.com/moby/moby/api/types/network"
	"github.com/moby/moby/api/types/storage"
	"github.com/moby/moby/v2/daemon/config"
	"github.com/moby/moby/v2/daemon/container"
	"github.com/moby/moby/v2/daemon/server/backend"
	"github.com/moby/moby/v2/errdefs"
)

// ContainerInspect returns low-level information about a
// container. Returns an error if the container cannot be found, or if
// there is an error getting the data.
func (daemon *Daemon) ContainerInspect(ctx context.Context, name string, options backend.ContainerInspectOptions) (_ *containertypes.InspectResponse, desiredMACAddress networktypes.HardwareAddr, _ error) {
	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return nil, nil, err
	}

	ctr.Lock()

	base, desiredMACAddress, err := daemon.getInspectData(&daemon.config().Config, ctr)
	if err != nil {
		ctr.Unlock()
		return nil, nil, err
	}

	// TODO(thaJeztah): do we need a deep copy here? Otherwise we could use maps.Clone (see https://github.com/moby/moby/commit/7917a36cc787ada58987320e67cc6d96858f3b55)
	ports := make(networktypes.PortMap, len(ctr.NetworkSettings.Ports))
	maps.Copy(ports, ctr.NetworkSettings.Ports)

	apiNetworks := make(map[string]*networktypes.EndpointSettings)
	for nwName, epConf := range ctr.NetworkSettings.Networks {
		if epConf.EndpointSettings != nil {
			// We must make a copy of this pointer object otherwise it can race with other operations
			apiNetworks[nwName] = epConf.EndpointSettings.Copy()
		}
	}
	networkSettings := &containertypes.NetworkSettings{
		SandboxID:  ctr.NetworkSettings.SandboxID,
		SandboxKey: ctr.NetworkSettings.SandboxKey,
		Ports:      ports,
		Networks:   apiNetworks,
	}

	mountPoints := ctr.GetMountPoints()

	// Don’t hold container lock for size calculation (see https://github.com/moby/moby/issues/31158)
	ctr.Unlock()

	if options.Size {
		sizeRw, sizeRootFs, err := daemon.imageService.GetContainerLayerSize(ctx, base.ID)
		if err != nil {
			return nil, nil, err
		}
		base.SizeRw = &sizeRw
		base.SizeRootFs = &sizeRootFs
	}

	imageManifest := ctr.ImageManifest
	if imageManifest != nil && imageManifest.Platform == nil {
		// Copy the image manifest to avoid mutating the original
		c := *imageManifest
		imageManifest = &c
		imageManifest.Platform = &ctr.ImagePlatform
	}

	base.Mounts = mountPoints
	base.NetworkSettings = networkSettings
	base.ImageManifestDescriptor = imageManifest
	return base, desiredMACAddress, nil
}

// getInspectData builds the inspect response from the container's state.
// The caller must hold ctr's lock.
func (daemon *Daemon) getInspectData(daemonCfg *config.Config, ctr *container.Container) (_ *containertypes.InspectResponse, desiredMACAddress networktypes.HardwareAddr, _ error) {
	// make a copy to play with
	hostConfig := *ctr.HostConfig

	// Add information for legacy links
	children := daemon.linkIndex.children(ctr)
	hostConfig.Links = nil // do not expose the internal structure
	for linkAlias, child := range children {
		hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
	}

	// We merge the Ulimits from hostConfig with daemon default
	daemon.mergeUlimits(&hostConfig, daemonCfg)

	// Migrate the container's default network's MacAddress to the top-level
	// Config.MacAddress field for older API versions (< 1.44). We set it here
	// unconditionally, to keep backward compatibility with clients that use
	// unversioned API endpoints.
	var macAddress networktypes.HardwareAddr
	if ctr.Config != nil {
		if nwm := hostConfig.NetworkMode; nwm.IsBridge() || nwm.IsUserDefined() {
			if epConf, ok := ctr.NetworkSettings.Networks[nwm.NetworkName()]; ok {
				macAddress = epConf.DesiredMacAddress
			}
		}
	}

	var containerHealth *containertypes.Health
	if ctr.State.Health != nil {
		containerHealth = &containertypes.Health{
			Status:        ctr.State.Health.Status(),
			FailingStreak: ctr.State.Health.Health.FailingStreak,
			// Copy the log slice so the response does not alias mutable state.
			Log: append([]*containertypes.HealthcheckResult{}, ctr.State.Health.Health.Log...),
		}
	}

	inspectResponse := &containertypes.InspectResponse{
		ID:      ctr.ID,
		Created: ctr.Created.Format(time.RFC3339Nano),
		Path:    ctr.Path,
		Args:    ctr.Args,
		State: &containertypes.State{
			Status:     ctr.State.State(),
			Running:    ctr.State.Running,
			Paused:     ctr.State.Paused,
			Restarting: ctr.State.Restarting,
			OOMKilled:  ctr.State.OOMKilled,
			Dead:       ctr.State.Dead,
			Pid:        ctr.State.Pid,
			ExitCode:   ctr.State.ExitCode,
			Error:      ctr.State.ErrorMsg,
			StartedAt:  ctr.State.StartedAt.Format(time.RFC3339Nano),
			FinishedAt: ctr.State.FinishedAt.Format(time.RFC3339Nano),
			Health:     containerHealth,
		},
		Image:        ctr.ImageID.String(),
		LogPath:      ctr.LogPath,
		Name:         ctr.Name,
		RestartCount: ctr.RestartCount,
		Driver:       ctr.Driver,
		Platform:     ctr.ImagePlatform.OS,
		MountLabel:   ctr.MountLabel,
		ProcessLabel: ctr.ProcessLabel,
		ExecIDs:      ctr.GetExecIDs(),
		HostConfig:   &hostConfig,
		Config:       ctr.Config,
	}

	// Now set any platform-specific fields
	inspectResponse = setPlatformSpecificContainerFields(ctr, inspectResponse)

	if daemon.UsesSnapshotter() {
		inspectResponse.Storage = &storage.Storage{
			RootFS: &storage.RootFSStorage{
				Snapshot: &storage.RootFSStorageSnapshot{
					Name: ctr.Driver,
				},
			},
		}

		// Additional information only applies to graphDrivers, so we're done.
		return inspectResponse, macAddress, nil
	}

	inspectResponse.GraphDriver = &storage.DriverData{
		Name: ctr.Driver,
	}

	if ctr.RWLayer == nil {
		if ctr.State.Dead {
			return inspectResponse, macAddress, nil
		}
		return nil, nil, errdefs.System(errors.New("RWLayer of container " + ctr.ID + " is unexpectedly nil"))
	}

	graphDriverData, err := ctr.RWLayer.Metadata()
	if err != nil {
		if ctr.State.Dead {
			// container is marked as Dead, and its graphDriver metadata may
			// have been removed; we can ignore errors.
			return inspectResponse, macAddress, nil
		}
		return nil, nil, errdefs.System(err)
	}

	inspectResponse.GraphDriver.Data = graphDriverData
	return inspectResponse, macAddress, nil
}

// ContainerExecInspect returns low-level information about the exec
// command. An error is returned if the exec cannot be found.
func (daemon *Daemon) ContainerExecInspect(id string) (*containertypes.ExecInspectResponse, error) {
	e := daemon.execCommands.Get(id)
	if e == nil {
		return nil, errExecNotFound(id)
	}

	// The exec's container must still exist.
	if ctr := daemon.containers.Get(e.Container.ID); ctr == nil {
		return nil, errExecNotFound(id)
	}

	e.Lock()
	defer e.Unlock()
	var pid int
	if e.Process != nil {
		pid = int(e.Process.Pid())
	}

	var privileged *bool
	if runtime.GOOS != "windows" || e.Privileged {
		// Privileged is not used on Windows, so should always be false
		// (and omitted in the response), but set it if it happened to
		// be true. On non-Windows, we always set it, and the field should
		// not be omitted.
		privileged = &e.Privileged
	}

	return &containertypes.ExecInspectResponse{
		ID:       e.ID,
		Running:  e.Running,
		ExitCode: e.ExitCode,
		ProcessConfig: &containertypes.ExecProcessConfig{
			Tty:        e.Tty,
			Entrypoint: e.Entrypoint,
			Arguments:  e.Args,
			Privileged: privileged, // Privileged is not used on Windows
			User:       e.User,     // User is not used on Windows
		},
		OpenStdin:   e.OpenStdin,
		OpenStdout:  e.OpenStdout,
		OpenStderr:  e.OpenStderr,
		CanRemove:   e.CanRemove,
		ContainerID: e.Container.ID,
		DetachKeys:  e.DetachKeys,
		Pid:         pid,
	}, nil
}
go
github
https://github.com/moby/moby
daemon/inspect.go
services: next-app: container_name: next-app build: context: ./next-app dockerfile: prod-without-multistage.Dockerfile args: ENV_VARIABLE: ${ENV_VARIABLE} NEXT_PUBLIC_ENV_VARIABLE: ${NEXT_PUBLIC_ENV_VARIABLE} restart: always ports: - 3000:3000 networks: - my_network # Add more containers below (nginx, postgres, etc.) # Define a network, which allows containers to communicate # with each other, by using their container name as a hostname networks: my_network: external: true
unknown
github
https://github.com/vercel/next.js
examples/with-docker-compose/compose.prod-without-multistage.yaml
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

import pyauto_functional
import pyauto


class ChromeosSecurity(pyauto.PyUITest):
  """Security tests for chrome on ChromeOS.

  Requires ChromeOS to be logged in.
  """

  def setUp(self):
    # Load the private baseline describing the expected bundled/component
    # extensions and their permissions before each test.
    pyauto.PyUITest.setUp(self)
    baseline_file = os.path.abspath(os.path.join(
        pyauto.PyUITest.DataDir(), 'pyauto_private', 'chromeos', 'security',
        'extension_permission_baseline.txt'))
    self.assertTrue(os.path.exists(baseline_file),
                    msg='Baseline info file does not exist.')
    baseline_info = self.EvalDataFrom(baseline_file)
    self._bundled_crx_directory = baseline_info['BUNDLED_CRX_DIRECTORY']
    self._bundled_crx_baseline = baseline_info['BUNDLED_CRX_BASELINE']
    self._component_extension_baseline = (
        baseline_info['COMPONENT_EXTENSION_BASELINE'])
    # Official builds ship extra component extensions.
    if self.GetBrowserInfo()['properties']['is_official']:
      self._component_extension_baseline.extend(
          baseline_info['OFFICIAL_COMPONENT_EXTENSIONS'])

  def ExtraChromeFlags(self):
    """Override default list of extra flags typically used with automation.

    See the default flags used with automation in pyauto.py.
    Chrome flags for this test should be as close to reality as possible.
    """
    return [
      '--homepage=about:blank',
    ]

  def testCannotViewLocalFiles(self):
    """Verify that local files cannot be accessed from the browser."""
    urls_and_titles = {
       'file:///': 'Index of /',
       'file:///etc/': 'Index of /etc/',
       self.GetFileURLForDataPath('title2.html'): 'Title Of Awesomeness',
    }
    for url, title in urls_and_titles.iteritems():
      self.NavigateToURL(url)
      # If the known title appears, the local file was readable.
      self.assertNotEqual(title, self.GetActiveTabTitle(),
                          msg='Could access local file %s.' % url)

  def _AssertPermissionSetIsExpected(self, expected_set, actual_set,
                                     perm_type, full_expected_info,
                                     full_actual_info):
    """Asserts that the set of permissions for an extension is expected.

    Args:
      expected_set: A set of permissions that are expected to be present.
      actual_set: A set of permissions that are actually present.
      perm_type: A string describing the type of permission involved.
      full_expected_info: A dictionary fully describing the expected
                          information associated with the given extension.
      full_actual_info: A dictionary fully describing the actual information
                        associated with the given extension.
    """
    def _GetSetDifferenceMessage(expected_set, actual_set):
      # Describe both missing and unexpected permissions.
      strings = []
      for missing_item in expected_set.difference(actual_set):
        strings.append('Missing item: "%s"' % missing_item)
      for extra_item in actual_set.difference(expected_set):
        strings.append('Unexpected (extra) item: "%s"' % extra_item)
      return '\n'.join(strings)

    self.assertEqual(
        expected_set, actual_set,
        msg=('%s do not match for "%s".\n'
             '%s\n'
             'Expected extension info:\n%s'
             '\nActual extension info:\n%s' %
             (perm_type, full_expected_info['name'],
              _GetSetDifferenceMessage(expected_set, actual_set),
              self.pformat(full_expected_info),
              self.pformat(full_actual_info))))

  def _AssertExtensionNamesAreExpected(self, expected_set, actual_set,
                                       ext_type, full_expected_info,
                                       full_actual_info):
    """Asserts that a set of extensions is expected.

    Args:
      expected_set: A set of extension names that are expected to be present.
      actual_set: A set of extension names that are actually present.
      ext_type: A string describing the type of extensions involved.
      full_expected_info: A list of dictionaries describing the expected
                          information for all extensions.
      full_actual_info: A list of dictionaries describing the actual
                        information for all extensions.
    """
    # Skip the Web Store and Bookmark Manager; they are integral to Chrome and
    # are redundant to check on ChromeOS.  This can reduce the number of times
    # we have to update the baseline for this test.
    for extension_name in ['Chrome Web Store', 'Bookmark Manager']:
      actual_set.discard(extension_name)

    def _GetSetDifferenceMessage(expected_set, actual_set):
      # Include the full info dict for each mismatched extension to make
      # baseline updates easier.
      strings = []
      for missing_item in expected_set.difference(actual_set):
        strings.append('Missing item: "%s"' % missing_item)
        located_ext_info = [info for info in full_expected_info
                            if info['name'] == missing_item][0]
        strings.append(self.pformat(located_ext_info))
      for extra_item in actual_set.difference(expected_set):
        strings.append('Unexpected (extra) item: "%s"' % extra_item)
        located_ext_info = [info for info in full_actual_info
                            if info['name'] == extra_item][0]
        strings.append(self.pformat(located_ext_info))
      return '\n'.join(strings)

    self.assertEqual(
        expected_set, actual_set,
        msg='%s names do not match the baseline.\n'
            '%s\n' % (ext_type,
                      _GetSetDifferenceMessage(expected_set, actual_set)))

  def _VerifyExtensionPermissions(self, baseline):
    """Ensures extension permissions in the baseline match actual info.

    This function will fail the current test if either (1) an extension named
    in the baseline is not currently installed in Chrome; or (2) the api
    permissions or effective host permissions of an extension in the baseline
    do not match the actual permissions associated with the extension in
    Chrome.

    Args:
      baseline: A dictionary of expected extension information, containing
                extension names and api/effective host permission info.
    """
    full_ext_actual_info = self.GetExtensionsInfo()
    for ext_expected_info in baseline:
      located_ext_info = [info for info in full_ext_actual_info if
                          info['name'] == ext_expected_info['name']]
      self.assertTrue(
          located_ext_info,
          msg=('Cannot locate extension info for "%s".\n'
               'Expected extension info:\n%s' %
               (ext_expected_info['name'], self.pformat(ext_expected_info))))
      ext_actual_info = located_ext_info[0]
      self._AssertPermissionSetIsExpected(
          set(ext_expected_info['effective_host_permissions']),
          set(ext_actual_info['effective_host_permissions']),
          'Effective host permissions', ext_expected_info, ext_actual_info)
      self._AssertPermissionSetIsExpected(
          set(ext_expected_info['api_permissions']),
          set(ext_actual_info['api_permissions']),
          'API permissions', ext_expected_info, ext_actual_info)

  def testComponentExtensionPermissions(self):
    """Ensures component extension permissions are as expected."""
    expected_names = [ext['name'] for ext in
                      self._component_extension_baseline]
    ext_actual_info = self.GetExtensionsInfo()
    actual_names = [ext['name'] for ext in ext_actual_info
                    if ext['is_component']]
    self._AssertExtensionNamesAreExpected(
        set(expected_names), set(actual_names), 'Component extension',
        self._component_extension_baseline, ext_actual_info)
    self._VerifyExtensionPermissions(self._component_extension_baseline)

  def testBundledCrxPermissions(self):
    """Ensures bundled CRX permissions are as expected."""
    # Verify that each bundled CRX on the device is expected, then install it.
    for file_name in os.listdir(self._bundled_crx_directory):
      if file_name.endswith('.crx'):
        self.assertTrue(
            file_name in [x['crx_file'] for x in self._bundled_crx_baseline],
            msg='Unexpected CRX file: ' + file_name)
        crx_file = os.path.join(self._bundled_crx_directory, file_name)
        self.InstallExtension(crx_file)
    # Verify that the permissions information in the baseline matches the
    # permissions associated with the installed bundled CRX extensions.
    self._VerifyExtensionPermissions(self._bundled_crx_baseline)

  def testNoUnexpectedExtensions(self):
    """Ensures there are no unexpected bundled or component extensions."""
    # Install all bundled extensions on the device.
    for file_name in os.listdir(self._bundled_crx_directory):
      if file_name.endswith('.crx'):
        crx_file = os.path.join(self._bundled_crx_directory, file_name)
        self.InstallExtension(crx_file)
    # Ensure that the set of installed extension names precisely matches the
    # baseline.
    expected_names = [ext['name'] for ext in
                      self._component_extension_baseline]
    expected_names.extend([ext['name'] for ext in self._bundled_crx_baseline])
    ext_actual_info = self.GetExtensionsInfo()
    installed_names = [ext['name'] for ext in ext_actual_info]
    self._AssertExtensionNamesAreExpected(
        set(expected_names), set(installed_names), 'Installed extension',
        self._component_extension_baseline + self._bundled_crx_baseline,
        ext_actual_info)


if __name__ == '__main__':
  pyauto_functional.Main()
unknown
codeparrot/codeparrot-clean
// Copyright Joyent, Inc. and other Node contributors. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to permit // persons to whom the Software is furnished to do so, subject to the // following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef SRC_ENV_INL_H_ #define SRC_ENV_INL_H_ #if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS #include "aliased_buffer-inl.h" #include "callback_queue-inl.h" #include "env.h" #include "node.h" #include "node_context_data.h" #include "node_internals.h" #include "node_perf_common.h" #include "node_realm-inl.h" #include "util-inl.h" #include "uv.h" #include "v8-cppgc.h" #include "v8.h" #include <cstddef> #include <cstdint> #include <utility> namespace node { inline v8::Isolate* IsolateData::isolate() const { return isolate_; } inline uv_loop_t* IsolateData::event_loop() const { return event_loop_; } inline uint16_t* IsolateData::embedder_id_for_cppgc() const { return &(wrapper_data_->cppgc_id); } inline uint16_t* IsolateData::embedder_id_for_non_cppgc() const { return &(wrapper_data_->non_cppgc_id); } inline NodeArrayBufferAllocator* IsolateData::node_allocator() const { return node_allocator_; } inline MultiIsolatePlatform* IsolateData::platform() const { return platform_; } inline const SnapshotData* IsolateData::snapshot_data() const { return snapshot_data_; } inline void IsolateData::set_worker_context(worker::Worker* context) { CHECK_NULL(worker_context_); // Should be set only once. 
worker_context_ = context; } inline worker::Worker* IsolateData::worker_context() const { return worker_context_; } inline v8::Local<v8::String> IsolateData::async_wrap_provider(int index) const { return async_wrap_providers_[index].Get(isolate_); } inline AliasedUint32Array& AsyncHooks::fields() { return fields_; } inline AliasedFloat64Array& AsyncHooks::async_id_fields() { return async_id_fields_; } inline AliasedFloat64Array& AsyncHooks::async_ids_stack() { return async_ids_stack_; } v8::Local<v8::Array> AsyncHooks::js_execution_async_resources() { if (js_execution_async_resources_.IsEmpty()) [[unlikely]] { js_execution_async_resources_.Reset( env()->isolate(), v8::Array::New(env()->isolate())); } return PersistentToLocal::Strong(js_execution_async_resources_); } v8::Local<v8::Object> AsyncHooks::native_execution_async_resource(size_t i) { if (i >= native_execution_async_resources_.size()) return {}; auto resource = native_execution_async_resources_[i]; if (std::holds_alternative<v8::Global<v8::Object>*>(resource)) [[unlikely]] { auto* global = std::get<v8::Global<v8::Object>*>(resource); if (global == nullptr) [[unlikely]] return {}; return global->Get(env()->isolate()); } else { auto* local = std::get<v8::Local<v8::Object>*>(resource); if (local == nullptr) [[unlikely]] return {}; return *local; } } inline v8::Local<v8::String> AsyncHooks::provider_string(int idx) { return env()->isolate_data()->async_wrap_provider(idx); } inline void AsyncHooks::no_force_checks() { fields_[kCheck] -= 1; } inline Environment* AsyncHooks::env() { return Environment::ForAsyncHooks(this); } Environment* Environment::ForAsyncHooks(AsyncHooks* hooks) { return ContainerOf(&Environment::async_hooks_, hooks); } inline size_t Environment::async_callback_scope_depth() const { return async_callback_scope_depth_; } inline void Environment::PushAsyncCallbackScope() { async_callback_scope_depth_++; } inline void Environment::PopAsyncCallbackScope() { async_callback_scope_depth_--; } inline 
AliasedUint32Array& ImmediateInfo::fields() { return fields_; } inline uint32_t ImmediateInfo::count() const { return fields_[kCount]; } inline uint32_t ImmediateInfo::ref_count() const { return fields_[kRefCount]; } inline bool ImmediateInfo::has_outstanding() const { return fields_[kHasOutstanding] == 1; } inline void ImmediateInfo::ref_count_inc(uint32_t increment) { fields_[kRefCount] += increment; } inline void ImmediateInfo::ref_count_dec(uint32_t decrement) { fields_[kRefCount] -= decrement; } inline AliasedUint8Array& TickInfo::fields() { return fields_; } inline bool TickInfo::has_tick_scheduled() const { return fields_[kHasTickScheduled] == 1; } inline bool TickInfo::has_rejection_to_warn() const { return fields_[kHasRejectionToWarn] == 1; } inline Environment* Environment::GetCurrent(v8::Isolate* isolate) { if (!isolate->InContext()) [[unlikely]] return nullptr; v8::HandleScope handle_scope(isolate); return GetCurrent(isolate->GetCurrentContext()); } inline Environment* Environment::GetCurrent(v8::Local<v8::Context> context) { if (!ContextEmbedderTag::IsNodeContext(context)) [[unlikely]] { return nullptr; } return static_cast<Environment*>(context->GetAlignedPointerFromEmbedderData( ContextEmbedderIndex::kEnvironment, EmbedderDataTag::kPerContextData)); } inline Environment* Environment::GetCurrent( const v8::FunctionCallbackInfo<v8::Value>& info) { return GetCurrent(info.GetIsolate()->GetCurrentContext()); } template <typename T> inline Environment* Environment::GetCurrent( const v8::PropertyCallbackInfo<T>& info) { return GetCurrent(info.GetIsolate()->GetCurrentContext()); } inline v8::Isolate* Environment::isolate() const { return isolate_; } inline cppgc::AllocationHandle& Environment::cppgc_allocation_handle() const { return isolate_->GetCppHeap()->GetAllocationHandle(); } inline v8::ExternalMemoryAccounter* Environment::external_memory_accounter() const { return external_memory_accounter_; } inline Environment* 
Environment::from_timer_handle(uv_timer_t* handle) { return ContainerOf(&Environment::timer_handle_, handle); } inline uv_timer_t* Environment::timer_handle() { return &timer_handle_; } inline Environment* Environment::from_immediate_check_handle( uv_check_t* handle) { return ContainerOf(&Environment::immediate_check_handle_, handle); } inline uv_check_t* Environment::immediate_check_handle() { return &immediate_check_handle_; } inline uv_idle_t* Environment::immediate_idle_handle() { return &immediate_idle_handle_; } template <typename T, typename OnCloseCallback> inline void Environment::CloseHandle(T* handle, OnCloseCallback callback) { handle_cleanup_waiting_++; static_assert(sizeof(T) >= sizeof(uv_handle_t), "T is a libuv handle"); static_assert(offsetof(T, data) == offsetof(uv_handle_t, data), "T is a libuv handle"); static_assert(offsetof(T, close_cb) == offsetof(uv_handle_t, close_cb), "T is a libuv handle"); struct CloseData { Environment* env; OnCloseCallback callback; void* original_data; }; handle->data = new CloseData { this, callback, handle->data }; uv_close(reinterpret_cast<uv_handle_t*>(handle), [](uv_handle_t* handle) { std::unique_ptr<CloseData> data { static_cast<CloseData*>(handle->data) }; data->env->handle_cleanup_waiting_--; handle->data = data->original_data; data->callback(reinterpret_cast<T*>(handle)); }); } void Environment::IncreaseWaitingRequestCounter() { request_waiting_++; } void Environment::DecreaseWaitingRequestCounter() { request_waiting_--; CHECK_GE(request_waiting_, 0); } inline uv_loop_t* Environment::event_loop() const { return isolate_data()->event_loop(); } #if HAVE_INSPECTOR inline bool Environment::is_in_inspector_console_call() const { return is_in_inspector_console_call_; } inline void Environment::set_is_in_inspector_console_call(bool value) { is_in_inspector_console_call_ = value; } #endif inline AsyncHooks* Environment::async_hooks() { return &async_hooks_; } inline ImmediateInfo* Environment::immediate_info() { 
return &immediate_info_; } inline AliasedInt32Array& Environment::timeout_info() { return timeout_info_; } inline TickInfo* Environment::tick_info() { return &tick_info_; } inline permission::Permission* Environment::permission() { return &permission_; } inline uint64_t Environment::timer_base() const { return timer_base_; } inline std::shared_ptr<KVStore> Environment::env_vars() { return env_vars_; } inline void Environment::set_env_vars(std::shared_ptr<KVStore> env_vars) { env_vars_ = env_vars; } inline bool Environment::printed_error() const { return printed_error_; } inline void Environment::set_printed_error(bool value) { printed_error_ = value; } inline void Environment::set_trace_sync_io(bool value) { trace_sync_io_ = value; } inline bool Environment::abort_on_uncaught_exception() const { return options_->abort_on_uncaught_exception; } inline void Environment::set_force_context_aware(bool value) { options_->force_context_aware = value; } inline bool Environment::force_context_aware() const { return options_->force_context_aware; } inline void Environment::set_exiting(bool value) { exit_info_[kExiting] = value ? 1 : 0; } inline bool Environment::exiting() const { return exit_info_[kExiting] == 1; } inline ExitCode Environment::exit_code(const ExitCode default_code) const { return exit_info_[kHasExitCode] == 0 ? 
default_code : static_cast<ExitCode>(exit_info_[kExitCode]); } inline void Environment::set_exit_code(const ExitCode code) { exit_info_[kExitCode] = static_cast<int>(code); exit_info_[kHasExitCode] = 1; } inline AliasedInt32Array& Environment::exit_info() { return exit_info_; } inline void Environment::set_abort_on_uncaught_exception(bool value) { options_->abort_on_uncaught_exception = value; } inline AliasedUint32Array& Environment::should_abort_on_uncaught_toggle() { return should_abort_on_uncaught_toggle_; } inline AliasedInt32Array& Environment::stream_base_state() { return stream_base_state_; } ShouldNotAbortOnUncaughtScope::ShouldNotAbortOnUncaughtScope( Environment* env) : env_(env) { env_->PushShouldNotAbortOnUncaughtScope(); } ShouldNotAbortOnUncaughtScope::~ShouldNotAbortOnUncaughtScope() { Close(); } void ShouldNotAbortOnUncaughtScope::Close() { if (env_ != nullptr) { env_->PopShouldNotAbortOnUncaughtScope(); env_ = nullptr; } } inline void Environment::PushShouldNotAbortOnUncaughtScope() { should_not_abort_scope_counter_++; } inline void Environment::PopShouldNotAbortOnUncaughtScope() { should_not_abort_scope_counter_--; } inline bool Environment::inside_should_not_abort_on_uncaught_scope() const { return should_not_abort_scope_counter_ > 0; } inline std::vector<double>* Environment::destroy_async_id_list() { return &destroy_async_id_list_; } inline builtins::BuiltinLoader* Environment::builtin_loader() { return &builtin_loader_; } inline const EmbedderPreloadCallback& Environment::embedder_preload() const { return embedder_preload_; } inline void Environment::set_embedder_preload(EmbedderPreloadCallback fn) { embedder_preload_ = std::move(fn); } inline double Environment::new_async_id() { async_hooks()->async_id_fields()[AsyncHooks::kAsyncIdCounter] += 1; return async_hooks()->async_id_fields()[AsyncHooks::kAsyncIdCounter]; } inline double Environment::execution_async_id() { return async_hooks()->async_id_fields()[AsyncHooks::kExecutionAsyncId]; } 
inline double Environment::trigger_async_id() { return async_hooks()->async_id_fields()[AsyncHooks::kTriggerAsyncId]; } inline double Environment::get_default_trigger_async_id() { double default_trigger_async_id = async_hooks()->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId]; // If defaultTriggerAsyncId isn't set, use the executionAsyncId if (default_trigger_async_id < 0) default_trigger_async_id = execution_async_id(); return default_trigger_async_id; } inline int64_t Environment::stack_trace_limit() const { return isolate_data_->options()->stack_trace_limit; } inline std::shared_ptr<EnvironmentOptions> Environment::options() { return options_; } inline const std::vector<std::string>& Environment::argv() { return argv_; } inline const std::vector<std::string>& Environment::exec_argv() { return exec_argv_; } inline const std::string& Environment::exec_path() const { return exec_path_; } inline CompileCacheHandler* Environment::compile_cache_handler() { auto* result = compile_cache_handler_.get(); DCHECK_NOT_NULL(result); return result; } inline bool Environment::use_compile_cache() const { return compile_cache_handler_.get() != nullptr; } #if HAVE_INSPECTOR inline void Environment::set_coverage_directory(const char* dir) { coverage_directory_ = std::string(dir); } inline void Environment::set_coverage_connection( std::unique_ptr<profiler::V8CoverageConnection> connection) { CHECK_NULL(coverage_connection_); std::swap(coverage_connection_, connection); } inline profiler::V8CoverageConnection* Environment::coverage_connection() { return coverage_connection_.get(); } inline const std::string& Environment::coverage_directory() const { return coverage_directory_; } inline void Environment::set_cpu_profiler_connection( std::unique_ptr<profiler::V8CpuProfilerConnection> connection) { CHECK_NULL(cpu_profiler_connection_); std::swap(cpu_profiler_connection_, connection); } inline profiler::V8CpuProfilerConnection* Environment::cpu_profiler_connection() { return 
cpu_profiler_connection_.get(); } inline void Environment::set_cpu_prof_interval(uint64_t interval) { cpu_prof_interval_ = interval; } inline uint64_t Environment::cpu_prof_interval() const { return cpu_prof_interval_; } inline void Environment::set_cpu_prof_name(const std::string& name) { cpu_prof_name_ = name; } inline const std::string& Environment::cpu_prof_name() const { return cpu_prof_name_; } inline void Environment::set_cpu_prof_dir(const std::string& dir) { cpu_prof_dir_ = dir; } inline const std::string& Environment::cpu_prof_dir() const { return cpu_prof_dir_; } inline void Environment::set_heap_profiler_connection( std::unique_ptr<profiler::V8HeapProfilerConnection> connection) { CHECK_NULL(heap_profiler_connection_); std::swap(heap_profiler_connection_, connection); } inline profiler::V8HeapProfilerConnection* Environment::heap_profiler_connection() { return heap_profiler_connection_.get(); } inline void Environment::set_heap_prof_name(const std::string& name) { heap_prof_name_ = name; } inline const std::string& Environment::heap_prof_name() const { return heap_prof_name_; } inline void Environment::set_heap_prof_dir(const std::string& dir) { heap_prof_dir_ = dir; } inline const std::string& Environment::heap_prof_dir() const { return heap_prof_dir_; } inline void Environment::set_heap_prof_interval(uint64_t interval) { heap_prof_interval_ = interval; } inline uint64_t Environment::heap_prof_interval() const { return heap_prof_interval_; } #endif // HAVE_INSPECTOR inline std::shared_ptr<ExclusiveAccess<HostPort>> Environment::inspector_host_port() { return inspector_host_port_; } inline std::shared_ptr<PerIsolateOptions> IsolateData::options() { return options_; } template <typename Fn> void Environment::SetImmediate(Fn&& cb, CallbackFlags::Flags flags) { auto callback = native_immediates_.CreateCallback(std::move(cb), flags); native_immediates_.Push(std::move(callback)); if (flags & CallbackFlags::kRefed) { if (immediate_info()->ref_count() == 0) 
ToggleImmediateRef(true); immediate_info()->ref_count_inc(1); } } template <typename Fn> void Environment::SetImmediateThreadsafe(Fn&& cb, CallbackFlags::Flags flags) { auto callback = native_immediates_threadsafe_.CreateCallback( std::move(cb), flags); { Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); native_immediates_threadsafe_.Push(std::move(callback)); if (task_queues_async_initialized_) uv_async_send(&task_queues_async_); } } template <typename Fn> void Environment::RequestInterrupt(Fn&& cb) { auto callback = native_immediates_interrupts_.CreateCallback( std::move(cb), CallbackFlags::kRefed); { Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); native_immediates_interrupts_.Push(std::move(callback)); if (task_queues_async_initialized_) uv_async_send(&task_queues_async_); } RequestInterruptFromV8(); } inline bool Environment::can_call_into_js() const { return can_call_into_js_ && !is_stopping(); } inline void Environment::set_can_call_into_js(bool can_call_into_js) { can_call_into_js_ = can_call_into_js; } inline bool Environment::has_run_bootstrapping_code() const { return principal_realm_->has_run_bootstrapping_code(); } inline bool Environment::has_serialized_options() const { return has_serialized_options_; } inline void Environment::set_has_serialized_options(bool value) { has_serialized_options_ = value; } inline bool Environment::is_main_thread() const { return worker_context() == nullptr; } inline bool Environment::no_native_addons() const { return (flags_ & EnvironmentFlags::kNoNativeAddons) || !options_->allow_native_addons; } inline bool Environment::should_not_register_esm_loader() const { return flags_ & EnvironmentFlags::kNoRegisterESMLoader; } inline bool Environment::owns_process_state() const { return flags_ & EnvironmentFlags::kOwnsProcessState; } inline bool Environment::owns_inspector() const { return flags_ & EnvironmentFlags::kOwnsInspector; } inline bool Environment::should_create_inspector() const { return 
(flags_ & EnvironmentFlags::kNoCreateInspector) == 0 && !(options_->test_runner && options_->test_isolation == "process") && !options_->watch_mode; } inline bool Environment::should_wait_for_inspector_frontend() const { return (flags_ & EnvironmentFlags::kNoWaitForInspectorFrontend) == 0; } inline bool Environment::tracks_unmanaged_fds() const { return flags_ & EnvironmentFlags::kTrackUnmanagedFds; } inline bool Environment::hide_console_windows() const { return flags_ & EnvironmentFlags::kHideConsoleWindows; } inline bool Environment::no_global_search_paths() const { return (flags_ & EnvironmentFlags::kNoGlobalSearchPaths) || !options_->global_search_paths; } inline bool Environment::should_start_debug_signal_handler() const { return ((flags_ & EnvironmentFlags::kNoStartDebugSignalHandler) == 0) && !options_->disable_sigusr1; } inline bool Environment::no_browser_globals() const { // configure --no-browser-globals #ifdef NODE_NO_BROWSER_GLOBALS return true; #else return flags_ & EnvironmentFlags::kNoBrowserGlobals; #endif } void Environment::set_source_maps_enabled(bool on) { source_maps_enabled_ = on; } bool Environment::source_maps_enabled() const { return source_maps_enabled_; } inline uint64_t Environment::thread_id() const { return thread_id_; } inline std::string_view Environment::thread_name() const { return thread_name_; } inline worker::Worker* Environment::worker_context() const { return isolate_data()->worker_context(); } inline void Environment::add_sub_worker_context(worker::Worker* context) { sub_worker_contexts_.insert(context); } inline void Environment::remove_sub_worker_context(worker::Worker* context) { sub_worker_contexts_.erase(context); } template <typename Fn> inline void Environment::ForEachWorker(Fn&& iterator) { for (worker::Worker* w : sub_worker_contexts_) iterator(w); } inline bool Environment::is_stopping() const { return is_stopping_.load(); } inline void Environment::set_stopping(bool value) { is_stopping_.store(value); } inline 
std::list<node_module>* Environment::extra_linked_bindings() { return &extra_linked_bindings_; } inline node_module* Environment::extra_linked_bindings_head() { return extra_linked_bindings_.size() > 0 ? &extra_linked_bindings_.front() : nullptr; } inline node_module* Environment::extra_linked_bindings_tail() { return extra_linked_bindings_.size() > 0 ? &extra_linked_bindings_.back() : nullptr; } inline const Mutex& Environment::extra_linked_bindings_mutex() const { return extra_linked_bindings_mutex_; } inline performance::PerformanceState* Environment::performance_state() { return performance_state_.get(); } inline IsolateData* Environment::isolate_data() const { return isolate_data_; } template <typename T> inline void Environment::ForEachRealm(T&& iterator) const { // TODO(legendecas): iterate over more realms bound to the environment. iterator(principal_realm()); } inline void Environment::ThrowError(const char* errmsg) { ThrowError(v8::Exception::Error, errmsg); } inline void Environment::ThrowTypeError(const char* errmsg) { ThrowError(v8::Exception::TypeError, errmsg); } inline void Environment::ThrowRangeError(const char* errmsg) { ThrowError(v8::Exception::RangeError, errmsg); } inline void Environment::ThrowError( v8::Local<v8::Value> (*fun)(v8::Local<v8::String>, v8::Local<v8::Value>), const char* errmsg) { v8::HandleScope handle_scope(isolate()); isolate()->ThrowException(fun(OneByteString(isolate(), errmsg), {})); } inline void Environment::ThrowStdErrException(std::error_code error_code, const char* syscall, const char* path) { ThrowErrnoException( error_code.value(), syscall, error_code.message().c_str(), path); } inline void Environment::ThrowErrnoException(int errorno, const char* syscall, const char* message, const char* path) { isolate()->ThrowException( ErrnoException(isolate(), errorno, syscall, message, path)); } inline void Environment::ThrowUVException(int errorno, const char* syscall, const char* message, const char* path, const char* dest) 
{ isolate()->ThrowException( UVException(isolate(), errorno, syscall, message, path, dest)); } void Environment::AddCleanupHook(CleanupQueue::Callback fn, void* arg) { cleanup_queue_.Add(fn, arg); } void Environment::RemoveCleanupHook(CleanupQueue::Callback fn, void* arg) { cleanup_queue_.Remove(fn, arg); } void Environment::set_process_exit_handler( std::function<void(Environment*, ExitCode)>&& handler) { process_exit_handler_ = std::move(handler); } #define VP(PropertyName, StringValue) V(v8::Private, PropertyName) #define VY(PropertyName, StringValue) V(v8::Symbol, PropertyName) #define VS(PropertyName, StringValue) V(v8::String, PropertyName) #define VR(PropertyName, TypeName) V(v8::Private, per_realm_##PropertyName) #define V(TypeName, PropertyName) \ inline \ v8::Local<TypeName> IsolateData::PropertyName() const { \ return PropertyName ## _ .Get(isolate_); \ } PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) PER_ISOLATE_SYMBOL_PROPERTIES(VY) PER_ISOLATE_STRING_PROPERTIES(VS) PER_REALM_STRONG_PERSISTENT_VALUES(VR) #undef V #undef VR #undef VS #undef VY #undef VP #define VM(PropertyName) V(PropertyName##_binding_template, v8::ObjectTemplate) #define V(PropertyName, TypeName) \ inline v8::Local<TypeName> IsolateData::PropertyName() const { \ return PropertyName##_.Get(isolate_); \ } \ inline void IsolateData::set_##PropertyName(v8::Local<TypeName> value) { \ CHECK(PropertyName##_.IsEmpty()); \ PropertyName##_.Set(isolate_, value); \ } PER_ISOLATE_TEMPLATE_PROPERTIES(V) NODE_BINDINGS_WITH_PER_ISOLATE_INIT(VM) #undef V #undef VM #define VP(PropertyName, StringValue) V(v8::Private, PropertyName) #define VY(PropertyName, StringValue) V(v8::Symbol, PropertyName) #define VS(PropertyName, StringValue) V(v8::String, PropertyName) #define V(TypeName, PropertyName) \ inline v8::Local<TypeName> Environment::PropertyName() const { \ return isolate_data()->PropertyName(); \ } PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) PER_ISOLATE_SYMBOL_PROPERTIES(VY) 
PER_ISOLATE_STRING_PROPERTIES(VS) #undef V #undef VS #undef VY #undef VP #define V(PropertyName, TypeName) \ inline v8::Local<TypeName> Environment::PropertyName() const { \ return isolate_data()->PropertyName(); \ } \ inline void Environment::set_##PropertyName(v8::Local<TypeName> value) { \ DCHECK(isolate_data()->PropertyName().IsEmpty()); \ isolate_data()->set_##PropertyName(value); \ } PER_ISOLATE_TEMPLATE_PROPERTIES(V) #undef V #define V(PropertyName, TypeName) \ inline v8::Local<TypeName> Environment::PropertyName() const { \ DCHECK_NOT_NULL(principal_realm_); \ return principal_realm_->PropertyName(); \ } \ inline void Environment::set_##PropertyName(v8::Local<TypeName> value) { \ DCHECK_NOT_NULL(principal_realm_); \ principal_realm_->set_##PropertyName(value); \ } PER_REALM_STRONG_PERSISTENT_VALUES(V) #undef V v8::Local<v8::Context> Environment::context() const { return principal_realm()->context(); } PrincipalRealm* Environment::principal_realm() const { return principal_realm_.get(); } inline void Environment::set_heap_snapshot_near_heap_limit(uint32_t limit) { heap_snapshot_near_heap_limit_ = limit; } inline bool Environment::is_in_heapsnapshot_heap_limit_callback() const { return is_in_heapsnapshot_heap_limit_callback_; } inline bool Environment::report_exclude_env() const { return options_->report_exclude_env; } inline void Environment::AddHeapSnapshotNearHeapLimitCallback() { DCHECK(!heapsnapshot_near_heap_limit_callback_added_); heapsnapshot_near_heap_limit_callback_added_ = true; isolate_->AddNearHeapLimitCallback(Environment::NearHeapLimitCallback, this); } inline void Environment::RemoveHeapSnapshotNearHeapLimitCallback( size_t heap_limit) { DCHECK(heapsnapshot_near_heap_limit_callback_added_); heapsnapshot_near_heap_limit_callback_added_ = false; isolate_->RemoveNearHeapLimitCallback(Environment::NearHeapLimitCallback, heap_limit); } } // namespace node // These two files depend on each other. 
Including base_object-inl.h after this // file is the easiest way to avoid issues with that circular dependency. #include "base_object-inl.h" #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS #endif // SRC_ENV_INL_H_
c
github
https://github.com/nodejs/node
src/env-inl.h
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import cProfile import pstats import os import atexit from pyspark.accumulators import AccumulatorParam class ProfilerCollector(object): """ This class keeps track of different profilers on a per stage basis. Also this is used to create new profilers for the different stages. 
""" def __init__(self, profiler_cls, dump_path=None): self.profiler_cls = profiler_cls self.profile_dump_path = dump_path self.profilers = [] def new_profiler(self, ctx): """ Create a new profiler using class `profiler_cls` """ return self.profiler_cls(ctx) def add_profiler(self, id, profiler): """ Add a profiler for RDD `id` """ if not self.profilers: if self.profile_dump_path: atexit.register(self.dump_profiles, self.profile_dump_path) else: atexit.register(self.show_profiles) self.profilers.append([id, profiler, False]) def dump_profiles(self, path): """ Dump the profile stats into directory `path` """ for id, profiler, _ in self.profilers: profiler.dump(id, path) self.profilers = [] def show_profiles(self): """ Print the profile stats to stdout """ for i, (id, profiler, showed) in enumerate(self.profilers): if not showed and profiler: profiler.show(id) # mark it as showed self.profilers[i][2] = True class Profiler(object): """ .. note:: DeveloperApi PySpark supports custom profilers, this is to allow for different profilers to be used as well as outputting to different formats than what is provided in the BasicProfiler. A custom profiler has to define or inherit the following methods: profile - will produce a system profile of some sort. stats - return the collected stats. dump - dumps the profiles to a path add - adds a profile to the existing accumulated profile The profiler class is chosen when creating a SparkContext >>> from pyspark import SparkConf, SparkContext >>> from pyspark import BasicProfiler >>> class MyCustomProfiler(BasicProfiler): ... def show(self, id): ... print("My custom profiles for RDD:%s" % id) ... 
>>> conf = SparkConf().set("spark.python.profile", "true") >>> sc = SparkContext('local', 'test', conf=conf, profiler_cls=MyCustomProfiler) >>> sc.parallelize(range(1000)).map(lambda x: 2 * x).take(10) [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] >>> sc.parallelize(range(1000)).count() 1000 >>> sc.show_profiles() My custom profiles for RDD:1 My custom profiles for RDD:3 >>> sc.stop() """ def __init__(self, ctx): pass def profile(self, func): """ Do profiling on the function `func`""" raise NotImplemented def stats(self): """ Return the collected profiling stats (pstats.Stats)""" raise NotImplemented def show(self, id): """ Print the profile stats to stdout, id is the RDD id """ stats = self.stats() if stats: print("=" * 60) print("Profile of RDD<id=%d>" % id) print("=" * 60) stats.sort_stats("time", "cumulative").print_stats() def dump(self, id, path): """ Dump the profile into path, id is the RDD id """ if not os.path.exists(path): os.makedirs(path) stats = self.stats() if stats: p = os.path.join(path, "rdd_%d.pstats" % id) stats.dump_stats(p) class PStatsParam(AccumulatorParam): """PStatsParam is used to merge pstats.Stats""" @staticmethod def zero(value): return None @staticmethod def addInPlace(value1, value2): if value1 is None: return value2 value1.add(value2) return value1 class BasicProfiler(Profiler): """ BasicProfiler is the default profiler, which is implemented based on cProfile and Accumulator """ def __init__(self, ctx): Profiler.__init__(self, ctx) # Creates a new accumulator for combining the profiles of different # partitions of a stage self._accumulator = ctx.accumulator(None, PStatsParam) def profile(self, func): """ Runs and profiles the method to_profile passed in. A profile object is returned. 
""" pr = cProfile.Profile() pr.runcall(func) st = pstats.Stats(pr) st.stream = None # make it picklable st.strip_dirs() # Adds a new profile to the existing accumulated value self._accumulator.add(st) def stats(self): return self._accumulator.value if __name__ == "__main__": import doctest (failure_count, test_count) = doctest.testmod() if failure_count: exit(-1)
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Console\Scheduling; use Illuminate\Console\Application; use Illuminate\Console\Command; use Symfony\Component\Console\Attribute\AsCommand; use function Laravel\Prompts\select; #[AsCommand(name: 'schedule:test')] class ScheduleTestCommand extends Command { /** * The console command name. * * @var string */ protected $signature = 'schedule:test {--name= : The name of the scheduled command to run}'; /** * The console command description. * * @var string */ protected $description = 'Run a scheduled command'; /** * Execute the console command. * * @param \Illuminate\Console\Scheduling\Schedule $schedule * @return void */ public function handle(Schedule $schedule) { $phpBinary = Application::phpBinary(); $commands = $schedule->events(); $commandNames = []; foreach ($commands as $command) { $commandNames[] = $command->command ?? $command->getSummaryForDisplay(); } if (empty($commandNames)) { return $this->components->info('No scheduled commands have been defined.'); } if (! empty($name = $this->option('name'))) { $commandBinary = $phpBinary.' '.Application::artisanBinary(); $matches = array_filter($commandNames, function ($commandName) use ($commandBinary, $name) { return trim(str_replace($commandBinary, '', $commandName)) === $name; }); if (count($matches) !== 1) { $this->components->info('No matching scheduled command found.'); return; } $index = key($matches); } else { $index = $this->getSelectedCommandByIndex($commandNames); } $event = $commands[$index]; $summary = $event->getSummaryForDisplay(); $command = $event instanceof CallbackEvent ? $summary : trim(str_replace($phpBinary, '', $event->command)); $description = sprintf( 'Running [%s]%s', $command, $event->runInBackground ? ' normally in background' : '', ); $event->runInBackground = false; $this->components->task($description, fn () => $event->run($this->laravel)); if (! 
$event instanceof CallbackEvent) { $this->components->bulletList([$event->getSummaryForDisplay()]); } $this->newLine(); } /** * Get the selected command name by index. * * @param array $commandNames * @return int */ protected function getSelectedCommandByIndex(array $commandNames) { if (count($commandNames) !== count(array_unique($commandNames))) { // Some commands (likely closures) have the same name, append unique indexes to each one... $uniqueCommandNames = array_map(function ($index, $value) { return "$value [$index]"; }, array_keys($commandNames), $commandNames); $selectedCommand = select('Which command would you like to run?', $uniqueCommandNames); preg_match('/\[(\d+)\]/', $selectedCommand, $choice); return (int) $choice[1]; } else { return array_search( select('Which command would you like to run?', $commandNames), $commandNames ); } } }
php
github
https://github.com/laravel/framework
src/Illuminate/Console/Scheduling/ScheduleTestCommand.php
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova` -- Cloud IaaS Platform =================================== .. automodule:: nova :platform: Unix :synopsis: Infrastructure-as-a-Service Cloud platform. .. moduleauthor:: Jesse Andrews <jesse@ansolabs.com> .. moduleauthor:: Devin Carlen <devin.carlen@gmail.com> .. moduleauthor:: Vishvananda Ishaya <vishvananda@gmail.com> .. moduleauthor:: Joshua McKenty <joshua@cognition.ca> .. moduleauthor:: Manish Singh <yosh@gimp.org> .. moduleauthor:: Andy Smith <andy@anarkystic.com> """ import gettext gettext.install('nova', unicode=1)
python
codeparrot/codeparrot-clean
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Networking/Requests/Messages/UpgradePokemonMessage.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Networking/Requests/Messages/UpgradePokemonMessage.proto', package='POGOProtos.Networking.Requests.Messages', syntax='proto3', serialized_pb=_b('\nCPOGOProtos/Networking/Requests/Messages/UpgradePokemonMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"+\n\x15UpgradePokemonMessage\x12\x12\n\npokemon_id\x18\x01 \x01(\x06\x62\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _UPGRADEPOKEMONMESSAGE = _descriptor.Descriptor( name='UpgradePokemonMessage', full_name='POGOProtos.Networking.Requests.Messages.UpgradePokemonMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pokemon_id', full_name='POGOProtos.Networking.Requests.Messages.UpgradePokemonMessage.pokemon_id', index=0, number=1, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=112, serialized_end=155, ) DESCRIPTOR.message_types_by_name['UpgradePokemonMessage'] = _UPGRADEPOKEMONMESSAGE UpgradePokemonMessage = _reflection.GeneratedProtocolMessageType('UpgradePokemonMessage', (_message.Message,), dict( DESCRIPTOR = _UPGRADEPOKEMONMESSAGE, __module__ = 
'POGOProtos.Networking.Requests.Messages.UpgradePokemonMessage_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.UpgradePokemonMessage) )) _sym_db.RegisterMessage(UpgradePokemonMessage) # @@protoc_insertion_point(module_scope)
python
codeparrot/codeparrot-clean
'''
==================
Multicolored lines
==================

This example shows how to make a multi-colored line. In this example, the line
is colored based on its derivative.
'''

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm

# nodebox section
# When run inside NodeBox, `__name__` is '__builtin__'; in that case figures
# are saved to temp PNGs and drawn onto the NodeBox canvas instead of shown.
# `size`, `image`, and `imagesize` are NodeBox canvas builtins (not defined
# in this file).
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create a closed, non-deleting temp file and return its path.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Save the current figure and place it on the canvas, stacking
        # successive figures vertically.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        # NOTE(review): HEIGHT is not defined anywhere in this script — this
        # line would raise NameError in the NodeBox branch; confirm against
        # the original example (likely meant the canvas height, 600).
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        mplpyplot.show()
# nodebox section end

x = np.linspace(0, 3 * np.pi, 500)
y = np.sin(x)
dydx = np.cos(0.5 * (x[:-1] + x[1:]))  # first derivative

# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)

fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)

# Create a continuous norm to map from data points to colors
norm = plt.Normalize(dydx.min(), dydx.max())
lc = LineCollection(segments, cmap='viridis', norm=norm)
# Set the values used for colormapping
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[0].add_collection(lc)
fig.colorbar(line, ax=axs[0])

# Use a boundary norm instead
cmap = ListedColormap(['r', 'g', 'b'])
norm = BoundaryNorm([-1, -0.5, 0.5, 1], cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[1].add_collection(lc)
fig.colorbar(line, ax=axs[1])

axs[0].set_xlim(x.min(), x.max())
axs[0].set_ylim(-1.1, 1.1)
pltshow(plt)
python
codeparrot/codeparrot-clean
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Diagnose common build_ext failures when building gRPC Python from source."""

import os
import os.path
import shutil
import sys
import tempfile

from distutils import errors

# NOTE(review): this is presumably the project-local `commands` module (it
# provides CommandError, which the Python 2 stdlib `commands` does not) —
# confirm against the package layout.
import commands

# Minimal C program used to probe whether Python development headers exist.
C_PYTHON_DEV = """
#include <Python.h>
int main(int argc, char **argv) { return 0; }
"""
# <PY_REPR> is substituted below with 'python' or 'python3' as appropriate.
C_PYTHON_DEV_ERROR_MESSAGE = """
Could not find <Python.h>. This could mean the following:
  * You're on Ubuntu and haven't run `apt-get install <PY_REPR>-dev`.
  * You're on RHEL/Fedora and haven't run `yum install <PY_REPR>-devel` or
    `dnf install <PY_REPR>-devel` (make sure you also have redhat-rpm-config
    installed)
  * You're on Mac OS X and the usual Python framework was somehow corrupted
    (check your environment variables or try re-installing?)
  * You're on Windows and your Python installation was somehow corrupted
    (check your environment variables or try re-installing?)
"""

if sys.version_info[0] == 2:
    PYTHON_REPRESENTATION = 'python'
elif sys.version_info[0] == 3:
    PYTHON_REPRESENTATION = 'python3'
else:
    raise NotImplementedError('Unsupported Python version: %s' % sys.version)

# Maps each probe source to the error message shown if it fails to compile.
C_CHECKS = {
    C_PYTHON_DEV:
        C_PYTHON_DEV_ERROR_MESSAGE.replace('<PY_REPR>', PYTHON_REPRESENTATION),
}


def _compile(compiler, source_string):
    """Compile `source_string` with `compiler` in a throwaway directory.

    Returns the CompileError on failure, or None if compilation succeeded.
    """
    tempdir = tempfile.mkdtemp()
    cpath = os.path.join(tempdir, 'a.c')
    with open(cpath, 'w') as cfile:
        cfile.write(source_string)
    try:
        compiler.compile([cpath])
    except errors.CompileError as error:
        return error
    finally:
        # Always remove the scratch directory, whether or not compilation worked.
        shutil.rmtree(tempdir)


def _expect_compile(compiler, source_string, error_message):
    """Raise commands.CommandError with `error_message` if the probe fails to compile."""
    if _compile(compiler, source_string) is not None:
        sys.stderr.write(error_message)
        raise commands.CommandError(
            "Diagnostics found a compilation environment issue:\n{}".format(
                error_message))


def diagnose_compile_error(build_ext, error):
    """Attempt to diagnose an error during compilation."""
    # First, verify the C toolchain can see the Python headers at all.
    for c_check, message in C_CHECKS.items():
        _expect_compile(build_ext.compiler, c_check, message)
    # Then check that the Cython-generated C sources actually exist on disk.
    python_sources = [
        source for source in build_ext.get_source_files()
        if source.startswith('./src/python') and source.endswith('c')
    ]
    for source in python_sources:
        if not os.path.isfile(source):
            raise commands.CommandError((
                "Diagnostics found a missing Python extension source file:\n{}\n\n"
                "This is usually because the Cython sources haven't been transpiled "
                "into C yet and you're building from source.\n"
                "Try setting the environment variable "
                "`GRPC_PYTHON_BUILD_WITH_CYTHON=1` when invoking `setup.py` or "
                "when using `pip`, e.g.:\n\n"
                "pip install -rrequirements.txt\n"
                "GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .").format(source))


def diagnose_attribute_error(build_ext, error):
    """Diagnose AttributeErrors known to come from outdated setuptools."""
    if any('_needs_stub' in arg for arg in error.args):
        raise commands.CommandError(
            "We expect a missing `_needs_stub` attribute from older versions of "
            "setuptools. Consider upgrading setuptools.")


# Dispatch table from exception type to its diagnostic handler.
_ERROR_DIAGNOSES = {
    errors.CompileError: diagnose_compile_error,
    AttributeError: diagnose_attribute_error,
}


def diagnose_build_ext_error(build_ext, error, formatted):
    """Dispatch `error` to a known diagnostic, or raise asking for a bug report."""
    diagnostic = _ERROR_DIAGNOSES.get(type(error))
    if diagnostic is None:
        raise commands.CommandError(
            "\n\nWe could not diagnose your build failure. If you are unable to "
            "proceed, please file an issue at http://www.github.com/grpc/grpc "
            "with `[Python install]` in the title; please attach the whole log "
            "(including everything that may have appeared above the Python "
            "backtrace).\n\n{}".format(formatted))
    else:
        diagnostic(build_ext, error)
python
codeparrot/codeparrot-clean
/* Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storageversiongc import ( "context" "fmt" "reflect" "testing" "time" apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" coordinationv1 "k8s.io/api/coordination/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2/ktesting" "k8s.io/utils/ptr" ) func setupController(ctx context.Context, clientset kubernetes.Interface) { informerFactory := informers.NewSharedInformerFactory(clientset, 100*time.Millisecond) leaseInformer := informerFactory.Coordination().V1().Leases() storageVersionInformer := informerFactory.Internal().V1alpha1().StorageVersions() controller := NewStorageVersionGC(ctx, clientset, leaseInformer, storageVersionInformer) informerFactory.Start(ctx.Done()) // Using this ensure informer caches are fully populated before starting the controller. 
if !cache.WaitForCacheSync(ctx.Done(), controller.leasesSynced, controller.storageVersionSynced) { panic("timed out waiting for caches to sync") } go controller.Run(context.Background()) } func newKubeApiserverLease(name, holderIdentity string) *coordinationv1.Lease { return &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: metav1.NamespaceSystem, Labels: map[string]string{ "apiserver.kubernetes.io/identity": "kube-apiserver", }, }, Spec: coordinationv1.LeaseSpec{ HolderIdentity: ptr.To(holderIdentity), }, } } // Test_StorageVersionUpdatedWithAllEncodingVersionsEqualOnLeaseDeletion validates that // status.serverStorageVersions is updated when a kube-apiserver Lease is deleted. // If the remaining Leases agree on a new encoding version, status.commonEncodingVersion // should reflect the newly agreed version. func Test_StorageVersionUpdatedWithAllEncodingVersionsEqualOnLeaseDeletion(t *testing.T) { lease1 := newKubeApiserverLease("kube-apiserver-1", "kube-apiserver-1") lease2 := newKubeApiserverLease("kube-apiserver-2", "kube-apiserver-2") lease3 := newKubeApiserverLease("kube-apiserver-3", "kube-apiserver-3") storageVersion := &apiserverinternalv1alpha1.StorageVersion{ ObjectMeta: metav1.ObjectMeta{ Name: "k8s.test.resources", }, Status: apiserverinternalv1alpha1.StorageVersionStatus{ StorageVersions: []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-1", EncodingVersion: "v1", DecodableVersions: []string{"v1"}, }, { APIServerID: "kube-apiserver-2", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, { APIServerID: "kube-apiserver-3", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, }, CommonEncodingVersion: ptr.To("v1"), }, } clientset := fake.NewClientset(lease1, lease2, lease3, storageVersion) _, ctx := ktesting.NewTestContext(t) setupController(ctx, clientset) // Delete the lease object and verify that storage version status is updated if err := 
clientset.CoordinationV1().Leases(metav1.NamespaceSystem).Delete(context.Background(), "kube-apiserver-1", metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting lease object: %v", err) } expectedServerStorageVersions := []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-2", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, { APIServerID: "kube-apiserver-3", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, } var lastErr error // Wait up to 5 seconds, checking every 100ms to ensure controller had a chance to reconcile err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) { storageVersion, err := clientset.InternalV1alpha1().StorageVersions().Get( ctx, "k8s.test.resources", metav1.GetOptions{}, ) if err != nil { lastErr = fmt.Errorf("failed to get StorageVersion: %w", err) return false, err } if !reflect.DeepEqual(storageVersion.Status.StorageVersions, expectedServerStorageVersions) { lastErr = fmt.Errorf("storage versions mismatch: got %+v, expected %+v", storageVersion.Status.StorageVersions, expectedServerStorageVersions) return false, err } if *storageVersion.Status.CommonEncodingVersion != "v2" { t.Errorf("unexpected common encoding version") t.Logf("got: %q", *storageVersion.Status.CommonEncodingVersion) t.Logf("expected: %q", "v2") return false, err } if len(storageVersion.Status.Conditions) != 1 { lastErr = fmt.Errorf("CommonEncodingVersion mismatch: got %v, expected %q", storageVersion.Status.CommonEncodingVersion, "v2") return false, err } if storageVersion.Status.Conditions[0].Type != apiserverinternalv1alpha1.AllEncodingVersionsEqual { lastErr = fmt.Errorf("expected condition type 'AllEncodingVersionsEqual', got: %q", storageVersion.Status.Conditions[0].Type) return false, nil } if storageVersion.Status.Conditions[0].Status != apiserverinternalv1alpha1.ConditionTrue { lastErr = fmt.Errorf("expected condition status 'True', 
got: %q", storageVersion.Status.Conditions[0].Status) return false, nil } return true, nil }) if err != nil { t.Fatalf("controller did not reconcile storage version in time: %v\nLast mismatch: %v", err, lastErr) } } // Test_StorageVersionUpdatedWithDifferentEncodingVersionsOnLeaseDeletion validates that // status.serverStorageVersions is updated when a kube-apiserver Lease is deleted. // If the remaining Leases do not agree on a new encoding version, status.commonEncodingVersion // should remain unchanged. func Test_StorageVersionUpdatedWithDifferentEncodingVersionsOnLeaseDeletion(t *testing.T) { lease1 := newKubeApiserverLease("kube-apiserver-1", "kube-apiserver-1") lease2 := newKubeApiserverLease("kube-apiserver-2", "kube-apiserver-2") lease3 := newKubeApiserverLease("kube-apiserver-3", "kube-apiserver-3") storageVersion := &apiserverinternalv1alpha1.StorageVersion{ ObjectMeta: metav1.ObjectMeta{ Name: "k8s.test.resources", }, Status: apiserverinternalv1alpha1.StorageVersionStatus{ StorageVersions: []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-1", EncodingVersion: "v1", DecodableVersions: []string{"v1"}, }, { APIServerID: "kube-apiserver-3", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, }, CommonEncodingVersion: ptr.To("v1"), }, } clientset := fake.NewSimpleClientset(lease1, lease2, lease3, storageVersion) _, ctx := ktesting.NewTestContext(t) setupController(ctx, clientset) // Delete the lease object and verify that storage version status is updated if err := clientset.CoordinationV1().Leases(metav1.NamespaceSystem).Delete(context.Background(), "kube-apiserver-2", metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting lease object: %v", err) } // add a delay to ensure controller had a chance to reconcile time.Sleep(2 * time.Second) storageVersion, err := clientset.InternalV1alpha1().StorageVersions().Get(context.Background(), "k8s.test.resources", metav1.GetOptions{}) if err != nil { t.Fatalf("error 
getting StorageVersion: %v", err) } expectedServerStorageVersions := []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-1", EncodingVersion: "v1", DecodableVersions: []string{"v1"}, }, { APIServerID: "kube-apiserver-3", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, } if !reflect.DeepEqual(storageVersion.Status.StorageVersions, expectedServerStorageVersions) { t.Error("unexpected storage version object") t.Logf("got: %+v", storageVersion) t.Logf("expected: %+v", expectedServerStorageVersions) } if *storageVersion.Status.CommonEncodingVersion != "v1" { t.Errorf("unexpected common encoding version") t.Logf("got: %q", *storageVersion.Status.CommonEncodingVersion) t.Logf("expected: %q", "v1") } } // Test_StorageVersionContainsInvalidLeaseID validates that status.serverStorageVersions // only contains the holder identity from kube-apiserver Leases that exist. func Test_StorageVersionContainsInvalidLeaseID(t *testing.T) { lease1 := newKubeApiserverLease("kube-apiserver-1", "kube-apiserver-1") lease2 := newKubeApiserverLease("kube-apiserver-2", "kube-apiserver-2") lease3 := newKubeApiserverLease("kube-apiserver-3", "kube-apiserver-3") storageVersion := &apiserverinternalv1alpha1.StorageVersion{ ObjectMeta: metav1.ObjectMeta{ Name: "k8s.test.resources", }, Status: apiserverinternalv1alpha1.StorageVersionStatus{ StorageVersions: []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-1", EncodingVersion: "v1", DecodableVersions: []string{"v1"}, }, { APIServerID: "kube-apiserver-2", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, { APIServerID: "kube-apiserver-3", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, { APIServerID: "kube-apiserver-4", // doesn't exist EncodingVersion: "v2", DecodableVersions: []string{"v1"}, }, }, CommonEncodingVersion: ptr.To("v1"), }, } clientset := fake.NewSimpleClientset(lease1, lease2, lease3, storageVersion) _, ctx := ktesting.NewTestContext(t) 
setupController(ctx, clientset) // add a delay to ensure controller had a chance to reconcile time.Sleep(2 * time.Second) storageVersion, err := clientset.InternalV1alpha1().StorageVersions().Get(context.Background(), "k8s.test.resources", metav1.GetOptions{}) if err != nil { t.Fatalf("error getting StorageVersion: %v", err) } expectedServerStorageVersions := []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-1", EncodingVersion: "v1", DecodableVersions: []string{"v1"}, }, { APIServerID: "kube-apiserver-2", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, { APIServerID: "kube-apiserver-3", EncodingVersion: "v2", DecodableVersions: []string{"v2"}, }, } if !reflect.DeepEqual(storageVersion.Status.StorageVersions, expectedServerStorageVersions) { t.Error("unexpected storage version object") t.Logf("got: %+v", storageVersion) t.Logf("expected: %+v", expectedServerStorageVersions) } if len(storageVersion.Status.Conditions) != 1 { t.Errorf("expected 1 condition, got: %d", len(storageVersion.Status.Conditions)) } if storageVersion.Status.Conditions[0].Type != apiserverinternalv1alpha1.AllEncodingVersionsEqual { t.Errorf("expected condition type 'AllEncodingVersionsEqual', got: %q", storageVersion.Status.Conditions[0].Type) } if storageVersion.Status.Conditions[0].Status != apiserverinternalv1alpha1.ConditionFalse { t.Errorf("expected condition status 'True', got: %q", storageVersion.Status.Conditions[0].Status) } } // Test_StorageVersionDeletedOnLeaseDeletion validates that a StorageVersion // object is deleted if there are no kube-apiserver Leases. 
func Test_StorageVersionDeletedOnLeaseDeletion(t *testing.T) { lease1 := newKubeApiserverLease("kube-apiserver-1", "kube-apiserver-1") storageVersion := &apiserverinternalv1alpha1.StorageVersion{ ObjectMeta: metav1.ObjectMeta{ Name: "k8s.test.resources", }, Status: apiserverinternalv1alpha1.StorageVersionStatus{ StorageVersions: []apiserverinternalv1alpha1.ServerStorageVersion{ { APIServerID: "kube-apiserver-1", EncodingVersion: "v1", DecodableVersions: []string{"v1"}, }, }, }, } clientset := fake.NewClientset(lease1, storageVersion) _, ctx := ktesting.NewTestContext(t) setupController(ctx, clientset) // Delete the lease object and verify that storage version status is updated if err := clientset.CoordinationV1().Leases(metav1.NamespaceSystem).Delete(context.Background(), "kube-apiserver-1", metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting lease object: %v", err) } err := wait.PollUntilContextTimeout(context.Background(), time.Second, 5*time.Second, true, func(ctx context.Context) (bool, error) { // expect deleted _, err := clientset.InternalV1alpha1().StorageVersions().Get(context.Background(), "k8s.test.resources", metav1.GetOptions{}) return apierrors.IsNotFound(err), nil }) if err != nil { t.Fatalf("expected IsNotFound error, but got error: %v", err) } }
go
github
https://github.com/kubernetes/kubernetes
pkg/controller/storageversiongc/gc_controller_test.go
{ "kind": "Dashboard", "apiVersion": "dashboard.grafana.app/v2alpha1", "metadata": { "name": "v0alpha1.panel-stat-tests.v42" }, "spec": { "annotations": [ { "kind": "AnnotationQuery", "spec": { "datasource": { "type": "datasource", "uid": "grafana" }, "query": { "kind": "datasource", "spec": {} }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations \u0026 Alerts", "builtIn": true, "legacyOptions": { "type": "dashboard" } } } ], "cursorSync": "Off", "editable": true, "elements": { "panel-10": { "kind": "Panel", "spec": { "id": 10, "title": "Panel Title", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__house_locations", "min": 0, "noise": 10, "scenarioId": "random_walk", "seriesCount": 6, "spread": 10 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "7m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-12": { "kind": "Panel", "spec": { "id": 12, "title": "Horizontal", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": 
"random_walk" } }, "refId": "A", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "B", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "C", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "D", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "E", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "F", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "G", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "10m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "line", "justifyMode": "auto", "orientation": "horizontal", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-13": { "kind": "Panel", "spec": { "id": 13, "title": "Horizontal with graph", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { 
"queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "min": 0, "scenarioId": "random_walk", "seriesCount": 7, "spread": 100 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "area", "justifyMode": "auto", "orientation": "horizontal", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-14": { "kind": "Panel", "spec": { "id": 14, "title": "Panel Title", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__house_locations", "min": 0, "noise": 10, "scenarioId": "random_walk", "seriesCount": 6, "spread": 10 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "7m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", 
"steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-15": { "kind": "Panel", "spec": { "id": 15, "title": "Text mode name", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "max": 200, "min": 0, "noise": 5, "scenarioId": "random_walk", "seriesCount": 7, "spread": 20, "startValue": 0 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "none", "justifyMode": "auto", "orientation": "horizontal", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "name", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "min": 0, "max": 200, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-16": { "kind": "Panel", "spec": { "id": 16, "title": "Value only", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "max": 200, "min": 0, "noise": 15, "scenarioId": "random_walk", "seriesCount": 45, "spread": 1, "startValue": 0 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { 
"interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "value", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "min": 0, "max": 200, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-17": { "kind": "Panel", "spec": { "id": 17, "title": "No text", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "max": 200, "min": 0, "noise": 15, "scenarioId": "random_walk", "seriesCount": 200, "spread": 1, "startValue": 0 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "none", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "min": 0, "max": 200, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } 
}, "overrides": [] } } } } }, "panel-20": { "kind": "Panel", "spec": { "id": 20, "title": "", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__house_locations", "labels": "", "min": 0, "noise": 5, "scenarioId": "random_walk", "seriesCount": 6, "spread": 100 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "6m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-21": { "kind": "Panel", "spec": { "id": 21, "title": "Panel Title", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__house_locations", "min": 0, "noise": 10, "scenarioId": "random_walk", "seriesCount": 6, "spread": 10 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "7m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": 
false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-22": { "kind": "Panel", "spec": { "id": 22, "title": "Panel Title", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__house_locations", "min": 0, "noise": 10, "scenarioId": "random_walk", "seriesCount": 6, "spread": 10 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "7m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-23": { "kind": "Panel", "spec": { "id": 23, "title": "Horizontal with graph", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "min": 0, "scenarioId": "random_walk", "seriesCount": 7, 
"spread": 100 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "area", "justifyMode": "auto", "orientation": "horizontal", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-24": { "kind": "Panel", "spec": { "id": 24, "title": "Auto grid", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "A", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "B", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "C", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "D", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "E", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "F", "hidden": false } }, { "kind": 
"PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "G", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "10m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "line", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-25": { "kind": "Panel", "spec": { "id": 25, "title": "Horizontal", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "A", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "B", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "C", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "D", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "E", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": 
"grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "F", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "G", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "10m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "line", "justifyMode": "auto", "orientation": "horizontal", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-26": { "kind": "Panel", "spec": { "id": 26, "title": "Text mode name", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "max": 200, "min": 0, "noise": 5, "scenarioId": "random_walk", "seriesCount": 7, "spread": 20, "startValue": 0 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "none", "justifyMode": "auto", "orientation": "horizontal", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "name", "wideLayout": true }, "fieldConfig": { 
"defaults": { "unit": "areaM2", "min": 0, "max": 200, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-27": { "kind": "Panel", "spec": { "id": 27, "title": "Value only", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "max": 200, "min": 0, "noise": 15, "scenarioId": "random_walk", "seriesCount": 45, "spread": 1, "startValue": 0 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "value", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "min": 0, "max": 200, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-28": { "kind": "Panel", "spec": { "id": 28, "title": "No text", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__server_names", "max": 200, "min": 0, "noise": 15, "scenarioId": "random_walk", "seriesCount": 50, "spread": 1, "startValue": 0 } }, 
"refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "5m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": true, "sparkline": { "show": true }, "textMode": "none", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "min": 0, "max": 200, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-29": { "kind": "Panel", "spec": { "id": 29, "title": "Infinity Percent Change", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "csvContent": "time, value\n2023-12-13T00:00:00Z, 0\n2023-12-13T00:00:00Z, 100", "scenarioId": "csv_content" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": true, "textMode": "value", "wideLayout": true }, "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "green" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-30": { "kind": "Panel", "spec": { "id": 30, "title": "NaN Percent Change", "description": "", 
"links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "csvContent": "time, value\n2023-12-13T00:00:00Z, 0\n2023-12-13T00:00:00Z, 0", "scenarioId": "csv_content" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": true, "textMode": "value", "wideLayout": true }, "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "green" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-31": { "kind": "Panel", "spec": { "id": 31, "title": "Value Options All", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "csvContent": "Name, value\nName1, 10\nName2, 20", "scenarioId": "csv_content" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": true }, "showPercentChange": false, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "green" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-32": { "kind": "Panel", "spec": { "id": 32, 
"title": "Zero Percent Change", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "csvContent": "time, value\n2023-12-13T00:00:00Z, 50\n2023-12-13T00:00:00Z, 100\n2023-12-13T00:00:00Z, 50", "scenarioId": "csv_content" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": true, "textMode": "value", "wideLayout": true }, "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "green" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-6": { "kind": "Panel", "spec": { "id": 6, "title": "", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "alias": "__house_locations", "labels": "", "min": 0, "noise": 5, "scenarioId": "random_walk", "seriesCount": 6, "spread": 100 } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "6m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { "defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, 
"color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } }, "panel-8": { "kind": "Panel", "spec": { "id": 8, "title": "Auto grid", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "A", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "B", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "C", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "D", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "E", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "F", "hidden": false } }, { "kind": "PanelQuery", "spec": { "query": { "kind": "grafana-testdata-datasource", "spec": { "scenarioId": "random_walk" } }, "refId": "G", "hidden": false } } ], "transformations": [], "queryOptions": { "interval": "10m" } } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "11.2.0-pre", "options": { "colorMode": "background", "graphMode": "line", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showPercentChange": false, "sparkline": { "show": true }, "textMode": "auto", "wideLayout": true }, "fieldConfig": { 
"defaults": { "unit": "areaM2", "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "blue" }, { "value": 10, "color": "green" }, { "value": 20, "color": "purple" }, { "value": 40, "color": "orange" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } } }, "layout": { "kind": "RowsLayout", "spec": { "rows": [ { "kind": "RowsLayoutRow", "spec": { "title": "Original", "collapse": false, "layout": { "kind": "GridLayout", "spec": { "items": [ { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 0, "width": 24, "height": 3, "element": { "kind": "ElementReference", "name": "panel-6" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 3, "width": 24, "height": 6, "element": { "kind": "ElementReference", "name": "panel-10" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 9, "width": 24, "height": 6, "element": { "kind": "ElementReference", "name": "panel-14" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 15, "width": 6, "height": 18, "element": { "kind": "ElementReference", "name": "panel-13" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 15, "width": 4, "height": 9, "element": { "kind": "ElementReference", "name": "panel-8" } } }, { "kind": "GridLayoutItem", "spec": { "x": 10, "y": 15, "width": 6, "height": 9, "element": { "kind": "ElementReference", "name": "panel-12" } } }, { "kind": "GridLayoutItem", "spec": { "x": 16, "y": 15, "width": 8, "height": 9, "element": { "kind": "ElementReference", "name": "panel-15" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 24, "width": 18, "height": 9, "element": { "kind": "ElementReference", "name": "panel-16" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 33, "width": 24, "height": 5, "element": { "kind": "ElementReference", "name": "panel-17" } } } ] } } } }, { "kind": "RowsLayoutRow", "spec": { "title": "Metrics Display", "collapse": false, "layout": { "kind": "GridLayout", "spec": { "items": [ { "kind": 
"GridLayoutItem", "spec": { "x": 0, "y": 0, "width": 24, "height": 3, "element": { "kind": "ElementReference", "name": "panel-20" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 3, "width": 24, "height": 6, "element": { "kind": "ElementReference", "name": "panel-21" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 9, "width": 24, "height": 6, "element": { "kind": "ElementReference", "name": "panel-22" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 15, "width": 6, "height": 18, "element": { "kind": "ElementReference", "name": "panel-23" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 15, "width": 4, "height": 9, "element": { "kind": "ElementReference", "name": "panel-24" } } }, { "kind": "GridLayoutItem", "spec": { "x": 10, "y": 15, "width": 6, "height": 9, "element": { "kind": "ElementReference", "name": "panel-25" } } }, { "kind": "GridLayoutItem", "spec": { "x": 16, "y": 15, "width": 8, "height": 9, "element": { "kind": "ElementReference", "name": "panel-26" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 24, "width": 18, "height": 9, "element": { "kind": "ElementReference", "name": "panel-27" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 33, "width": 24, "height": 5, "element": { "kind": "ElementReference", "name": "panel-28" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 38, "width": 4, "height": 8, "element": { "kind": "ElementReference", "name": "panel-29" } } }, { "kind": "GridLayoutItem", "spec": { "x": 4, "y": 38, "width": 4, "height": 8, "element": { "kind": "ElementReference", "name": "panel-32" } } }, { "kind": "GridLayoutItem", "spec": { "x": 8, "y": 38, "width": 8, "height": 8, "element": { "kind": "ElementReference", "name": "panel-30" } } }, { "kind": "GridLayoutItem", "spec": { "x": 16, "y": 38, "width": 8, "height": 8, "element": { "kind": "ElementReference", "name": "panel-31" } } } ] } } } } ] } }, "links": [], "liveNow": false, "preload": false, "tags": [ "gdev", 
"panel-tests", "graph-ng" ], "timeSettings": { "timezone": "", "from": "now-6h", "to": "now", "autoRefresh": "", "autoRefreshIntervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "hideTimepicker": false, "fiscalYearStartMonth": 0 }, "title": "Panel Tests - Stat", "variables": [] }, "status": { "conversion": { "failed": false, "storedVersion": "v0alpha1" } } }
json
github
https://github.com/grafana/grafana
apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-stat/v0alpha1.panel-stat-tests.v42.v2alpha1.json
<!--- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --> # Apache Hadoop Changelog ## Release 3.0.4 - Unreleased (as of 2018-09-02) ### NEW FEATURES: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HDFS-13448](https://issues.apache.org/jira/browse/HDFS-13448) | HDFS Block Placement - Ignore Locality for First Block Replica | Minor | block placement, hdfs-client | BELUGA BEHR | BELUGA BEHR | ### IMPROVEMENTS: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HADOOP-15252](https://issues.apache.org/jira/browse/HADOOP-15252) | Checkstyle version is not compatible with IDEA's checkstyle plugin | Major | . 
| Andras Bokor | Andras Bokor | | [HDFS-13602](https://issues.apache.org/jira/browse/HDFS-13602) | Add checkOperation(WRITE) checks in FSNamesystem | Major | ha, namenode | Erik Krogen | Chao Sun | | [HDFS-13155](https://issues.apache.org/jira/browse/HDFS-13155) | BlockPlacementPolicyDefault.chooseTargetInOrder Not Checking Return Value for NULL | Minor | namenode | BELUGA BEHR | Zsolt Venczel | | [HDFS-13659](https://issues.apache.org/jira/browse/HDFS-13659) | Add more test coverage for contentSummary for snapshottable path | Major | namenode, test | Wei-Chiu Chuang | Wei-Chiu Chuang | | [HADOOP-15499](https://issues.apache.org/jira/browse/HADOOP-15499) | Performance severe drop when running RawErasureCoderBenchmark with NativeRSRawErasureCoder | Major | . | Sammi Chen | Sammi Chen | | [HDFS-13653](https://issues.apache.org/jira/browse/HDFS-13653) | Make dfs.client.failover.random.order a per nameservice configuration | Major | federation | Ekanth Sethuramalingam | Ekanth Sethuramalingam | | [YARN-8394](https://issues.apache.org/jira/browse/YARN-8394) | Improve data locality documentation for Capacity Scheduler | Major | . | Weiwei Yang | Weiwei Yang | | [HDFS-13641](https://issues.apache.org/jira/browse/HDFS-13641) | Add metrics for edit log tailing | Major | metrics | Chao Sun | Chao Sun | | [HDFS-13686](https://issues.apache.org/jira/browse/HDFS-13686) | Add overall metrics for FSNamesystemLock | Major | hdfs, namenode | Lukas Majercak | Lukas Majercak | | [HDFS-13692](https://issues.apache.org/jira/browse/HDFS-13692) | StorageInfoDefragmenter floods log when compacting StorageInfo TreeSet | Minor | . 
| Yiqun Lin | Bharat Viswanadham | | [HDFS-13703](https://issues.apache.org/jira/browse/HDFS-13703) | Avoid allocation of CorruptedBlocks hashmap when no corrupted blocks are hit | Major | performance | Todd Lipcon | Todd Lipcon | | [HADOOP-15554](https://issues.apache.org/jira/browse/HADOOP-15554) | Improve JIT performance for Configuration parsing | Minor | conf, performance | Todd Lipcon | Todd Lipcon | | [HDFS-13714](https://issues.apache.org/jira/browse/HDFS-13714) | Fix TestNameNodePrunesMissingStorages test failures on Windows | Major | hdfs, namenode, test | Lukas Majercak | Lukas Majercak | | [HDFS-13712](https://issues.apache.org/jira/browse/HDFS-13712) | BlockReaderRemote.read() logging improvement | Minor | hdfs-client | Gergo Repas | Gergo Repas | | [HDFS-13719](https://issues.apache.org/jira/browse/HDFS-13719) | Docs around dfs.image.transfer.timeout are misleading | Major | . | Kitti Nanasi | Kitti Nanasi | | [HADOOP-15598](https://issues.apache.org/jira/browse/HADOOP-15598) | DataChecksum calculate checksum is contented on hashtable synchronization | Major | common | Prasanth Jayachandran | Prasanth Jayachandran | | [HADOOP-15609](https://issues.apache.org/jira/browse/HADOOP-15609) | Retry KMS calls when SSLHandshakeException occurs | Major | common, kms | Kitti Nanasi | Kitti Nanasi | | [HADOOP-15612](https://issues.apache.org/jira/browse/HADOOP-15612) | Improve exception when tfile fails to load LzoCodec | Major | . | Gera Shegalov | Gera Shegalov | | [HDFS-11060](https://issues.apache.org/jira/browse/HDFS-11060) | make DEFAULT\_MAX\_CORRUPT\_FILEBLOCKS\_RETURNED configurable | Minor | hdfs | Lantao Jin | Lantao Jin | | [HDFS-13727](https://issues.apache.org/jira/browse/HDFS-13727) | Log full stack trace if DiskBalancer exits with an unhandled exception | Minor | diskbalancer | Stephen O'Donnell | Gabor Bota | | [YARN-8155](https://issues.apache.org/jira/browse/YARN-8155) | Improve ATSv2 client logging in RM and NM publisher | Major | . 
| Rohith Sharma K S | Abhishek Modi | | [HDFS-13728](https://issues.apache.org/jira/browse/HDFS-13728) | Disk Balancer should not fail if volume usage is greater than capacity | Minor | diskbalancer | Stephen O'Donnell | Stephen O'Donnell | | [YARN-8568](https://issues.apache.org/jira/browse/YARN-8568) | Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md | Minor | yarn | Antal Bálint Steinbach | Antal Bálint Steinbach | | [HDFS-13814](https://issues.apache.org/jira/browse/HDFS-13814) | Remove super user privilege requirement for NameNode.getServiceStatus | Minor | namenode | Chao Sun | Chao Sun | | [YARN-8559](https://issues.apache.org/jira/browse/YARN-8559) | Expose mutable-conf scheduler's configuration in RM /scheduler-conf endpoint | Major | resourcemanager | Anna Savarin | Weiwei Yang | | [HDFS-13813](https://issues.apache.org/jira/browse/HDFS-13813) | Exit NameNode if dangling child inode is detected when saving FsImage | Major | hdfs, namenode | Siyao Meng | Siyao Meng | | [HDFS-13821](https://issues.apache.org/jira/browse/HDFS-13821) | RBF: Add dfs.federation.router.mount-table.cache.enable so that users can disable cache | Major | hdfs | Fei Hui | Fei Hui | | [HDFS-13831](https://issues.apache.org/jira/browse/HDFS-13831) | Make block increment deletion number configurable | Major | . 
| Yiqun Lin | Ryan Wu | | [YARN-8051](https://issues.apache.org/jira/browse/YARN-8051) | TestRMEmbeddedElector#testCallbackSynchronization is flakey | Major | test | Robert Kanter | Robert Kanter | ### BUG FIXES: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HDFS-12857](https://issues.apache.org/jira/browse/HDFS-12857) | StoragePolicyAdmin should support schema based path | Major | namenode | Surendra Singh Lilhore | Surendra Singh Lilhore | | [YARN-7835](https://issues.apache.org/jira/browse/YARN-7835) | [Atsv2] Race condition in NM while publishing events if second attempt is launched on the same node | Critical | . | Rohith Sharma K S | Rohith Sharma K S | | [YARN-7773](https://issues.apache.org/jira/browse/YARN-7773) | YARN Federation used Mysql as state store throw exception, Unknown column 'homeSubCluster' in 'field list' | Blocker | federation | Yiran Wu | Yiran Wu | | [HDFS-13636](https://issues.apache.org/jira/browse/HDFS-13636) | Cross-Site Scripting vulnerability in HttpServer2 | Major | . | Haibo Yan | Haibo Yan | | [HDFS-13339](https://issues.apache.org/jira/browse/HDFS-13339) | Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume | Critical | datanode | liaoyuxiangqin | Zsolt Venczel | | [YARN-8382](https://issues.apache.org/jira/browse/YARN-8382) | cgroup file leak in NM | Major | nodemanager | Hu Ziqian | Hu Ziqian | | [HDFS-13545](https://issues.apache.org/jira/browse/HDFS-13545) | "guarded" is misspelled as "gaurded" in FSPermissionChecker.java | Trivial | documentation | Jianchao Jia | Jianchao Jia | | [MAPREDUCE-7103](https://issues.apache.org/jira/browse/MAPREDUCE-7103) | Fix TestHistoryViewerPrinter on windows due to a mismatch line separator | Minor | . 
| Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | | [HADOOP-15217](https://issues.apache.org/jira/browse/HADOOP-15217) | FsUrlConnection does not handle paths with spaces | Major | fs | Joseph Fourny | Zsolt Venczel | | [HDFS-12950](https://issues.apache.org/jira/browse/HDFS-12950) | [oiv] ls will fail in secure cluster | Major | . | Brahma Reddy Battula | Wei-Chiu Chuang | | [YARN-8359](https://issues.apache.org/jira/browse/YARN-8359) | Exclude containermanager.linux test classes on Windows | Major | . | Giovanni Matteo Fumarola | Jason Lowe | | [HDFS-13664](https://issues.apache.org/jira/browse/HDFS-13664) | Refactor ConfiguredFailoverProxyProvider to make inheritance easier | Minor | hdfs-client | Chao Sun | Chao Sun | | [HDFS-12670](https://issues.apache.org/jira/browse/HDFS-12670) | can't renew HDFS tokens with only the hdfs client jar | Critical | . | Thomas Graves | Arpit Agarwal | | [HDFS-13667](https://issues.apache.org/jira/browse/HDFS-13667) | Typo: Marking all "datandoes" as stale | Trivial | namenode | Wei-Chiu Chuang | Nanda kumar | | [YARN-8405](https://issues.apache.org/jira/browse/YARN-8405) | RM zk-state-store.parent-path ACLs has been changed since HADOOP-14773 | Major | . | Rohith Sharma K S | Íñigo Goiri | | [YARN-8404](https://issues.apache.org/jira/browse/YARN-8404) | Timeline event publish need to be async to avoid Dispatcher thread leak in case ATS is down | Blocker | . 
| Rohith Sharma K S | Rohith Sharma K S | | [HDFS-13673](https://issues.apache.org/jira/browse/HDFS-13673) | TestNameNodeMetrics fails on Windows | Minor | test | Zuoming Zhang | Zuoming Zhang | | [HDFS-13676](https://issues.apache.org/jira/browse/HDFS-13676) | TestEditLogRace fails on Windows | Minor | test | Zuoming Zhang | Zuoming Zhang | | [HDFS-13174](https://issues.apache.org/jira/browse/HDFS-13174) | hdfs mover -p /path times out after 20 min | Major | balancer & mover | Istvan Fajth | Istvan Fajth | | [HADOOP-15523](https://issues.apache.org/jira/browse/HADOOP-15523) | Shell command timeout given is in seconds whereas it is taken as millisec while scheduling | Major | . | Bilwa S T | Bilwa S T | | [HDFS-13682](https://issues.apache.org/jira/browse/HDFS-13682) | Cannot create encryption zone after KMS auth token expires | Critical | encryption, kms, namenode | Xiao Chen | Xiao Chen | | [YARN-8444](https://issues.apache.org/jira/browse/YARN-8444) | NodeResourceMonitor crashes on bad swapFree value | Major | . | Jim Brennan | Jim Brennan | | [YARN-8443](https://issues.apache.org/jira/browse/YARN-8443) | Total #VCores in cluster metrics is wrong when CapacityScheduler reserved some containers | Major | webapp | Tao Yang | Tao Yang | | [YARN-8457](https://issues.apache.org/jira/browse/YARN-8457) | Compilation is broken with -Pyarn-ui | Major | webapp | Sunil Govindan | Sunil Govindan | | [YARN-8401](https://issues.apache.org/jira/browse/YARN-8401) | [UI2] new ui is not accessible with out internet connection | Blocker | . | Bibin A Chundatt | Bibin A Chundatt | | [YARN-8451](https://issues.apache.org/jira/browse/YARN-8451) | Multiple NM heartbeat thread created when a slow NM resync with RM | Major | nodemanager | Botong Huang | Botong Huang | | [HADOOP-15548](https://issues.apache.org/jira/browse/HADOOP-15548) | Randomize local dirs | Minor | . 
| Jim Brennan | Jim Brennan | | [HDFS-13702](https://issues.apache.org/jira/browse/HDFS-13702) | Remove HTrace hooks from DFSClient to reduce CPU usage | Major | performance | Todd Lipcon | Todd Lipcon | | [HDFS-13635](https://issues.apache.org/jira/browse/HDFS-13635) | Incorrect message when block is not found | Major | datanode | Wei-Chiu Chuang | Gabor Bota | | [HADOOP-15571](https://issues.apache.org/jira/browse/HADOOP-15571) | Multiple FileContexts created with the same configuration object should be allowed to have different umask | Critical | . | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli | | [HDFS-13121](https://issues.apache.org/jira/browse/HDFS-13121) | NPE when request file descriptors when SC read | Minor | hdfs-client | Gang Xie | Zsolt Venczel | | [YARN-6265](https://issues.apache.org/jira/browse/YARN-6265) | yarn.resourcemanager.fail-fast is used inconsistently | Major | resourcemanager | Daniel Templeton | Yuanbo Liu | | [YARN-8473](https://issues.apache.org/jira/browse/YARN-8473) | Containers being launched as app tears down can leave containers in NEW state | Major | nodemanager | Jason Lowe | Jason Lowe | | [HDFS-13723](https://issues.apache.org/jira/browse/HDFS-13723) | Occasional "Should be different group" error in TestRefreshUserMappings#testGroupMappingRefresh | Major | security, test | Siyao Meng | Siyao Meng | | [HDFS-12837](https://issues.apache.org/jira/browse/HDFS-12837) | Intermittent failure in TestReencryptionWithKMS | Major | encryption, test | Surendra Singh Lilhore | Xiao Chen | | [HDFS-13729](https://issues.apache.org/jira/browse/HDFS-13729) | Fix broken links to RBF documentation | Minor | documentation | jwhitter | Gabor Bota | | [YARN-8515](https://issues.apache.org/jira/browse/YARN-8515) | container-executor can crash with SIGPIPE after nodemanager restart | Major | . 
| Jim Brennan | Jim Brennan | | [YARN-8421](https://issues.apache.org/jira/browse/YARN-8421) | when moving app, activeUsers is increased, even though app does not have outstanding request | Major | . | kyungwan nam | | | [HDFS-13524](https://issues.apache.org/jira/browse/HDFS-13524) | Occasional "All datanodes are bad" error in TestLargeBlock#testLargeBlockSize | Major | . | Wei-Chiu Chuang | Siyao Meng | | [HADOOP-15610](https://issues.apache.org/jira/browse/HADOOP-15610) | Hadoop Docker Image Pip Install Fails | Critical | . | Jack Bearden | Jack Bearden | | [HADOOP-15614](https://issues.apache.org/jira/browse/HADOOP-15614) | TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails | Major | . | Kihwal Lee | Weiwei Yang | | [YARN-8548](https://issues.apache.org/jira/browse/YARN-8548) | AllocationRespose proto setNMToken initBuilder not done | Major | . | Bibin A Chundatt | Bilwa S T | | [YARN-7748](https://issues.apache.org/jira/browse/YARN-7748) | TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events | Major | capacityscheduler | Haibo Chen | Weiwei Yang | | [YARN-8577](https://issues.apache.org/jira/browse/YARN-8577) | Fix the broken anchor in SLS site-doc | Minor | documentation | Weiwei Yang | Weiwei Yang | | [YARN-4606](https://issues.apache.org/jira/browse/YARN-4606) | CapacityScheduler: applications could get starved because computation of #activeUsers considers pending apps | Critical | capacity scheduler, capacityscheduler | Karam Singh | Manikandan R | | [HDFS-13765](https://issues.apache.org/jira/browse/HDFS-13765) | Fix javadoc for FSDirMkdirOp#createParentDirectories | Minor | documentation | Lokesh Jain | Lokesh Jain | | [YARN-8434](https://issues.apache.org/jira/browse/YARN-8434) | Update federation documentation of Nodemanager configurations | Minor | . 
| Bibin A Chundatt | Bibin A Chundatt | | [YARN-8558](https://issues.apache.org/jira/browse/YARN-8558) | NM recovery level db not cleaned up properly on container finish | Critical | . | Bibin A Chundatt | Bibin A Chundatt | | [HADOOP-15637](https://issues.apache.org/jira/browse/HADOOP-15637) | LocalFs#listLocatedStatus does not filter out hidden .crc files | Minor | fs | Erik Krogen | Erik Krogen | | [YARN-8397](https://issues.apache.org/jira/browse/YARN-8397) | Potential thread leak in ActivitiesManager | Major | . | Rohith Sharma K S | Rohith Sharma K S | | [YARN-6966](https://issues.apache.org/jira/browse/YARN-6966) | NodeManager metrics may return wrong negative values when NM restart | Major | . | Yang Wang | Szilard Nemeth | | [HDFS-13786](https://issues.apache.org/jira/browse/HDFS-13786) | EC: Display erasure coding policy for sub-directories is not working | Major | erasure-coding | Souryakanta Dwivedy | Ayush Saxena | | [YARN-8331](https://issues.apache.org/jira/browse/YARN-8331) | Race condition in NM container launched after done | Major | . | Yang Wang | Pradeep Ambati | | [HADOOP-15638](https://issues.apache.org/jira/browse/HADOOP-15638) | KMS Accept Queue Size default changed from 500 to 128 in Hadoop 3.x | Major | kms | Wei-Chiu Chuang | Wei-Chiu Chuang | | [HDFS-13738](https://issues.apache.org/jira/browse/HDFS-13738) | fsck -list-corruptfileblocks has infinite loop if user is not privileged. | Major | tools | Wei-Chiu Chuang | Yuen-Kuei Hsueh | | [HDFS-13758](https://issues.apache.org/jira/browse/HDFS-13758) | DatanodeManager should throw exception if it has BlockRecoveryCommand but the block is not under construction | Major | namenode | Wei-Chiu Chuang | chencan | | [YARN-8614](https://issues.apache.org/jira/browse/YARN-8614) | Fix few annotation typos in YarnConfiguration | Trivial | . 
| Sen Zhao | Sen Zhao | | [HDFS-13746](https://issues.apache.org/jira/browse/HDFS-13746) | Still occasional "Should be different group" failure in TestRefreshUserMappings#testGroupMappingRefresh | Major | . | Siyao Meng | Siyao Meng | | [HDFS-10240](https://issues.apache.org/jira/browse/HDFS-10240) | Race between close/recoverLease leads to missing block | Major | . | zhouyingchao | Jinglun | | [YARN-8612](https://issues.apache.org/jira/browse/YARN-8612) | Fix NM Collector Service Port issue in YarnConfiguration | Major | ATSv2 | Prabha Manepalli | Prabha Manepalli | | [HDFS-13747](https://issues.apache.org/jira/browse/HDFS-13747) | Statistic for list\_located\_status is incremented incorrectly by listStatusIterator | Minor | hdfs-client | Todd Lipcon | Antal Mihalyi | | [HADOOP-15674](https://issues.apache.org/jira/browse/HADOOP-15674) | Test failure TestSSLHttpServer.testExcludedCiphers with TLS\_ECDHE\_RSA\_WITH\_AES\_128\_CBC\_SHA256 cipher suite | Major | common | Gabor Bota | Szilard Nemeth | | [YARN-8640](https://issues.apache.org/jira/browse/YARN-8640) | Restore previous state in container-executor after failure | Major | . | Jim Brennan | Jim Brennan | | [YARN-8679](https://issues.apache.org/jira/browse/YARN-8679) | [ATSv2] If HBase cluster is down for long time, high chances that NM ContainerManager dispatcher get blocked | Major | . | Rohith Sharma K S | Wangda Tan | | [HADOOP-14314](https://issues.apache.org/jira/browse/HADOOP-14314) | The OpenSolaris taxonomy link is dead in InterfaceClassification.md | Major | documentation | Daniel Templeton | Rui Gao | | [YARN-8649](https://issues.apache.org/jira/browse/YARN-8649) | NPE in localizer hearbeat processing if a container is killed while localizing | Major | . | lujie | lujie | | [YARN-8719](https://issues.apache.org/jira/browse/YARN-8719) | Typo correction for yarn configuration in OpportunisticContainers(federation) docs | Major | documentation, federation | Y. SREENIVASULU REDDY | Y. 
SREENIVASULU REDDY | | [HDFS-13731](https://issues.apache.org/jira/browse/HDFS-13731) | ReencryptionUpdater fails with ConcurrentModificationException during processCheckpoints | Major | encryption | Xiao Chen | Zsolt Venczel | | [HADOOP-15705](https://issues.apache.org/jira/browse/HADOOP-15705) | Typo in the definition of "stable" in the interface classification | Minor | . | Daniel Templeton | Daniel Templeton | | [HDFS-13863](https://issues.apache.org/jira/browse/HDFS-13863) | FsDatasetImpl should log DiskOutOfSpaceException | Major | hdfs | Fei Hui | Fei Hui | | [HADOOP-15698](https://issues.apache.org/jira/browse/HADOOP-15698) | KMS log4j is not initialized properly at startup | Major | kms | Kitti Nanasi | Kitti Nanasi | | [HADOOP-15706](https://issues.apache.org/jira/browse/HADOOP-15706) | Typo in compatibility doc: SHOUD -\> SHOULD | Trivial | . | Daniel Templeton | Laszlo Kollar | | [HDFS-13027](https://issues.apache.org/jira/browse/HDFS-13027) | Handle possible NPEs due to deleted blocks in race condition | Major | namenode | Vinayakumar B | Vinayakumar B | ### TESTS: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HDFS-13632](https://issues.apache.org/jira/browse/HDFS-13632) | Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA | Minor | . | Anbang Hu | Anbang Hu | | [HDFS-13651](https://issues.apache.org/jira/browse/HDFS-13651) | TestReencryptionHandler fails on Windows | Minor | . | Anbang Hu | Anbang Hu | | [MAPREDUCE-7102](https://issues.apache.org/jira/browse/MAPREDUCE-7102) | Fix TestJavaSerialization for Windows due a mismatch line separator | Minor | . | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | | [MAPREDUCE-7105](https://issues.apache.org/jira/browse/MAPREDUCE-7105) | Fix TestNativeCollectorOnlyHandler.testOnCall on Windows because of the path format | Minor | . 
| Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | | [HDFS-13652](https://issues.apache.org/jira/browse/HDFS-13652) | Randomize baseDir for MiniDFSCluster in TestBlockScanner | Minor | . | Anbang Hu | Anbang Hu | | [HDFS-13649](https://issues.apache.org/jira/browse/HDFS-13649) | Randomize baseDir for MiniDFSCluster in TestReconstructStripedFile and TestReconstructStripedFileWithRandomECPolicy | Minor | . | Anbang Hu | Anbang Hu | | [HDFS-13650](https://issues.apache.org/jira/browse/HDFS-13650) | Randomize baseDir for MiniDFSCluster in TestDFSStripedInputStream and TestDFSStripedInputStreamWithRandomECPolicy | Minor | . | Anbang Hu | Anbang Hu | | [YARN-8370](https://issues.apache.org/jira/browse/YARN-8370) | Some Node Manager tests fail on Windows due to improper path/file separator | Minor | . | Anbang Hu | Anbang Hu | | [YARN-8422](https://issues.apache.org/jira/browse/YARN-8422) | TestAMSimulator failing with NPE | Minor | . | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | | [HADOOP-15532](https://issues.apache.org/jira/browse/HADOOP-15532) | TestBasicDiskValidator fails with NoSuchFileException | Minor | . | Íñigo Goiri | Giovanni Matteo Fumarola | | [HDFS-13563](https://issues.apache.org/jira/browse/HDFS-13563) | TestDFSAdminWithHA times out on Windows | Minor | . | Anbang Hu | Lukas Majercak | | [HDFS-13681](https://issues.apache.org/jira/browse/HDFS-13681) | Fix TestStartup.testNNFailToStartOnReadOnlyNNDir test failure on Windows | Major | test | Xiao Liang | Xiao Liang | ### SUB-TASKS: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HDFS-12978](https://issues.apache.org/jira/browse/HDFS-12978) | Fine-grained locking while consuming journal stream. 
| Major | namenode | Konstantin Shvachko | Konstantin Shvachko | | [HDFS-13637](https://issues.apache.org/jira/browse/HDFS-13637) | RBF: Router fails when threadIndex (in ConnectionPool) wraps around Integer.MIN\_VALUE | Critical | federation | CR Hota | CR Hota | | [HDFS-13281](https://issues.apache.org/jira/browse/HDFS-13281) | Namenode#createFile should be /.reserved/raw/ aware. | Critical | encryption | Rushabh S Shah | Rushabh S Shah | | [YARN-4677](https://issues.apache.org/jira/browse/YARN-4677) | RMNodeResourceUpdateEvent update from scheduler can lead to race condition | Major | graceful, resourcemanager, scheduler | Brook Zhou | Wilfred Spiegelenburg | | [HADOOP-15506](https://issues.apache.org/jira/browse/HADOOP-15506) | Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks | Minor | fs/azure | Esfandiar Manii | Esfandiar Manii | | [HADOOP-15529](https://issues.apache.org/jira/browse/HADOOP-15529) | ContainerLaunch#testInvalidEnvVariableSubstitutionType is not supported in Windows | Minor | . | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | | [HADOOP-15533](https://issues.apache.org/jira/browse/HADOOP-15533) | Make WASB listStatus messages consistent | Trivial | fs/azure | Esfandiar Manii | Esfandiar Manii | | [HADOOP-15458](https://issues.apache.org/jira/browse/HADOOP-15458) | TestLocalFileSystem#testFSOutputStreamBuilder fails on Windows | Minor | test | Xiao Liang | Xiao Liang | | [HDFS-13726](https://issues.apache.org/jira/browse/HDFS-13726) | RBF: Fix RBF configuration links | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma | | [HDFS-13475](https://issues.apache.org/jira/browse/HDFS-13475) | RBF: Admin cannot enforce Router enter SafeMode | Major | . 
| Wei Yan | Chao Sun | | [HDFS-13733](https://issues.apache.org/jira/browse/HDFS-13733) | RBF: Add Web UI configurations and descriptions to RBF document | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma | | [HDFS-13743](https://issues.apache.org/jira/browse/HDFS-13743) | RBF: Router throws NullPointerException due to the invalid initialization of MountTableResolver | Major | . | Takanobu Asanuma | Takanobu Asanuma | | [HDFS-13750](https://issues.apache.org/jira/browse/HDFS-13750) | RBF: Router ID in RouterRpcClient is always null | Major | . | Takanobu Asanuma | Takanobu Asanuma | | [YARN-8129](https://issues.apache.org/jira/browse/YARN-8129) | Improve error message for invalid value in fields attribute | Minor | ATSv2 | Charan Hebri | Abhishek Modi | | [HDFS-13848](https://issues.apache.org/jira/browse/HDFS-13848) | Refactor NameNode failover proxy providers | Major | ha, hdfs-client | Konstantin Shvachko | Konstantin Shvachko | ### OTHER: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HDFS-13788](https://issues.apache.org/jira/browse/HDFS-13788) | Update EC documentation about rack fault tolerance | Major | documentation, erasure-coding | Xiao Chen | Kitti Nanasi |
unknown
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.4/CHANGELOG.3.0.4.md
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests server and client side metadata API."""

import unittest
import weakref

import grpc
from grpc import _channel
from tests.unit import test_common
from tests.unit.framework.common import test_constants

# Channel options that customize the user-agent string; the server-side
# validator below checks that both fragments show up in the received
# 'user-agent' metadata value.
_CHANNEL_ARGS = (('grpc.primary_user_agent', 'primary-agent'),
                 ('grpc.secondary_user_agent', 'secondary-agent'))

# Payloads are opaque to this test; only metadata transmission is verified.
_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x00\x00\x00'

# One method path per RPC cardinality, dispatched by _GenericHandler.
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'

# Metadata sent by the client.  Keys/values are deliberately a mix of bytes
# and text to exercise normalization; '-bin' keys carry binary values.
_INVOCATION_METADATA = (
    (
        b'invocation-md-key',
        u'invocation-md-value',
    ),
    (
        u'invocation-md-key-bin',
        b'\x00\x01',
    ),
)

# What the server should observe: keys and text values normalized to str,
# '-bin' values left as bytes.
_EXPECTED_INVOCATION_METADATA = (
    (
        'invocation-md-key',
        'invocation-md-value',
    ),
    (
        'invocation-md-key-bin',
        b'\x00\x01',
    ),
)

# Initial metadata sent by the server handlers (mixed bytes/text)...
_INITIAL_METADATA = ((b'initial-md-key', u'initial-md-value'),
                     (u'initial-md-key-bin', b'\x00\x02'))

# ...and its normalized form as the client should receive it.
_EXPECTED_INITIAL_METADATA = (
    (
        'initial-md-key',
        'initial-md-value',
    ),
    (
        'initial-md-key-bin',
        b'\x00\x02',
    ),
)

# Trailing metadata already uses native str keys/values, so the expected
# form is the same object.
_TRAILING_METADATA = (
    (
        'server-trailing-md-key',
        'server-trailing-md-value',
    ),
    (
        'server-trailing-md-key-bin',
        b'\x00\x03',
    ),
)

_EXPECTED_TRAILING_METADATA = _TRAILING_METADATA


def _user_agent(metadata):
    """Return the value of the 'user-agent' key in *metadata*.

    Raises:
        KeyError: if no 'user-agent' entry is present.
    """
    for key, val in metadata:
        if key == 'user-agent':
            return val
    raise KeyError('No user agent!')


def validate_client_metadata(test, servicer_context):
    """Assert (via *test*) that the server saw the expected client metadata.

    Checks both the explicit invocation metadata and the composed
    user-agent string (primary agent + core agent + secondary agent).
    NOTE(review): reads the private _channel._USER_AGENT constant — may
    break if grpc's internal module layout changes.
    """
    invocation_metadata = servicer_context.invocation_metadata()
    test.assertTrue(
        test_common.metadata_transmitted(_EXPECTED_INVOCATION_METADATA,
                                         invocation_metadata))
    user_agent = _user_agent(invocation_metadata)
    test.assertTrue(
        user_agent.startswith('primary-agent ' + _channel._USER_AGENT))
    test.assertTrue(user_agent.endswith('secondary-agent'))


def handle_unary_unary(test, request, servicer_context):
    """Unary-unary handler: validate client metadata, send server metadata."""
    validate_client_metadata(test, servicer_context)
    servicer_context.send_initial_metadata(_INITIAL_METADATA)
    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
    return _RESPONSE


def handle_unary_stream(test, request, servicer_context):
    """Unary-stream handler: as above, then yield STREAM_LENGTH responses."""
    validate_client_metadata(test, servicer_context)
    servicer_context.send_initial_metadata(_INITIAL_METADATA)
    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
    for _ in range(test_constants.STREAM_LENGTH):
        yield _RESPONSE


def handle_stream_unary(test, request_iterator, servicer_context):
    """Stream-unary handler: drain the request stream, return one response."""
    validate_client_metadata(test, servicer_context)
    servicer_context.send_initial_metadata(_INITIAL_METADATA)
    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
    # TODO(issue:#6891) We should be able to remove this loop
    for request in request_iterator:
        pass
    return _RESPONSE


def handle_stream_stream(test, request_iterator, servicer_context):
    """Stream-stream handler: echo one response per received request."""
    validate_client_metadata(test, servicer_context)
    servicer_context.send_initial_metadata(_INITIAL_METADATA)
    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
    # TODO(issue:#6891) We should be able to remove this loop,
    # and replace with return; yield
    for request in request_iterator:
        yield _RESPONSE


class _MethodHandler(grpc.RpcMethodHandler):
    """RpcMethodHandler that wires exactly one of the four handlers above,
    selected by the (request_streaming, response_streaming) pair."""

    def __init__(self, test, request_streaming, response_streaming):
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        # No custom serialization: payloads pass through as raw bytes.
        self.request_deserializer = None
        self.response_serializer = None
        self.unary_unary = None
        self.unary_stream = None
        self.stream_unary = None
        self.stream_stream = None
        # The lambdas close over *test* so the handler can make assertions.
        if self.request_streaming and self.response_streaming:
            self.stream_stream = lambda x, y: handle_stream_stream(test, x,
                                                                   y)
        elif self.request_streaming:
            self.stream_unary = lambda x, y: handle_stream_unary(test, x, y)
        elif self.response_streaming:
            self.unary_stream = lambda x, y: handle_unary_stream(test, x, y)
        else:
            self.unary_unary = lambda x, y: handle_unary_unary(test, x, y)


class _GenericHandler(grpc.GenericRpcHandler):
    """Routes the four method paths to appropriately-configured handlers."""

    def __init__(self, test):
        self._test = test

    def service(self, handler_call_details):
        # Returning None makes the server reject unknown method paths.
        if handler_call_details.method == _UNARY_UNARY:
            return _MethodHandler(self._test, False, False)
        elif handler_call_details.method == _UNARY_STREAM:
            return _MethodHandler(self._test, False, True)
        elif handler_call_details.method == _STREAM_UNARY:
            return _MethodHandler(self._test, True, False)
        elif handler_call_details.method == _STREAM_STREAM:
            return _MethodHandler(self._test, True, True)
        else:
            return None


class MetadataTest(unittest.TestCase):
    """End-to-end checks that metadata survives transmission in both
    directions for all four RPC cardinalities."""

    def setUp(self):
        self._server = test_common.test_server()
        # weakref.proxy avoids a reference cycle between the server's
        # handlers and the TestCase instance.
        self._server.add_generic_rpc_handlers((_GenericHandler(
            weakref.proxy(self)),))
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()
        self._channel = grpc.insecure_channel(
            'localhost:%d' % port, options=_CHANNEL_ARGS)

    def tearDown(self):
        self._server.stop(0)

    def testUnaryUnary(self):
        """Client sees server initial/trailing metadata on a unary call."""
        multi_callable = self._channel.unary_unary(_UNARY_UNARY)
        unused_response, call = multi_callable.with_call(
            _REQUEST, metadata=_INVOCATION_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                             call.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                             call.trailing_metadata()))

    def testUnaryStream(self):
        """Same check for a response-streaming call; the stream must be
        drained before trailing metadata is available."""
        multi_callable = self._channel.unary_stream(_UNARY_STREAM)
        call = multi_callable(_REQUEST, metadata=_INVOCATION_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                             call.initial_metadata()))
        for _ in call:
            pass
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                             call.trailing_metadata()))

    def testStreamUnary(self):
        """Same check for a request-streaming call."""
        multi_callable = self._channel.stream_unary(_STREAM_UNARY)
        unused_response, call = multi_callable.with_call(
            iter([_REQUEST] * test_constants.STREAM_LENGTH),
            metadata=_INVOCATION_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                             call.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                             call.trailing_metadata()))

    def testStreamStream(self):
        """Same check for a bidirectionally-streaming call."""
        multi_callable = self._channel.stream_stream(_STREAM_STREAM)
        call = multi_callable(
            iter([_REQUEST] * test_constants.STREAM_LENGTH),
            metadata=_INVOCATION_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                             call.initial_metadata()))
        for _ in call:
            pass
        self.assertTrue(
            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                             call.trailing_metadata()))


if __name__ == '__main__':
    unittest.main(verbosity=2)
unknown
codeparrot/codeparrot-clean
import asyncio import functools def unlock(lock): print('callback releasing lock') lock.release() async def coro1(lock): print('coro1 waiting for the lock') with await lock: print('coro1 acquired lock') print('coro1 released lock') async def coro2(lock): print('coro2 waiting for the lock') await lock try: print('coro2 acquired lock') finally: print('coro2 released lock') lock.release() async def main(loop): # Create and acquire a shared lock. lock = asyncio.Lock() print('acquiring the lock before starting coroutines') await lock.acquire() print('lock acquired: {}'.format(lock.locked())) # Schedule a callback to unlock the lock. loop.call_later(0.1, functools.partial(unlock, lock)) # Run the coroutines that want to use the lock. print('waiting for coroutines') await asyncio.wait([coro1(lock), coro2(lock)]), event_loop = asyncio.get_event_loop() try: event_loop.run_until_complete(main(event_loop)) finally: event_loop.close()
unknown
codeparrot/codeparrot-clean
import os

from setuptools import setup, find_packages

# One-line summary used (stripped) as the package description.
description = """
Simple interface to Raspberry Pi Plantpot Greenhouse add-on board
"""


def read(fname):
    """Return the contents of *fname*, resolved relative to this setup.py.

    Used below to load README.rst as the long description.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original open(...).read() never closed the file.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()


setup(
    name="rpi-greenhouse",
    version="0.4.1",
    author="Ben Nuttall",
    author_email="ben@raspberrypi.org",
    description=description.strip(),
    long_description=read('README.rst'),
    license="BSD",
    keywords=[
        "raspberry pi",
        "greenhouse",
        "garden",
        "plant",
        "sensor",
    ],
    url="https://github.com/bennuttall/rpi-greenhouse",
    packages=find_packages(),
    scripts=[
        'scripts/greenhouse-logger',
        'scripts/greenhouse-indicator',
        'scripts/greenhouse-clear-db',
    ],
    install_requires=[
        "RPi.GPIO",
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 2",
        "Topic :: Home Automation",
        "Topic :: Education",
        "License :: OSI Approved :: BSD License",
    ],
)
unknown
codeparrot/codeparrot-clean
/*
 * Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package org.jetbrains.kotlin.analysis.api.impl.base.test.cases.annotations

import org.jetbrains.kotlin.analysis.api.KaSession
import org.jetbrains.kotlin.analysis.api.annotations.KaAnnotationList
import org.jetbrains.kotlin.analysis.api.symbols.markers.KaAnnotatedSymbol
import org.jetbrains.kotlin.analysis.test.framework.base.AbstractAnalysisApiBasedTest
import org.jetbrains.kotlin.analysis.test.framework.projectStructure.KtTestModule
import org.jetbrains.kotlin.analysis.test.framework.services.expressionMarkerProvider
import org.jetbrains.kotlin.psi.KtDeclaration
import org.jetbrains.kotlin.psi.KtFile
import org.jetbrains.kotlin.test.services.TestServices
import org.jetbrains.kotlin.test.services.assertions

/**
 * Base class for golden-file tests that render the annotations of the declaration at the caret.
 *
 * The test resolves the bottommost [KtDeclaration] at the caret in the main file, renders its
 * symbol's annotations via [renderAnnotations], and compares the result against the expected
 * test output file.
 */
abstract class AbstractAnalysisApiAnnotationsOnDeclarationsTest : AbstractAnalysisApiBasedTest() {
    /**
     * Renders [annotations] to the text compared against the golden file.
     * Subclasses may override to customize the rendering
     * (delegates to `TestAnnotationRenderer` by default — presumably a same-package helper).
     */
    open fun renderAnnotations(analysisSession: KaSession, annotations: KaAnnotationList): String {
        return TestAnnotationRenderer.renderAnnotations(analysisSession, annotations)
    }

    override fun doTestByMainFile(mainFile: KtFile, mainModule: KtTestModule, testServices: TestServices) {
        // Bottommost element: the innermost declaration containing the caret.
        val ktDeclaration = testServices.expressionMarkerProvider.getBottommostElementOfTypeAtCaret<KtDeclaration>(mainFile)
        val actual = copyAwareAnalyzeForTest(ktDeclaration) { contextDeclaration ->
            // The cast is expected to hold for any declaration a test points at;
            // a non-annotatable symbol would fail the test with a CCE.
            val declarationSymbol = contextDeclaration.symbol as KaAnnotatedSymbol
            buildString {
                // Header line identifies the PSI class and declaration name,
                // followed by the rendered annotation list.
                appendLine("${KtDeclaration::class.simpleName}: ${contextDeclaration::class.simpleName} ${contextDeclaration.name}")
                append(renderAnnotations(useSiteSession, declarationSymbol.annotations))
            }
        }
        testServices.assertions.assertEqualsToTestOutputFile(actual)
    }
}
kotlin
github
https://github.com/JetBrains/kotlin
analysis/analysis-api-impl-base/testFixtures/org/jetbrains/kotlin/analysis/api/impl/base/test/cases/annotations/AbstractAnalysisApiAnnotationsOnDeclarationsTest.kt
#!/usr/bin/python

# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Test gyp_to_android.py
"""

import os
import shutil
import sys
import tempfile
import test_variables
import unittest

sys.path.append(test_variables.ANDROID_DIR)

import gyp_gen.android_framework_gyp

# File suffixes used to populate the temporary directory.  Only files ending
# in GYPD_SUFFIX are expected to be removed by clean_gypd_files().
GYPD_SUFFIX = ".gypd"
GYP_SUFFIX = ".gyp"
GYPI_SUFFIX = ".gypi"
OTHER_SUFFIX = ".txt"


class CleanGypdTest(unittest.TestCase):
  """Tests for gyp_gen.android_framework_gyp.clean_gypd_files()."""

  def setUp(self):
    self.__tmp_dir = tempfile.mkdtemp()
    self.__num_files = 10
    # Fill the dir with four types of files. .gypd files should be deleted by
    # clean_gypd_files(), while the rest should be left alone.
    for i in range(self.__num_files):
      self.create_file('%s%s' % (str(i), GYPD_SUFFIX))
      self.create_file('%s%s' % (str(i), GYPI_SUFFIX))
      self.create_file('%s%s' % (str(i), GYP_SUFFIX))
      self.create_file('%s%s' % (str(i), OTHER_SUFFIX))

  def create_file(self, basename):
    """Create a file named 'basename' in self.__tmp_dir.
    """
    fd, tmp_name = tempfile.mkstemp(dir=self.__tmp_dir)
    # mkstemp returns an *open* OS-level file descriptor; close it so each
    # call does not leak a descriptor (the original never closed it, leaking
    # 40 fds per setUp()).
    os.close(fd)
    os.rename(tmp_name, os.path.join(self.__tmp_dir, basename))
    self.assert_file_exists(basename)

  def assert_file_exists(self, basename):
    """Assert that 'basename' exists in self.__tmp_dir.
    """
    full_name = os.path.join(self.__tmp_dir, basename)
    self.assertTrue(os.path.exists(full_name))

  def assert_file_does_not_exist(self, basename):
    """Assert that 'basename' does not exist in self.__tmp_dir.
    """
    full_name = os.path.join(self.__tmp_dir, basename)
    self.assertFalse(os.path.exists(full_name))

  def test_clean(self):
    """Test that clean_gypd_files() deletes .gypd files, and leaves others.
    """
    gyp_gen.android_framework_gyp.clean_gypd_files(self.__tmp_dir)
    for i in range(self.__num_files):
      self.assert_file_exists('%s%s' % (str(i), GYPI_SUFFIX))
      self.assert_file_exists('%s%s' % (str(i), GYP_SUFFIX))
      self.assert_file_exists('%s%s' % (str(i), OTHER_SUFFIX))
      # Only the GYPD files should have been deleted.
      self.assert_file_does_not_exist('%s%s' % (str(i), GYPD_SUFFIX))

  def tearDown(self):
    # Remove the whole scratch directory and everything created in it.
    shutil.rmtree(self.__tmp_dir)


def main():
  """Run the CleanGypdTest suite with verbose output."""
  loader = unittest.TestLoader()
  suite = loader.loadTestsFromTestCase(CleanGypdTest)
  unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == "__main__":
  main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
from ...exceptions import ReadError
from .readers import *
from pkg_resources import resource_stream  # @UnresolvedImport
from xml.dom import minidom
import logging

__all__ = ['INTEGER', 'UINTEGER', 'FLOAT', 'STRING', 'UNICODE', 'DATE', 'MASTER',
           'BINARY', 'SPEC_TYPES', 'READERS', 'Element', 'MasterElement', 'parse',
           'parse_element', 'get_matroska_specs']
logger = logging.getLogger(__name__)

# EBML types
INTEGER, UINTEGER, FLOAT, STRING, UNICODE, DATE, MASTER, BINARY = range(8)

# Spec types to EBML types mapping
SPEC_TYPES = {
    'integer': INTEGER,
    'uinteger': UINTEGER,
    'float': FLOAT,
    'string': STRING,
    'utf-8': UNICODE,
    'date': DATE,
    'master': MASTER,
    'binary': BINARY
}

# Readers to use per EBML type.  MASTER has no entry here: master elements
# hold child elements and are handled recursively (see MasterElement.load,
# which delegates to parse()).
READERS = {
    INTEGER: read_element_integer,
    UINTEGER: read_element_uinteger,
    FLOAT: read_element_float,
    STRING: read_element_string,
    UNICODE: read_element_unicode,
    DATE: read_element_date,
    BINARY: read_element_binary
}


class Element(object):
    """Base object of EBML

    :param int id: id of the element, best represented as hexadecimal
        (0x18538067 for Matroska Segment element)
    :param type: type of the element
    :type type: :data:`INTEGER`, :data:`UINTEGER`, :data:`FLOAT`, :data:`STRING`,
        :data:`UNICODE`, :data:`DATE`, :data:`MASTER` or :data:`BINARY`
    :param string name: name of the element
    :param int level: level of the element
    :param int position: position of element's data
    :param int size: size of element's data
    :param data: data as read by the corresponding :data:`READERS`

    """
    def __init__(self, id=None, type=None, name=None, level=None, position=None,
                 size=None, data=None):  # @ReservedAssignment
        # Plain data holder: every attribute mirrors a constructor argument.
        self.id = id
        self.type = type
        self.name = name
        self.level = level
        self.position = position
        self.size = size
        self.data = data

    def __repr__(self):
        return '<%s [%s, %r]>' % (self.__class__.__name__, self.name, self.data)


class MasterElement(Element):
    """Element of type :data:`MASTER` that has a list of :class:`Element`
    as its data

    :param int id: id of the element, best represented as hexadecimal
        (0x18538067 for Matroska Segment element)
    :param string name: name of the element
    :param int level: level of the element
    :param int position: position of element's data
    :param int size: size of element's data
    :param data: child elements
    :type data: list of :class:`Element`

    :class:`MasterElement` implements some magic methods to ease manipulation.
    Thus, a MasterElement supports the `in` keyword to test for the presence of
    a child element by its name and gives access to it with a container
    getter::

        >>> ebml_element = parse(open('test1.mkv', 'rb'), get_matroska_specs())[0]
        >>> 'EBMLVersion' in ebml_element
        False
        >>> 'DocType' in ebml_element
        True
        >>> ebml_element['DocType']
        Element(DocType, u'matroska')

    """
    def __init__(self, id=None, name=None, level=None, position=None, size=None,
                 data=None):  # @ReservedAssignment
        # A master element always has type MASTER; everything else passes
        # through to the Element base constructor.
        super(MasterElement, self).__init__(id, MASTER, name, level, position,
                                            size, data)

    def load(self, stream, specs, ignore_element_types=None,
             ignore_element_names=None, max_level=None):
        """Load children :class:`Elements <Element>` with level lower or equal
        to the `max_level` from the `stream` according to the `specs`

        :param stream: file-like object from which to read
        :param dict specs: see :ref:`specs`
        :param int max_level: maximum level for children elements
        :param list ignore_element_types: list of element types to ignore
        :param list ignore_element_names: list of element names to ignore
        :param int max_level: maximum level of elements

        """
        # Recursive parse bounded by this element's size; replaces any
        # previously-loaded children.
        self.data = parse(stream, specs, self.size, ignore_element_types,
                          ignore_element_names, max_level)

    def get(self, name, default=None):
        """Convenience method for
        ``master_element[name].data if name in master_element else default``

        :param string name: the name of the child to get
        :param default: default value if `name` is not in the
            :class:`MasterElement`
        :return: the data of the child :class:`Element` or `default`

        """
        if name not in self:
            return default
        element = self[name]
        if element.type ==
MASTER: raise ValueError('%s is a MasterElement' % name) return element.data def __getitem__(self, key): if isinstance(key, int): return self.data[key] children = [e for e in self.data if e.name == key] if not children: raise KeyError(key) if len(children) > 1: raise KeyError('More than 1 child with key %s (%d)' % (key, len(children))) return children[0] def __contains__(self, item): return len([e for e in self.data if e.name == item]) > 0 def __iter__(self): return iter(self.data) def parse(stream, specs, size=None, ignore_element_types=None, ignore_element_names=None, max_level=None): """Parse a stream for `size` bytes according to the `specs` :param stream: file-like object from which to read :param size: maximum number of bytes to read, None to read all the stream :type size: int or None :param dict specs: see :ref:`specs` :param list ignore_element_types: list of element types to ignore :param list ignore_element_names: list of element names to ignore :param int max_level: maximum level of elements :return: parsed data as a tree of :class:`~enzyme.parsers.ebml.core.Element` :rtype: list .. note:: If `size` is reached in a middle of an element, reading will continue until the element is fully parsed. 
""" ignore_element_types = ignore_element_types if ignore_element_types is not None else [] ignore_element_names = ignore_element_names if ignore_element_names is not None else [] start = stream.tell() elements = [] while size is None or stream.tell() - start < size: try: element = parse_element(stream, specs) if element is None: continue logger.debug('%s %s parsed', element.__class__.__name__, element.name) if element.type in ignore_element_types or element.name in ignore_element_names: logger.info('%s %s ignored', element.__class__.__name__, element.name) if element.type == MASTER: stream.seek(element.size, 1) continue if element.type == MASTER: if max_level is not None and element.level >= max_level: logger.info('Maximum level %d reached for children of %s %s', max_level, element.__class__.__name__, element.name) stream.seek(element.size, 1) else: logger.debug('Loading child elements for %s %s with size %d', element.__class__.__name__, element.name, element.size) element.data = parse(stream, specs, element.size, ignore_element_types, ignore_element_names, max_level) elements.append(element) except ReadError: if size is not None: raise break return elements def parse_element(stream, specs, load_children=False, ignore_element_types=None, ignore_element_names=None, max_level=None): """Extract a single :class:`Element` from the `stream` according to the `specs` :param stream: file-like object from which to read :param dict specs: see :ref:`specs` :param bool load_children: load children elements if the parsed element is a :class:`MasterElement` :param list ignore_element_types: list of element types to ignore :param list ignore_element_names: list of element names to ignore :param int max_level: maximum level for children elements :return: the parsed element :rtype: :class:`Element` """ ignore_element_types = ignore_element_types if ignore_element_types is not None else [] ignore_element_names = ignore_element_names if ignore_element_names is not None else [] 
element_id = read_element_id(stream) if element_id is None: raise ReadError('Cannot read element id') element_size = read_element_size(stream) if element_size is None: raise ReadError('Cannot read element size') if element_id not in specs: logger.error('Element with id 0x%x is not in the specs' % element_id) stream.seek(element_size, 1) return None element_type, element_name, element_level = specs[element_id] if element_type == MASTER: element = MasterElement(element_id, element_name, element_level, stream.tell(), element_size) if load_children: element.data = parse(stream, specs, element.size, ignore_element_types, ignore_element_names, max_level) else: element = Element(element_id, element_type, element_name, element_level, stream.tell(), element_size) element.data = READERS[element_type](stream, element_size) return element def get_matroska_specs(webm_only=False): """Get the Matroska specs :param bool webm_only: load *only* WebM specs :return: the specs in the appropriate format. See :ref:`specs` :rtype: dict """ specs = {} with resource_stream(__name__, 'specs/matroska.xml') as resource: xmldoc = minidom.parse(resource) for element in xmldoc.getElementsByTagName('element'): if not webm_only or element.hasAttribute('webm') and element.getAttribute('webm') == '1': specs[int(element.getAttribute('id'), 16)] = (SPEC_TYPES[element.getAttribute('type')], element.getAttribute('name'), int(element.getAttribute('level'))) return specs
unknown
codeparrot/codeparrot-clean
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import java.lang.reflect.Method;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionException;

import javax.security.auth.Subject;

/**
 * This class implements reflective access to the deprecated-for-removal methods of AccessController and Subject.
 * <p>Instantiating this class may fail if any of the required classes or methods are not found.
 * Method invocations for this class may fail with {@link UnsupportedOperationException} if all methods are found,
 * but the operation is not permitted to be invoked.
 * <p>This class is expected to be instantiable in JRE >=8 until the removal finally takes place.
 */
@SuppressWarnings("unchecked")
class LegacyStrategy implements SecurityManagerCompatibility {

    // Reflective handles resolved once at construction time; the classes/methods are
    // looked up by name so this file compiles even after they are removed from the JDK.
    private final Method doPrivileged;   // AccessController.doPrivileged(PrivilegedAction)
    private final Method getContext;     // AccessController.getContext()
    private final Method getSubject;     // Subject.getSubject(AccessControlContext)
    private final Method doAs;           // Subject.doAs(Subject, PrivilegedExceptionAction)

    /**
     * Resolve the deprecated AccessController/Subject methods via the provided class loader.
     *
     * @throws ClassNotFoundException if AccessController or AccessControlContext no longer exist
     * @throws NoSuchMethodException if any of the four required methods is missing
     */
    // Visible for testing
    LegacyStrategy(ReflectiveStrategy.Loader loader) throws ClassNotFoundException, NoSuchMethodException {
        Class<?> accessController = loader.loadClass("java.security.AccessController");
        doPrivileged = accessController.getDeclaredMethod("doPrivileged", PrivilegedAction.class);
        getContext = accessController.getDeclaredMethod("getContext");
        Class<?> accessControlContext = loader.loadClass("java.security.AccessControlContext");
        Class<?> subject = loader.loadClass(Subject.class.getName());
        getSubject = subject.getDeclaredMethod("getSubject", accessControlContext);
        // Note that the Subject class isn't deprecated or removed, so reference it as an argument type.
        // This allows for mocking out the method implementation while still accepting Subject instances as arguments.
        doAs = subject.getDeclaredMethod("doAs", Subject.class, PrivilegedExceptionAction.class);
    }

    /** Delegates to AccessController.doPrivileged(PrivilegedAction) via reflection. */
    @Override
    public <T> T doPrivileged(PrivilegedAction<T> action) {
        return (T) ReflectiveStrategy.invoke(doPrivileged, null, action);
    }

    /**
     * @return the result of AccessController.getContext(), of type AccessControlContext
     */
    private Object getContext() {
        return ReflectiveStrategy.invoke(getContext, null);
    }

    /**
     * @param context The current AccessControlContext
     * @return The result of Subject.getSubject(AccessControlContext)
     */
    private Subject getSubject(Object context) {
        return (Subject) ReflectiveStrategy.invoke(getSubject, null, context);
    }

    /** @return the Subject attached to the current AccessControlContext, possibly null. */
    @Override
    public Subject current() {
        return getSubject(getContext());
    }

    /**
     * @return The result of Subject.doAs(Subject, PrivilegedExceptionAction)
     */
    private <T> T doAs(Subject subject, PrivilegedExceptionAction<T> action) throws PrivilegedActionException {
        return (T) ReflectiveStrategy.invokeChecked(doAs, PrivilegedActionException.class, null, subject, action);
    }

    /** Runs the callable as the given subject, unwrapping the PrivilegedActionException cause. */
    @Override
    public <T> T callAs(Subject subject, Callable<T> callable) throws CompletionException {
        try {
            return doAs(subject, callable::call);
        } catch (PrivilegedActionException e) {
            throw new CompletionException(e.getCause());
        }
    }
}
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/internals/LegacyStrategy.java
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations import os import re import time from contextlib import suppress import psutil import pytest from psutil import Error, NoSuchProcess from rich.console import Console from airflow.cli import cli_parser from airflow.utils.cli import setup_locations console = Console(width=400, color_system="standard") class _CommonCLIUvicornTestClass: main_process_regexp: str = "process_to_look_for" @pytest.fixture(autouse=True) def _make_parser(self): self.parser = cli_parser.get_parser() def _check_processes(self, ignore_running: bool): if self.main_process_regexp == "process_to_look_for": raise Exception( "The main_process_regexp must be set in the subclass to something different than" " 'process_to_look_for'" ) # Confirm that nmain procss hasn't been launched. # pgrep returns exit status 1 if no process matched. # Use more specific regexps (^) to avoid matching pytest run when running specific method. 
# For instance, we want to be able to do: pytest -k 'uvicorn' airflow_internal_api_pids = self._find_all_processes(self.main_process_regexp) uvicorn_pids = self._find_all_processes(r"uvicorn: ") if airflow_internal_api_pids or uvicorn_pids: console.print("[blue]Some processes are still running") for pid in uvicorn_pids + airflow_internal_api_pids: with suppress(NoSuchProcess): console.print(psutil.Process(pid).as_dict(attrs=["pid", "name", "cmdline"])) console.print("[blue]Here list of processes ends") if airflow_internal_api_pids: console.print(f"[yellow]Forcefully killing {self.main_process_regexp} processes") for pid in airflow_internal_api_pids: with suppress(NoSuchProcess): psutil.Process(pid).kill() if uvicorn_pids: console.print("[yellow]Forcefully killing all uvicorn processes") for pid in uvicorn_pids: with suppress(NoSuchProcess): psutil.Process(pid).kill() if not ignore_running: raise AssertionError( "Background processes are running that prevent the test from passing successfully." 
) @pytest.fixture(autouse=True) def _cleanup(self): self._check_processes(ignore_running=True) self._clean_pidfiles() yield self._check_processes(ignore_running=True) self._clean_pidfiles() def _clean_pidfiles(self): pidfile_internal_api = setup_locations("internal-api")[0] pidfile_monitor = setup_locations("internal-api-monitor")[0] if os.path.exists(pidfile_internal_api): console.print(f"[blue]Removing pidfile{pidfile_internal_api}") os.remove(pidfile_internal_api) if os.path.exists(pidfile_monitor): console.print(f"[blue]Removing pidfile{pidfile_monitor}") os.remove(pidfile_monitor) def _wait_pidfile(self, pidfile): start_time = time.monotonic() while True: try: with open(pidfile) as file: return int(file.read()) except Exception: if start_time - time.monotonic() > 60: raise console.print(f"[blue]Waiting for pidfile {pidfile} to be created ...") time.sleep(1) def _find_process(self, regexp_match: str, print_found_process=False) -> int | None: """ Find if process is running by matching its command line with a regexp. :param regexp_match: regexp to match the command line of the process :param print_found_process: if True, print the process found :return: PID of the process if found, None otherwise """ matcher = re.compile(regexp_match) for proc in psutil.process_iter(): try: proc_cmdline = " ".join(proc.cmdline()) except Error: # only check processes we can access and are existing continue if matcher.search(proc_cmdline): if print_found_process: console.print(proc.as_dict(attrs=["pid", "name", "cmdline"])) return proc.pid return None def _find_all_processes(self, regexp_match: str, print_found_process=False) -> list[int]: """ Find all running process matching their command line with a regexp and return the list of pids of the processes. 
found :param regexp_match: regexp to match the command line of the processes :param print_found_process: if True, print the processes found :return: list of PID of the processes matching the regexp """ matcher = re.compile(regexp_match) pids: list[int] = [] for proc in psutil.process_iter(): try: proc_cmdline = " ".join(proc.cmdline()) except Error: # only check processes we can access and are existing continue if matcher.match(proc_cmdline): if print_found_process: console.print(proc.as_dict(attrs=["pid", "name", "cmdline"])) pids.append(proc.pid) return pids def _terminate_multiple_process(self, pid_list): process = [] for pid in pid_list: proc = psutil.Process(pid) proc.terminate() process.append(proc) gone, alive = psutil.wait_procs(process, timeout=120) for p in alive: p.kill()
python
github
https://github.com/apache/airflow
airflow-core/tests/unit/cli/commands/_common_cli_classes.py