| code | language | source | repo | path |
|---|---|---|---|---|
{
"name": "my-vend/my-app",
"license": "MIT",
"repositories": [
{
"type": "git",
"url": "example.tld"
}
]
} | json | github | https://github.com/composer/composer | tests/Composer/Test/Config/Fixtures/config/config-with-exampletld-repository-as-list.json |
data = (
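# (Added note; provenance is an assumption): this looks like a Unidecode-style
# transliteration table for one 256-code-point Unihan block, indexed by the
# low byte of the code point; '[?]' marks characters with no mapping.
# Illustrative lookup for a code point `cp` inside this block:
#     reading = data[cp & 0xFF]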
'Chu ', # 0x00
'Jing ', # 0x01
'Nie ', # 0x02
'Xiao ', # 0x03
'Bo ', # 0x04
'Chi ', # 0x05
'Qun ', # 0x06
'Mou ', # 0x07
'Shu ', # 0x08
'Lang ', # 0x09
'Yong ', # 0x0a
'Jiao ', # 0x0b
'Chou ', # 0x0c
'Qiao ', # 0x0d
'[?] ', # 0x0e
'Ta ', # 0x0f
'Jian ', # 0x10
'Qi ', # 0x11
'Wo ', # 0x12
'Wei ', # 0x13
'Zhuo ', # 0x14
'Jie ', # 0x15
'Ji ', # 0x16
'Nie ', # 0x17
'Ju ', # 0x18
'Ju ', # 0x19
'Lun ', # 0x1a
'Lu ', # 0x1b
'Leng ', # 0x1c
'Huai ', # 0x1d
'Ju ', # 0x1e
'Chi ', # 0x1f
'Wan ', # 0x20
'Quan ', # 0x21
'Ti ', # 0x22
'Bo ', # 0x23
'Zu ', # 0x24
'Qie ', # 0x25
'Ji ', # 0x26
'Cu ', # 0x27
'Zong ', # 0x28
'Cai ', # 0x29
'Zong ', # 0x2a
'Peng ', # 0x2b
'Zhi ', # 0x2c
'Zheng ', # 0x2d
'Dian ', # 0x2e
'Zhi ', # 0x2f
'Yu ', # 0x30
'Duo ', # 0x31
'Dun ', # 0x32
'Chun ', # 0x33
'Yong ', # 0x34
'Zhong ', # 0x35
'Di ', # 0x36
'Zhe ', # 0x37
'Chen ', # 0x38
'Chuai ', # 0x39
'Jian ', # 0x3a
'Gua ', # 0x3b
'Tang ', # 0x3c
'Ju ', # 0x3d
'Fu ', # 0x3e
'Zu ', # 0x3f
'Die ', # 0x40
'Pian ', # 0x41
'Rou ', # 0x42
'Nuo ', # 0x43
'Ti ', # 0x44
'Cha ', # 0x45
'Tui ', # 0x46
'Jian ', # 0x47
'Dao ', # 0x48
'Cuo ', # 0x49
'Xi ', # 0x4a
'Ta ', # 0x4b
'Qiang ', # 0x4c
'Zhan ', # 0x4d
'Dian ', # 0x4e
'Ti ', # 0x4f
'Ji ', # 0x50
'Nie ', # 0x51
'Man ', # 0x52
'Liu ', # 0x53
'Zhan ', # 0x54
'Bi ', # 0x55
'Chong ', # 0x56
'Lu ', # 0x57
'Liao ', # 0x58
'Cu ', # 0x59
'Tang ', # 0x5a
'Dai ', # 0x5b
'Suo ', # 0x5c
'Xi ', # 0x5d
'Kui ', # 0x5e
'Ji ', # 0x5f
'Zhi ', # 0x60
'Qiang ', # 0x61
'Di ', # 0x62
'Man ', # 0x63
'Zong ', # 0x64
'Lian ', # 0x65
'Beng ', # 0x66
'Zao ', # 0x67
'Nian ', # 0x68
'Bie ', # 0x69
'Tui ', # 0x6a
'Ju ', # 0x6b
'Deng ', # 0x6c
'Ceng ', # 0x6d
'Xian ', # 0x6e
'Fan ', # 0x6f
'Chu ', # 0x70
'Zhong ', # 0x71
'Dun ', # 0x72
'Bo ', # 0x73
'Cu ', # 0x74
'Zu ', # 0x75
'Jue ', # 0x76
'Jue ', # 0x77
'Lin ', # 0x78
'Ta ', # 0x79
'Qiao ', # 0x7a
'Qiao ', # 0x7b
'Pu ', # 0x7c
'Liao ', # 0x7d
'Dun ', # 0x7e
'Cuan ', # 0x7f
'Kuang ', # 0x80
'Zao ', # 0x81
'Ta ', # 0x82
'Bi ', # 0x83
'Bi ', # 0x84
'Zhu ', # 0x85
'Ju ', # 0x86
'Chu ', # 0x87
'Qiao ', # 0x88
'Dun ', # 0x89
'Chou ', # 0x8a
'Ji ', # 0x8b
'Wu ', # 0x8c
'Yue ', # 0x8d
'Nian ', # 0x8e
'Lin ', # 0x8f
'Lie ', # 0x90
'Zhi ', # 0x91
'Li ', # 0x92
'Zhi ', # 0x93
'Chan ', # 0x94
'Chu ', # 0x95
'Duan ', # 0x96
'Wei ', # 0x97
'Long ', # 0x98
'Lin ', # 0x99
'Xian ', # 0x9a
'Wei ', # 0x9b
'Zuan ', # 0x9c
'Lan ', # 0x9d
'Xie ', # 0x9e
'Rang ', # 0x9f
'Xie ', # 0xa0
'Nie ', # 0xa1
'Ta ', # 0xa2
'Qu ', # 0xa3
'Jie ', # 0xa4
'Cuan ', # 0xa5
'Zuan ', # 0xa6
'Xi ', # 0xa7
'Kui ', # 0xa8
'Jue ', # 0xa9
'Lin ', # 0xaa
'Shen ', # 0xab
'Gong ', # 0xac
'Dan ', # 0xad
'Segare ', # 0xae
'Qu ', # 0xaf
'Ti ', # 0xb0
'Duo ', # 0xb1
'Duo ', # 0xb2
'Gong ', # 0xb3
'Lang ', # 0xb4
'Nerau ', # 0xb5
'Luo ', # 0xb6
'Ai ', # 0xb7
'Ji ', # 0xb8
'Ju ', # 0xb9
'Tang ', # 0xba
'Utsuke ', # 0xbb
'[?] ', # 0xbc
'Yan ', # 0xbd
'Shitsuke ', # 0xbe
'Kang ', # 0xbf
'Qu ', # 0xc0
'Lou ', # 0xc1
'Lao ', # 0xc2
'Tuo ', # 0xc3
'Zhi ', # 0xc4
'Yagate ', # 0xc5
'Ti ', # 0xc6
'Dao ', # 0xc7
'Yagate ', # 0xc8
'Yu ', # 0xc9
'Che ', # 0xca
'Ya ', # 0xcb
'Gui ', # 0xcc
'Jun ', # 0xcd
'Wei ', # 0xce
'Yue ', # 0xcf
'Xin ', # 0xd0
'Di ', # 0xd1
'Xuan ', # 0xd2
'Fan ', # 0xd3
'Ren ', # 0xd4
'Shan ', # 0xd5
'Qiang ', # 0xd6
'Shu ', # 0xd7
'Tun ', # 0xd8
'Chen ', # 0xd9
'Dai ', # 0xda
'E ', # 0xdb
'Na ', # 0xdc
'Qi ', # 0xdd
'Mao ', # 0xde
'Ruan ', # 0xdf
'Ren ', # 0xe0
'Fan ', # 0xe1
'Zhuan ', # 0xe2
'Hong ', # 0xe3
'Hu ', # 0xe4
'Qu ', # 0xe5
'Huang ', # 0xe6
'Di ', # 0xe7
'Ling ', # 0xe8
'Dai ', # 0xe9
'Ao ', # 0xea
'Zhen ', # 0xeb
'Fan ', # 0xec
'Kuang ', # 0xed
'Ang ', # 0xee
'Peng ', # 0xef
'Bei ', # 0xf0
'Gu ', # 0xf1
'Ku ', # 0xf2
'Pao ', # 0xf3
'Zhu ', # 0xf4
'Rong ', # 0xf5
'E ', # 0xf6
'Ba ', # 0xf7
'Zhou ', # 0xf8
'Zhi ', # 0xf9
'Yao ', # 0xfa
'Ke ', # 0xfb
'Yi ', # 0xfc
'Qing ', # 0xfd
'Shi ', # 0xfe
'Ping ', # 0xff
) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"testing"
"github.com/stretchr/testify/require"
)
func TestPreallocateExtend(t *testing.T) {
pf := func(f *os.File, sz int64) error { return Preallocate(f, sz, true) }
tf := func(t *testing.T, f *os.File) {
t.Helper()
testPreallocateExtend(t, f, pf)
}
runPreallocTest(t, tf)
}
func TestPreallocateExtendTrunc(t *testing.T) {
tf := func(t *testing.T, f *os.File) {
t.Helper()
testPreallocateExtend(t, f, preallocExtendTrunc)
}
runPreallocTest(t, tf)
}
func testPreallocateExtend(t *testing.T, f *os.File, pf func(*os.File, int64) error) {
t.Helper()
size := int64(64 * 1000)
require.NoError(t, pf(f, size))
stat, err := f.Stat()
require.NoError(t, err)
if stat.Size() != size {
t.Errorf("size = %d, want %d", stat.Size(), size)
}
}
func TestPreallocateFixed(t *testing.T) { runPreallocTest(t, testPreallocateFixed) }
func testPreallocateFixed(t *testing.T, f *os.File) {
t.Helper()
size := int64(64 * 1000)
require.NoError(t, Preallocate(f, size, false))
stat, err := f.Stat()
require.NoError(t, err)
if stat.Size() != 0 {
t.Errorf("size = %d, want %d", stat.Size(), 0)
}
}
func runPreallocTest(t *testing.T, test func(*testing.T, *os.File)) {
t.Helper()
p := t.TempDir()
f, err := os.CreateTemp(p, "")
require.NoError(t, err)
test(t, f)
} | go | github | https://github.com/etcd-io/etcd | client/pkg/fileutil/preallocate_test.go |
# -*- coding: utf-8 -*-
#from ptrace.debugger.child import createChild
from os import (
    system, fork, execv, execve, getpid,
    close, dup2, devnull, open as fopen, O_RDONLY)
from sys import stdin
from ptrace.binding import ptrace_traceme
from ptrace import PtraceError
from resource import getrlimit, setrlimit, RLIMIT_AS
fds = []
c = 0
class ChildError(RuntimeError):
pass
def _execChild(arguments, no_stdout, env):
if no_stdout:
try:
null = open(devnull, 'wb')
dup2(null.fileno(), 1)
dup2(1, 2)
null.close()
except IOError as err:
close(2)
close(1)
try:
if env is not None:
execve(arguments[0], arguments, env)
else:
execv(arguments[0], arguments)
except Exception as err:
raise ChildError(str(err))
def createChild(arguments, no_stdout, env=None):
"""
Create a child process:
- arguments: list of strings (e.g. ['ls', '-la'])
- no_stdout: if True, use null device for stdout/stderr
- env: environment variables dictionary
Use:
- env={} to start with an empty environment
- env=None (default) to copy the environment
"""
# Fork process
pid = fork()
if pid:
return pid
else:
#print "limit",getrlimit(RLIMIT_DATA)
setrlimit(RLIMIT_AS, (1024*1024*1024, -1))
#print "limit",getrlimit(RLIMIT_DATA)
try:
ptrace_traceme()
except PtraceError as err:
raise ChildError(str(err))
_execChild(arguments, no_stdout, env)
exit(255)
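# Illustrative usage (added sketch, mirroring the docstring above):
#   pid = createChild(['/bin/ls', '-la'], no_stdout=True)
# The child calls ptrace_traceme() before exec'ing, so the parent
# process can attach to it with ptrace.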
def Launch(cmd, no_stdout, env):
global fds
global c
c = c + 1
#cmd = ["/usr/bin/timeout", "-k", "1", "3"]+cmd
#print cmd
if cmd[-1][0:2] == "< ":
filename = cmd[-1].replace("< ", "")
#try:
# close(3)
#except OSError:
# print "OsError!"
# pass
for fd in fds:
#print fd,
try:
close(fd)
#print "closed!"
except OSError:
#print "failed close!"
pass
fds = []
desc = fopen(filename,O_RDONLY)
fds.append(desc)
dup2(desc, stdin.fileno())
fds.append(desc)
#close(desc)
cmd = cmd[:-1]
#print "c:", c
#print "self pid", getpid()
r = createChild(cmd, no_stdout, env)
#print "new pid", r
#print "self pid", getpid()
#print "Done!"
return r
#class Runner:
# def __init__(self, cmd, timeout):
# #threading.Thread.__init__(self)
#
# self.cmd = cmd
# self.timeout = timeout
#
# def Run(self):
# #print self.cmd
# self.p = subprocess.call(self.cmd, shell=False)
# #self.p.wait()
# #self.join(self.timeout)
#
# #if self.is_alive():
# #print "terminate: ", self.p.pid
# #self.p.kill()
# #self.join()
# #return True
# return True | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <psimd.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
void pytorch_qnnp_requantize_precise__psimd(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = fp32_to_bits(scale);
const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
const uint32_t shift = 127 + 31 - (scale_bits >> 23);
assert(shift >= 32);
assert(shift < 64);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
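/* Added commentary: the constants above decompose scale as
 * multiplier * 2**(-shift), where multiplier is the 32-bit significand with
 * the implicit leading one made explicit and shift = 127 + 31 - exponent.
 * The asserted range 0x1.0p-32f <= scale < 1.0f guarantees 32 <= shift < 64;
 * rounding implements round-to-nearest with ties away from zero on the
 * final shift. */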
const psimd_u32 vmultiplier_lo =
psimd_splat_u32(multiplier & UINT32_C(0x0000FFFF));
const psimd_u32 vmultiplier_hi = psimd_splat_u32(multiplier >> 16);
const psimd_s32 vzero_point = psimd_splat_s32((int32_t)(uint32_t)zero_point);
const psimd_s32 vsmin =
psimd_splat_s32((int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point);
const psimd_s32 vsmax =
psimd_splat_s32((int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point);
const psimd_u32 vrounding_lo = psimd_splat_u32((uint32_t)rounding);
const psimd_u32 vrounding_hi = psimd_splat_u32((uint32_t)(rounding >> 32));
const psimd_u32 vshift = psimd_splat_u32(shift - 32);
for (; n != 0; n -= 16) {
const psimd_s32 x = psimd_load_s32(input);
const psimd_s32 y = psimd_load_s32(input + 4);
const psimd_s32 z = psimd_load_s32(input + 8);
const psimd_s32 w = psimd_load_s32(input + 12);
input += 16;
const psimd_s32 x_neg_mask = x >> psimd_splat_s32(31);
const psimd_s32 y_neg_mask = y >> psimd_splat_s32(31);
const psimd_s32 z_neg_mask = z >> psimd_splat_s32(31);
const psimd_s32 w_neg_mask = w >> psimd_splat_s32(31);
const psimd_u32 x_abs = (psimd_u32)((x ^ x_neg_mask) - x_neg_mask);
const psimd_u32 y_abs = (psimd_u32)((y ^ y_neg_mask) - y_neg_mask);
const psimd_u32 z_abs = (psimd_u32)((z ^ z_neg_mask) - z_neg_mask);
const psimd_u32 w_abs = (psimd_u32)((w ^ w_neg_mask) - w_neg_mask);
const psimd_u32 x_abs_lo = x_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
const psimd_u32 x_abs_hi = x_abs >> psimd_splat_u32(16);
const psimd_u32 y_abs_lo = y_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
const psimd_u32 y_abs_hi = y_abs >> psimd_splat_u32(16);
const psimd_u32 z_abs_lo = z_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
const psimd_u32 z_abs_hi = z_abs >> psimd_splat_u32(16);
const psimd_u32 w_abs_lo = w_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
const psimd_u32 w_abs_hi = w_abs >> psimd_splat_u32(16);
const psimd_u32 x_product_ll = x_abs_lo * vmultiplier_lo;
const psimd_u32 y_product_ll = y_abs_lo * vmultiplier_lo;
const psimd_u32 z_product_ll = z_abs_lo * vmultiplier_lo;
const psimd_u32 w_product_ll = w_abs_lo * vmultiplier_lo;
const psimd_u32 x_product_lh =
x_abs_lo * vmultiplier_hi + (x_product_ll >> psimd_splat_u32(16));
const psimd_u32 y_product_lh =
y_abs_lo * vmultiplier_hi + (y_product_ll >> psimd_splat_u32(16));
const psimd_u32 z_product_lh =
z_abs_lo * vmultiplier_hi + (z_product_ll >> psimd_splat_u32(16));
const psimd_u32 w_product_lh =
w_abs_lo * vmultiplier_hi + (w_product_ll >> psimd_splat_u32(16));
const psimd_u32 x_product_hl = x_abs_hi * vmultiplier_lo +
(x_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 y_product_hl = y_abs_hi * vmultiplier_lo +
(y_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 z_product_hl = z_abs_hi * vmultiplier_lo +
(z_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 w_product_hl = w_abs_hi * vmultiplier_lo +
(w_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 x_product_lo = (x_product_hl << psimd_splat_u32(16)) +
(x_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 y_product_lo = (y_product_hl << psimd_splat_u32(16)) +
(y_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 z_product_lo = (z_product_hl << psimd_splat_u32(16)) +
(z_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 w_product_lo = (w_product_hl << psimd_splat_u32(16)) +
(w_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
const psimd_u32 x_product_hi = x_abs_hi * vmultiplier_hi +
(x_product_lh >> psimd_splat_u32(16)) +
(x_product_hl >> psimd_splat_u32(16));
const psimd_u32 y_product_hi = y_abs_hi * vmultiplier_hi +
(y_product_lh >> psimd_splat_u32(16)) +
(y_product_hl >> psimd_splat_u32(16));
const psimd_u32 z_product_hi = z_abs_hi * vmultiplier_hi +
(z_product_lh >> psimd_splat_u32(16)) +
(z_product_hl >> psimd_splat_u32(16));
const psimd_u32 w_product_hi = w_abs_hi * vmultiplier_hi +
(w_product_lh >> psimd_splat_u32(16)) +
(w_product_hl >> psimd_splat_u32(16));
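/* Added commentary: psimd has no widening multiply, so the 32x32->64-bit
 * product abs(x) * multiplier is assembled from 16x16-bit partial products
 * (ll, lh, hl, hh); *_product_hi and *_product_lo hold the high and low
 * 32 bits of each 64-bit result. */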
const psimd_u32 x_adjusted_product = (x_product_hi + vrounding_hi) -
((psimd_s32)(x_product_lo & vrounding_lo) >> psimd_splat_s32(31));
const psimd_u32 y_adjusted_product = (y_product_hi + vrounding_hi) -
((psimd_s32)(y_product_lo & vrounding_lo) >> psimd_splat_s32(31));
const psimd_u32 z_adjusted_product = (z_product_hi + vrounding_hi) -
((psimd_s32)(z_product_lo & vrounding_lo) >> psimd_splat_s32(31));
const psimd_u32 w_adjusted_product = (w_product_hi + vrounding_hi) -
((psimd_s32)(w_product_lo & vrounding_lo) >> psimd_splat_s32(31));
const psimd_u32 x_abs_scaled = x_adjusted_product >> vshift;
const psimd_u32 y_abs_scaled = y_adjusted_product >> vshift;
const psimd_u32 z_abs_scaled = z_adjusted_product >> vshift;
const psimd_u32 w_abs_scaled = w_adjusted_product >> vshift;
const psimd_s32 x_scaled =
(psimd_s32)(x_abs_scaled ^ x_neg_mask) - x_neg_mask;
const psimd_s32 y_scaled =
(psimd_s32)(y_abs_scaled ^ y_neg_mask) - y_neg_mask;
const psimd_s32 z_scaled =
(psimd_s32)(z_abs_scaled ^ z_neg_mask) - z_neg_mask;
const psimd_s32 w_scaled =
(psimd_s32)(w_abs_scaled ^ w_neg_mask) - w_neg_mask;
const psimd_u32 x_clamped =
(psimd_u32)psimd_max_s32(psimd_min_s32(x_scaled, vsmax), vsmin) +
vzero_point;
const psimd_u32 y_clamped =
(psimd_u32)psimd_max_s32(psimd_min_s32(y_scaled, vsmax), vsmin) +
vzero_point;
const psimd_u32 z_clamped =
(psimd_u32)psimd_max_s32(psimd_min_s32(z_scaled, vsmax), vsmin) +
vzero_point;
const psimd_u32 w_clamped =
(psimd_u32)psimd_max_s32(psimd_min_s32(w_scaled, vsmax), vsmin) +
vzero_point;
const psimd_u16 xy_clamped =
psimd_concat_even_u16((psimd_u16)x_clamped, (psimd_u16)y_clamped);
const psimd_u16 zw_clamped =
psimd_concat_even_u16((psimd_u16)z_clamped, (psimd_u16)w_clamped);
const psimd_u8 xyzw_clamped =
psimd_concat_even_u8((psimd_u8)xy_clamped, (psimd_u8)zw_clamped);
psimd_store_u8(output, xyzw_clamped);
output += 16;
}
} | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-psimd.c |
"""
Provides a function for importing a git repository into the lms
instance when using a mongo modulestore
"""
import os
import re
import StringIO
import subprocess
import logging
from django.conf import settings
from django.core import management
from django.core.management.base import CommandError
from django.utils import timezone
from django.utils.translation import ugettext as _
import mongoengine
from dashboard.models import CourseImportLog
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
GIT_REPO_DIR = getattr(settings, 'GIT_REPO_DIR', '/edx/var/app/edxapp/course_repos')
GIT_IMPORT_STATIC = getattr(settings, 'GIT_IMPORT_STATIC', True)
class GitImportError(Exception):
"""
Exception class for handling the typical errors in a git import.
"""
NO_DIR = _("Path {0} doesn't exist, please create it, "
"or configure a different path with "
"GIT_REPO_DIR").format(GIT_REPO_DIR)
URL_BAD = _('Non usable git url provided. Expecting something like:'
' git@github.com:mitocw/edx4edx_lite.git')
BAD_REPO = _('Unable to get git log')
CANNOT_PULL = _('git clone or pull failed!')
XML_IMPORT_FAILED = _('Unable to run import command.')
UNSUPPORTED_STORE = _('The underlying module store does not support import.')
# Translators: This is an error message when they ask for a
# particular version of a git repository and that version isn't
# available from the remote source they specified
REMOTE_BRANCH_MISSING = _('The specified remote branch is not available.')
# Translators: Error message shown when they have asked for a git
# repository branch, a specific version within a repository, that
# doesn't exist, or there is a problem changing to it.
CANNOT_BRANCH = _('Unable to switch to specified branch. Please check '
'your branch name.')
def cmd_log(cmd, cwd):
"""
Helper function to redirect stderr to stdout and log the command
used along with the output. Will raise subprocess.CalledProcessError if
command doesn't return 0, and returns the command's output.
"""
output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
log.debug('Command was: {0!r}. '
'Working directory was: {1!r}'.format(' '.join(cmd), cwd))
log.debug('Command output was: {0!r}'.format(output))
return output
def switch_branch(branch, rdir):
"""
This will determine how to change the branch of the repo, and then
use the appropriate git commands to do so.
Raises an appropriate GitImportError exception if there are any issues with
changing branches.
"""
# Get the latest remote
try:
cmd_log(['git', 'fetch', ], rdir)
except subprocess.CalledProcessError as ex:
log.exception('Unable to fetch remote: %r', ex.output)
raise GitImportError(GitImportError.CANNOT_BRANCH)
# Check if the branch is available from the remote.
cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]
try:
output = cmd_log(cmd, rdir)
except subprocess.CalledProcessError as ex:
log.exception('Getting a list of remote branches failed: %r', ex.output)
raise GitImportError(GitImportError.CANNOT_BRANCH)
if branch not in output:
raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)
# Check if the remote branch has already been created locally
cmd = ['git', 'branch', '-a', ]
try:
output = cmd_log(cmd, rdir)
except subprocess.CalledProcessError as ex:
log.exception('Getting a list of local branches failed: %r', ex.output)
raise GitImportError(GitImportError.CANNOT_BRANCH)
branches = []
for line in output.split('\n'):
branches.append(line.replace('*', '').strip())
if branch not in branches:
# Checkout with -b since it is remote only
cmd = ['git', 'checkout', '--force', '--track',
'-b', branch, 'origin/{0}'.format(branch), ]
try:
cmd_log(cmd, rdir)
except subprocess.CalledProcessError as ex:
log.exception('Unable to checkout remote branch: %r', ex.output)
raise GitImportError(GitImportError.CANNOT_BRANCH)
# Go ahead and reset hard to the newest version of the branch now that we know
# it is local.
try:
cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)
except subprocess.CalledProcessError as ex:
log.exception('Unable to reset to branch: %r', ex.output)
raise GitImportError(GitImportError.CANNOT_BRANCH)
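# Illustrative flow (added summary of the function above): switching to a
# branch "stable" amounts to `git fetch`, `git ls-remote origin -h
# refs/heads/stable`, `git branch -a`, optionally `git checkout --force
# --track -b stable origin/stable` when no local branch exists yet, and
# finally `git reset --hard origin/stable`.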
def add_repo(repo, rdir_in, branch=None):
"""
This will add a git repo into the mongo modulestore.
If branch is left as None, it will fetch the most recent
version of the current branch.
"""
# pylint: disable=too-many-statements
# Set defaults even if it isn't defined in settings
mongo_db = {
'host': 'localhost',
'port': 27017,
'user': '',
'password': '',
'db': 'xlog',
}
# Allow overrides
if hasattr(settings, 'MONGODB_LOG'):
for config_item in ['host', 'user', 'password', 'db', 'port']:
mongo_db[config_item] = settings.MONGODB_LOG.get(
config_item, mongo_db[config_item])
if not os.path.isdir(GIT_REPO_DIR):
raise GitImportError(GitImportError.NO_DIR)
# pull from git
if not (repo.endswith('.git') or
repo.startswith(('http:', 'https:', 'git:', 'file:'))):
raise GitImportError(GitImportError.URL_BAD)
if rdir_in:
rdir = os.path.basename(rdir_in)
else:
rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]
log.debug('rdir = {0}'.format(rdir))
rdirp = '{0}/{1}'.format(GIT_REPO_DIR, rdir)
if os.path.exists(rdirp):
log.info('directory already exists, doing a git pull instead '
'of git clone')
cmd = ['git', 'pull', ]
cwd = rdirp
else:
cmd = ['git', 'clone', repo, ]
cwd = GIT_REPO_DIR
cwd = os.path.abspath(cwd)
try:
ret_git = cmd_log(cmd, cwd=cwd)
except subprocess.CalledProcessError as ex:
log.exception('Error running git pull: %r', ex.output)
raise GitImportError(GitImportError.CANNOT_PULL)
if branch:
switch_branch(branch, rdirp)
# get commit id
cmd = ['git', 'log', '-1', '--format=%H', ]
try:
commit_id = cmd_log(cmd, cwd=rdirp)
except subprocess.CalledProcessError as ex:
log.exception('Unable to get git log: %r', ex.output)
raise GitImportError(GitImportError.BAD_REPO)
ret_git += '\nCommit ID: {0}'.format(commit_id)
# get branch
cmd = ['git', 'symbolic-ref', '--short', 'HEAD', ]
try:
branch = cmd_log(cmd, cwd=rdirp)
except subprocess.CalledProcessError as ex:
# I can't discover a way to exercise this, but git is complex
# so still logging and raising here in case.
log.exception('Unable to determine branch: %r', ex.output)
raise GitImportError(GitImportError.BAD_REPO)
ret_git += '{0}Branch: {1}'.format(' \n', branch)
# Get XML logging logger and capture debug to parse results
output = StringIO.StringIO()
import_log_handler = logging.StreamHandler(output)
import_log_handler.setLevel(logging.DEBUG)
logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course',
'xmodule.modulestore.xml', 'xmodule.seq_module', ]
loggers = []
for logger_name in logger_names:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.addHandler(import_log_handler)
loggers.append(logger)
try:
management.call_command('import', GIT_REPO_DIR, rdir,
nostatic=not GIT_IMPORT_STATIC)
except CommandError:
raise GitImportError(GitImportError.XML_IMPORT_FAILED)
except NotImplementedError:
raise GitImportError(GitImportError.UNSUPPORTED_STORE)
ret_import = output.getvalue()
# Remove handler hijacks
for logger in loggers:
logger.setLevel(logging.NOTSET)
logger.removeHandler(import_log_handler)
course_key = None
location = 'unknown'
# extract course ID from output of import-command-run and make symlink
# this is needed in order for custom course scripts to work
match = re.search(r'(?ms)===> IMPORTING course (\S+)', ret_import)
if match:
course_id = match.group(1)
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
cdir = '{0}/{1}'.format(GIT_REPO_DIR, course_key.course)
log.debug('Studio course dir = {0}'.format(cdir))
if os.path.exists(cdir) and not os.path.islink(cdir):
log.debug(' -> exists, but is not symlink')
log.debug(subprocess.check_output(['ls', '-l', ],
cwd=os.path.abspath(cdir)))
try:
os.rmdir(os.path.abspath(cdir))
except OSError:
log.exception('Failed to remove course directory')
if not os.path.exists(cdir):
log.debug(' -> creating symlink between {0} and {1}'.format(rdirp, cdir))
try:
os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir))
except OSError:
log.exception('Unable to create course symlink')
log.debug(subprocess.check_output(['ls', '-l', ],
cwd=os.path.abspath(cdir)))
# store import-command-run output in mongo
mongouri = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(**mongo_db)
try:
if mongo_db['user'] and mongo_db['password']:
mdb = mongoengine.connect(mongo_db['db'], host=mongouri)
else:
mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'], port=mongo_db['port'])
except mongoengine.connection.ConnectionError:
log.exception('Unable to connect to mongodb to save log, please '
'check MONGODB_LOG settings')
cil = CourseImportLog(
course_id=course_key,
location=location,
repo_dir=rdir,
created=timezone.now(),
import_log=ret_import,
git_log=ret_git,
)
cil.save()
log.debug('saved CourseImportLog for {0}'.format(cil.course_id))
mdb.disconnect() | unknown | codeparrot/codeparrot-clean | ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.stat import KernelDensity
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="KernelDensityEstimationExample") # SparkContext
# $example on$
# an RDD of sample data
data = sc.parallelize([1.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 6.0, 7.0, 8.0, 9.0, 9.0])
# Construct the density estimator with the sample data and a standard deviation for the Gaussian
# kernels
kd = KernelDensity()
kd.setSample(data)
kd.setBandwidth(3.0)
# Find density estimates for the given values
densities = kd.estimate([-1.0, 2.0, 5.0])
# $example off$
print(densities)
sc.stop() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: ISO-8859-1 -*-
from struct import pack, unpack
"""
This module contains functions for reading and writing the special data types
that a midi file contains.
"""
"""
nibbles are four bits. A byte consists of two nibles.
hiBits==0xF0, loBits==0x0F Especially used for setting
channel and event in 1. byte of musical midi events
"""
def getNibbles(byte):
"""
Returns hi and lo bits in a byte as a tuple
>>> getNibbles(142)
(8, 14)
Asserts byte value in byte range
>>> getNibbles(256)
Traceback (most recent call last):
...
ValueError: Byte value out of range 0-255: 256
"""
if not 0 <= byte <= 255:
raise ValueError('Byte value out of range 0-255: %s' % byte)
return (byte >> 4 & 0xF, byte & 0xF)
def setNibbles(hiNibble, loNibble):
"""
Returns byte with value set according to hi and lo bits
Asserts hiNibble and loNibble in range(16)
>>> setNibbles(8, 14)
142
>>> setNibbles(8, 16)
Traceback (most recent call last):
...
ValueError: Nibble value out of range 0-15: (8, 16)
"""
if not (0 <= hiNibble <= 15) or not (0 <= loNibble <= 15):
raise ValueError('Nibble value out of range 0-15: (%s, %s)' % (hiNibble, loNibble))
return (hiNibble << 4) + loNibble
def readBew(value):
"""
Reads string as big endian word (asserts len(value) in [1,2,4])
>>> readBew('aáâã')
1642193635L
>>> readBew('aá')
25057
"""
return unpack('>%s' % {1:'B', 2:'H', 4:'L'}[len(value)], value)[0]
def writeBew(value, length):
"""
Write int as big endian formatted string (asserts length in [1,2,4])
Difficult to print the result in doctest, so I do a simple roundabout test.
>>> readBew(writeBew(25057, 2))
25057
>>> readBew(writeBew(1642193635L, 4))
1642193635L
"""
return pack('>%s' % {1:'B', 2:'H', 4:'L'}[length], value)
"""
Variable Length Data (varlen) is a data format sprayed liberally throughout
a midi file. It can be anywhere from 1 to 4 bytes long.
If the 8th bit is set in a byte, another byte follows. The value is stored
in the lowest 7 bits of each byte. So max value is 4x7 bits = 28 bits.
"""
def readVar(value):
"""
Converts varlength format to integer. Just pass it 0 or more chars that
might be a varlen and it will only use the relevant chars.
Use varLen(readVar(value)) to see how many bytes the integer value takes.
Asserts len(value) >= 0.
>>> readVar('@')
64
>>> readVar('áâãa')
205042145
"""
sum = 0
for byte in unpack('%sB' % len(value), value):
sum = (sum << 7) + (byte & 0x7F)
if not 0x80 & byte: break # stop after last byte
return sum
def varLen(value):
"""
Returns the number of bytes an integer will be when
converted to varlength
"""
if value <= 127:
return 1
elif value <= 16383:
return 2
elif value <= 2097151:
return 3
else:
return 4
def writeVar(value):
"Converts an integer to varlength format"
sevens = to_n_bits(value, varLen(value))
for i in range(len(sevens)-1):
sevens[i] = sevens[i] | 0x80
return fromBytes(sevens)
def to_n_bits(value, length=1, nbits=7):
"returns the integer value as a sequence of nbits bytes"
bytes = [(value >> (i*nbits)) & 0x7F for i in range(length)]
bytes.reverse()
return bytes
def toBytes(value):
"Turns a string into a list of byte values"
return unpack('%sB' % len(value), value)
def fromBytes(value):
"Turns a list of bytes into a string"
if not value:
return ''
return pack('%sB' % len(value), *value)
if __name__ == '__main__':
# print to7bits(0, 3)
# print to7bits(127, 3)
# print to7bits(255, 3)
# print to7bits(65536, 3)
# simple test cases
# print 'getHiLoHex', getNibbles(16)
# print 'setHiLoHex', setNibbles(1,0)
#
# print 'readBew', readBew('aáâã')
# print 'writeBew', writeBew(1642193635, 4)
#
# print 'varLen', varLen(1)
#
print 'readVar', readVar('@')
print 'writeVar', writeVar(8192)
print 'readVar', readVar('áâãa')
print 'writeVar', writeVar(205058401)
#
# vartest = '\x82\xF7\x80\x00'
# print 'toBytes', toBytes(vartest)
# print 'fromBytes', fromBytes([48, 49, 50,])
# instr = '\xFF\xFF\xFF\x00'
# print 'readVar', readVar(instr)
# inst2 = 268435455
# print inst2
# print writeVar(inst2)
# print writeVar(readVar(instr))
s1 = 0x00000000
print '%08X -' % s1, '00', writeVar(s1)
s2 = 0x00000040
print '%08X -' % s2, '40', writeVar(s2)
s3 = 0x0000007F
print '%08X -' % s3, '7F', writeVar(s3)
s4 = 0x00000080
print '%08X -' % s4, '81 00', writeVar(s4)
s5 = 0x00002000
print '%08X -' % s5, 'C0 00', writeVar(s5)
s6 = 0x00003FFF
print '%08X -' % s6, 'FF 7F', writeVar(s6)
s7 = 0x00004000
print '%08X -' % s7, '81 80 00', writeVar(s7)
s8 = 0x00100000
print '%08X -' % s8, 'C0 80 00', writeVar(s8)
s9 = 0x001FFFFF
print '%08X -' % s9, 'FF FF 7F', writeVar(s9)
s10 = 0x00200000
print '%08X -' % s10, '81 80 80 00', writeVar(s10)
s11 = 0x08000000
print '%08X -' % s11, 'C0 80 80 00', writeVar(s11)
s12 = 0x0FFFFFFF
print '%08X -' % s12, 'FF FF FF 7F', writeVar(s12) | unknown | codeparrot/codeparrot-clean | ||
'''Euromast, Copyright 2017, CommitSudoku'''
import pygame
def process_events():
for event in pygame.event.get():
if event.type == pygame.QUIT:
# Give the signal to quit
return True
return False
# class program:
class Game():
def __init__(self):
width = 800
height = 600
screen_size = (width, height) # fixed resolution
pygame.init() # start the game
screen = pygame.display.set_mode(screen_size) # pygame call to open the window
pygame.display.set_caption('Euromast')
while not process_events():
screen.fill((255, 255, 255)) # display colour
pygame.display.flip()
def program():
game = Game()
program() | unknown | codeparrot/codeparrot-clean | ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
__all__ = ["ResponseComputerToolCallOutputItem", "AcknowledgedSafetyCheck"]
class AcknowledgedSafetyCheck(BaseModel):
"""A pending safety check for the computer call."""
id: str
"""The ID of the pending safety check."""
code: Optional[str] = None
"""The type of the pending safety check."""
message: Optional[str] = None
"""Details about the pending safety check."""
class ResponseComputerToolCallOutputItem(BaseModel):
id: str
"""The unique ID of the computer call tool output."""
call_id: str
"""The ID of the computer tool call that produced the output."""
output: ResponseComputerToolCallOutputScreenshot
"""A computer screenshot image used with the computer use tool."""
type: Literal["computer_call_output"]
"""The type of the computer tool call output. Always `computer_call_output`."""
acknowledged_safety_checks: Optional[List[AcknowledgedSafetyCheck]] = None
"""
The safety checks reported by the API that have been acknowledged by the
developer.
"""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the message input.
One of `in_progress`, `completed`, or `incomplete`. Populated when input items
are returned via API.
""" | python | github | https://github.com/openai/openai-python | src/openai/types/responses/response_computer_tool_call_output_item.py |
maxlen=100000
minval=-1000000000
maxval=1000000000
def solution(A):
premax= None
postmax = None
maxabs = 0
tmpabs = 0
maxA=[]
length=len(A)
# reject invalid input lists
if length < 2 or length > maxlen:
return 0
# build prefix-max list: maxA[i] = max(A[0..i])
for i,val in enumerate(A):
if val <minval or val >maxval:
return 0
if premax == None or val > premax:
premax = val
maxA.append(premax)
#print maxA
i=length-1
for val in A[::-1]:
if postmax == None or postmax < val:
postmax = val
# maxA[i] holds the max value up to index i,
# and postmax holds the max value from index i to the end
tmpabs = abs(maxA[i]-postmax)
if tmpabs > maxabs:
maxabs = tmpabs
#print i,maxabs,postmax,maxA[i]
i-=1
return maxabs
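# Illustrative trace (added, not part of the original): for
# A = [3, 2, 1, 5, -1, -2, -3] the prefix maxima are [3, 3, 3, 5, 5, 5, 5];
# scanning from the right, the suffix max at index 6 is -3, giving
# abs(5 - (-3)) = 8, which is the value returned.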
def run_test():
test_suit = (
[],
[1],
[minval],
[maxval],
[minval-1],
[maxval+1],
[1,1],
[1,2],
[1,2,3],
[-3,-2,-1,1,2,3],
[3,2,1,0,-1,-2,-3],
[3,2,1,5,-1,-2,-3],
[0,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0],
[minval,maxval],
)
for tc in test_suit:
print "test data:",tc, "solution data:",solution(tc)
if __name__=="__main__":
run_test() | unknown | codeparrot/codeparrot-clean | ||
#ifndef SRC_NODE_PROCESS_INL_H_
#define SRC_NODE_PROCESS_INL_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include "node_process.h"
#include "v8.h"
#include "debug_utils-inl.h"
namespace node {
// Call process.emitWarning(str), fmt is a snprintf() format string
template <typename... Args>
inline v8::Maybe<bool> ProcessEmitWarning(Environment* env,
const char* fmt,
Args&&... args) {
std::string warning = SPrintF(fmt, std::forward<Args>(args)...);
return ProcessEmitWarningGeneric(env, warning.c_str());
}
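// Illustrative usage (added; an assumed call site, not taken from this file):
//   ProcessEmitWarning(env, "Invalid array length %d", length);
// SPrintF formats the message, which is then passed on to
// ProcessEmitWarningGeneric and ultimately process.emitWarning().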
} // namespace node
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_NODE_PROCESS_INL_H_ | c | github | https://github.com/nodejs/node | src/node_process-inl.h |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js || wasip1
package net
import "syscall"
func setDefaultSockopts(s, family, sotype int, ipv6only bool) error {
return nil
}
func setDefaultListenerSockopts(s int) error {
return nil
}
func setDefaultMulticastSockopts(s int) error {
return nil
}
func setReadBuffer(fd *netFD, bytes int) error {
if fd.fakeNetFD != nil {
return fd.fakeNetFD.setReadBuffer(bytes)
}
return syscall.ENOPROTOOPT
}
func setWriteBuffer(fd *netFD, bytes int) error {
if fd.fakeNetFD != nil {
return fd.fakeNetFD.setWriteBuffer(bytes)
}
return syscall.ENOPROTOOPT
}
func setKeepAlive(fd *netFD, keepalive bool) error {
return syscall.ENOPROTOOPT
}
func setLinger(fd *netFD, sec int) error {
if fd.fakeNetFD != nil {
return fd.fakeNetFD.setLinger(sec)
}
return syscall.ENOPROTOOPT
} | go | github | https://github.com/golang/go | src/net/sockopt_fake.go |
import argparse
import datetime
import json
import os
import sys
import logging
import requests
import subprocess
import six
import time
import yaml
from subprocess import Popen,PIPE
from shlex import split
from utils import *
# Generate common options
def generate_options(args):
gpus = args.gpus
cpu = args.cpu
memory = args.memory
output_data = args.output_data
data = args.data
env = args.env
tensorboard_image = args.tensorboard_image
tensorboard = str2bool(args.tensorboard)
log_dir = args.log_dir
sync_source = args.sync_source
options = []
if gpus > 0:
options.extend(['--gpus', str(gpus)])
if cpu != '0':
options.extend(['--cpu', str(cpu)])
if memory != '0':
options.extend(['--memory', str(memory)])
if tensorboard_image != "tensorflow/tensorflow:1.12.0":
options.extend(['--tensorboardImage', tensorboard_image])
if tensorboard:
options.append("--tensorboard")
if os.path.isdir(args.log_dir):
options.extend(['--logdir', args.log_dir])
else:
logging.info("skip log dir :{0}".format(args.log_dir))
if len(data) > 0:
for d in data:
if ":" in d:
options.append("--data={0}".format(d))
else:
logging.info("--data={0} is illegal, skip.".format(d))
if len(env) > 0:
for e in env:
if "=" in e:
options.append("--env={0}".format(e))
else:
logging.info("--env={0} is illegal, skip.".format(e))
if len(args.workflow_name) > 0:
options.append("--env=WORKFLOW_NAME={0}".format(args.workflow_name))
if len(args.step_name) > 0:
options.append("--env=STEP_NAME={0}".format(args.step_name))
if len(sync_source) > 0:
if not sync_source.endswith(".git"):
raise ValueError("sync_source must be an http git url")
options.extend(['--sync-mode','git'])
options.extend(['--sync-source',sync_source])
return options
# Generate standalone job
def generate_job_command(args):
name = args.name
image = args.image
commandArray = [
'arena', 'submit', 'tfjob',
'--name={0}'.format(name),
'--image={0}'.format(image),
]
commandArray.extend(generate_options(args))
return commandArray, "tfjob"
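# Hypothetical usage (added sketch; `args` is the argparse namespace this
# script parses elsewhere, and the returned array is a plain argv list):
#   command, job_type = generate_job_command(args)
#   subprocess.check_call(command)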
# Generate mpi job
def generate_mpjob_command(args):
name = args.name
workers = args.workers
image = args.image
rdma = args.rdma
commandArray = [
'arena', 'submit', 'mpijob',
'--name={0}'.format(name),
'--workers={0}'.format(workers),
'--image={0}'.format(image),
]
if rdma.lower() == "true":
commandArray.append("--rdma")
commandArray.extend(generate_options(args))
return commandArray, "mpijob" | unknown | codeparrot/codeparrot-clean | ||
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: upload-flags
Arg: <flags>
Help: IMAP upload behavior
Protocols: IMAP
Category: curl output
Added: 8.13.0
Multi: single
See-also:
- upload-file
Example:
- --upload-flags Flagged,!Seen --upload-file local/dir/file $URL
---
# `--upload-flags`
Specify additional behavior to apply to uploaded files. Flags are
specified as either a single flag value or a comma-separated list
of flag values. These values are case-sensitive and may be negated
by prepending them with a '-' character. Currently the following
flag values are accepted: answered, deleted, draft, flagged, and
seen. These flags are set on the uploaded message when using IMAP. | unknown | github | https://github.com/curl/curl | docs/cmdline-opts/upload-flags.md |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from nova.compute import resource_tracker
class FakeResourceTracker(resource_tracker.ResourceTracker):
"""Version without a DB requirement."""
def _update(self, context, compute_node, startup=False):
pass
class RTMockMixin(object):
def _mock_rt(self, **kwargs):
if 'spec_set' in kwargs:
kwargs.update({'autospec': False})
return self.useFixture(fixtures.MockPatchObject(
self.compute, 'rt', **kwargs)).mock | unknown | codeparrot/codeparrot-clean | ||
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Any,
)
import numpy as np
from numpy import ma
from pandas._config import using_string_dtype
from pandas._libs import lib
from pandas.core.dtypes.astype import astype_is_view
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.dtypes import (
BaseMaskedDtype,
ExtensionDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
from pandas.core import (
algorithms,
common as com,
)
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.string_ import StringDtype
from pandas.core.construction import (
array as pd_array,
extract_array,
range_to_ndarray,
sanitize_array,
)
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
MultiIndex,
TimedeltaIndex,
default_index,
ensure_index,
get_objs_combined_axis,
maybe_sequence_to_range,
union_indexes,
)
from pandas.core.internals.blocks import (
BlockPlacement,
ensure_block_shape,
new_block,
new_block_2d,
)
from pandas.core.internals.managers import (
create_block_manager_from_blocks,
create_block_manager_from_column_arrays,
)
if TYPE_CHECKING:
from collections.abc import (
Hashable,
Sequence,
)
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
npt,
)
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
arrays,
columns: Index,
index,
*,
dtype: DtypeObj | None = None,
verify_integrity: bool = True,
consolidate: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if verify_integrity:
# figure out the index, if necessary
if index is None:
index = _extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays, refs = _homogenize(arrays, index, dtype)
# _homogenize ensures
# - all(len(x) == len(index) for x in arrays)
# - all(x.ndim == 1 for x in arrays)
# - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
# - all(type(x) is not NumpyExtensionArray for x in arrays)
else:
index = ensure_index(index)
arrays = [extract_array(x, extract_numpy=True) for x in arrays]
# with _from_arrays, the passed arrays should never be Series objects
refs = [None] * len(arrays)
# Reached via DataFrame._from_arrays; we do minimal validation here
for arr in arrays:
if (
not isinstance(arr, (np.ndarray, ExtensionArray))
or arr.ndim != 1
or len(arr) != len(index)
):
raise ValueError(
"Arrays must be 1-dimensional np.ndarray or ExtensionArray "
"with length matching len(index)"
)
columns = ensure_index(columns)
if len(columns) != len(arrays):
raise ValueError("len(arrays) must match len(columns)")
# from BlockManager perspective
axes = [columns, index]
return create_block_manager_from_column_arrays(
arrays, axes, consolidate=consolidate, refs=refs
)
def rec_array_to_mgr(
data: np.rec.recarray | np.ndarray,
index,
columns,
dtype: DtypeObj | None,
copy: bool,
) -> Manager:
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fdata = ma.getdata(data)
if index is None:
index = default_index(len(fdata))
else:
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# create the manager
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype)
if copy:
mgr = mgr.copy(deep=True)
return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def ndarray_to_mgr(
values, index, columns, dtype: DtypeObj | None, copy: bool
) -> Manager:
# used in DataFrame.__init__
# input must be an ndarray, list, Series, Index, ExtensionArray
infer_object = not isinstance(values, (ABCSeries, Index, ExtensionArray))
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = Index([values.name])
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
vdtype = getattr(values, "dtype", None)
refs = None
if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
# GH#19157
if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
# GH#12513 an EA dtype passed with a 2D array, split into
# multiple EAs that view the values
# error: No overload variant of "__getitem__" of "ExtensionArray"
# matches argument type "Tuple[slice, int]"
values = [
values[:, n] # type: ignore[call-overload]
for n in range(values.shape[1])
]
else:
values = [values]
# Handle copy semantics: already copy 1d-only EA. Other arrays will
# be copied when consolidating the blocks
if copy:
values = [
(x.copy(deep=True) if isinstance(x, Index) else x.copy())
if isinstance(x, (ExtensionArray, Index, ABCSeries))
and is_1d_only_ea_dtype(x.dtype)
else x
for x in values
]
if columns is None:
columns = Index(range(len(values)))
else:
columns = ensure_index(columns)
return arrays_to_mgr(values, columns, index, dtype=dtype, consolidate=copy)
if isinstance(values, (ABCSeries, Index)):
if not copy and (dtype is None or astype_is_view(values.dtype, dtype)):
refs = values._references
if isinstance(vdtype, ExtensionDtype):
# i.e. Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype)
# are already caught above
values = extract_array(values, extract_numpy=True)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape(-1, 1)
elif isinstance(values, (ABCSeries, Index)):
if copy:
values = values._values.copy()
else:
values = values._values
values = _ensure_2d(values)
elif isinstance(values, (np.ndarray, ExtensionArray)):
# drop subclass info
if copy and (dtype is None or astype_is_view(values.dtype, dtype)):
# only force a copy now if copy=True was requested
# and a subsequent `astype` will not already result in a copy
values = np.array(values, copy=True, order="F")
else:
values = np.asarray(values)
values = _ensure_2d(values)
else:
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarraylike(values, copy=copy)
if dtype is not None and values.dtype != dtype:
# GH#40110 see similar check inside sanitize_array
values = sanitize_array(
values,
None,
dtype=dtype,
copy=copy,
allow_2d=True,
)
# _prep_ndarraylike ensures that values.ndim == 2 at this point
index, columns = _get_axes(
values.shape[0], values.shape[1], index=index, columns=columns
)
_check_values_indices_shape_match(values, index, columns)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and infer_object and is_object_dtype(values.dtype):
obj_columns = list(values)
maybe_datetime = [
lib.maybe_convert_objects(
x,
# Here we do not convert numeric dtypes, as if we wanted that,
# numpy would have done it for us.
convert_numeric=False,
convert_non_numeric=True,
convert_to_nullable_dtype=False,
dtype_if_all_nat=np.dtype("M8[s]"),
)
for x in obj_columns
]
# don't convert (and copy) the objects if no type inference occurs
if any(x is not y for x, y in zip(obj_columns, maybe_datetime, strict=True)):
block_values = [
new_block_2d(ensure_block_shape(dval, 2), placement=BlockPlacement(n))
for n, dval in enumerate(maybe_datetime)
]
else:
bp = BlockPlacement(slice(len(columns)))
nb = new_block_2d(values, placement=bp, refs=refs)
block_values = [nb]
elif dtype is None and values.dtype.kind == "U" and using_string_dtype():
dtype = StringDtype(na_value=np.nan)
obj_columns = list(values)
block_values = [
new_block(
dtype.construct_array_type()._from_sequence(data, dtype=dtype),
BlockPlacement(slice(i, i + 1)),
ndim=2,
)
for i, data in enumerate(obj_columns)
]
else:
bp = BlockPlacement(slice(len(columns)))
nb = new_block_2d(values, placement=bp, refs=refs)
block_values = [nb]
if len(columns) == 0:
# TODO: check len(values) == 0?
block_values = []
return create_block_manager_from_blocks(
block_values, [columns, index], verify_integrity=False
)
def _check_values_indices_shape_match(
values: np.ndarray, index: Index, columns: Index
) -> None:
"""
Check that the shape implied by our axes matches the actual shape of the
data.
"""
if values.shape[1] != len(columns) or values.shape[0] != len(index):
# Could let this raise in Block constructor, but we get a more
# helpful exception message this way.
if values.shape[0] == 0 < len(index):
raise ValueError("Empty data passed with indices specified.")
passed = values.shape
implied = (len(index), len(columns))
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def dict_to_mgr(
data: dict,
index,
columns,
*,
dtype: DtypeObj | None = None,
copy: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
Used in DataFrame.__init__
"""
arrays: Sequence[Any]
if columns is not None:
columns = ensure_index(columns)
if dtype is not None and not isinstance(dtype, np.dtype):
# e.g. test_dataframe_from_dict_of_series
arrays = [dtype.na_value] * len(columns)
else:
arrays = [np.nan] * len(columns)
midxs = set()
data_keys = ensure_index(data.keys()) # type: ignore[arg-type]
data_values = list(data.values())
for i, column in enumerate(columns):
try:
idx = data_keys.get_loc(column)
except KeyError:
midxs.add(i)
continue
array = data_values[idx]
arrays[i] = array
if is_scalar(array) and isna(array):
midxs.add(i)
if index is None:
# GH10856
# raise ValueError if only scalars in dict
if midxs:
index = _extract_index(
[array for i, array in enumerate(arrays) if i not in midxs]
)
else:
index = _extract_index(arrays)
else:
index = ensure_index(index)
# no obvious "empty" int column
if midxs and not is_integer_dtype(dtype):
# GH#1783
for i in midxs:
arr = construct_1d_arraylike_from_scalar(
arrays[i],
len(index),
dtype if dtype is not None else np.dtype("object"),
)
arrays[i] = arr
else:
keys = maybe_sequence_to_range(list(data.keys()))
columns = Index(keys) if keys else default_index(0)
arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
if copy:
# We only need to copy arrays that will not get consolidated, i.e.
# only EA arrays
arrays = [
(
x.copy()
if isinstance(x, ExtensionArray)
else (
x.copy(deep=True)
if (
isinstance(x, Index)
or (isinstance(x, ABCSeries) and is_1d_only_ea_dtype(x.dtype))
)
else x
)
)
for x in arrays
]
return arrays_to_mgr(arrays, columns, index, dtype=dtype, consolidate=copy)
def nested_data_to_arrays(
data: Sequence,
columns: Index | None,
index: Index | None,
dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index, Index]:
"""
Convert a single sequence of arrays to multiple arrays.
"""
# By the time we get here we have already checked treat_as_nested(data)
if is_named_tuple(data[0]) and columns is None:
columns = ensure_index(data[0]._fields)
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
if index is None:
if isinstance(data[0], ABCSeries):
index = _get_names_from_index(data)
else:
index = default_index(len(data))
return arrays, columns, index
def treat_as_nested(data) -> bool:
"""
Check if we should use nested_data_to_arrays.
"""
return (
len(data) > 0
and is_list_like(data[0])
and getattr(data[0], "ndim", 1) == 1
and not (isinstance(data, ExtensionArray) and data.ndim == 2)
)
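# Added illustration: [[1, 2], [3, 4]] or a list of 1-D Series counts as
# nested (each element becomes a row), while a flat list like [1, 2, 3]
# does not.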
# ---------------------------------------------------------------------
def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray:
# values is specifically _not_ ndarray, EA, Index, or Series
# We only get here with `not treat_as_nested(values)`
if len(values) == 0:
# TODO: check for length-zero range, in which case return int64 dtype?
# TODO: reuse anything in try_cast?
return np.empty((0, 0), dtype=object)
elif isinstance(values, range):
arr = range_to_ndarray(values)
return arr[..., np.newaxis]
def convert(v):
if not is_list_like(v) or isinstance(v, ABCDataFrame):
return v
v = extract_array(v, extract_numpy=True)
res = maybe_convert_platform(v)
# We don't do maybe_infer_objects here bc we will end up doing
# it column-by-column in ndarray_to_mgr
return res
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
# does not convert e.g. [1, "a", True] to ["1", "a", "True"] like
# np.asarray would
if is_list_like(values[0]):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861 see test_constructor_list_of_lists
values = np.array([convert(v) for v in values])
else:
values = convert(values)
return _ensure_2d(values)
def _ensure_2d(values: np.ndarray) -> np.ndarray:
"""
Reshape 1D values, raise on anything else other than 2D.
"""
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError(f"Must pass 2-d input. shape={values.shape}")
return values
def _homogenize(
data, index: Index, dtype: DtypeObj | None
) -> tuple[list[ArrayLike], list[Any]]:
oindex = None
homogenized = []
# if the original array-like in `data` is a Series, keep track of this Series' refs
refs: list[Any] = []
for val in data:
if isinstance(val, (ABCSeries, Index)):
if dtype is not None:
val = val.astype(dtype)
if isinstance(val, ABCSeries) and val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index)
refs.append(val._references)
val = val._values
else:
if isinstance(val, dict):
# GH#41785 this _should_ be equivalent to (but faster than)
# val = Series(val, index=index)._values
if oindex is None:
oindex = index.astype("O")
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
# see test_constructor_dict_datetime64_index
val = dict_compat(val)
else:
# see test_constructor_subclass_dict
val = dict(val)
if not isinstance(index, MultiIndex) and index.hasnans:
# GH#63889 Check if dict has missing value keys that need special
# handling (i.e. None/np.nan/pd.NA might no longer be matched
# when using fast_multiget with processed object index values)
from pandas import Series
val = Series(val).reindex(index)._values
else:
# Fast path: use lib.fast_multiget for dicts without missing keys
val = lib.fast_multiget(val, oindex._values, default=np.nan)
val = sanitize_array(val, index, dtype=dtype, copy=False)
com.require_length_match(val, index)
refs.append(None)
homogenized.append(val)
return homogenized, refs
def _extract_index(data) -> Index:
"""
Try to infer an Index from the passed data, raise ValueError on failure.
"""
index: Index
if len(data) == 0:
return default_index(0)
raw_lengths = set()
indexes: list[list[Hashable] | Index] = []
have_raw_arrays = False
have_series = False
have_dicts = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
have_raw_arrays = True
raw_lengths.add(len(val))
elif isinstance(val, np.ndarray) and val.ndim > 1:
raise ValueError("Per-column arrays must each be 1-dimensional")
if not indexes and not raw_lengths:
raise ValueError("If using all scalar values, you must pass an index")
if have_series:
index = union_indexes(indexes)
elif have_dicts:
index = union_indexes(indexes, sort=False)
if have_raw_arrays:
if len(raw_lengths) > 1:
raise ValueError("All arrays must be of the same length")
if have_dicts:
raise ValueError(
"Mixing dicts with non-Series may lead to ambiguous ordering."
)
raw_length = raw_lengths.pop()
if have_series:
if raw_length != len(index):
msg = (
f"array length {raw_length} does not match index "
f"length {len(index)}"
)
raise ValueError(msg)
else:
index = default_index(raw_length)
return ensure_index(index)
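# Editor's illustration (not pandas source): roughly how _extract_index
# applies its rules, assuming the usual pandas imports are available.
#
#   >>> from pandas import Series
#   >>> _extract_index([Series([1, 2], index=["a", "b"]), {"b": 3}])   # union of indexes
#   Index(['a', 'b'], dtype='object')
#   >>> _extract_index([[1, 2, 3], [4, 5]])   # ValueError: All arrays must be of the same length
#   >>> _extract_index([0, 1])                # ValueError: If using all scalar values, you must pass an index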
def reorder_arrays(
arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
) -> tuple[list[ArrayLike], Index]:
"""
Preemptively (cheaply) reindex arrays with new columns.
"""
# reorder according to the columns
if columns is not None:
if not columns.equals(arr_columns):
# if they are equal, there is nothing to do
new_arrays: list[ArrayLike] = []
indexer = arr_columns.get_indexer(columns)
for i, k in enumerate(indexer):
if k == -1:
# by convention default is all-NaN object dtype
arr = np.empty(length, dtype=object)
arr.fill(np.nan)
else:
arr = arrays[k]
new_arrays.append(arr)
arrays = new_arrays
arr_columns = columns
return arrays, arr_columns
def _get_names_from_index(data) -> Index:
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return default_index(len(data))
index: list[Hashable] = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = f"Unnamed {count}"
count += 1
return Index(index)
def _get_axes(
N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = default_index(K)
else:
columns = ensure_index(columns)
return index, columns
def dataclasses_to_dicts(data):
"""
Converts a list of dataclass instances to a list of dictionaries.
Parameters
----------
data : List[Type[dataclass]]
    Returns
    -------
    list_dict : List[dict]
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
[{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
"""
from dataclasses import asdict
return list(map(asdict, data))
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
"""
Return list of arrays, columns.
Returns
-------
list[ArrayLike]
These will become columns in a DataFrame.
Index
This will become frame.columns.
Notes
-----
Ensures that len(result_arrays) == len(result_index).
"""
if not len(data):
if isinstance(data, np.ndarray):
if data.dtype.names is not None:
# i.e. numpy structured array
columns = ensure_index(data.dtype.names)
arrays = [data[name] for name in columns]
if len(data) == 0:
# GH#42456 the indexing above results in list of 2D ndarrays
# TODO: is that an issue with numpy?
for i, arr in enumerate(arrays):
if arr.ndim == 2:
arrays[i] = arr[:, 0]
return arrays, columns
return [], ensure_index([])
elif isinstance(data, np.ndarray) and data.dtype.names is not None:
# e.g. recarray
if columns is None:
columns = Index(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
if isinstance(data[0], (list, tuple)):
arr = _list_to_arrays(data)
elif isinstance(data[0], abc.Mapping):
arr, columns = _list_of_dict_to_arrays(data, columns)
elif isinstance(data[0], ABCSeries):
arr, columns = _list_of_series_to_arrays(data, columns)
else:
# last ditch effort
data = [tuple(x) for x in data]
arr = _list_to_arrays(data)
content, columns = _finalize_columns_and_data(arr, columns, dtype)
return content, columns
def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
# Returned np.ndarray has ndim = 2
    # Note: we already check len(data) > 0 before getting here
if isinstance(data[0], tuple):
content = lib.to_object_array_tuples(data)
else:
# list of lists
content = lib.to_object_array(data)
return content
def _list_of_series_to_arrays(
data: list,
columns: Index | None,
) -> tuple[np.ndarray, Index]:
# returned np.ndarray has ndim == 2
if columns is None:
# We know pass_data is non-empty because data[0] is a Series
pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
columns = get_objs_combined_axis(pass_data, sort=False)
indexer_cache: dict[int, np.ndarray] = {}
aligned_values = []
for s in data:
index = getattr(s, "index", None)
if index is None:
index = default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = extract_array(s, extract_numpy=True)
aligned_values.append(algorithms.take_nd(values, indexer))
content = np.vstack(aligned_values)
return content, columns
def _list_of_dict_to_arrays(
data: list[dict],
columns: Index | None,
) -> tuple[np.ndarray, Index]:
"""
Convert list of dicts to numpy arrays
    If `columns` is not passed, column names are inferred from the records:
    - For OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
Returns
-------
content : np.ndarray[object, ndim=2]
columns : Index
"""
# assure that they are of the base dict class and not of derived
# classes
data = [d if type(d) is dict else dict(d) for d in data]
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, dict) for d in data)
pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
columns = ensure_index(pre_cols)
# use pre_cols to preserve exact values that were present as dict keys
# (e.g. otherwise missing values might be coerced to the canonical repr)
content = lib.dicts_to_array(data, pre_cols)
else:
content = lib.dicts_to_array(data, list(columns))
return content, columns
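# Editor's illustration (not pandas source): for plain dicts the inferred
# columns follow first-seen key order; missing keys become NaN cells.
#
#   >>> content, cols = _list_of_dict_to_arrays([{"a": 1, "b": 2}, {"b": 3, "c": 4}], None)
#   >>> list(cols), content.shape
#   (['a', 'b', 'c'], (2, 3))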
def _finalize_columns_and_data(
content: np.ndarray, # ndim == 2
columns: Index | None,
dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
"""
Ensure we have valid columns, cast object dtypes if possible.
"""
contents = list(content.T)
try:
columns = _validate_or_indexify_columns(contents, columns)
except AssertionError as err:
# GH#26429 do not raise user-facing AssertionError
raise ValueError(err) from err
if contents and contents[0].dtype == np.object_:
contents = convert_object_array(contents, dtype=dtype)
return contents, columns
def _validate_or_indexify_columns(
content: list[np.ndarray], columns: Index | None
) -> Index:
"""
    If columns is None, use positional integers as column names; otherwise,
    validate that columns have valid length.
Parameters
----------
content : list of np.ndarrays
columns : Index or None
Returns
-------
Index
If columns is None, assign positional column index value as columns.
Raises
------
    1. AssertionError when columns is not a list of lists, and the length
       of columns is not equal to the length of content.
    2. ValueError when columns is a list of lists, but the lengths of the
       sub-lists are not all equal.
    3. ValueError when columns is a list of lists, but the length of each
       sub-list is not equal to the length of content.
"""
if columns is None:
columns = default_index(len(content))
else:
# Add mask for data which is composed of list of lists
is_mi_list = isinstance(columns, list) and all(
isinstance(col, list) for col in columns
)
if not is_mi_list and len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
f"{len(columns)} columns passed, passed data had {len(content)} columns"
)
if is_mi_list:
# check if nested list column, length of each sub-list should be equal
if len({len(col) for col in columns}) > 1:
raise ValueError(
"Length of columns passed for MultiIndex columns is different"
)
# if columns is not empty and length of sublist is not equal to content
if columns and len(columns[0]) != len(content):
raise ValueError(
f"{len(columns[0])} columns passed, passed data had "
f"{len(content)} columns"
)
return columns
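# Editor's illustration (not pandas source): columns=None yields a positional
# RangeIndex, while a plain length mismatch raises the internal AssertionError
# that _finalize_columns_and_data re-raises as a ValueError.
#
#   >>> _validate_or_indexify_columns([np.array([1, 2])], None)
#   RangeIndex(start=0, stop=1, step=1)
#   >>> _validate_or_indexify_columns([np.array([1, 2])], Index(["a", "b"]))  # AssertionError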
def convert_object_array(
content: list[npt.NDArray[np.object_]],
dtype: DtypeObj | None,
dtype_backend: str = "numpy",
coerce_float: bool = False,
) -> list[ArrayLike]:
"""
Internal function to convert object array.
Parameters
----------
content: List[np.ndarray]
dtype: np.dtype or ExtensionDtype
dtype_backend: Controls if nullable/pyarrow dtypes are returned.
coerce_float: Cast floats that are integers to int.
Returns
-------
List[ArrayLike]
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
# e.g. if dtype is UInt32 then we want to cast Nones to NA instead of
# NaN in maybe_convert_objects.
to_nullable = dtype_backend != "numpy" or isinstance(dtype, BaseMaskedDtype)
arr = lib.maybe_convert_objects(
arr,
try_float=coerce_float,
convert_to_nullable_dtype=to_nullable,
)
# Notes on cases that get here 2023-02-15
# 1) we DO get here when arr is all Timestamps and dtype=None
# 2) disabling this doesn't break the world, so this must be
# getting caught at a higher level
        # 3) passing convert_non_numeric to maybe_convert_objects gets this right
# 4) convert_non_numeric?
if dtype is None:
if arr.dtype == np.dtype("O"):
# i.e. maybe_convert_objects didn't convert
convert_to_nullable_dtype = dtype_backend != "numpy"
arr = lib.maybe_convert_objects(
arr,
# Here we do not convert numeric dtypes, as if we wanted that,
# numpy would have done it for us.
convert_numeric=False,
convert_non_numeric=True,
convert_to_nullable_dtype=convert_to_nullable_dtype,
dtype_if_all_nat=np.dtype("M8[s]"),
)
if convert_to_nullable_dtype and arr.dtype == np.dtype("O"):
new_dtype = StringDtype()
arr_cls = new_dtype.construct_array_type()
arr = arr_cls._from_sequence(arr, dtype=new_dtype)
elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
if arr.dtype.kind in "iufb":
arr = pd_array(arr, copy=False)
elif isinstance(dtype, ExtensionDtype):
# TODO: test(s) that get here
# TODO: try to de-duplicate this convert function with
# core.construction functions
cls = dtype.construct_array_type()
arr = cls._from_sequence(arr, dtype=dtype, copy=False)
elif dtype.kind in "mM":
# This restriction is harmless bc these are the only cases
# where maybe_cast_to_datetime is not a no-op.
# Here we know:
# 1) dtype.kind in "mM" and
# 2) arr is either object or numeric dtype
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays | python | github | https://github.com/pandas-dev/pandas | pandas/core/internals/construction.py |
#!/usr/bin/python3
""" schedule.py:
"""
# Import Required Libraries (Standard, Third Party, Local) ****************************************
from __future__ import print_function
import copy  # used by the Week.match_all_* helpers below
import datetime
import logging
import httplib2
import re
import os
import sys
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# Authorship Info *********************************************************************************
__author__ = "Christopher Maue"
__copyright__ = "Copyright 2016, The RPi-Home Project"
__credits__ = ["Christopher Maue"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Christopher Maue"
__email__ = "csmaue@gmail.com"
__status__ = "Development"
# Class Definitions *******************************************************************************
class Condition(object):
""" A class consisting of a conditoin to be checked and the desired state to pass """
def __init__(self, logger=None, **kwargs):
self.logger = logger or logging.getLogger(__name__)
self.__andor = str()
self.__condition = str()
self.__state = str()
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "andor":
self.andor = value
if key == "condition":
self.condition = value
if key == "state":
self.state = value
@property
def andor(self):
""" Returns the condition type to be checked """
return self.__andor
@andor.setter
def andor(self, value):
""" Sets the condition type to be checked """
if isinstance(value, str):
self.__andor = value
@property
def condition(self):
""" Returns the condition to be checked """
return self.__condition
@condition.setter
def condition(self, value):
""" Sets the condition to be checked """
if isinstance(value, str):
self.__condition = value
@property
def state(self):
""" Returns the desired state to be checked against """
return self.__state
@state.setter
def state(self, value):
""" Sets the desired state to be checked against """
if isinstance(value, str):
self.__state = value
class OnRange(object):
""" Single on/off range with aux conditions """
def __init__(self, logger=None, **kwargs):
self.logger = logger or logging.getLogger(__name__)
self.__on_time = datetime.time()
self.__off_time = datetime.time()
self.__condition = []
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "on_time":
self.on_time = value
if key == "off_time":
self.off_time = value
if key == "condition":
self.condition = value
@property
def on_time(self):
""" Returns on time for a single on/off value pair """
return self.__on_time
@on_time.setter
def on_time(self, value):
""" Sets on time for a single on/off value pair """
if isinstance(value, datetime.time):
self.__on_time = value
@property
def off_time(self):
""" Returns off time for a single on/off value pair' """
return self.__off_time
@off_time.setter
def off_time(self, value):
""" Sets off time for a single on/off value pair """
if isinstance(value, datetime.time):
self.__off_time = value
@property
def condition(self):
""" Returns the condition array for a single on/off value pair """
return self.__condition
@condition.setter
def condition(self, value):
""" Sets the condition array for a single on/off value pair """
if isinstance(value, list):
self.__condition = value
elif isinstance(value, Condition):
self.__condition = [value]
def add_condition(self, andor=None, condition=None, state=None):
""" Adds a condition to the list of conditions associated with a given on/off time pair """
self.__condition.append(Condition(andor=andor, condition=condition, state=state))
def clear_all_conditions(self):
""" Clears condition list for a given on/off time pair """
self.__condition.clear()
def remove_condition(self, index):
""" Removes a specific condition from the condition list based on its position in the list (index) """
try:
self.__condition.pop(index)
        except IndexError:
            # silently ignore out-of-range indexes
            pass
class Day(object):
""" Single day schedule """
def __init__(self, logger=None, **kwargs):
self.logger = logger or logging.getLogger(__name__)
self.date = datetime.datetime.now().date()
self.__range = []
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "date":
self.date = value
if key == "range":
self.range = value
@property
def date(self):
""" Returns entire week's schedule' """
return self.__date
@date.setter
def date(self, value):
""" Sets entire week's schedule' """
if isinstance(value, datetime.date):
self.__date = value
@property
def range(self):
""" Returns entire week's schedule' """
return self.__range
@range.setter
def range(self, value):
""" Sets entire week's schedule' """
if isinstance(value, list):
self.__range = value
elif isinstance(value, OnRange):
self.__range = [value]
def add_range(self, on_time, off_time):
""" Adds a condition to the list of conditions associated with a given on/off time pair """
self.__range.append(OnRange(on_time=on_time, off_time=off_time))
def clear_all_ranges(self):
""" Clears condition list for a given on/off time pair """
self.__range.clear()
def remove_range(self, index):
""" Removes a specific condition from the condition list based on its position in the list (index) """
try:
self.__range.pop(index)
        except IndexError:
            # silently ignore out-of-range indexes
            pass
    def add_range_with_conditions(self, on_time, off_time, conditions=None):
        """ Adds an on/off range along with one or more conditions, passed as a tuple or a list of tuples """
self.__range.append(OnRange(on_time=on_time, off_time=off_time))
self.index = len(self.__range) - 1
# Add single conditions passed in as a tuple
if isinstance(conditions, tuple):
self.logger.debug("single condition passed in with on and off time")
if len(conditions) == 3:
self.__range[self.index].add_condition(andor=conditions[0],
condition=conditions[1],
state=conditions[2])
# Add multiple conditions passed in as an array of tuples
if isinstance(conditions, list):
self.logger.debug("Multiple conditions passed in with on and off time")
for i, j in enumerate(conditions):
if isinstance(j, tuple):
if len(j) == 3:
self.logger.debug("Adding condition: %s %s = %s", j[0], j[1], j[2])
self.__range[self.index].add_condition(andor=j[0],
condition=j[1],
state=j[2])
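# Editor's usage sketch (not part of the original module); the condition
# values "user1"/"home" below are made-up placeholders:
#
#   day = Day(date=datetime.date(2016, 7, 4))
#   day.add_range(datetime.time(6, 0), datetime.time(8, 30))
#   day.add_range_with_conditions(datetime.time(18, 0), datetime.time(23, 0),
#                                 conditions=("and", "user1", "home"))
#   # day.range now holds two OnRange objects; the second carries one Condition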
class Week(object):
""" Weekly schedule with on/off times and extra conditions for a single device """
def __init__(self, logger=None, **kwargs):
self.logger = logger or logging.getLogger(__name__)
        self.__day = [Day() for _ in range(7)]  # seven distinct Day objects, not seven references to one
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "day":
self.day = value
if key == "monday":
self.monday = value
if key == "tuesday":
self.tuesday = value
if key == "wednesday":
self.wednesday = value
if key == "thursday":
self.thursday = value
if key == "friday":
self.friday = value
if key == "saturday":
self.saturday = value
if key == "sunday":
self.sunday = value
@property
def day(self):
""" Returns entire week's schedule' """
return self.__day
@day.setter
def day(self, value):
""" Sets entire week's schedule' """
if isinstance(value, list):
self.__day = value
@property
def monday(self):
""" Returns the day's schedule for Monday """
return self.__day[0]
@monday.setter
def monday(self, value):
""" Set's the schedule for Monday """
if isinstance(value, Day):
self.__day[0] = value
@property
def tuesday(self):
""" Returns the day's schedule for Tuesday """
return self.__day[1]
@tuesday.setter
def tuesday(self, value):
""" Set's the schedule for Tuesday """
if isinstance(value, Day):
self.__day[1] = value
@property
def wednesday(self):
""" Returns the day's schedule for Wednesday """
return self.__day[2]
@wednesday.setter
def wednesday(self, value):
""" Set's the schedule for Wednesday """
if isinstance(value, Day):
self.__day[2] = value
@property
def thursday(self):
""" Returns the day's schedule for Thursday """
return self.__day[3]
@thursday.setter
def thursday(self, value):
""" Set's the schedule for thursday """
if isinstance(value, Day):
self.__day[3] = value
@property
def friday(self):
""" Returns the day's schedule for Friday """
return self.__day[4]
@friday.setter
def friday(self, value):
""" Set's the schedule for Friday """
if isinstance(value, Day):
self.__day[4] = value
@property
def saturday(self):
""" Returns the day's schedule for Satruday """
return self.__day[5]
@saturday.setter
def saturday(self, value):
""" Set's the schedule for Saturday """
if isinstance(value, Day):
self.__day[5] = value
@property
def sunday(self):
""" Returns the day's schedule for Sunday """
return self.__day[6]
@sunday.setter
def sunday(self, value):
""" Set's the schedule for Sunday """
if isinstance(value, Day):
self.__day[6] = value
    def match_all_to_monday(self):
        """ Copies Monday's schedule to every other day of the week """
        # deepcopy so each day owns independent range/condition objects
        self.tuesday = copy.deepcopy(self.monday)
        self.wednesday = copy.deepcopy(self.monday)
        self.thursday = copy.deepcopy(self.monday)
        self.friday = copy.deepcopy(self.monday)
        self.saturday = copy.deepcopy(self.monday)
        self.sunday = copy.deepcopy(self.monday)
    def match_all_wd_to_monday(self):
        """ Copies Monday's schedule to the remaining weekdays """
        self.tuesday = copy.deepcopy(self.monday)
        self.wednesday = copy.deepcopy(self.monday)
        self.thursday = copy.deepcopy(self.monday)
        self.friday = copy.deepcopy(self.monday)
    def match_all_we_to_saturday(self):
        """ Copies Saturday's schedule to Sunday """
        self.sunday = copy.deepcopy(self.saturday)
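# Editor's usage sketch (not part of the original module): build one day's
# schedule and replicate it across the week.
#
#   week = Week()
#   week.monday.add_range(datetime.time(7, 0), datetime.time(9, 0))
#   week.match_all_to_monday()
#   # every entry of week.day now holds an independent copy of Monday's ranges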
class GoogleSheetsSchedule(object):
""" Class and methods necessary to read a schedule from a google sheets via google's api' """
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
try:
import argparse
self.flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
self.flags = None
self.home_dir = str()
self.credential_dir = str()
self.store = str()
self.credentials = str()
self.path = str()
self.CLIENT_SECRET_FILE = str()
self.SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
self.CLIENT_SECRET_FILE = 'client_secret.json'
self.APPLICATION_NAME = 'Device Schedule via Google Sheets API'
def get_credentials(self):
"""
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials.
Returns: Credentials, the obtained credential.
"""
self.home_dir = os.path.expanduser('~')
self.credential_dir = os.path.join(self.home_dir, '.credentials')
if not os.path.exists(self.credential_dir):
self.logger.debug("Creating directory: %s", self.credential_dir)
os.makedirs(self.credential_dir)
self.credential_path = os.path.join(self.credential_dir,
'sheets.googleapis.com-python-quickstart.json')
self.logger.debug("Setting credential path to: %s", self.credential_path)
self.store = Storage(self.credential_path)
self.logger.debug("Setting store to: %s", self.store)
self.credentials = self.store.get()
self.logger.debug("Getting credentials from store")
if not self.credentials or self.credentials.invalid:
self.logger.debug("Credentials not in store")
self.path = os.path.dirname(sys.argv[0])
self.logger.debug("System path is: %s", self.path)
self.CLIENT_SECRET_FILE = os.path.join(self.path, "client_secret.json")
self.logger.debug("Looking for json file at: %s", self.CLIENT_SECRET_FILE)
self.flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)
self.flow.user_agent = self.APPLICATION_NAME
if self.flags:
self.credentials = tools.run_flow(self.flow, self.store, self.flags)
else: # Needed only for compatibility with Python 2.6
self.credentials = tools.run(self.flow, self.store)
self.logger.debug('Storing credentials to ' + self.credential_path)
self.logger.debug("Returning credentials to main program")
return self.credentials
def read_data(self, sheet_id=None, sheet_range=None):
"""
Returns all data from a specific sheet using the google sheets API
"""
self.credentials = self.get_credentials()
self.http = self.credentials.authorize(httplib2.Http())
self.discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?version=v4')
self.service = discovery.build('sheets', 'v4',
http=self.http,
discoveryServiceUrl=self.discoveryUrl)
# Set sheet name and range to read
if sheet_id is not None:
self.spreadsheetId = sheet_id
else:
self.spreadsheetId = '1LJpDC0wMv3eXQtJvHNav_Yty4PQcylthOxXig3_Bwu8'
self.logger.debug("Using sheet id: %s", self.spreadsheetId)
if sheet_range is not None:
self.rangeName = sheet_range
else:
self.rangeName = "fylt1!A3:L"
self.logger.debug("Reading data from range: %s", self.rangeName)
# Read data from sheet/range specified
self.result = self.service.spreadsheets().values().get(spreadsheetId=self.spreadsheetId, range=self.rangeName).execute()
self.values = self.result.get('values', [])
self.logger.debug("Read from table: %s", self.values)
if not self.values:
self.logger.debug("No data found. Returning NONE to main")
return None
else:
self.logger.debug("Returning data to main")
return self.values
class GoogleSheetToSched(object):
""" class and methods used to covert a list of records from a specifically formatted google sheet, into a schedule for the current calendar Week
"""
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.date = datetime.date
def convert_date(self, record):
self.record = record
self.regex1 = r"(\d{4})\-(0?[1-9]|[1][012])\-(0?[1-9]|[12][0-9]|3[01])"
self.regex2 = r"(\d{4})\/(0?[1-9]|[1][012])\/(0?[1-9]|[12][0-9]|3[01])"
if re.search(self.regex1, self.record[0]):
self.logger.debug("Data in day field is a date using a '-' as a separator")
self.split_date = self.record[0].split("-")
self.record[0] = datetime.date(int(self.split_date[0]),
int(self.split_date[1]),
int(self.split_date[2]))
self.logger.debug("Updating to datetime.date data type: %s", self.record[0])
elif re.search(self.regex2, self.record[0]):
self.logger.debug("Data in day field is a date using a '/' as a separator")
self.split_date = self.record[0].split("/")
self.record[0] = datetime.date(int(self.split_date[0]),
int(self.split_date[1]),
int(self.split_date[2]))
self.logger.debug("Updating to datetime.date data type: %s", self.record[0])
else:
self.logger.debug("Data in day field is not a specific date")
return self.record
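    # Editor's illustration (not part of the original module); the trailing
    # time fields of the record are hypothetical:
    #
    #   record = GoogleSheetToSched().convert_date(["2016-07-04", "06:00", "08:30"])
    #   # record[0] is now datetime.date(2016, 7, 4); a day name such as
    #   # "monday" in field 0 would be left untouched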
def main(self, records):
""" Don't really know what to call this yet, but this will be the main decoder sequence' """
self.records = records
self.logger.debug("\n\nDecoding starting for record set:\n%s", self.records)
# First step is to convert all dates in the leading column of the data to datetime objects
for index, record in enumerate(self.records):
self.record = self.convert_date(record)
self.records[index] = self.record
self.logger.debug("\n\nUpdated record-set:\n%s", self.records)
# Next step is to determine the week start and end dates of this current week. This information will be used to filter out specific date assignments outside of this range
self.dt_now = datetime.datetime.now()
self.day = self.dt_now.weekday()
self.dt_monday = self.dt_now + datetime.timedelta(days=-self.day)
self.logger.debug("week starts on: %s", self.dt_monday.date())
self.dt_sunday = self.dt_monday + datetime.timedelta(days=6)
self.logger.debug("week ends on: %s", self.dt_sunday.date())
# Now search the record list and remove any items that have specific dates in their day data fields, but don't fall within the range of the current week being considered
        # Build a filtered copy rather than popping while iterating (which skips records)
        filtered_records = []
        for record in self.records:
            if (isinstance(record[0], datetime.date)
                    and (record[0] < self.dt_monday.date()
                         or record[0] > self.dt_sunday.date())):
                self.logger.debug("Record [%s] falls outside of range and will be discarded", record)
            else:
                filtered_records.append(record)
        self.records = filtered_records
self.logger.debug("Updated record list: %s", self.records)
# We are now left with only data from the schedule that in some way applies to this week. Now we apply the records to each day based on a pre-defined priority | unknown | codeparrot/codeparrot-clean | ||
"""Constants used be the HomeKit component."""
# #### Misc ####
DEBOUNCE_TIMEOUT = 0.5
DOMAIN = 'homekit'
HOMEKIT_FILE = '.homekit.state'
HOMEKIT_NOTIFY_ID = 4663548
# #### Attributes ####
ATTR_DISPLAY_NAME = 'display_name'
ATTR_VALUE = 'value'
# #### Config ####
CONF_AUTO_START = 'auto_start'
CONF_ENTITY_CONFIG = 'entity_config'
CONF_FEATURE = 'feature'
CONF_FEATURE_LIST = 'feature_list'
CONF_FILTER = 'filter'
CONF_LINKED_BATTERY_SENSOR = 'linked_battery_sensor'
CONF_SAFE_MODE = 'safe_mode'
# #### Config Defaults ####
DEFAULT_AUTO_START = True
DEFAULT_PORT = 51827
DEFAULT_SAFE_MODE = False
# #### Features ####
FEATURE_ON_OFF = 'on_off'
FEATURE_PLAY_PAUSE = 'play_pause'
FEATURE_PLAY_STOP = 'play_stop'
FEATURE_TOGGLE_MUTE = 'toggle_mute'
# #### HomeKit Component Event ####
EVENT_HOMEKIT_CHANGED = 'homekit_state_change'
# #### HomeKit Component Services ####
SERVICE_HOMEKIT_START = 'start'
# #### String Constants ####
BRIDGE_MODEL = 'Bridge'
BRIDGE_NAME = 'Home Assistant Bridge'
BRIDGE_SERIAL_NUMBER = 'homekit.bridge'
MANUFACTURER = 'Home Assistant'
# #### Switch Types ####
TYPE_FAUCET = 'faucet'
TYPE_OUTLET = 'outlet'
TYPE_SHOWER = 'shower'
TYPE_SPRINKLER = 'sprinkler'
TYPE_SWITCH = 'switch'
TYPE_VALVE = 'valve'
# #### Services ####
SERV_ACCESSORY_INFO = 'AccessoryInformation'
SERV_AIR_QUALITY_SENSOR = 'AirQualitySensor'
SERV_BATTERY_SERVICE = 'BatteryService'
SERV_CARBON_DIOXIDE_SENSOR = 'CarbonDioxideSensor'
SERV_CARBON_MONOXIDE_SENSOR = 'CarbonMonoxideSensor'
SERV_CONTACT_SENSOR = 'ContactSensor'
SERV_FANV2 = 'Fanv2'
SERV_GARAGE_DOOR_OPENER = 'GarageDoorOpener'
SERV_HUMIDITY_SENSOR = 'HumiditySensor'
SERV_LEAK_SENSOR = 'LeakSensor'
SERV_LIGHT_SENSOR = 'LightSensor'
SERV_LIGHTBULB = 'Lightbulb'
SERV_LOCK = 'LockMechanism'
SERV_MOTION_SENSOR = 'MotionSensor'
SERV_OCCUPANCY_SENSOR = 'OccupancySensor'
SERV_OUTLET = 'Outlet'
SERV_SECURITY_SYSTEM = 'SecuritySystem'
SERV_SMOKE_SENSOR = 'SmokeSensor'
SERV_SWITCH = 'Switch'
SERV_TEMPERATURE_SENSOR = 'TemperatureSensor'
SERV_THERMOSTAT = 'Thermostat'
SERV_VALVE = 'Valve'
SERV_WINDOW_COVERING = 'WindowCovering'
# #### Characteristics ####
CHAR_ACTIVE = 'Active'
CHAR_AIR_PARTICULATE_DENSITY = 'AirParticulateDensity'
CHAR_AIR_QUALITY = 'AirQuality'
CHAR_BATTERY_LEVEL = 'BatteryLevel'
CHAR_BRIGHTNESS = 'Brightness'
CHAR_CARBON_DIOXIDE_DETECTED = 'CarbonDioxideDetected'
CHAR_CARBON_DIOXIDE_LEVEL = 'CarbonDioxideLevel'
CHAR_CARBON_DIOXIDE_PEAK_LEVEL = 'CarbonDioxidePeakLevel'
CHAR_CARBON_MONOXIDE_DETECTED = 'CarbonMonoxideDetected'
CHAR_CARBON_MONOXIDE_LEVEL = 'CarbonMonoxideLevel'
CHAR_CARBON_MONOXIDE_PEAK_LEVEL = 'CarbonMonoxidePeakLevel'
CHAR_CHARGING_STATE = 'ChargingState'
CHAR_COLOR_TEMPERATURE = 'ColorTemperature'
CHAR_CONTACT_SENSOR_STATE = 'ContactSensorState'
CHAR_COOLING_THRESHOLD_TEMPERATURE = 'CoolingThresholdTemperature'
CHAR_CURRENT_AMBIENT_LIGHT_LEVEL = 'CurrentAmbientLightLevel'
CHAR_CURRENT_DOOR_STATE = 'CurrentDoorState'
CHAR_CURRENT_HEATING_COOLING = 'CurrentHeatingCoolingState'
CHAR_CURRENT_POSITION = 'CurrentPosition'
CHAR_CURRENT_HUMIDITY = 'CurrentRelativeHumidity'
CHAR_CURRENT_SECURITY_STATE = 'SecuritySystemCurrentState'
CHAR_CURRENT_TEMPERATURE = 'CurrentTemperature'
CHAR_FIRMWARE_REVISION = 'FirmwareRevision'
CHAR_HEATING_THRESHOLD_TEMPERATURE = 'HeatingThresholdTemperature'
CHAR_HUE = 'Hue'
CHAR_IN_USE = 'InUse'
CHAR_LEAK_DETECTED = 'LeakDetected'
CHAR_LOCK_CURRENT_STATE = 'LockCurrentState'
CHAR_LOCK_TARGET_STATE = 'LockTargetState'
CHAR_LINK_QUALITY = 'LinkQuality'
CHAR_MANUFACTURER = 'Manufacturer'
CHAR_MODEL = 'Model'
CHAR_MOTION_DETECTED = 'MotionDetected'
CHAR_NAME = 'Name'
CHAR_OCCUPANCY_DETECTED = 'OccupancyDetected'
CHAR_ON = 'On'
CHAR_OUTLET_IN_USE = 'OutletInUse'
CHAR_POSITION_STATE = 'PositionState'
CHAR_ROTATION_DIRECTION = 'RotationDirection'
CHAR_ROTATION_SPEED = 'RotationSpeed'
CHAR_SATURATION = 'Saturation'
CHAR_SERIAL_NUMBER = 'SerialNumber'
CHAR_SMOKE_DETECTED = 'SmokeDetected'
CHAR_STATUS_LOW_BATTERY = 'StatusLowBattery'
CHAR_SWING_MODE = 'SwingMode'
CHAR_TARGET_DOOR_STATE = 'TargetDoorState'
CHAR_TARGET_HEATING_COOLING = 'TargetHeatingCoolingState'
CHAR_TARGET_POSITION = 'TargetPosition'
CHAR_TARGET_SECURITY_STATE = 'SecuritySystemTargetState'
CHAR_TARGET_TEMPERATURE = 'TargetTemperature'
CHAR_TEMP_DISPLAY_UNITS = 'TemperatureDisplayUnits'
CHAR_VALVE_TYPE = 'ValveType'
# #### Properties ####
PROP_MAX_VALUE = 'maxValue'
PROP_MIN_VALUE = 'minValue'
PROP_MIN_STEP = 'minStep'
PROP_CELSIUS = {'minValue': -273, 'maxValue': 999}
# #### Device Classes ####
DEVICE_CLASS_CO = 'co'
DEVICE_CLASS_CO2 = 'co2'
DEVICE_CLASS_DOOR = 'door'
DEVICE_CLASS_GARAGE_DOOR = 'garage_door'
DEVICE_CLASS_GAS = 'gas'
DEVICE_CLASS_MOISTURE = 'moisture'
DEVICE_CLASS_MOTION = 'motion'
DEVICE_CLASS_OCCUPANCY = 'occupancy'
DEVICE_CLASS_OPENING = 'opening'
DEVICE_CLASS_PM25 = 'pm25'
DEVICE_CLASS_SMOKE = 'smoke'
DEVICE_CLASS_WINDOW = 'window'
# #### Thresholds ####
THRESHOLD_CO = 25
THRESHOLD_CO2 = 1000
# #### Default values ####
DEFAULT_MIN_TEMP_WATER_HEATER = 40 # °C
DEFAULT_MAX_TEMP_WATER_HEATER = 60 # °C | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_REGISTER_REF_H_
#define TENSORFLOW_LITE_KERNELS_REGISTER_REF_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
class BuiltinRefOpResolver : public MutableOpResolver {
public:
BuiltinRefOpResolver();
const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
int version) const override;
const TfLiteRegistration* FindOp(const char* op, int version) const override;
};
} // namespace builtin
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_REGISTER_REF_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/lite/kernels/register_ref.h |
apiVersion: v1
kind: ConfigMap
metadata:
name: alloy-config
namespace: default
data:
config.alloy: |
// Pyroscope configuration to receive profiles
pyroscope.write "default" {
endpoint {
url = "http://pyroscope.default.svc.cluster.local:4040"
}
}
// Scrape CPU profiles from the IAM operator
pyroscope.scrape "iam_operator" {
targets = [
{
"__address__" = "iam-folder-reconciler.default.svc.cluster.local:6060",
"service_name" = "iam-folder-reconciler",
},
]
forward_to = [pyroscope.write.default.receiver]
job_name = "iam-operator"
scrape_interval = "30s"
scrape_timeout = "25s"
profiling_config {
profile.process_cpu {
enabled = true
path = "/debug/pprof/profile"
delta = false
}
profile.godeltaprof_memory {
enabled = true
path = "/debug/pprof/delta_heap"
}
profile.memory {
enabled = true
path = "/debug/pprof/heap"
delta = false
}
profile.godeltaprof_mutex {
enabled = true
path = "/debug/pprof/delta_mutex"
}
profile.godeltaprof_block {
enabled = true
path = "/debug/pprof/delta_block"
}
profile.goroutine {
enabled = true
path = "/debug/pprof/goroutine"
delta = false
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: alloy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
name: alloy
template:
metadata:
labels:
name: alloy
spec:
containers:
- name: alloy
image: grafana/alloy:v1.10.0
args:
- run
- /etc/alloy/config.alloy
- --storage.path=/var/lib/alloy/data
- --server.http.listen-addr=0.0.0.0:12345
- --stability.level=experimental
ports:
- containerPort: 12345
name: http
volumeMounts:
- name: config
mountPath: /etc/alloy
- name: storage
mountPath: /var/lib/alloy/data
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "200m"
volumes:
- name: config
configMap:
name: alloy-config
- name: storage
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: alloy
namespace: default
spec:
selector:
name: alloy
ports:
- name: http
port: 12345
targetPort: 12345 | unknown | github | https://github.com/grafana/grafana | apps/iam/local/yamls/alloy.yaml |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 1991-1998, 2000, 2002, 2003 Progiciels Bourbeau-Pinard inc.
# François Pinard <pinard@iro.umontreal.ca>, 1991-04.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""\
Handling of boxed comments in various box styles.
The user documentation for this tool may be found at:
http://pymacs.progiciels-bpi.ca/rebox.html
"""
## Note: a double hash comment introduces a group of functions or methods.
__metaclass__ = type  # make all classes new-style under Python 2
import re, sys
## Batch specific features.
def main(*arguments):
refill = True
style = None
tabify = False
verbose = False
width = 79
import getopt
options, arguments = getopt.getopt(arguments, 'ns:tvw:', ['help'])
for option, value in options:
if option == '--help':
sys.stdout.write(__doc__)
sys.exit(0)
elif option == '-n':
refill = False
elif option == '-s':
style = int(value)
elif option == '-t':
tabify = True
elif option == '-v':
verbose = True
elif option == '-w':
width = int(value)
if len(arguments) == 0:
text = sys.stdin.read()
elif len(arguments) == 1:
text = open(arguments[0]).read()
else:
sys.stderr.write("Invalid usage, try `rebox --help' for help.\n")
sys.exit(1)
old_style, new_style, text, position = engine(
text, style=style, width=width, refill=refill, tabify=tabify)
if text is None:
sys.stderr.write("* Cannot rebox to style %d.\n" % new_style)
sys.exit(1)
sys.stdout.write(text)
if verbose:
if old_style == new_style:
sys.stderr.write("Reboxed with style %d.\n" % old_style)
else:
sys.stderr.write("Reboxed from style %d to %d.\n"
% (old_style, new_style))
## Emacs specific features.
def pymacs_load_hook():
global interactions, lisp, Let, region, comment, set_default_style
from Pymacs import lisp, Let
emacs_rebox = Emacs_Rebox()
# Declare functions for Emacs to import.
interactions = {}
region = emacs_rebox.region
interactions[region] = 'P'
comment = emacs_rebox.comment
interactions[comment] = 'P'
set_default_style = emacs_rebox.set_default_style
class Emacs_Rebox:
def __init__(self):
self.default_style = None
def set_default_style(self, style):
"""\
Set the default style to STYLE.
"""
self.default_style = style
def region(self, flag):
"""\
Rebox the boxed comment in the current region, obeying FLAG.
"""
self.emacs_engine(flag, self.find_region)
def comment(self, flag):
"""\
Rebox the surrounding boxed comment, obeying FLAG.
"""
self.emacs_engine(flag, self.find_comment)
def emacs_engine(self, flag, find_limits):
"""\
Rebox text while obeying FLAG. Call FIND_LIMITS to discover the extent
of the boxed comment.
"""
# `C-u -' means that box style is to be decided interactively.
if flag == lisp['-']:
flag = self.ask_for_style()
# If FLAG is zero or negative, only change default box style.
if isinstance(flag, int) and flag <= 0:
self.default_style = -flag
lisp.message("Default style set to %d" % -flag)
return
# Decide box style and refilling.
if flag is None:
style = self.default_style
refill = True
elif isinstance(flag, int):
if self.default_style is None:
style = flag
else:
style = merge_styles(self.default_style, flag)
refill = True
else:
            flag = flag.copy()  # convert the Lisp prefix-argument object into its Python equivalent
if isinstance(flag, list):
style = self.default_style
refill = False
else:
lisp.error("Unexpected flag value %s" % flag)
# Prepare for reboxing.
lisp.message("Reboxing...")
checkpoint = lisp.buffer_undo_list.value()
start, end = find_limits()
text = lisp.buffer_substring(start, end)
width = lisp.fill_column.value()
tabify = lisp.indent_tabs_mode.value() is not None
point = lisp.point()
if start <= point < end:
position = point - start
else:
position = None
# Rebox the text and replace it in Emacs buffer.
old_style, new_style, text, position = engine(
text, style=style, width=width,
refill=refill, tabify=tabify, position=position)
if text is None:
lisp.error("Cannot rebox to style %d" % new_style)
lisp.delete_region(start, end)
lisp.insert(text)
if position is not None:
lisp.goto_char(start + position)
# Collapse all operations into a single one, for Undo.
self.clean_undo_after(checkpoint)
# We are finished, tell the user.
if old_style == new_style:
lisp.message("Reboxed with style %d" % old_style)
else:
lisp.message("Reboxed from style %d to %d"
% (old_style, new_style))
def ask_for_style(self):
"""\
Request the style interactively, using the minibuffer.
"""
language = quality = type = None
while language is None:
lisp.message("\
Box language is 100-none, 200-/*, 300-//, 400-#, 500-;, 600-%%")
key = lisp.read_char()
if key >= ord('0') and key <= ord('6'):
language = key - ord('0')
while quality is None:
lisp.message("\
Box quality/width is 10-simple/1, 20-rounded/2, 30-starred/3 or 40-starred/4")
key = lisp.read_char()
if key >= ord('0') and key <= ord('4'):
quality = key - ord('0')
while type is None:
lisp.message("\
Box type is 1-opened, 2-half-single, 3-single, 4-half-double or 5-double")
key = lisp.read_char()
if key >= ord('0') and key <= ord('5'):
type = key - ord('0')
return 100*language + 10*quality + type
def find_region(self):
"""\
Return the limits of the region.
"""
return lisp.point(), lisp.mark(lisp.t)
def find_comment(self):
"""\
Find and return the limits of the block of comments following or enclosing
the cursor, or return an error if the cursor is not within such a block
of comments. Extend it as far as possible in both directions.
"""
let = Let()
let.push_excursion()
# Find the start of the current or immediately following comment.
lisp.beginning_of_line()
lisp.skip_chars_forward(' \t\n')
lisp.beginning_of_line()
if not language_matcher[0](self.remainder_of_line()):
temp = lisp.point()
if not lisp.re_search_forward('\\*/', None, lisp.t):
lisp.error("outside any comment block")
lisp.re_search_backward('/\\*')
if lisp.point() > temp:
lisp.error("outside any comment block")
temp = lisp.point()
lisp.beginning_of_line()
lisp.skip_chars_forward(' \t')
if lisp.point() != temp:
lisp.error("text before start of comment")
lisp.beginning_of_line()
start = lisp.point()
language = guess_language(self.remainder_of_line())
# Find the end of this comment.
if language == 2:
lisp.search_forward('*/')
if not lisp.looking_at('[ \t]*$'):
lisp.error("text after end of comment")
lisp.end_of_line()
if lisp.eobp():
lisp.insert('\n')
else:
lisp.forward_char(1)
end = lisp.point()
# Try to extend the comment block backwards.
lisp.goto_char(start)
while not lisp.bobp():
if language == 2:
lisp.skip_chars_backward(' \t\n')
if not lisp.looking_at('[ \t]*\n[ \t]*/\\*'):
break
if lisp.point() < 2:
break
lisp.backward_char(2)
if not lisp.looking_at('\\*/'):
break
lisp.re_search_backward('/\\*')
temp = lisp.point()
lisp.beginning_of_line()
lisp.skip_chars_forward(' \t')
if lisp.point() != temp:
break
lisp.beginning_of_line()
else:
lisp.previous_line(1)
if not language_matcher[language](self.remainder_of_line()):
break
start = lisp.point()
# Try to extend the comment block forward.
lisp.goto_char(end)
while language_matcher[language](self.remainder_of_line()):
if language == 2:
lisp.re_search_forward('[ \t]*/\\*')
lisp.re_search_forward('\\*/')
if lisp.looking_at('[ \t]*$'):
lisp.beginning_of_line()
lisp.forward_line(1)
end = lisp.point()
else:
lisp.forward_line(1)
end = lisp.point()
return start, end
def remainder_of_line(self):
"""\
Return all characters between point and end of line in Emacs buffer.
"""
return lisp('''\
(buffer-substring (point) (save-excursion (skip-chars-forward "^\n") (point)))
''')
def clean_undo_after_old(self, checkpoint):
"""\
Remove all intermediate boundaries from the Undo list since CHECKPOINT.
"""
# Declare some Lisp functions.
car = lisp.car
cdr = lisp.cdr
eq = lisp.eq
setcdr = lisp.setcdr
# Remove any `nil' delimiter recently added to the Undo list.
cursor = lisp.buffer_undo_list.value()
if not eq(cursor, checkpoint):
tail = cdr(cursor)
while not eq(tail, checkpoint):
if car(tail):
cursor = tail
tail = cdr(cursor)
else:
tail = cdr(tail)
setcdr(cursor, tail)
def clean_undo_after(self, checkpoint):
"""\
Remove all intermediate boundaries from the Undo list since CHECKPOINT.
"""
lisp("""
(let ((undo-list %s))
(if (not (eq buffer-undo-list undo-list))
(let ((cursor buffer-undo-list))
(while (not (eq (cdr cursor) undo-list))
(if (car (cdr cursor))
(setq cursor (cdr cursor))
(setcdr cursor (cdr (cdr cursor)))))))
nil)
"""
% (checkpoint or 'nil'))
## Reboxing main control.
def engine(text, style=None, width=79, refill=True, tabify=False,
position=None):
"""\
Add, delete or adjust a boxed comment held in TEXT, according to STYLE.
STYLE values are explained at beginning of this file. Any zero attribute
in STYLE indicates that the corresponding attribute should be recovered
from the currently existing box. Produced lines will not go over WIDTH
columns if possible, if refilling gets done. But if REFILL is false, WIDTH
is ignored. If TABIFY is true, the beginning of produced lines will have
spaces replace by TABs. POSITION is either None, or a character position
within TEXT. Returns four values: the old box style, the new box style,
the reformatted text, and either None or the adjusted value of POSITION in
the new text. The reformatted text is returned as None if the requested
style does not exist.
"""
last_line_complete = text and text[-1] == '\n'
if last_line_complete:
text = text[:-1]
lines = text.expandtabs().split('\n')
# Decide about refilling and the box style to use.
new_style = 111
old_template = guess_template(lines)
new_style = merge_styles(new_style, old_template.style)
if style is not None:
new_style = merge_styles(new_style, style)
new_template = template_registry.get(new_style)
# Interrupt processing if STYLE does not exist.
if not new_template:
return old_template.style, new_style, None, None
# Remove all previous comment marks, and left margin.
if position is not None:
marker = Marker()
marker.save_position(text, position, old_template.characters())
lines, margin = old_template.unbuild(lines)
# Ensure only one white line between paragraphs.
counter = 1
while counter < len(lines) - 1:
if lines[counter] == '' and lines[counter-1] == '':
del lines[counter]
else:
counter = counter + 1
# Rebuild the boxed comment.
lines = new_template.build(lines, width, refill, margin)
# Retabify to the left only.
if tabify:
for counter in range(len(lines)):
            tabs = len(re.match(' *', lines[counter]).group()) // 8
lines[counter] = '\t' * tabs + lines[counter][8*tabs:]
# Restore the point position.
text = '\n'.join(lines)
if last_line_complete:
text = text + '\n'
if position is not None:
position = marker.get_position(text, new_template.characters())
return old_template.style, new_style, text, position
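# Editor's illustration (not part of the original module): reboxing a bare
# '#' comment into style 413 (a '#' box ruled with dashes); the rule width
# adapts to the text.
#
#   >>> old, new, boxed, pos = engine('# Hello there.\n', style=413, width=30)
#   >>> print(boxed, end='')
#   # ------------- #
#   # Hello there.  #
#   # ------------- #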
def guess_language(line):
"""\
Guess the language in use for LINE.
"""
for language in range(len(language_matcher) - 1, 1, -1):
if language_matcher[language](line):
return language
return 1
def guess_template(lines):
"""\
Find the heaviest box template matching LINES.
"""
best_template = None
for template in template_registry.values():
if best_template is None or template > best_template:
if template.match(lines):
best_template = template
return best_template
def left_margin_size(lines):
"""\
Return the width of the left margin for all LINES. Ignore white lines.
"""
margin = None
for line in lines:
counter = len(re.match(' *', line).group())
if counter != len(line):
if margin is None or counter < margin:
margin = counter
if margin is None:
margin = 0
return margin
def merge_styles(original, update):
"""\
Return style attributes as per ORIGINAL, in which attributes have been
overridden by non-zero corresponding style attributes from UPDATE.
"""
    style = [original // 100, original // 10 % 10, original % 10]
    merge = update // 100, update // 10 % 10, update % 10
for counter in range(3):
if merge[counter]:
style[counter] = merge[counter]
return 100*style[0] + 10*style[1] + style[2]
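# Editor's illustration (not part of the original module): any zero digit in
# UPDATE leaves the corresponding ORIGINAL digit untouched.
#
#   >>> merge_styles(111, 400)   # language digit overridden, rest kept
#   411
#   >>> merge_styles(425, 3)     # only the type digit changes
#   423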
## Refilling logic.
def refill_lines(lines, width,
                 cached_refiller=[]):  # mutable default used deliberately as a cross-call cache
"""\
Refill LINES, trying to not produce lines having more than WIDTH columns.
"""
if not cached_refiller:
for Refiller in Refiller_Gnu_Fmt, Refiller_Textwrap, Refiller_Dumb:
refiller = Refiller()
new_lines = refiller.fill(lines, width)
if new_lines is not None:
cached_refiller.append(refiller)
return new_lines
return cached_refiller[0].fill(lines, width)
class Refiller:
available = True
def fill(self, lines, width):
if self.available:
new_lines = []
start = 0
while start < len(lines) and not lines[start]:
start = start + 1
end = start
while end < len(lines):
while end < len(lines) and lines[end]:
end = end + 1
new_lines = new_lines + self.fill_paragraph(lines[start:end],
width)
while end < len(lines) and not lines[end]:
end = end + 1
if end < len(lines):
new_lines.append('')
start = end
return new_lines
class Refiller_Gnu_Fmt(Refiller):
"""\
Use both Knuth algorithm and protection for full stops at end of sentences.
"""
def fill(self, lines, width):
if self.available:
            import tempfile, os
            # mkstemp avoids the race condition of the deprecated mktemp
            handle, name = tempfile.mkstemp()
            os.close(handle)
            open(name, 'w').write('\n'.join(lines) + '\n')
process = os.popen('fmt -cuw %d %s' % (width, name))
text = process.read()
os.remove(name)
if process.close() is None:
return [line.expandtabs() for line in text.split('\n')[:-1]]
class Refiller_Textwrap(Refiller):
"""\
No Knuth algorithm, but protection for full stops at end of sentences.
"""
def __init__(self):
try:
from textwrap import TextWrapper
except ImportError:
self.available = False
else:
self.wrapper = TextWrapper(fix_sentence_endings=1)
def fill_paragraph(self, lines, width):
# FIXME: This one fills indented lines more aggressively than the
        # dumb refiller. I'm not sure what is the best thing to do, but
# ideally, all refillers should behave more or less the same way.
self.wrapper.width = width
prefix = ' ' * left_margin_size(lines)
self.wrapper.initial_indent = prefix
self.wrapper.subsequent_indent = prefix
return self.wrapper.wrap(' '.join(lines))
class Refiller_Dumb(Refiller):
"""\
No Knuth algorithm, nor even protection for full stops at end of sentences.
"""
def fill_paragraph(self, lines, width):
margin = left_margin_size(lines)
prefix = ' ' * margin
new_lines = []
new_line = ''
for line in lines:
counter = len(line) - len(line.lstrip())
if counter > margin:
if new_line:
new_lines.append(prefix + new_line)
new_line = ''
indent = ' ' * (counter - margin)
else:
indent = ''
for word in line.split():
if new_line:
if len(new_line) + 1 + len(word) > width:
new_lines.append(prefix + new_line)
new_line = word
else:
new_line = new_line + ' ' + word
else:
new_line = indent + word
indent = ''
if new_line:
new_lines.append(prefix + new_line)
return new_lines
## Marking logic.
class Marker:
"""\
Heuristics to simulate a marker while reformatting boxes.
"""
def save_position(self, text, position, ignorable):
"""\
Given a TEXT and a POSITION in that text, save the adjusted position
by faking that all IGNORABLE characters before POSITION were removed.
"""
ignore = {}
for character in ' \t\r\n' + ignorable:
ignore[character] = None
counter = 0
for character in text[:position]:
            if character in ignore:
counter = counter + 1
self.position = position - counter
def get_position(self, text, ignorable, latest=0):
"""\
Given a TEXT, return the value that would yield the currently saved position,
if it was saved by `save_position' with IGNORABLE. Unless the position lies
within a series of ignorable characters, LATEST has no effect in practice.
If LATEST is true, return the biggest possible value instead of the smallest.
"""
ignore = {}
for character in ' \t\r\n' + ignorable:
ignore[character] = None
counter = 0
position = 0
if latest:
for character in text:
                if character in ignore:
counter = counter + 1
else:
if position == self.position:
break
position = position + 1
elif self.position > 0:
for character in text:
                if character in ignore:
counter = counter + 1
else:
position = position + 1
if position == self.position:
break
return position + counter
## Template processing.
class Template:
def __init__(self, style, weight, lines):
"""\
Digest and register a single template. The template is numbered STYLE,
has a parsing WEIGHT, and is described by one to three LINES.
        STYLE should be used only once through all template declarations.
One of the lines should contain the substring `box' to represent the comment
to be boxed, and if three lines are given, `box' should appear in the middle
one. Lines containing only spaces are implied as necessary before and after
        the `box' line, so we have three lines.
Normally, all three template lines should be of the same length. If the first
line is shorter, it represents a start comment string to be bundled within the
first line of the comment text. If the third line is shorter, it represents
an end comment string to be bundled at the end of the comment text, and
refilled with it.
"""
        assert style not in template_registry, \
"Style %d defined more than once" % style
self.style = style
self.weight = weight
# Make it exactly three lines, with `box' in the middle.
start = lines[0].find('box')
if start >= 0:
line1 = None
line2 = lines[0]
if len(lines) > 1:
line3 = lines[1]
else:
line3 = None
else:
start = lines[1].find('box')
if start >= 0:
line1 = lines[0]
line2 = lines[1]
if len(lines) > 2:
line3 = lines[2]
else:
line3 = None
else:
assert 0, "Erroneous template for %d style" % style
end = start + len('box')
# Define a few booleans.
self.merge_nw = line1 is not None and len(line1) < len(line2)
self.merge_se = line3 is not None and len(line3) < len(line2)
# Define strings at various cardinal directions.
if line1 is None:
self.nw = self.nn = self.ne = None
elif self.merge_nw:
self.nw = line1
self.nn = self.ne = None
else:
if start > 0:
self.nw = line1[:start]
else:
self.nw = None
if line1[start] != ' ':
self.nn = line1[start]
else:
self.nn = None
if end < len(line1):
self.ne = line1[end:].rstrip()
else:
self.ne = None
if start > 0:
self.ww = line2[:start]
else:
self.ww = None
if end < len(line2):
self.ee = line2[end:]
else:
self.ee = None
if line3 is None:
self.sw = self.ss = self.se = None
elif self.merge_se:
self.sw = self.ss = None
self.se = line3.rstrip()
else:
if start > 0:
self.sw = line3[:start]
else:
self.sw = None
if line3[start] != ' ':
self.ss = line3[start]
else:
self.ss = None
if end < len(line3):
self.se = line3[end:].rstrip()
else:
self.se = None
# Define parsing regexps.
if self.merge_nw:
self.regexp1 = re.compile(' *' + regexp_quote(self.nw) + '.*$')
elif self.nw and not self.nn and not self.ne:
self.regexp1 = re.compile(' *' + regexp_quote(self.nw) + '$')
elif self.nw or self.nn or self.ne:
self.regexp1 = re.compile(
' *' + regexp_quote(self.nw) + regexp_ruler(self.nn)
+ regexp_quote(self.ne) + '$')
else:
self.regexp1 = None
if self.ww or self.ee:
self.regexp2 = re.compile(
' *' + regexp_quote(self.ww) + '.*'
+ regexp_quote(self.ee) + '$')
else:
self.regexp2 = None
if self.merge_se:
self.regexp3 = re.compile('.*' + regexp_quote(self.se) + '$')
elif self.sw and not self.ss and not self.se:
self.regexp3 = re.compile(' *' + regexp_quote(self.sw) + '$')
elif self.sw or self.ss or self.se:
self.regexp3 = re.compile(
' *' + regexp_quote(self.sw) + regexp_ruler(self.ss)
+ regexp_quote(self.se) + '$')
else:
self.regexp3 = None
# Save results.
template_registry[style] = self
    def __gt__(self, other):
        # guess_template compares templates with `>` to pick the heaviest match
        return self.weight > other.weight
def characters(self):
"""\
Return a string of characters which may be used to draw the box.
"""
characters = ''
for text in (self.nw, self.nn, self.ne,
self.ww, self.ee,
self.sw, self.ss, self.se):
if text:
for character in text:
if character not in characters:
characters = characters + character
return characters
def match(self, lines):
"""\
Returns true if LINES exactly match this template.
"""
start = 0
end = len(lines)
if self.regexp1 is not None:
if start == end or not self.regexp1.match(lines[start]):
return 0
start = start + 1
if self.regexp3 is not None:
if end == 0 or not self.regexp3.match(lines[end-1]):
return 0
end = end - 1
if self.regexp2 is not None:
for line in lines[start:end]:
if not self.regexp2.match(line):
return 0
return 1
def unbuild(self, lines):
"""\
Remove all comment marks from LINES, as hinted by this template. Returns the
cleaned up set of lines, and the size of the left margin.
"""
margin = left_margin_size(lines)
# Remove box style marks.
start = 0
end = len(lines)
if self.regexp1 is not None:
lines[start] = unbuild_clean(lines[start], self.regexp1)
start = start + 1
if self.regexp3 is not None:
lines[end-1] = unbuild_clean(lines[end-1], self.regexp3)
end = end - 1
if self.regexp2 is not None:
for counter in range(start, end):
lines[counter] = unbuild_clean(lines[counter], self.regexp2)
# Remove the left side of the box after it turned into spaces.
delta = left_margin_size(lines) - margin
for counter in range(len(lines)):
lines[counter] = lines[counter][delta:]
# Remove leading and trailing white lines.
start = 0
end = len(lines)
while start < end and lines[start] == '':
start = start + 1
while end > start and lines[end-1] == '':
end = end - 1
return lines[start:end], margin
def build(self, lines, width, refill, margin):
"""\
Put LINES back into a boxed comment according to this template, after
having refilled them if REFILL. The box should start at column MARGIN,
and the total size of each line should ideally not go over WIDTH.
"""
# Merge a short end delimiter now, so it gets refilled with text.
if self.merge_se:
if lines:
lines[-1] = lines[-1] + ' ' + self.se
else:
lines = [self.se]
# Reduce WIDTH according to left and right inserts, then refill.
if self.ww:
width = width - len(self.ww)
if self.ee:
width = width - len(self.ee)
if refill:
lines = refill_lines(lines, width)
# Reduce WIDTH further according to the current right margin,
# and excluding the left margin.
maximum = 0
for line in lines:
if line:
if line[-1] in '.!?':
length = len(line) + 1
else:
length = len(line)
if length > maximum:
maximum = length
width = maximum - margin
# Construct the top line.
if self.merge_nw:
lines[0] = ' ' * margin + self.nw + lines[0][margin:]
start = 1
elif self.nw or self.nn or self.ne:
if self.nn:
line = self.nn * width
else:
line = ' ' * width
if self.nw:
line = self.nw + line
if self.ne:
line = line + self.ne
lines.insert(0, (' ' * margin + line).rstrip())
start = 1
else:
start = 0
# Construct all middle lines.
for counter in range(start, len(lines)):
line = lines[counter][margin:]
line = line + ' ' * (width - len(line))
if self.ww:
line = self.ww + line
if self.ee:
line = line + self.ee
lines[counter] = (' ' * margin + line).rstrip()
# Construct the bottom line.
if self.sw or self.ss or self.se and not self.merge_se:
if self.ss:
line = self.ss * width
else:
line = ' ' * width
if self.sw:
line = self.sw + line
if self.se and not self.merge_se:
line = line + self.se
lines.append((' ' * margin + line).rstrip())
return lines
def regexp_quote(text):
"""\
Return a regexp matching TEXT without its surrounding space, maybe
followed by spaces. If TEXT is None, return the empty regexp.
Unless TEXT is all spaces, it is nested within a regexp parenthetical group.
"""
if text is None:
return ''
if text == ' ' * len(text):
return ' *'
return '(' + re.escape(text.strip()) + ') *'
def regexp_ruler(character):
"""\
Return a regexp matching two or more repetitions of CHARACTER, maybe
followed by spaces. If CHARACTER is None, return the empty regexp.
Unless CHARACTER is a space, the ruler is nested within a regexp parenthetical group.
"""
if character is None:
return ''
if character == ' ':
return ' +'
return '(' + re.escape(character + character) + '+) *'
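# Illustrative outputs (examples only, not part of the original script):
# regexp_quote('/*') yields roughly r'(/\*) *', regexp_ruler('-') yields
# roughly r'(\-\-+) *', and regexp_quote(None) yields ''. For a
# '/*---*/' style, regexp1 above therefore becomes approximately
# r' *(/\*) *(\-\-+) *(\*/) *$', matching rulers like '/*------*/'.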
def unbuild_clean(line, regexp):
"""\
Return LINE with all parenthetical groups in REGEXP erased and replaced by an
equivalent number of spaces, except for trailing spaces, which get removed.
"""
match = re.match(regexp, line)
groups = match.groups()
for counter in range(len(groups)):
if groups[counter] is not None:
start, end = match.span(1 + counter)
line = line[:start] + ' ' * (end - start) + line[end:]
return line.rstrip()
## Template data.
# Matcher functions for a comment start, indexed by numeric LANGUAGE.
language_matcher = []
for pattern in (r' *(/\*|//+|#+|;+|%+)',
r'', # 1
r' */\*', # 2
r' *//+', # 3
r' *#+', # 4
r' *;+', # 5
r' *%+'): # 6
language_matcher.append(re.compile(pattern).match)
# Template objects, indexed by numeric style.
template_registry = {}
def make_generic(style, weight, lines):
"""\
Add each supported language's numeric code to STYLE and generate one
template per language, all using the same WEIGHT. Replace `?' in LINES
with each language's comment character.
"""
for language, character in ((300, '/'), # C++ style comments
(400, '#'), # scripting languages
(500, ';'), # Lisp and assembler
(600, '%')): # TeX and PostScript
new_style = language + style
if 310 < new_style <= 319:
# Disallow the single '/' styles (311-319) with C++, since '/' alone does not open a comment.
continue
new_lines = []
for line in lines:
new_lines.append(line.replace('?', character))
Template(new_style, weight, new_lines)
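# Illustrative expansion (an example, not part of the original script):
# make_generic(11, 115, ('? box',)) below registers styles 411 ('# box'),
# 511 ('; box') and 611 ('% box'), while 311 ('/ box') is skipped since a
# single '/' does not open a C++ comment.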
# Generic programming language templates.
make_generic(11, 115, ('? box',))
make_generic(12, 215, ('? box ?',
'? --- ?'))
make_generic(13, 315, ('? --- ?',
'? box ?',
'? --- ?'))
make_generic(14, 415, ('? box ?',
'???????'))
make_generic(15, 515, ('???????',
'? box ?',
'???????'))
make_generic(21, 125, ('?? box',))
make_generic(22, 225, ('?? box ??',
'?? --- ??'))
make_generic(23, 325, ('?? --- ??',
'?? box ??',
'?? --- ??'))
make_generic(24, 425, ('?? box ??',
'?????????'))
make_generic(25, 525, ('?????????',
'?? box ??',
'?????????'))
make_generic(31, 135, ('??? box',))
make_generic(32, 235, ('??? box ???',
'??? --- ???'))
make_generic(33, 335, ('??? --- ???',
'??? box ???',
'??? --- ???'))
make_generic(34, 435, ('??? box ???',
'???????????'))
make_generic(35, 535, ('???????????',
'??? box ???',
'???????????'))
make_generic(41, 145, ('???? box',))
make_generic(42, 245, ('???? box ????',
'???? --- ????'))
make_generic(43, 345, ('???? --- ????',
'???? box ????',
'???? --- ????'))
make_generic(44, 445, ('???? box ????',
'?????????????'))
make_generic(45, 545, ('?????????????',
'???? box ????',
'?????????????'))
# Textual (non programming) templates.
Template(111, 113, ('box',))
Template(112, 213, ('| box |',
'+-----+'))
Template(113, 313, ('+-----+',
'| box |',
'+-----+'))
Template(114, 413, ('| box |',
'*=====*'))
Template(115, 513, ('*=====*',
'| box |',
'*=====*'))
Template(121, 123, ('| box |',))
Template(122, 223, ('| box |',
'`-----\''))
Template(123, 323, ('.-----.',
'| box |',
'`-----\''))
Template(124, 423, ('| box |',
'\\=====/'))
Template(125, 523, ('/=====\\',
'| box |',
'\\=====/'))
Template(141, 143, ('| box ',))
Template(142, 243, ('* box *',
'*******'))
Template(143, 343, ('*******',
'* box *',
'*******'))
Template(144, 443, ('X box X',
'XXXXXXX'))
Template(145, 543, ('XXXXXXX',
'X box X',
'XXXXXXX'))
# C language templates.
Template(211, 118, ('/* box */',))
Template(212, 218, ('/* box */',
'/* --- */'))
Template(213, 318, ('/* --- */',
'/* box */',
'/* --- */'))
Template(214, 418, ('/* box */',
'/* === */'))
Template(215, 518, ('/* === */',
'/* box */',
'/* === */'))
Template(221, 128, ('/* ',
' box',
'*/'))
Template(222, 228, ('/* .',
'| box |',
'`----*/'))
Template(223, 328, ('/*----.',
'| box |',
'`----*/'))
Template(224, 428, ('/* \\',
'| box |',
'\\====*/'))
Template(225, 528, ('/*====\\',
'| box |',
'\\====*/'))
Template(231, 138, ('/* ',
' | box',
' */ '))
Template(232, 238, ('/* ',
' | box | ',
' *-----*/'))
Template(233, 338, ('/*-----* ',
' | box | ',
' *-----*/'))
Template(234, 438, ('/* box */',
'/*-----*/'))
Template(235, 538, ('/*-----*/',
'/* box */',
'/*-----*/'))
Template(241, 148, ('/* ',
' * box',
' */ '))
Template(242, 248, ('/* * ',
' * box * ',
' *******/'))
Template(243, 348, ('/******* ',
' * box * ',
' *******/'))
Template(244, 448, ('/* box */',
'/*******/'))
Template(245, 548, ('/*******/',
'/* box */',
'/*******/'))
Template(251, 158, ('/* ',
' * box',
' */ '))
if __name__ == '__main__':
main(*sys.argv[1:]) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql_test
import (
"context"
"fmt"
"reflect"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
)
func TestScatterRandomizeLeases(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t, "uses too many resources for race")
skip.UnderShort(t, "takes 25s")
const numHosts = 3
tc := serverutils.StartCluster(t, numHosts, base.TestClusterArgs{})
defer tc.Stopper().Stop(context.Background())
sqlutils.CreateTable(
t, tc.ServerConn(0), "t",
"k INT PRIMARY KEY, v INT",
1000,
sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(10)),
)
r := sqlutils.MakeSQLRunner(tc.ServerConn(0))
// Introduce 99 splits to get 100 ranges.
r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))")
getLeaseholders := func() (map[int]int, error) {
rows := r.Query(t, `SELECT range_id, lease_holder FROM [SHOW RANGES FROM TABLE test.t WITH DETAILS]`)
leaseholders := make(map[int]int)
numRows := 0
for ; rows.Next(); numRows++ {
var rangeID, leaseholder int
if err := rows.Scan(&rangeID, &leaseholder); err != nil {
return nil, err
}
if rangeID < 1 {
t.Fatalf("invalid rangeID: %d", rangeID)
}
if leaseholder < 1 || leaseholder > numHosts {
return nil, fmt.Errorf("invalid lease_holder value: %d", leaseholder)
}
leaseholders[rangeID] = leaseholder
}
if err := rows.Err(); err != nil {
return nil, err
}
if numRows != 100 {
return nil, fmt.Errorf("expected 100 ranges, got %d", numRows)
}
return leaseholders, nil
}
oldLeaseholders, err := getLeaseholders()
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
// Ensure that scattering changes the leaseholders, which is really all
// that randomizing the lease placements can probabilistically guarantee -
// it doesn't guarantee a uniform distribution.
r.Exec(t, "ALTER TABLE test.t SCATTER")
newLeaseholders, err := getLeaseholders()
if err != nil {
t.Fatal(err)
}
if reflect.DeepEqual(oldLeaseholders, newLeaseholders) {
t.Errorf("expected scatter to change lease distribution, but got no change: %v", newLeaseholders)
}
oldLeaseholders = newLeaseholders
}
}
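// Illustrative note (not part of the original test): SCATTER also accepts
// an explicit key range, so a variant of this test could restrict the
// randomization to a subset of the table, e.g.
//
//	ALTER TABLE test.t SCATTER FROM (100) TO (500)
//
// which only touches the ranges covering keys in [100, 500).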
// TestScatterResponse ensures that ALTER TABLE... SCATTER includes one row of
// output per range in the table. It does *not* test that scatter properly
// distributes replicas and leases; see TestScatter for that.
//
// TODO(benesch): consider folding this test into TestScatter once TestScatter
// is unskipped.
func TestScatterResponse(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ts, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer ts.Stopper().Stop(context.Background())
s := ts.ApplicationLayer()
sqlutils.CreateTable(
t, sqlDB, "t",
"k INT PRIMARY KEY, v INT",
1000,
sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(10)),
)
tableDesc := desctestutils.TestingGetPublicTableDescriptor(kvDB, s.Codec(), "test", "t")
r := sqlutils.MakeSQLRunner(sqlDB)
// Range split decisions happen asynchronously and in this test we check for
// the actual split boundaries. Wait until the table itself is split off
// into its own range.
testutils.SucceedsSoon(t, func() error {
row := r.QueryRow(t, `SELECT count(*) FROM [SHOW RANGES FROM TABLE test.t] WHERE start_key LIKE '%TableMin%'`)
var nRanges int
row.Scan(&nRanges)
if nRanges != 1 {
return errors.Newf("expected to find single range for table, found %d", nRanges)
}
return nil
})
r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))")
rows := r.Query(t, "ALTER TABLE test.t SCATTER")
i := 0
for ; rows.Next(); i++ {
var actualKey []byte
var pretty string
if err := rows.Scan(&actualKey, &pretty); err != nil {
t.Fatal(err)
}
var expectedKey roachpb.Key
if i == 0 {
expectedKey = s.Codec().TablePrefix(uint32(tableDesc.GetID()))
} else {
var err error
expectedKey, err = randgen.TestingMakePrimaryIndexKeyForTenant(tableDesc, s.Codec(), i*10)
if err != nil {
t.Fatal(err)
}
}
if e, a := expectedKey, roachpb.Key(actualKey); !e.Equal(a) {
t.Errorf("%d: expected split key %s, but got %s", i, e, a)
}
if e, a := expectedKey.String(), pretty; e != a {
t.Errorf("%d: expected pretty split key %s, but got %s", i, e, a)
}
}
if err := rows.Err(); err != nil {
t.Fatal(err)
}
if e, a := 100, i; e != a {
t.Fatalf("expected %d rows, but got %d", e, a)
}
}
// TestScatterWithOneVoter tests that the scatter command works when the
// replication factor is set to 1. We expect that the total number of replicas
// remains unchanged and that the scattering store loses some replicas. Note we
// don't assert on the final distribution being even across all stores; scatter
// promises randomness, not necessarily uniformity.
func TestScatterWithOneVoter(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t) // Too slow under stressrace.
skip.UnderDeadlock(t)
skip.UnderShort(t)
zcfg := zonepb.DefaultZoneConfig()
zcfg.NumReplicas = proto.Int32(1)
tc := serverutils.StartCluster(t, 3, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DefaultZoneConfigOverride: &zcfg,
},
},
},
})
defer tc.Stopper().Stop(context.Background())
sqlutils.CreateTable(
t, tc.ServerConn(0), "t",
"k INT PRIMARY KEY, v INT",
500, /* numRows */
sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(10)),
)
r := sqlutils.MakeSQLRunner(tc.ServerConn(0))
// Create 49 splits, for 50 ranges in the test table.
r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 49) AS g(i))")
getReplicaCounts := func() (map[int]int, int, error) {
rows := r.Query(t, `
WITH ranges_info AS (
SHOW RANGES FROM TABLE test.t
)
SELECT
store_id,
count(*) AS replica_count
FROM
(
SELECT
unnest(replicas) AS store_id
FROM
ranges_info
) AS store_replicas
GROUP BY
store_id;`)
replicaCounts := make(map[int]int)
totalReplicas := 0
for rows.Next() {
var storeID, replicaCount int
if err := rows.Scan(&storeID, &replicaCount); err != nil {
return nil, 0, err
}
replicaCounts[storeID] = replicaCount
totalReplicas += replicaCount
}
if err := rows.Err(); err != nil {
return nil, 0, err
}
return replicaCounts, totalReplicas, nil
}
oldReplicaCounts, oldTotalReplicas, err := getReplicaCounts()
if err != nil {
t.Fatal(err)
}
t.Logf("before scatter: replica distribution across stores: %v (total: %d)",
oldReplicaCounts, oldTotalReplicas)
// Expect the number of replicas on store 1 to have changed. We can't
// assert that the distribution will be even across all three stores, but s1
// (the initial leaseholder and replica) should have a different number of
// replicas than before. Rebalancing is otherwise disabled in this test, so
// the only replica movements are from the scatter.
// Enable tracing to capture detailed scatter execution information.
r.Exec(t, "SET tracing = on")
r.Exec(t, "ALTER TABLE test.t SCATTER")
r.Exec(t, "SET tracing = off")
// If the test fails, print the trace of the scatter statement to help
// us investigate failures.
defer func() {
if !t.Failed() {
return
}
traceRows := r.QueryStr(t, "SELECT age, message FROM [SHOW TRACE FOR SESSION]")
if len(traceRows) > 0 {
t.Logf("SCATTER trace (%d relevant messages):", len(traceRows))
for _, row := range traceRows {
t.Logf("[%s] %s", row[0], row[1])
}
}
}()
newReplicaCounts, newTotalReplicas, err := getReplicaCounts()
require.NoError(t, err)
t.Logf("after scatter: replica distribution across stores: %v (total: %d)", newReplicaCounts, newTotalReplicas)
require.Equal(t, oldTotalReplicas, newTotalReplicas,
"expected total replica count to remain the same post-scatter, "+
"old replica counts(%d): %v, new replica counts(%d): %v",
oldTotalReplicas, oldReplicaCounts, newTotalReplicas, newReplicaCounts)
require.NotEqual(t, oldReplicaCounts[1], newReplicaCounts[1])
} | go | github | https://github.com/cockroachdb/cockroach | pkg/sql/scatter_test.go |
"""
Better Emphasis.
pymdownx.betterem
Add intelligent handling of em and strong notations.
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import SimpleTagPattern, DoubleTagPattern
SMART_UNDER_CONTENT = r'((?:[^_]|_(?=\w|\s)|(?<=\s)_+?(?=\s))+?_*?)'
SMART_STAR_CONTENT = r'((?:[^\*]|\*(?=[^\W_]|\*|\s)|(?<=\s)\*+?(?=\s))+?\**?)'
SMART_UNDER_MIXED_CONTENT = r'((?:[^_]|_(?=\w)|(?<=\s)_+?(?=\s))+?_*)'
SMART_STAR_MIXED_CONTENT = r'((?:[^\*]|\*(?=[^\W_]|\*)|(?<=\s)\*+?(?=\s))+?\**)'
UNDER_CONTENT = r'(_|(?:(?<=\s)_|[^_])+?)'
UNDER_CONTENT2 = r'((?:[^_]|(?<!_)_(?=\w))+?)'
STAR_CONTENT = r'(\*|(?:(?<=\s)\*|[^\*])+?)'
STAR_CONTENT2 = r'((?:[^\*]|(?<!\*)\*(?=[^\W_]|\*))+?)'
# ***strong,em***
STAR_STRONG_EM = r'(\*{3})(?!\s)(\*{1,2}|[^\*]+?)(?<!\s)\2'
# ___strong,em___
UNDER_STRONG_EM = r'(_{3})(?!\s)(_{1,2}|[^_]+?)(?<!\s)\2'
# ***strong,em*strong**
STAR_STRONG_EM2 = r'(\*{3})(?![\s\*])%s(?<!\s)\*%s(?<!\s)\*{2}' % (STAR_CONTENT, STAR_CONTENT2)
# ___strong,em_strong__
UNDER_STRONG_EM2 = r'(_{3})(?![\s_])%s(?<!\s)_%s(?<!\s)_{2}' % (UNDER_CONTENT, UNDER_CONTENT2)
# ***em,strong**em*
STAR_EM_STRONG = r'(\*{3})(?![\s\*])%s(?<!\s)\*{2}%s(?<!\s)\*' % (STAR_CONTENT2, STAR_CONTENT)
# ___em,strong__em_
UNDER_EM_STRONG = r'(_{3})(?![\s_])%s(?<!\s)_{2}%s(?<!\s)_' % (UNDER_CONTENT2, UNDER_CONTENT)
# **strong**
STAR_STRONG = r'(\*{2})(?!\s)%s(?<!\s)\2' % STAR_CONTENT2
# __strong__
UNDER_STRONG = r'(_{2})(?!\s)%s(?<!\s)\2' % UNDER_CONTENT2
# *emphasis*
STAR_EM = r'(\*)(?!\s)%s(?<!\s)\2' % STAR_CONTENT
# _emphasis_
UNDER_EM = r'(_)(?!\s)%s(?<!\s)\2' % UNDER_CONTENT
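# Illustrative behaviour of the plain rules above (examples only, not part
# of the original module):
#   '*word*'   -> <em>word</em> (STAR_EM: no space allowed after the opener)
#   '* word*'  -> left alone (the (?!\s) lookahead rejects the space)
#   '**bold**' -> <strong>bold</strong> via STAR_STRONG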
# Smart rules for when "smart underscore" is enabled
# SMART: ___strong,em___
SMART_UNDER_STRONG_EM = r'(?<!\w)(_{3})(?![\s_])%s(?<!\s)\2(?!\w)' % SMART_UNDER_CONTENT
# ___strong,em_ strong__
SMART_UNDER_STRONG_EM2 = \
r'(?<!\w)(_{3})(?![\s_])%s(?<!\s)_(?!\w)%s(?<!\s)_{2}(?!\w)' % (SMART_UNDER_MIXED_CONTENT, SMART_UNDER_CONTENT)
# ___em,strong__ em_
SMART_UNDER_EM_STRONG = \
r'(?<!\w)(_{3})(?![\s_])%s(?<!\s)_{2}(?!\w)%s(?<!\s)_(?!\w)' % (SMART_UNDER_MIXED_CONTENT, SMART_UNDER_CONTENT)
# __strong__
SMART_UNDER_STRONG = r'(?<!\w)(_{2})(?![\s_])%s(?<!\s)\2(?!\w)' % SMART_UNDER_CONTENT
# SMART _em_
SMART_UNDER_EM = r'(?<!\w)(_)(?![\s_])%s(?<!\s)\2(?!\w)' % SMART_UNDER_CONTENT
# Smart rules for when "smart asterisk" is enabled
# SMART: ***strong,em***
SMART_STAR_STRONG_EM = r'(?:(?<=_)|(?<![\w\*]))(\*{3})(?![\s\*])%s(?<!\s)\2(?:(?=_)|(?![\w\*]))' % SMART_STAR_CONTENT
# ***strong,em* strong**
SMART_STAR_STRONG_EM2 = \
r'(?:(?<=_)|(?<![\w\*]))(\*{3})(?![\s\*])%s(?<!\s)\*(?:(?=_)|(?![\w\*]))%s(?<!\s)\*{2}(?:(?=_)|(?![\w\*]))' % (
SMART_STAR_MIXED_CONTENT, SMART_STAR_CONTENT
)
# ***em,strong** em*
SMART_STAR_EM_STRONG = \
r'(?:(?<=_)|(?<![\w\*]))(\*{3})(?![\s\*])%s(?<!\s)\*{2}(?:(?=_)|(?![\w\*]))%s(?<!\s)\*(?:(?=_)|(?![\w\*]))' % (
SMART_STAR_MIXED_CONTENT, SMART_STAR_CONTENT
)
# **strong**
SMART_STAR_STRONG = r'(?:(?<=_)|(?<![\w\*]))(\*{2})(?![\s\*])%s(?<!\s)\2(?:(?=_)|(?![\w\*]))' % SMART_STAR_CONTENT
# SMART *em*
SMART_STAR_EM = r'(?:(?<=_)|(?<![\w\*]))(\*)(?![\s\*])%s(?<!\s)\2(?:(?=_)|(?![\w\*]))' % SMART_STAR_CONTENT
class BetterEmExtension(Extension):
"""Add extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'smart_enable': ["underscore", "Treat connected words intelligently - Default: underscore"]
}
super(BetterEmExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
"""Modify inline patterns."""
# Not better yet, so let's make it better
md.registerExtension(self)
self.make_better(md)
def make_better(self, md):
"""
Configure all the pattern rules.
This should be used instead of smart_strong package.
pymdownx.extra should be used in place of markdown.extensions.extra.
"""
config = self.getConfigs()
enabled = config["smart_enable"]
if enabled:
enable_all = enabled == "all"
enable_under = enabled == "underscore" or enable_all
enable_star = enabled == "asterisk" or enable_all
star_strong_em = SMART_STAR_STRONG_EM if enable_star else STAR_STRONG_EM
under_strong_em = SMART_UNDER_STRONG_EM if enable_under else UNDER_STRONG_EM
star_em_strong = SMART_STAR_EM_STRONG if enable_star else STAR_EM_STRONG
under_em_strong = SMART_UNDER_EM_STRONG if enable_under else UNDER_EM_STRONG
star_strong_em2 = SMART_STAR_STRONG_EM2 if enable_star else STAR_STRONG_EM2
under_strong_em2 = SMART_UNDER_STRONG_EM2 if enable_under else UNDER_STRONG_EM2
star_strong = SMART_STAR_STRONG if enable_star else STAR_STRONG
under_strong = SMART_UNDER_STRONG if enable_under else UNDER_STRONG
star_emphasis = SMART_STAR_EM if enable_star else STAR_EM
under_emphasis = SMART_UNDER_EM if enable_under else UNDER_EM
md.inlinePatterns["strong_em"] = DoubleTagPattern(star_strong_em, 'strong,em')
md.inlinePatterns.add("strong_em2", DoubleTagPattern(under_strong_em, 'strong,em'), '>strong_em')
md.inlinePatterns.link("em_strong", ">strong_em2")
md.inlinePatterns["em_strong"] = DoubleTagPattern(star_em_strong, 'em,strong')
md.inlinePatterns.add('em_strong2', DoubleTagPattern(under_em_strong, 'em,strong'), '>em_strong')
md.inlinePatterns.add('strong_em3', DoubleTagPattern(star_strong_em2, 'strong,em'), '>em_strong2')
md.inlinePatterns.add('strong_em4', DoubleTagPattern(under_strong_em2, 'strong,em'), '>strong_em3')
md.inlinePatterns["strong"] = SimpleTagPattern(star_strong, 'strong')
md.inlinePatterns.add("strong2", SimpleTagPattern(under_strong, 'strong'), '>strong')
md.inlinePatterns["emphasis"] = SimpleTagPattern(star_emphasis, 'em')
md.inlinePatterns["emphasis2"] = SimpleTagPattern(under_emphasis, 'em')
def makeExtension(*args, **kwargs):
"""Return extension."""
return BetterEmExtension(*args, **kwargs) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""JSON support for message types.
Public classes:
MessageJSONEncoder: JSON encoder for message objects.
Public functions:
encode_message: Encodes a message into a JSON string.
decode_message: Merges from a JSON string into a message.
"""
import base64
import binascii
import logging
import six
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import util
__all__ = [
'ALTERNATIVE_CONTENT_TYPES',
'CONTENT_TYPE',
'MessageJSONEncoder',
'encode_message',
'decode_message',
'ProtoJson',
]
def _load_json_module():
"""Try to load a valid json module.
There are more than one json modules that might be installed. They are
mostly compatible with one another but some versions may be different.
This function attempts to load various json modules in a preferred order.
It does a basic check to guess if a loaded version of json is compatible.
Returns:
Compatible json module.
Raises:
ImportError if there are no json modules or the loaded json module is
not compatible with ProtoRPC.
"""
first_import_error = None
for module_name in ['json',
'simplejson']:
try:
module = __import__(module_name, {}, {}, 'json')
if not hasattr(module, 'JSONEncoder'):
message = (
'json library "%s" is not compatible with ProtoRPC' %
module_name)
logging.warning(message)
raise ImportError(message)
else:
return module
except ImportError as err:
if not first_import_error:
first_import_error = err
logging.error('Must use valid json library (json or simplejson)')
raise first_import_error # pylint:disable=raising-bad-type
json = _load_json_module()
# TODO: Rename this to MessageJsonEncoder.
class MessageJSONEncoder(json.JSONEncoder):
"""Message JSON encoder class.
Extension of JSONEncoder that can build JSON from a message object.
"""
def __init__(self, protojson_protocol=None, **kwargs):
"""Constructor.
Args:
protojson_protocol: ProtoJson instance.
"""
super(MessageJSONEncoder, self).__init__(**kwargs)
self.__protojson_protocol = (
protojson_protocol or ProtoJson.get_default())
def default(self, value):
"""Return dictionary instance from a message object.
Args:
value: Value to get dictionary for. If not encodable, will
call the superclass's default method.
"""
if isinstance(value, messages.Enum):
return str(value)
if six.PY3 and isinstance(value, bytes):
return value.decode('utf8')
if isinstance(value, messages.Message):
result = {}
for field in value.all_fields():
item = value.get_assigned_value(field.name)
if item not in (None, [], ()):
result[field.name] = (
self.__protojson_protocol.encode_field(field, item))
# Handle unrecognized fields, so they're included when a message is
# decoded then encoded.
for unknown_key in value.all_unrecognized_fields():
unrecognized_field, _ = value.get_unrecognized_field_info(
unknown_key)
result[unknown_key] = unrecognized_field
return result
else:
return super(MessageJSONEncoder, self).default(value)
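# Illustrative round trip (a sketch, not part of the original module; the
# 'Note' message type is hypothetical):
#
# class Note(messages.Message):
#     text = messages.StringField(1)
#
# encoded = encode_message(Note(text='hi'))  # -> '{"text": "hi"}'
# decoded = decode_message(Note, encoded)    # -> Note(text='hi')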
class ProtoJson(object):
"""ProtoRPC JSON implementation class.
Implementation of JSON based protocol used for serializing and
deserializing message objects. Instances are passed to the
remote.ProtocolConfig constructor or used with remote.Protocols.add_protocol. See the
remote.py module for more details.
"""
CONTENT_TYPE = 'application/json'
ALTERNATIVE_CONTENT_TYPES = [
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json',
'text/json',
]
def encode_field(self, field, value):
"""Encode a python field value to a JSON value.
Args:
field: A ProtoRPC field instance.
value: A python value supported by field.
Returns:
A JSON serializable value appropriate for field.
"""
if isinstance(field, messages.BytesField):
if field.repeated:
value = [base64.b64encode(byte) for byte in value]
else:
value = base64.b64encode(value)
elif isinstance(field, message_types.DateTimeField):
# DateTimeField stores its data as a RFC 3339 compliant string.
if field.repeated:
value = [i.isoformat() for i in value]
else:
value = value.isoformat()
return value
def encode_message(self, message):
"""Encode Message instance to JSON string.
Args:
message: Message instance to encode into a JSON string.
Returns:
String encoding of Message instance in protocol JSON format.
Raises:
messages.ValidationError if message is not initialized.
"""
message.check_initialized()
return json.dumps(message, cls=MessageJSONEncoder,
protojson_protocol=self)
def decode_message(self, message_type, encoded_message):
"""Merge JSON structure to Message instance.
Args:
message_type: Message to decode data to.
encoded_message: JSON encoded version of message.
Returns:
Decoded instance of message_type.
Raises:
ValueError: If encoded_message is not valid JSON.
messages.ValidationError if merged message is not initialized.
"""
if not encoded_message.strip():
return message_type()
dictionary = json.loads(encoded_message)
message = self.__decode_dictionary(message_type, dictionary)
message.check_initialized()
return message
def __find_variant(self, value):
"""Find the messages.Variant type that describes this value.
Args:
value: The value whose variant type is being determined.
Returns:
The messages.Variant value that best describes value's type,
or None if it's a type we don't know how to handle.
"""
if isinstance(value, bool):
return messages.Variant.BOOL
elif isinstance(value, six.integer_types):
return messages.Variant.INT64
elif isinstance(value, float):
return messages.Variant.DOUBLE
elif isinstance(value, six.string_types):
return messages.Variant.STRING
elif isinstance(value, (list, tuple)):
# Find the most specific variant that covers all elements.
variant_priority = [None,
messages.Variant.INT64,
messages.Variant.DOUBLE,
messages.Variant.STRING]
chosen_priority = 0
for v in value:
variant = self.__find_variant(v)
try:
priority = variant_priority.index(variant)
except ValueError:  # list.index raises ValueError when variant is absent
priority = -1
if priority > chosen_priority:
chosen_priority = priority
return variant_priority[chosen_priority]
# Unrecognized type.
return None
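# Illustrative results (examples only): True -> Variant.BOOL,
# 3 -> Variant.INT64, [1, 2.5] -> Variant.DOUBLE (DOUBLE outranks INT64
# in variant_priority), ['a', 1] -> Variant.STRING, and {'a': 1} -> None
# since dicts are not handled here.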
def __decode_dictionary(self, message_type, dictionary):
"""Merge dictionary in to message.
Args:
message: Message to merge dictionary in to.
dictionary: Dictionary to extract information from. Dictionary
is as parsed from JSON. Nested objects will also be dictionaries.
"""
message = message_type()
for key, value in six.iteritems(dictionary):
if value is None:
try:
message.reset(key)
except AttributeError:
pass # This is an unrecognized field, skip it.
continue
try:
field = message.field_by_name(key)
except KeyError:
# Save unknown values.
variant = self.__find_variant(value)
if variant:
message.set_unrecognized_field(key, value, variant)
else:
logging.warning(
'No variant found for unrecognized field: %s', key)
continue
# Normalize values in to a list.
if isinstance(value, list):
if not value:
continue
else:
value = [value]
valid_value = []
for item in value:
valid_value.append(self.decode_field(field, item))
if field.repeated:
_ = getattr(message, field.name)
setattr(message, field.name, valid_value)
else:
setattr(message, field.name, valid_value[-1])
return message
def decode_field(self, field, value):
"""Decode a JSON value to a python value.
Args:
field: A ProtoRPC field instance.
value: A serialized JSON value.
Returns:
A Python value compatible with field.
"""
if isinstance(field, messages.EnumField):
try:
return field.type(value)
except TypeError:
raise messages.DecodeError(
'Invalid enum value "%s"' % (value or ''))
elif isinstance(field, messages.BytesField):
try:
return base64.b64decode(value)
except (binascii.Error, TypeError) as err:
raise messages.DecodeError('Base64 decoding error: %s' % err)
elif isinstance(field, message_types.DateTimeField):
try:
return util.decode_datetime(value)
except ValueError as err:
raise messages.DecodeError(err)
elif (isinstance(field, messages.MessageField) and
issubclass(field.type, messages.Message)):
return self.__decode_dictionary(field.type, value)
elif (isinstance(field, messages.FloatField) and
isinstance(value, (six.integer_types, six.string_types))):
try:
return float(value)
except: # pylint:disable=bare-except
pass
elif (isinstance(field, messages.IntegerField) and
isinstance(value, six.string_types)):
try:
return int(value)
except: # pylint:disable=bare-except
pass
return value
@staticmethod
def get_default():
"""Get default instanceof ProtoJson."""
try:
return ProtoJson.__default
except AttributeError:
ProtoJson.__default = ProtoJson()
return ProtoJson.__default
@staticmethod
def set_default(protocol):
"""Set the default instance of ProtoJson.
Args:
protocol: A ProtoJson instance.
"""
if not isinstance(protocol, ProtoJson):
raise TypeError('Expected protocol of type ProtoJson')
ProtoJson.__default = protocol
CONTENT_TYPE = ProtoJson.CONTENT_TYPE
ALTERNATIVE_CONTENT_TYPES = ProtoJson.ALTERNATIVE_CONTENT_TYPES
encode_message = ProtoJson.get_default().encode_message
decode_message = ProtoJson.get_default().decode_message | unknown | codeparrot/codeparrot-clean | ||
""" Helpers for guessing our way into queries against the database
"""
import ast
import logging
import re
import arrow
import six
import sqlalchemy as sa
import sqlalchemy_utils as sau
from ._tables import TABLES
logger = logging.getLogger('tilezilla')
DATETIME_TYPES = (sa.sql.sqltypes.DateTime,
sa.sql.sqltypes.Date,
sau.ArrowType)
COMPARATORS = ['eq', 'ne', 'le', 'lt', 'ge', 'gt', 'in', 'like']
# Database linkages as graph ---------------------------------------------------
def _make_table_graph(tables):
graph = {}
for t in tables:
relations = sa.inspect(t).relationships
graph[t] = [relation.mapper.class_ for relation in relations]
return graph
def _link_path(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in graph:
return None
for node in graph[start]:
if node not in path:
newpath = _link_path(graph, node, end, path)
if newpath: return newpath
return None
TABLES_GRAPH = _make_table_graph(TABLES.values())
# [KEY]
# 1. PARSE SYSTEM --------------------------------------------------------------
def _convert_expression_operator(expr):
""" Convert expression operators from '=' style to 'eq' style
Args:
expr (str): Expression to convert
Returns:
str: Converted expression
"""
# Check multi-character operators before their single-character
# prefixes ('<=' before '<', '==' before '=', ...); an unordered dict
# could replace '=' inside '==' or '>' inside '>=' first.
TO_CONVERT = (
(' ne ', ['!=', '=!']),
(' le ', ['<=', '=<']),
(' ge ', ['>=', '=>']),
(' eq ', ['==', '=']),
(' lt ', ['<']),
(' gt ', ['>']),
)
for op, matches in TO_CONVERT:
for match in matches:
if match in expr:
return expr.replace(match, op)
return expr
def _parse_expression_filter(expr):
""" Convert a filter expression into [KEY], [OPERATOR], [VALUE...]
Args:
str: A filter expression
Returns:
tuple (str, str, str): The key, operator, and value or values
Raises:
KeyError: Raise if operator not in available comparators
(:attr:`COMPARATORS`)
"""
# Parse operator
for comp in COMPARATORS:
split = re.split(r'\s%s\s' % comp, expr, flags=re.IGNORECASE)
if len(split) == 2:
return split[0].strip(), comp, split[1].lstrip()
raise KeyError('Could not find a supported comparator in expression')
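# Illustrative parse (examples only, not part of the original module):
# _convert_expression_operator('acquired >= 2015-01-01') returns
# 'acquired ge 2015-01-01', and _parse_expression_filter of that result
# returns ('acquired', 'ge', '2015-01-01').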
def _tablename_to_class(base, tablename):
""" Return class of tablename
"""
for c in base._decl_class_registry.values():
if hasattr(c, '__tablename__') and c.__tablename__ == tablename:
return c
# [VALUE]
# TYPE CAST SYSTEM
def convert_query_type(column, value):
""" Return `value` converted to type expected by `table` for column `key`
Args:
column (sqlalchemy.orm.attributes.QueryableAttribute): A SQLAlchemy
ORM attribute
value (str): A value to convert
Returns:
type: `value`, converted to a different type
"""
if isinstance(column.type, DATETIME_TYPES):
return arrow.get(value).datetime
elif isinstance(column.type, sau.ScalarListType):
try:
value = ast.literal_eval(value)
except SyntaxError as exc:
logger.error(exc)
raise SyntaxError('Cannot convert column {} - invalid syntax: {}'
.format(column, value))
else:
return value
return sau.cast_if(value, type(column.type))
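# Illustrative conversions (examples only): for an ArrowType column,
# convert_query_type(col, '2015-06-01') returns a timezone-aware datetime;
# for a ScalarListType column, a literal like "['a', 'b']" is parsed via
# ast.literal_eval; anything else is cast with sau.cast_if.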
# [FUNCTION]
# The thing to write that uses above
def construct_filter(query, items, conjunction='and'):
""" Construct a filter from a combination of filter items
Args:
query (sqlalchemy.orm.query.Query): The SQLAlchemy SQL ORM object
items (list[str]): List of query expressions in form of
"[KEY][OPERATOR][VALUE...]"
conjunction (str): Combine the filter items using 'and' or 'or'
Returns:
sqlalchemy.orm.query.Query: A query with filter items applied
Raises:
KeyError: Raise if column given in filter expression does not exist
"""
# Find table model
entities = sau.get_query_entities(query)
if len(entities) > 1:
logger.warning('Using first entity from search query ({})'
.format(entities[0]))
table = entities[0]
base = sau.get_declarative_base(table)
# Parse expressions
items = [_convert_expression_operator(item) for item in items]
exprs = [_parse_expression_filter(item) for item in items]
filters = []
joins = set([])
for key, operator, value in exprs:
# Preprocess case of linked table
if '.' in key:
_tablename, key = key.split('.', 1)
_table = _tablename_to_class(base, _tablename)
joins.update([t for t in _link_path(TABLES_GRAPH, table, _table)
if t is not table])
else:
_table = table
column = getattr(_table, key, None)
if column is None:
raise KeyError('Cannot construct filter: column "{}" does not '
'exist'.format(key))
if operator.lower() == 'in':
value = [convert_query_type(column, v) for v in
value.replace(' ', ',').split(',') if v]
filter_item = column.in_(value)
else:
attr = [pattern % operator for pattern in
('%s', '%s_', '__%s__')
if hasattr(column, pattern % operator)]
if not attr:
raise KeyError('Cannot construct filter: column {} is not '
'usable with "{}"'.format(key, operator))
if value.lower() in ('null', 'none', 'na', 'nan'):
value = None
else:
value = convert_query_type(column, value)
filter_item = getattr(column, attr[0])(value)
filters.append(filter_item)
for join_table in joins:
query = query.join(join_table)
if conjunction == 'or':
return query.filter(sa.or_(*filters))
else:
return query.filter(sa.and_(*filters)) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class ServiceClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
super(ServiceClientJSON, self).__init__(auth_provider)
self.service = CONF.identity.catalog_type
self.endpoint_url = 'adminURL'
self.api_version = "v3"
def update_service(self, service_id, **kwargs):
"""Updates a service."""
resp, body = self.get_service(service_id)
name = kwargs.get('name', body['name'])
type = kwargs.get('type', body['type'])
desc = kwargs.get('description', body['description'])
patch_body = {
'description': desc,
'type': type,
'name': name
}
patch_body = json.dumps({'service': patch_body})
resp, body = self.patch('services/%s' % service_id, patch_body)
body = json.loads(body)
return resp, body['service']
def get_service(self, service_id):
"""Get Service."""
url = 'services/%s' % service_id
resp, body = self.get(url)
body = json.loads(body)
return resp, body['service']
def create_service(self, serv_type, name=None, description=None,
enabled=True):
body_dict = {
"name": name,
'type': serv_type,
'enabled': enabled,
"description": description,
}
body = json.dumps({'service': body_dict})
resp, body = self.post("services", body)
body = json.loads(body)
return resp, body["service"]
def delete_service(self, serv_id):
url = "services/" + serv_id
resp, body = self.delete(url)
return resp, body | unknown | codeparrot/codeparrot-clean | ||
"""s3.py: Contains a Amazon S3 implementation of ObjectStore."""
__author__ = "Chelsea Urquhart"
__copyright__ = "Copyright 2015, Chelsea Urquhart"
__license__ = "GPL"
__email__ = "me@chelseau.com"
import os
from boto3.session import Session
import sys
from objectstore import ObjectStore
from threadeddeleter import ThreadedDeleter
class Store(ObjectStore):
"""A ObjectStore class for Amazon S3"""
@classmethod
def get_retry_text(cls, retries):
"""
Returns retry text based on the number of retries
:param retries: The number of retries
:return: A string
"""
if retries == 0:
return ' All retries exhausted.'
else:
return ' Retrying {} more times.'.format(retries)
def __init__(self, parser):
"""
Initialize all our variables
:param parser: Our config parser object
:return: None
:throws: Exception on validation error
"""
# Store arguments
self.objects = dict()
self.aws = None
self.region = ''
self.bulk_size = 0
self.access_key_id = ''
self.access_key_secret = ''
self.page_size = 10000
options = ['access_key_id', 'access_key_secret', 'region', 'page_size',
'bulk_size']
optional = ['bulk_size', 'page_size']
if not parser.has_section('s3'):
raise Exception('S3 configuration is missing')
for option in options:
if not parser.has_option('s3', option):
if option not in optional:
raise Exception('Missing S3 option: {}'.format(
option))
else:
setattr(self, option, parser.get('s3', option))
# Ensure data type
self.bulk_size = int(self.bulk_size)
# Ensure data type
self.page_size = int(self.page_size)
# Validate options
if len(self.region) == 0:
raise Exception('No region specified')
if len(self.access_key_id) == 0:
raise Exception('No API key specified')
if len(self.access_key_secret) == 0:
raise Exception('No API key secret specified')
if self.page_size <= 0:
raise Exception('Invalid page size specified')
def login(self):
"""
Logs into S3. Note that this is on the main thread.
init_thread is responsible for initializing individual threads.
:return: True on success, false on failure
"""
try:
session = Session(aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.access_key_secret,
region_name=self.region)
self.aws = session.resource('s3')
except Exception as e:
ThreadedDeleter.output('Unknown error occurred: {msg}'.format(
msg=str(e)))
return False
return True
def list_containers(self, prefixes, retry=2):
"""
Lists containers beginning with any of the provided prefixes
:param prefixes: The (list of) prefixes to get containers for
:param retry: The number of retries to use
:return: A list of containers or False on error
"""
containers = list()
if len(prefixes) == 0:
# An empty prefix matches all buckets; boto3 rejects Prefix=None.
prefixes = ['']
try:
for prefix in prefixes:
for bucket in self.aws.buckets.filter(Prefix=prefix):
containers.append(bucket.name)
except Exception as e:
ThreadedDeleter.output('List containers failed: {msg}.{retry}'
.format(msg=str(e),
retry=self.get_retry_text(
retry)))
if retry == 0:
return False
# Retry
return self.list_containers(prefixes, retry - 1)
return containers
def list_objects(self, container_name, retry=2):
"""
Lists objects in a given container
:param container_name: The name of the container to get objects from
:param retry: The number of retries to use
:return: A list of objects or False on error
"""
if container_name in self.objects:
objects = self.objects.get(container_name)
else:
objects = None
objects_ = list()
try:
if objects is None:
bucket = self.aws.Bucket(container_name)
objects = iter(bucket.objects.page_size(self.page_size))
self.objects[container_name] = objects
for i in range(0, self.page_size):
try:
object_ = next(objects)
objects_.append(object_.key)
except StopIteration as e:
# Just ignore this. We're out of files.
pass
except Exception as e:
ThreadedDeleter.output('List objects failed: {msg}.{retry}'
.format(msg=str(e),
retry=self.get_retry_text(retry)))
if retry == 0:
return False
# Retry
return self.list_objects(container_name, retry - 1)
return objects_
def delete_objects_bulk(self, local):
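"""
Flush this thread's queued deletions, issuing one bulk delete request
per container, then reset the queue.
:param local: A Local class object for storing thread-specific
variables in.
:return: None
"""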
if local.size > 0:
for container, objects in local.data.iteritems()\
if hasattr(local.data, 'iteritems')\
else local.data.items():
try:
bucket = self.aws.Bucket(container)
bucket.delete_objects(Delete=dict(
Objects=objects
))
except Exception as e:
ThreadedDeleter.output('Bulk delete objects failed: {msg}.'
.format(msg=str(e)))
local.size = 0
local.data = dict()
def delete_object(self, container, object_, local):
"""
Deletes an object from a given container
:param container: The name of the container to get objects from
:param object_: The name of the object to delete
:param local: A Local class object for storing thread-specific
variables in.
:return: None
"""
if self.bulk_size <= 1:
try:
bucket = self.aws.Bucket(container)
object_ = bucket.Object(object_)
object_.delete()
except Exception as e:
ThreadedDeleter.output('Delete object failed: {msg}.'
.format(msg=str(e)))
else:
if container not in local.data:
local.data[container] = list()
local.data[container].append(dict(Key=object_))
local.size += 1
if local.size >= self.bulk_size:
self.delete_objects_bulk(local)
def init_thread(self, local):
"""
Initialize thread-specific S3 connection & data list
:param local: The Local object
:return: None
"""
session = Session(aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.access_key_secret,
region_name=self.region)
local.aws = session.resource('s3')
local.data = dict()
local.size = 0
def cleanup_thread(self, local):
"""
Cleanup thread-specific S3 connection
:param local: The Local object
:return: None
"""
# Delete any remaining objects first if using bulk deletions
self.delete_objects_bulk(local)
def delete_container(self, container, retry=2):
"""
Deletes a container
:param container: The name of the container to get objects from
:param retry: The number of retries to use
:return: None
"""
try:
bucket = self.aws.Bucket(container)
bucket.delete()
return True
except Exception as e:
ThreadedDeleter.output('Delete container failed: {msg}.{retry}'
.format(msg=str(e),
retry=self.get_retry_text(retry)))
if retry == 0:
return False
# Retry
return self.delete_container(container, retry - 1) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='st2actions',
version='0.4.0',
description='',
author='StackStorm',
author_email='info@stackstorm.com',
install_requires=[
"pecan",
],
test_suite='st2actions',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup'])
) | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_REPORTER_H_
#define TENSORFLOW_CORE_UTIL_REPORTER_H_
#include <cstdlib>
#include <memory>
#include <string>
#include <unordered_set>
#include "xla/tsl/util/reporter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using tsl::TestReporter; // NOLINT
using tsl::TestReportFile; // NOLINT
} // namespace tensorflow
#endif // TENSORFLOW_CORE_UTIL_REPORTER_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/util/reporter.h |
// boost/filesystem.hpp --------------------------------------------------------------//
// Copyright Beman Dawes 2010
// Distributed under the Boost Software License, Version 1.0.
// See http://www.boost.org/LICENSE_1_0.txt
// Library home page: http://www.boost.org/libs/filesystem
//--------------------------------------------------------------------------------------//
#ifndef BOOST_FILESYSTEM_FILESYSTEM_HPP
#define BOOST_FILESYSTEM_FILESYSTEM_HPP
#include <boost/filesystem/config.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/filesystem/exception.hpp>
#include <boost/filesystem/directory.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/file_status.hpp>
#endif // BOOST_FILESYSTEM_FILESYSTEM_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/filesystem.hpp |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for stubify_application_manifest."""
import unittest
from xml.etree import ElementTree
from tools.android.build_split_manifest import BuildSplitManifest
MAIN_MANIFEST = """
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="com.google.package"
android:versionCode="1"
android:versionName="1.0">
</manifest>
"""
class BuildSplitManifestTest(unittest.TestCase):
def testNoPackageOverride(self):
split = BuildSplitManifest(MAIN_MANIFEST, None, "split", False)
manifest = ElementTree.fromstring(split)
self.assertEqual("com.google.package",
manifest.get("package"))
def testPackageOverride(self):
split = BuildSplitManifest(MAIN_MANIFEST, "package.other", "split", False)
manifest = ElementTree.fromstring(split)
self.assertEqual("package.other",
manifest.get("package"))
def testSplitName(self):
split = BuildSplitManifest(MAIN_MANIFEST, None, "my.little.splony", False)
manifest = ElementTree.fromstring(split)
self.assertEqual("my.little.splony", manifest.get("split"))
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Joe Sylve
@license: GNU General Public License 2.0
@contact: joe.sylve@gmail.com
@organization: 504ENSICS Labs
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class linux_check_syscall_arm(linux_common.AbstractLinuxARMCommand):
""" Checks if the system call table has been altered """
def _get_syscall_table_size(self):
""" Get size of syscall table from the vector_swi function """
vector_swi_addr = self.addr_space.profile.get_symbol("vector_swi")
max_opcodes_to_check = 1024
while (max_opcodes_to_check):
opcode = obj.Object("unsigned int", offset = vector_swi_addr, vm = self.addr_space)
if ((opcode & 0xffff0000) == 0xe3570000):
shift = 0x10 - ((opcode & 0xff00) >> 8)
size = (opcode & 0xff) << (2 * shift)
return size
vector_swi_addr += 4
max_opcodes_to_check -= 1
debug.error("Syscall table size could not be determined.")
def _get_syscall_table_address(self):
""" returns the address of the syscall table """
syscall_table_address = self.addr_space.profile.get_symbol("sys_call_table")
if syscall_table_address:
return syscall_table_address
#TODO: Handle the case where this isn't exported (if needed)
debug.error("Symbol sys_call_table not exported. Please file a bug report.")
def calculate(self):
"""
This works by walking the system call table
and verifying that each entry is a symbol in the kernel.
"""
linux_common.set_plugin_members(self)
num_syscalls = self._get_syscall_table_size()
syscall_addr = self._get_syscall_table_address()
sym_addrs = self.profile.get_all_addresses()
table = obj.Object("Array", offset = syscall_addr, vm = self.addr_space, targetType = "unsigned int", count = num_syscalls)
for (i, call_addr) in enumerate(table):
if not call_addr:
continue
# have to treat them as 'long' so need to mask
call_addr = call_addr & 0xffffffff
if not call_addr in sym_addrs:
yield(i, call_addr, 1)
else:
yield(i, call_addr, 0)
def unified_output(self, data):
return TreeGrid([("Index", Address),
("Address", Address),
("Symbol", str)],
self.generator(data))
def generator(self, data):
for (i, call_addr, hooked) in data:
if hooked == 0:
sym_name = self.profile.get_symbol_by_address("kernel", call_addr)
else:
sym_name = "HOOKED"
yield (0, [Address(i), Address(call_addr), str(sym_name)])
def render_text(self, outfd, data):
self.table_header(outfd, [("Index", "[addr]"), ("Address", "[addrpad]"), ("Symbol", "<30")])
for (i, call_addr, hooked) in data:
if hooked == 0:
sym_name = self.profile.get_symbol_by_address("kernel", call_addr)
else:
sym_name = "HOOKED"
self.table_row(outfd, i, call_addr, sym_name) | unknown | codeparrot/codeparrot-clean | ||
# Used as part of the "make parse" Makefile target.
# See common.mk for details.
$file = ARGV[0]
$str = ARGF.read.sub(/^__END__.*\z/m, '')
puts '# ' + '-' * 70
puts "# target program: "
puts '# ' + '-' * 70
puts $str
puts '# ' + '-' * 70
$parsed = RubyVM::InstructionSequence.compile_file($file)
puts "# disasm result: "
puts '# ' + '-' * 70
puts $parsed.disasm
puts '# ' + '-' * 70 | ruby | github | https://github.com/ruby/ruby | tool/parse.rb |
name: Update package-lock.json
on:
schedule:
# This is probably 6am UTC, which is 10pm PST or 11pm PDT
# Alternatively, 6am local is also fine
- cron: '0 6 * * *'
workflow_dispatch: {}
permissions:
contents: read
# Ensure scripts are run with pipefail. See:
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
defaults:
run:
shell: bash
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'microsoft/TypeScript'
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
token: ${{ secrets.TS_BOT_GITHUB_TOKEN }}
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
with:
node-version: 'lts/*'
- run: |
npm --version
# corepack enable npm
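# (illustrative note: jq reads the pinned "packageManager" field, e.g.
# "npm@10.8.1", and the next line installs exactly that npm version)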
npm install -g $(jq -r '.packageManager' < package.json)
npm --version
- name: Update package-lock.json and push
run: |
rm package-lock.json
npm install
if git diff --exit-code --name-only package-lock.json; then
echo "No change."
else
npm test
npx hereby LKG
git config user.email "typescriptbot@microsoft.com"
git config user.name "TypeScript Bot"
git add -f package-lock.json
git commit -m "Update package-lock.json"
git push
fi | unknown | github | https://github.com/microsoft/TypeScript | .github/workflows/update-package-lock.yaml |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode_test
import (
"testing"
. "unicode"
)
type T struct {
rune rune
script string
}
var inCategoryTest = []T{
{0x0081, "Cc"},
{0x200B, "Cf"},
{0xf0000, "Co"},
{0xdb80, "Cs"},
{0x0236, "Ll"},
{0x1d9d, "Lm"},
{0x07cf, "Lo"},
{0x1f8a, "Lt"},
{0x03ff, "Lu"},
{0x0bc1, "Mc"},
{0x20df, "Me"},
{0x07f0, "Mn"},
{0x1bb2, "Nd"},
{0x10147, "Nl"},
{0x2478, "No"},
{0xfe33, "Pc"},
{0x2011, "Pd"},
{0x301e, "Pe"},
{0x2e03, "Pf"},
{0x2e02, "Pi"},
{0x0022, "Po"},
{0x2770, "Ps"},
{0x00a4, "Sc"},
{0xa711, "Sk"},
{0x25f9, "Sm"},
{0x2108, "So"},
{0x2028, "Zl"},
{0x2029, "Zp"},
{0x202f, "Zs"},
// Unifieds.
{0x04aa, "L"},
{0x0009, "C"},
{0x1712, "M"},
{0x0031, "N"},
{0x00bb, "P"},
{0x00a2, "S"},
{0x00a0, "Z"},
{0x0065, "LC"},
// Unassigned
{0x0378, "Cn"},
{0x0378, "C"},
}
var inPropTest = []T{
{0x0046, "ASCII_Hex_Digit"},
{0x200F, "Bidi_Control"},
{0x2212, "Dash"},
{0xE0001, "Deprecated"},
{0x00B7, "Diacritic"},
{0x30FE, "Extender"},
{0xFF46, "Hex_Digit"},
{0x2E17, "Hyphen"},
{0x2FFB, "IDS_Binary_Operator"},
{0x2FF3, "IDS_Trinary_Operator"},
{0xFA6A, "Ideographic"},
{0x200D, "Join_Control"},
{0x0EC4, "Logical_Order_Exception"},
{0x2FFFF, "Noncharacter_Code_Point"},
{0x065E, "Other_Alphabetic"},
{0x2065, "Other_Default_Ignorable_Code_Point"},
{0x0BD7, "Other_Grapheme_Extend"},
{0x0387, "Other_ID_Continue"},
{0x212E, "Other_ID_Start"},
{0x2094, "Other_Lowercase"},
{0x2040, "Other_Math"},
{0x216F, "Other_Uppercase"},
{0x0027, "Pattern_Syntax"},
{0x0020, "Pattern_White_Space"},
{0x06DD, "Prepended_Concatenation_Mark"},
{0x300D, "Quotation_Mark"},
{0x2EF3, "Radical"},
{0x1f1ff, "Regional_Indicator"},
{0x061F, "STerm"}, // Deprecated alias of Sentence_Terminal
{0x061F, "Sentence_Terminal"},
{0x2071, "Soft_Dotted"},
{0x003A, "Terminal_Punctuation"},
{0x9FC3, "Unified_Ideograph"},
{0xFE0F, "Variation_Selector"},
{0x0020, "White_Space"},
{0x221e, "ID_Compat_Math_Start"},
{0x06e3, "Modifier_Combining_Mark"},
{0x2080, "ID_Compat_Math_Continue"},
{0x2ffe, "IDS_Unary_Operator"},
}
func TestCategories(t *testing.T) {
notTested := make(map[string]bool)
for k := range Categories {
notTested[k] = true
}
for _, test := range inCategoryTest {
if _, ok := Categories[test.script]; !ok {
t.Fatal(test.script, "not a known category")
}
if !Is(Categories[test.script], test.rune) {
t.Errorf("IsCategory(%U, %s) = false, want true", test.rune, test.script)
}
delete(notTested, test.script)
}
for k := range notTested {
t.Error("category not tested:", k)
}
}
func TestProperties(t *testing.T) {
notTested := make(map[string]bool)
for k := range Properties {
notTested[k] = true
}
for _, test := range inPropTest {
if _, ok := Properties[test.script]; !ok {
t.Fatal(test.script, "not a known prop")
}
if !Is(Properties[test.script], test.rune) {
t.Errorf("IsCategory(%U, %s) = false, want true", test.rune, test.script)
}
delete(notTested, test.script)
}
for k := range notTested {
t.Error("property not tested:", k)
}
} | go | github | https://github.com/golang/go | src/unicode/script_test.go |
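The Go test above checks runes against Unicode general categories and properties. As a hedged parallel, Python's standard unicodedata module exposes the same two-letter general categories, so a few of the table entries can be spot-checked directly:

import unicodedata

# The same general-category assignments checked in inCategoryTest.
assert unicodedata.category(chr(0x2028)) == 'Zl'  # LINE SEPARATOR
assert unicodedata.category(chr(0x2029)) == 'Zp'  # PARAGRAPH SEPARATOR
assert unicodedata.category(chr(0x202F)) == 'Zs'  # NARROW NO-BREAK SPACE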
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class account_asset_asset(orm.Model):
_inherit = 'account.asset.asset'
def _xls_acquisition_fields(self, cr, uid, context=None):
"""
Update list in custom module to add/drop columns or change order
"""
return [
'account', 'name', 'code', 'date_start', 'asset_value',
'salvage_value',
]
def _xls_active_fields(self, cr, uid, context=None):
"""
Update list in custom module to add/drop columns or change order
"""
return [
'account', 'name', 'code', 'date_start',
'asset_value', 'salvage_value',
'fy_start_value', 'fy_depr', 'fy_end_value',
'fy_end_depr',
'method', 'method_number', 'prorata',
]
def _xls_removal_fields(self, cr, uid, context=None):
"""
Update list in custom module to add/drop columns or change order
"""
return [
'account', 'name', 'code', 'date_remove', 'asset_value',
'salvage_value',
]
def _xls_acquisition_template(self, cr, uid, context=None):
"""
Template updates
"""
return {}
def _xls_active_template(self, cr, uid, context=None):
"""
Template updates
"""
return {}
def _xls_removal_template(self, cr, uid, context=None):
"""
Template updates
"""
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
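The docstrings above describe the intended extension point: custom modules override the `_xls_*_fields` hooks to add, drop, or reorder report columns. A minimal sketch of such an override (the appended column name is hypothetical):

from openerp.osv import orm

class account_asset_asset(orm.Model):
    _inherit = 'account.asset.asset'

    def _xls_acquisition_fields(self, cr, uid, context=None):
        # Extend the parent's column list instead of redefining it.
        fields = super(account_asset_asset, self)._xls_acquisition_fields(
            cr, uid, context=context)
        fields.append('method')  # hypothetical extra column
        return fields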
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
//go:build !enterprise
package vault
import (
"github.com/hashicorp/vault/helper/namespace"
)
func (ts *TokenStore) baseView(ns *namespace.Namespace) *BarrierView {
return ts.baseBarrierView
}
func (ts *TokenStore) idView(ns *namespace.Namespace) *BarrierView {
return ts.idBarrierView
}
func (ts *TokenStore) accessorView(ns *namespace.Namespace) *BarrierView {
return ts.accessorBarrierView
}
func (ts *TokenStore) parentView(ns *namespace.Namespace) *BarrierView {
return ts.parentBarrierView
}
func (ts *TokenStore) rolesView(ns *namespace.Namespace) *BarrierView {
return ts.rolesBarrierView
} | go | github | https://github.com/hashicorp/vault | vault/token_store_util.go |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^signup/$", views.signup, name="account_signup"),
url(r"^login/$", views.login, name="account_login"),
url(r"^logout/$", views.logout, name="account_logout"),
url(r"^password/change/$", views.password_change,
name="account_change_password"),
url(r"^password/set/$", views.password_set, name="account_set_password"),
url(r"^inactive/$", views.account_inactive, name="account_inactive"),
# E-mail
url(r"^email/$", views.email, name="account_email"),
url(r"^confirm-email/$", views.email_verification_sent,
name="account_email_verification_sent"),
url(r"^confirm-email/(?P<key>[-:\w]+)/$", views.confirm_email,
name="account_confirm_email"),
# password reset
url(r"^password/reset/$", views.password_reset,
name="account_reset_password"),
url(r"^password/reset/done/$", views.password_reset_done,
name="account_reset_password_done"),
url(r"^password/reset/key/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$",
views.password_reset_from_key,
name="account_reset_password_from_key"),
url(r"^password/reset/key/done/$", views.password_reset_from_key_done,
name="account_reset_password_from_key_done"),
] | unknown | codeparrot/codeparrot-clean | ||
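A hedged usage sketch: these urlpatterns are designed to be mounted under a prefix from a project-level URLconf. The module path below is hypothetical:

from django.conf.urls import include, url

urlpatterns = [
    # Mount the account URL patterns above under /accounts/.
    url(r'^accounts/', include('myapp.account.urls')),  # hypothetical path
]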
from __future__ import unicode_literals, division, absolute_import
import datetime
from math import ceil
from flask import jsonify, request
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.plugins.api.series import NoResultFound
from flexget.plugins.filter import movie_queue as mq
from flexget.utils import qualities
movie_queue_api = api.namespace('movie_queue', description='Movie Queue operations')
default_error_schema = {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'message': {'type': 'string'}
}
}
default_error_schema = api.schema('default_error_schema', default_error_schema)
empty_response = api.schema('empty', {'type': 'object'})
movie_object = {
'type': 'object',
'properties': {
'added_date': {'type': 'string'},
'is_downloaded': {'type': 'boolean'},
'download_date': {'type': 'string'},
'entry_original_url': {'type': 'string'},
'entry_title': {'type': 'string'},
'entry_url': {'type': 'string'},
'id': {'type': 'integer'},
'imdb_id': {'type': 'string'},
'quality': {'type': 'string'},
'title': {'type': 'string'},
'tmdb_id': {'type': 'string'},
'queue_name': {'type': 'string'}
}
}
movie_object_schema = api.schema('movie_object', movie_object)
movie_queue_schema = {
'type': 'object',
'properties': {
'movies': {
'type': 'array',
'items': movie_object
},
'number_of_movies': {'type': 'integer'},
'total_number_of_pages': {'type': 'integer'},
'page_number': {'type': 'integer'}
}
}
movie_queue_schema = api.schema('list_movie_queue', movie_queue_schema)
movie_queue_parser = api.parser()
movie_queue_parser.add_argument('page', type=int, default=1, help='Page number')
movie_queue_parser.add_argument('max', type=int, default=100, help='Movies per page')
movie_queue_parser.add_argument('queue_name', default='default', help='Filter by movie queue name')
movie_queue_parser.add_argument('is_downloaded', type=inputs.boolean, help='Filter list by movies download status')
movie_queue_parser.add_argument('sort_by', choices=('added', 'is_downloaded', 'id', 'title', 'download_date'),
default='added', help="Sort response by attribute")
movie_queue_parser.add_argument('order', choices=('asc', 'desc'), default='desc', help="Sorting order")
movie_add_input_schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'imdb_id': {'type': 'string', 'pattern': r'tt\d{7}'},
'tmdb_id': {'type': 'integer'},
'quality': {'type': 'string', 'format': 'quality_requirements', 'default': 'any'},
'queue_name': {'type': 'string', 'default': 'default'}
},
'anyOf': [
{'required': ['title']},
{'required': ['imdb_id']},
{'required': ['tmdb_id']}
]
}
movie_add_input_schema = api.schema('movie_add_input_schema', movie_add_input_schema)
movie_edit_input_schema = {
'type': 'object',
'properties': {
'quality': {'type': 'string', 'format': 'quality_requirements'},
'reset_downloaded': {'type': 'boolean', 'default': True}
},
'anyOf': [
{'required': ['quality']},
{'required': ['reset_downloaded']}
]
}
movie_edit_input_schema = api.schema('movie_edit_input_schema', movie_edit_input_schema)
@movie_queue_api.route('/')
class MovieQueueAPI(APIResource):
@api.response(404, 'Page does not exist', model=default_error_schema)
@api.response(code_or_apierror=200, model=movie_queue_schema)
@api.doc(parser=movie_queue_parser, description="Get flexget's queued movies")
def get(self, session=None):
""" List queued movies """
args = movie_queue_parser.parse_args()
page = args['page']
max_results = args['max']
downloaded = args['is_downloaded']
sort_by = args['sort_by']
order = args['order']
queue_name = args['queue_name']
# Convert the sort order into the boolean "reverse" flag expected by sorted(); 'desc' (the default) sorts descending
order = (order == 'desc')
raw_movie_queue = mq.queue_get(session=session, downloaded=downloaded, queue_name=queue_name)
converted_movie_queue = [movie.to_dict() for movie in raw_movie_queue]
sorted_movie_list = sorted(converted_movie_queue,
key=lambda movie: movie[sort_by] if movie[sort_by] else datetime.datetime,
reverse=order)
count = len(sorted_movie_list)
pages = int(ceil(count / float(max_results)))
if page > pages and pages != 0:
return {'status': 'error',
'message': 'page %s does not exist' % page}, 404
start = (page - 1) * max_results
finish = start + max_results
if finish > count:
finish = count
movie_items = []
for movie_number in range(start, finish):
movie_items.append(sorted_movie_list[movie_number])
return jsonify({
'movies': movie_items,
'number_of_movies': count,
'page_number': page,
'total_number_of_pages': pages
})
@api.response(500, 'Movie already in queue', model=default_error_schema)
@api.response(201, 'Movie successfully added', model=movie_object_schema)
@api.validate(movie_add_input_schema)
@api.doc(description="Add a movie to flexget's queued movies")
def post(self, session=None):
""" Add movies to movie queue """
kwargs = request.json
kwargs['quality'] = qualities.Requirements(kwargs.get('quality'))
kwargs['session'] = session
try:
movie = mq.queue_add(**kwargs)
except mq.QueueError as e:
reply = {
'status': 'error',
'message': e.message
}
return reply, 500
reply = jsonify(movie)
reply.status_code = 201
return reply
@api.response(404, 'ID not found', model=default_error_schema)
@movie_queue_api.route('/<id>/')
@api.doc(params={'id': 'ID of Queued Movie'})
class MovieQueueManageAPI(APIResource):
@api.response(200, 'Movie successfully retrieved', movie_object_schema)
@api.doc(description="Get a specific movie")
def get(self, id, session=None):
""" Returns a movie from queue by ID """
try:
movie = mq.get_movie_by_id(movie_id=id)
except NoResultFound as e:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
return jsonify(movie)
@api.response(200, 'Movie successfully deleted', model=empty_response)
@api.doc(description="Delete a specific movie")
def delete(self, id, session=None):
""" Delete movies from movie queue """
try:
mq.delete_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
return {}
@api.response(405, 'Movie not marked as downloaded', model=default_error_schema)
@api.response(200, 'Movie successfully updated', movie_object_schema)
@api.validate(model=movie_edit_input_schema,
description='Values to use when editing existing movie. At least one value should be used')
@api.doc(description="Update a specific movie")
def put(self, id, session=None):
""" Updates movie quality or downloaded state in movie queue """
data = request.json
try:
movie = mq.get_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
queue_name = movie.get('queue_name')
if data.get('reset_downloaded'):
try:
movie = mq.queue_forget(movie_id=id, queue_name=queue_name)
except mq.QueueError as e:
if e.errno == 1:
reply = {
'status': 'error',
'message': e.message
}
return reply, 405
else:
reply = {
'status': 'error',
'message': e.message
}
return reply, 404
if data.get('quality'):
try:
movie = mq.queue_edit(quality=data['quality'], movie_id=id, queue_name=queue_name)
except mq.QueueError as e:
reply = {'status': 'error',
'message': e.message}
return reply, 404
if not movie:
return {'status': 'error',
'message': 'Not enough parameters to edit movie data'}, 400
return jsonify(movie) | unknown | codeparrot/codeparrot-clean | ||
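A hedged client-side sketch of the endpoints defined above, using requests; the base URL is an assumption about where the FlexGet API is served:

import requests

BASE = 'http://localhost:5050/api/movie_queue/'  # hypothetical host/port

# List queued movies, mirroring the movie_queue_parser arguments.
resp = requests.get(BASE, params={'page': 1, 'max': 50, 'sort_by': 'added'})
resp.raise_for_status()
print(resp.json()['number_of_movies'])

# Queue a movie; the input schema requires title, imdb_id, or tmdb_id.
resp = requests.post(BASE, json={'imdb_id': 'tt0111161', 'quality': '720p'})
print(resp.status_code)  # 201 on success, 500 if already queued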
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/fsi/ibm,p9-occ.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: IBM FSI-attached On-Chip Controller (OCC)
maintainers:
- Eddie James <eajames@linux.ibm.com>
description:
The POWER processor On-Chip Controller (OCC) helps manage power and
thermals for the system, accessed through the FSI-attached SBEFIFO
from a service processor.
properties:
compatible:
enum:
- ibm,p9-occ
- ibm,p10-occ
hwmon:
type: object
$ref: /schemas/hwmon/ibm,occ-hwmon.yaml
required:
- compatible
additionalProperties: false
examples:
- |
occ {
compatible = "ibm,p9-occ";
hwmon {
compatible = "ibm,p9-occ-hwmon";
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/fsi/ibm,p9-occ.yaml |
#!/usr/bin/env python3
"""Generate an updated requirements_all.txt."""
import importlib
import os
import pkgutil
import re
import sys
COMMENT_REQUIREMENTS = (
'RPi.GPIO',
'rpi-rf',
'Adafruit_Python_DHT',
'Adafruit_BBIO',
'fritzconnection',
'pybluez',
'bluepy',
'python-lirc',
'gattlib',
'pyuserinput',
'evdev',
'pycups',
'python-eq3bt',
'avion',
'decora'
)
IGNORE_PACKAGES = (
'homeassistant.components.recorder.models',
)
IGNORE_PIN = ('colorlog>2.1,<3', 'keyring>=9.3,<10.0', 'urllib3')
URL_PIN = ('https://home-assistant.io/developers/code_review_platform/'
'#1-requirements')
def explore_module(package, explore_children):
"""Explore the modules."""
module = importlib.import_module(package)
found = []
if not hasattr(module, '__path__'):
return found
for _, name, _ in pkgutil.iter_modules(module.__path__, package + '.'):
found.append(name)
if explore_children:
found.extend(explore_module(name, False))
return found
def core_requirements():
"""Gather core requirements out of setup.py."""
with open('setup.py') as inp:
reqs_raw = re.search(
r'REQUIRES = \[(.*?)\]', inp.read(), re.S).group(1)
return re.findall(r"'(.*?)'", reqs_raw)
def comment_requirement(req):
"""Some requirements don't install on all systems."""
return any(ign in req for ign in COMMENT_REQUIREMENTS)
def gather_modules():
"""Collect the information and construct the output."""
reqs = {}
errors = []
output = []
for package in sorted(explore_module('homeassistant.components', True) +
explore_module('homeassistant.scripts', True)):
try:
module = importlib.import_module(package)
except ImportError:
if package not in IGNORE_PACKAGES:
errors.append(package)
continue
if not getattr(module, 'REQUIREMENTS', None):
continue
for req in module.REQUIREMENTS:
if req.partition('==')[1] == '' and req not in IGNORE_PIN:
errors.append(
"{}[Please pin requirement {}, see {}]".format(
package, req, URL_PIN))
reqs.setdefault(req, []).append(package)
for key in reqs:
reqs[key] = sorted(reqs[key],
key=lambda name: (len(name.split('.')), name))
if errors:
print("******* ERROR")
print("Errors while importing: ", ', '.join(errors))
print("Make sure you import 3rd party libraries inside methods.")
return None
output.append('# Home Assistant core')
output.append('\n')
output.append('\n'.join(core_requirements()))
output.append('\n')
for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
for req in sorted(requirements,
key=lambda name: (len(name.split('.')), name)):
output.append('\n# {}'.format(req))
if comment_requirement(pkg):
output.append('\n# {}\n'.format(pkg))
else:
output.append('\n{}\n'.format(pkg))
return ''.join(output)
def write_file(data):
"""Write the modules to the requirements_all.txt."""
with open('requirements_all.txt', 'w+') as req_file:
req_file.write(data)
def validate_file(data):
"""Validate if requirements_all.txt is up to date."""
with open('requirements_all.txt', 'r') as req_file:
return data == ''.join(req_file)
def main():
"""Main section of the script."""
if not os.path.isfile('requirements_all.txt'):
print('Run this from HA root dir')
return
data = gather_modules()
if data is None:
sys.exit(1)
if sys.argv[-1] == 'validate':
if validate_file(data):
sys.exit(0)
print("******* ERROR")
print("requirements_all.txt is not up to date")
print("Please run script/gen_requirements_all.py")
sys.exit(1)
write_file(data)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
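A standalone sketch of the discovery step that explore_module() above performs, run against a stdlib package for illustration:

import importlib
import pkgutil

def list_submodules(package):
    # Mirror explore_module(): import the package, then walk its
    # __path__ for importable submodule names.
    module = importlib.import_module(package)
    if not hasattr(module, '__path__'):
        return []
    return [name for _, name, _ in
            pkgutil.iter_modules(module.__path__, package + '.')]

print(list_submodules('json'))  # ['json.decoder', 'json.encoder', ...]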
from __future__ import absolute_import
from kombu import Connection
from kombu.tests.case import Case, SkipTest, patch
class test_sqlalchemy(Case):
def setUp(self):
try:
import sqlalchemy # noqa
except ImportError:
raise SkipTest('sqlalchemy not installed')
def test_url_parser(self):
with patch('kombu.transport.sqlalchemy.Channel._open'):
url = 'sqlalchemy+sqlite:///celerydb.sqlite'
Connection(url).connect()
url = 'sqla+sqlite:///celerydb.sqlite'
Connection(url).connect()
# Should prevent regression fixed by f187ccd
url = 'sqlb+sqlite:///celerydb.sqlite'
with self.assertRaises(KeyError):
Connection(url).connect()
def test_simple_queueing(self):
conn = Connection('sqlalchemy+sqlite:///:memory:')
conn.connect()
channel = conn.channel()
self.assertEqual(
channel.queue_cls.__table__.name,
'kombu_queue'
)
self.assertEqual(
channel.message_cls.__table__.name,
'kombu_message'
)
channel._put('celery', 'DATA')
assert channel._get('celery') == 'DATA'
def test_custom_table_names(self):
raise SkipTest('causes global side effect')
conn = Connection('sqlalchemy+sqlite:///:memory:', transport_options={
'queue_tablename': 'my_custom_queue',
'message_tablename': 'my_custom_message'
})
conn.connect()
channel = conn.channel()
self.assertEqual(
channel.queue_cls.__table__.name,
'my_custom_queue'
)
self.assertEqual(
channel.message_cls.__table__.name,
'my_custom_message'
)
channel._put('celery', 'DATA')
assert channel._get('celery') == 'DATA'
def test_clone(self):
hostname = 'sqlite:///celerydb.sqlite'
x = Connection('+'.join(['sqla', hostname]))
self.assertEqual(x.uri_prefix, 'sqla')
self.assertEqual(x.hostname, hostname)
clone = x.clone()
self.assertEqual(clone.hostname, hostname)
self.assertEqual(clone.uri_prefix, 'sqla') | unknown | codeparrot/codeparrot-clean | ||
# PY-3639
def f(x):
from <error descr="Unresolved reference 'foo'">foo</error> import <error descr="Unresolved reference 'StringIO'">StringIO</error>
return StringIO(x)
def f(x):
try:
from <error descr="Unresolved reference 'foo'">foo</error> import <warning descr="Module 'StringIO' not found">StringIO</warning>
except Exception:
pass
return x
def f(x):
try:
from foo import <warning descr="'StringIO' in the try block with 'except ImportError' should also be defined in the except block">StringIO</warning>
except ImportError:
pass
return StringIO(x)
def f(x):
try:
from lib1 import StringIO
except ImportError:
StringIO = lambda x: x
return StringIO(x)
# PY-3675
try:
import foo as bar
except ImportError:
import <warning descr="Module 'bar' not found">bar</warning>
# PY-3678
def f():
try:
from foo import bar #pass
except ImportError:
import <warning descr="Module 'bar' not found">bar</warning> #fail
finally:
pass
# PY-3869
def f(x):
try:
from foo import bar #pass
except ImportError:
def bar(x):
return x
return bar(x)
# PY-3919
def f(x):
try:
from foo import Bar #pass
except ImportError:
class Bar(object):
pass
return Bar()
# PY-8933: Import unreferenced outside the try block should not be reported
def f(x):
try:
from foo import StringIO
except ImportError:
pass
return None
# PY-8933: Don't report cases where in case of the ImportError block being terminal
def f(x):
try:
from foo import StringIO
except ImportError:
raise
return StringIO(x)
# PY-8933: Import unreferenced outside the try block should not be reported -- global scope
try:
from foo import Unused
except ImportError:
pass
# PY-8933: Import referenced by inner scope should be reported
try:
from foo import <warning descr="'UsedInsideFunction' in the try block with 'except ImportError' should also be defined in the except block">UsedInsideFunction</warning>
except ImportError:
pass
def f(x):
return UsedInsideFunction(x)
# PY-8933: Do not report if imported name declared in parent scope
DeclaredAtFileScope = True
def f(x):
try:
from foo import DeclaredAtFileScope
except ImportError:
pass
return DeclaredAtFileScope(x)
# PY-8203 do not report builtins
def f(x):
try:
from foo import any
except ImportError:
pass
return any([1,2,3]) | unknown | codeparrot/codeparrot-clean | ||
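The inspection exercised above expects every name imported inside a try block with except ImportError to get a fallback definition in the except block. A compliant real-world instance of the pattern:

try:
    from cStringIO import StringIO  # fast C implementation on Python 2
except ImportError:
    from io import StringIO         # portable fallback satisfies the inspection

buf = StringIO()
buf.write(u'ok')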
import os
import sys
import subprocess
import traceback
class ProcessHandler:
args = log = None
def __init__(self, args, log):
self.args = args
self.log = log
def start_process(self, cmd):
env = dict(os.environ)
if not "LD_LIBRARY_PATH" in env:
env["LD_LIBRARY_PATH"] = ""
env["LD_LIBRARY_PATH"] += os.pathsep + os.pathsep.join(
self.args.library_paths)
self.log.debug("Starting process with arguments: {0} ".format(' '.join(cmd)))
return subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
def start_output(self, cmd):
env = dict(os.environ)
if not "LD_LIBRARY_PATH" in env:
env["LD_LIBRARY_PATH"] = ""
env["LD_LIBRARY_PATH"] += os.pathsep + os.pathsep.join(
self.args.library_paths)
self.log.debug("Starting process and waiting with arguments: {0}".format(
' '.join(cmd)))
process_out = sys.stdout
process_err = sys.stderr
fnull = None
# Suppress tool output in quiet mode
if self.args.quiet:
fnull = open(os.devnull, "w")
process_out = fnull
process_err = fnull
try:
p = subprocess.call(cmd, env=env, stdout=process_out, stderr=process_err)
except KeyboardInterrupt:
print
self.log.critical("Tool execution cancelled; exiting")
if fnull:
fnull.close()
sys.exit(1)
except:
# print the traceback, then re-raise the original exception
# (the previous `raise traceback.print_exc()` raised None and
# masked the real error, since print_exc() returns None)
traceback.print_exc()
raise
finally:
if fnull:
fnull.close()
return p | unknown | codeparrot/codeparrot-clean | ||
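A hedged usage sketch for the ProcessHandler class above, assuming it is importable in the current namespace; in the real tool `args` comes from an argparse namespace, so a stand-in with the two attributes the class reads (`library_paths`, `quiet`) is used here:

import logging

class Args(object):
    library_paths = ['/opt/tool/lib']  # hypothetical library directory
    quiet = True

log = logging.getLogger('demo')
handler = ProcessHandler(Args(), log)
# Run a command to completion; quiet mode redirects output to devnull.
exit_code = handler.start_output(['echo', 'hello'])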
#pragma once
#include <ATen/Tensor.h>
namespace at::native::sparse::impl::mkl {
void addmm_out_sparse_csr(
const Tensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
const Tensor& result);
void addmv_out_sparse_csr(
const Tensor& mat,
const Tensor& vec,
const Scalar& beta,
const Scalar& alpha,
const Tensor& result);
void add_out_sparse_csr(
const Tensor& mat1,
const Tensor& mat2,
const Scalar& alpha,
const Tensor& result);
void triangular_solve_out_sparse_csr(
const Tensor& A,
const Tensor& B,
const Tensor& X,
bool upper,
bool transpose,
bool unitriangular);
} // namespace at | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/mkl/SparseBlasImpl.h |
"""
Sane parameters for stats.distributions.
"""
distcont = [
['alpha', (3.5704770516650459,)],
['anglit', ()],
['arcsine', ()],
['beta', (2.3098496451481823, 0.62687954300963677)],
['betaprime', (5, 6)],
['bradford', (0.29891359763170633,)],
['burr', (10.5, 4.3)],
['burr12', (10, 4)],
['cauchy', ()],
['chi', (78,)],
['chi2', (55,)],
['cosine', ()],
['dgamma', (1.1023326088288166,)],
['dweibull', (2.0685080649914673,)],
['erlang', (10,)],
['expon', ()],
['exponnorm', (1.5,)],
['exponpow', (2.697119160358469,)],
['exponweib', (2.8923945291034436, 1.9505288745913174)],
['f', (29, 18)],
['fatiguelife', (29,)], # correction numargs = 1
['fisk', (3.0857548622253179,)],
['foldcauchy', (4.7164673455831894,)],
['foldnorm', (1.9521253373555869,)],
['frechet_l', (3.6279911255583239,)],
['frechet_r', (1.8928171603534227,)],
['gamma', (1.9932305483800778,)],
['gausshyper', (13.763771604130699, 3.1189636648681431,
2.5145980350183019, 5.1811649903971615)], # veryslow
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
['genextreme', (-0.1,)],
['gengamma', (4.4162385429431925, 3.1193091679242761)],
['gengamma', (4.4162385429431925, -3.1193091679242761)],
['genhalflogistic', (0.77274727809929322,)],
['genlogistic', (0.41192440799679475,)],
['gennorm', (1.2988442399460265,)],
['halfgennorm', (0.6748054997000371,)],
['genpareto', (0.1,)], # use case with finite moments
['gilbrat', ()],
['gompertz', (0.94743713075105251,)],
['gumbel_l', ()],
['gumbel_r', ()],
['halfcauchy', ()],
['halflogistic', ()],
['halfnorm', ()],
['hypsecant', ()],
['invgamma', (4.0668996136993067,)],
['invgauss', (0.14546264555347513,)],
['invweibull', (10.58,)],
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
['kappa4', (0.0, 0.0)],
['kappa4', (-0.1, 0.1)],
['kappa4', (0.0, 0.1)],
['kappa4', (0.1, 0.0)],
['kappa3', (1.0,)],
['ksone', (1000,)], # large shape parameter avoids a failing range, ticket 956
['kstwobign', ()],
['laplace', ()],
['levy', ()],
['levy_l', ()],
['levy_stable', (0.35667405469844993,
-0.67450531578494011)], # NotImplementedError
# rvs not tested
['loggamma', (0.41411931826052117,)],
['logistic', ()],
['loglaplace', (3.2505926592051435,)],
['lognorm', (0.95368226960575331,)],
['lomax', (1.8771398388773268,)],
['maxwell', ()],
['mielke', (10.4, 3.6)],
['nakagami', (4.9673794866666237,)],
['ncf', (27, 27, 0.41578441799226107)],
['nct', (14, 0.24045031331198066)],
['ncx2', (21, 1.0560465975116415)],
['norm', ()],
['pareto', (2.621716532144454,)],
['pearson3', (0.1,)],
['powerlaw', (1.6591133289905851,)],
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
['powernorm', (4.4453652254590779,)],
['rayleigh', ()],
['rdist', (0.9,)], # feels also slow
['recipinvgauss', (0.63004267809369119,)],
['reciprocal', (0.0062309367010521255, 1.0062309367010522)],
['rice', (0.7749725210111873,)],
['semicircular', ()],
['skewnorm', (4.0,)],
['t', (2.7433514990818093,)],
['trapz', (0.2, 0.8)],
['triang', (0.15785029824528218,)],
['truncexpon', (4.6907725456810478,)],
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
['truncnorm', (0.1, 2.)],
['tukeylambda', (3.1321477856738267,)],
['uniform', ()],
['vonmises', (3.9939042581071398,)],
['vonmises_line', (3.9939042581071398,)],
['wald', ()],
['weibull_max', (2.8687961709100187,)],
['weibull_min', (1.7866166930421596,)],
['wrapcauchy', (0.031071279018614728,)]]
distdiscrete = [
['bernoulli', (0.3,)],
['binom', (5, 0.4)],
['boltzmann', (1.4, 19)],
['dlaplace', (0.8,)], # 0.5
['geom', (0.5,)],
['hypergeom', (30, 12, 6)],
['hypergeom', (21, 3, 12)], # numpy.random (3,18,12) numpy ticket:921
['hypergeom', (21, 18, 11)], # numpy.random (18,3,11) numpy ticket:921
['logser', (0.6,)], # reenabled, numpy ticket:921
['nbinom', (5, 0.5)],
['nbinom', (0.4, 0.4)], # from tickets: 583
['planck', (0.51,)], # 4.1
['poisson', (0.6,)],
['randint', (7, 31)],
['skellam', (15, 8)],
['zipf', (6.5,)]
] | unknown | codeparrot/codeparrot-clean | ||
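A short sketch of how these (name, shape-parameters) pairs are typically consumed: look up the distribution on scipy.stats and freeze it with the given shapes (one continuous and one discrete example shown):

from scipy import stats

for name, params in [('gamma', (1.9932305483800778,)),
                     ('nbinom', (5, 0.5))]:
    # Freeze the distribution with its sane shape parameters.
    dist = getattr(stats, name)(*params)
    print(name, dist.mean(), dist.std())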
# -*- coding: utf-8 -*-
import unittest
from tasty.tastyot import TastyOT
from multiprocessing import Process
from tasty.osock import *
from time import sleep
from tasty.types import Party
from tasty import utils, config, state
from tasty.protocols.otprotocols import paillierot
from tasty.protocols.otprotocols import PaillierOT, ECNaorPinkasOT
import socket, atexit
from gmpy import mpz
from itertools import product
import time
#TODO: @Immo: please document how OT tests work
class TastyOTTestCase(unittest.TestCase):
def setUp(self):
state.config = config.create_configuration(security_level="short", asymmetric_security_parameter=1024, symmetric_security_parameter=80, ot_type = "EC", host="::1", port=8000, protocol_dir="docs/millionaires_problem/")
state.config.ot_chain = [PaillierOT]
def test_tastyot(self):
""" testing the global TastyOT """
# state.config.ot_chain = [paillierot.PaillierOT]
n = 128 # must be even
self.ot = t = OTTest(n)
x = tuple(utils.get_random(0,1,n/2))
y = tuple(utils.get_random(0,1,n/2))
xc = [tuple(utils.get_random(0,2**80-1,2)) for i in xrange(n/2)]
yc = [tuple(utils.get_random(0,2**80-1,2)) for i in xrange(n/2)]
resultx = tuple(map(lambda x: mpz(x[0][x[1]]), zip(xc, x)))
resulty = tuple(map(lambda x: mpz(x[0][x[1]]), zip(yc, y)))
res = t.next_ots(x, xc)[1]
self.assertEqual(res, resultx)
res2 = t.next_ots(y, yc)[1]
self.assertEqual(res2, resulty)
# self.failUnlessRaises(OverflowError, t.next_ots, t, ((1,),(5,7)))
def test_ot_protocol_performance(self):
""" testing available OT protocols """
# for security_level, ot_type in product(("short","medium","long"), ("Paillier","EC_c","EC")):
for security_level, ot_type in product(("short","medium","long"), ("EC_c","EC")):
print security_level, ot_type,
start_time = time.clock()
#protocols = [ECNaorPinkasOT.NP_EC_OT_secp192r1, ECNaorPinkasOT.NP_EC_OT_secp192r1_c, \
# ECNaorPinkasOT.NP_EC_OT_secp224r1, ECNaorPinkasOT.NP_EC_OT_secp224r1_c, \
# ECNaorPinkasOT.NP_EC_OT_secp256r1, ECNaorPinkasOT.NP_EC_OT_secp256r1_c]
#protocols = state.config.ot_chain
n = state.config.symmetric_security_parameter
# for prot in protocols:
# print prot.__name__
# state.config.ot_chain = [prot]
self.ot = t = OTTest(n)
x = tuple(utils.get_random(0,1,n/2))
y = tuple(utils.get_random(0,1,n/2))
xc = [tuple(utils.get_random(0,2**n-1,2)) for i in xrange(n/2)] # n-bit messages
yc = [tuple(utils.get_random(0,2**n-1,2)) for i in xrange(n/2)] # n-bit messages
resultx = tuple(map(lambda x: mpz(x[0][x[1]]), zip(xc, x)))
resulty = tuple(map(lambda x: mpz(x[0][x[1]]), zip(yc, y)))
res = t.next_ots(x, xc)[1]
self.assertEqual(res, resultx)
res2 = t.next_ots(y, yc)[1]
self.assertEqual(res2, resulty)
print "%fs" % (time.clock()-start_time)
class OTTest(object):
#TODO: @Immo: please document how OTTest works
def __init__(self, num):
self.num = num
p = Process(target=OTTest.client,
args=(self, num))
p.start()
self.init_server()
atexit.register(self.__del__)
def next_ots(self, choices, transfer):
# debug ("starting online phase")
self.csock.sendobj("online")
self.csock.sendobj(choices)
sres = self.server_online(transfer)
cres = self.csock.recvobj()
return (sres, cres)
def init_client(self):
sleep(.1) #give the server time to set up
sock = ClientObjectSocket(host="::1", port=8000)
sleep(.1)
self.csock = ClientObjectSocket(host="::1", port=8001)
self.party = Party(role=Party.CLIENT, sock=sock)
def client(self, num):
self.init_client()
self.ot = TastyOT(self.party, num)
while True:
next = self.csock.recvobj()
# debug ("executing command %s"%next)
if next == "online":
self.client_online(self.csock.recvobj())
elif next == "quit":
exit(0)
else:
raise NotImplementedError(next)
def init_server(self):
sock = ServerObjectSocket(host="::1", port=8000).accept()[0]
self.csock = ServerObjectSocket(host="::1", port=8001).accept()[0]
self.party = party = Party(role=Party.SERVER, sock=sock)
self.ot = TastyOT(self.party, self.num)
def client_online(self, arg):
self.csock.sendobj(self.ot.next_ots(arg))
def server_online(self, arg):
self.ot.next_ots(arg)
def __del__(self):
try:
self.csock.sendobj("quit")
self.csock.close()
except socket.error: #server side has already exited
pass
def suite():
suite = unittest.TestSuite()
# suite.addTest(TastyOTTestCase("test_tastyot"))
suite.addTest(TastyOTTestCase("test_ot_protocol_performance"))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite()) | unknown | codeparrot/codeparrot-clean | ||
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef LINUX_KALLSYMS_SELFTEST_H_
#define LINUX_KALLSYMS_SELFTEST_H_
#include <linux/types.h>
extern int kallsyms_test_var_bss;
extern int kallsyms_test_var_data;
extern int kallsyms_test_func(void);
extern int kallsyms_test_func_weak(void);
#endif // LINUX_KALLSYMS_SELFTEST_H_ | c | github | https://github.com/torvalds/linux | kernel/kallsyms_selftest.h |
#ifndef Py_INTERNAL_FLOATOBJECT_H
#define Py_INTERNAL_FLOATOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_unicodeobject.h" // _PyUnicodeWriter
/* runtime lifecycle */
extern void _PyFloat_InitState(PyInterpreterState *);
extern PyStatus _PyFloat_InitTypes(PyInterpreterState *);
extern void _PyFloat_FiniType(PyInterpreterState *);
PyAPI_FUNC(void) _PyFloat_ExactDealloc(PyObject *op);
extern void _PyFloat_DebugMallocStats(FILE* out);
/* Format the object based on the format_spec, as defined in PEP 3101
(Advanced String Formatting). */
extern int _PyFloat_FormatAdvancedWriter(
_PyUnicodeWriter *writer,
PyObject *obj,
PyObject *format_spec,
Py_ssize_t start,
Py_ssize_t end);
extern PyObject* _Py_string_to_number_with_underscores(
const char *str, Py_ssize_t len, const char *what, PyObject *obj, void *arg,
PyObject *(*innerfunc)(const char *, Py_ssize_t, void *));
extern double _Py_parse_inf_or_nan(const char *p, char **endptr);
extern int _Py_convert_int_to_double(PyObject **v, double *dbl);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FLOATOBJECT_H */ | c | github | https://github.com/python/cpython | Include/internal/pycore_floatobject.h |
# mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 1.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
__all__ = (
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
)
__version__ = '1.0.1'
import pprint
import sys
try:
import inspect
except ImportError:
# for alternative platforms that
# may not have inspect
inspect = None
try:
from functools import wraps as original_wraps
except ImportError:
# Python 2.4 compatibility
def wraps(original):
def inner(f):
f.__name__ = original.__name__
f.__doc__ = original.__doc__
f.__module__ = original.__module__
f.__wrapped__ = original
return f
return inner
else:
if sys.version_info[:2] >= (3, 3):
wraps = original_wraps
else:
def wraps(func):
def inner(f):
f = original_wraps(func)(f)
f.__wrapped__ = func
return f
return inner
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
try:
long
except NameError:
# Python 3
long = int
try:
BaseException
except NameError:
# Python 2.4 compatibility
BaseException = Exception
try:
next
except NameError:
def next(obj):
return obj.next()
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
try:
_isidentifier = str.isidentifier
except AttributeError:
# Python 2.X
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
return regex.match(string)
inPy3k = sys.version_info[0] == 3
# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
self = '__self__'
builtin = 'builtins'
FILTER_DIR = True
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
)
class _slotted(object):
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
def _getsignature(func, skipfirst, instance=False):
if inspect is None:
raise ImportError('inspect module not available')
if isinstance(func, ClassTypes) and not instance:
try:
func = func.__init__
except AttributeError:
return
skipfirst = True
elif not isinstance(func, FunctionTypes):
# for classes where instance is True we end up here too
try:
func = func.__call__
except AttributeError:
return
if inPy3k:
try:
argspec = inspect.getfullargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
else:
try:
regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
# instance methods and classmethods need to lose the self argument
if getattr(func, self, None) is not None:
regargs = regargs[1:]
if skipfirst:
# this condition and the above one are never both True - why?
regargs = regargs[1:]
if inPy3k:
signature = inspect.formatargspec(
regargs, varargs, varkw, defaults,
kwonly, kwonlydef, ann, formatvalue=lambda value: "")
else:
signature = inspect.formatargspec(
regargs, varargs, varkwargs, defaults,
formatvalue=lambda value: "")
return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
if not _callable(func):
return
result = _getsignature(func, skipfirst, instance)
if result is None:
return
signature, func = result
# can't use "self" here because it is a common argument name --
# unfortunately it may not even be in the first position
src = "lambda _mock_self, %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
#funcopy.__dict__.update(func.__dict__)
funcopy.__module__ = func.__module__
if not inPy3k:
funcopy.func_defaults = func.func_defaults
return
funcopy.__defaults__ = func.__defaults__
funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
if isinstance(obj, ClassTypes):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, ClassTypes)
result = _getsignature(original, skipfirst, instance)
if result is None:
# was a C function (e.g. object().__init__ ) that can't be mocked
return
signature, func = result
src = "lambda %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
name = original.__name__
if not _isidentifier(name):
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec (src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
if _is_instance_mock(ret) and not ret is mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
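# Illustrative note (not part of the original source): each attribute
# access on `sentinel` returns a unique, stable named object, e.g.
#   sentinel.DEFAULT is sentinel.DEFAULT  ->  True
#   sentinel.DEFAULT is sentinel.MISSING  ->  False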
class OldStyleClass:
pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
ClassTypes = (type,)
if not inPy3k:
ClassTypes = (type, ClassType)
_allowed_names = set(
[
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
]
)
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
**kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
if spec_set is not None:
spec = spec_set
spec_set = True
self._mock_add_spec(spec, spec_set)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
if kwargs:
self.configure_mock(**kwargs)
_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set):
_spec_class = None
if spec is not None and not _is_list(spec):
if isinstance(spec, ClassTypes):
_spec_class = spec
else:
_spec_class = _get_class(spec)
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
sig = self._mock_delegate
if sig is None:
return self._mock_side_effect
return sig.side_effect
def __set_side_effect(self, value):
value = _try_iter(value)
sig = self._mock_delegate
if sig is None:
self._mock_side_effect = value
else:
sig.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock()
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members.
XXXX
"""
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
if FILTER_DIR:
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
if self.call_args != (args, kwargs):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected to be called once. Called %s times." %
self.call_count)
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
)
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
kall = call(*args, **kwargs)
if kall not in self.call_args_list:
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock) :
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in-case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
self.call_args = _Call((args, kwargs), two=True)
self.call_args_list.append(_Call((args, kwargs), two=True))
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
return result
ret_val = effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
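# Sketch of the precedence `_mock_call` implements above (illustrative, not
# part of the original module; `DEFAULT` is the module's sentinel): exception
# side_effects are raised, iterable side_effects are consumed call-by-call,
# and a callable side_effect only overrides `return_value` when it returns
# something other than DEFAULT:
#   m = Mock(side_effect=lambda *a, **kw: DEFAULT, return_value=42)
#   m()                # -> 42, because the side_effect returned DEFAULT
#   m.side_effect = [1, 2, KeyError('boom')]
#   m(); m()           # -> 1, then 2
#   m()                # raises KeyError('boom')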
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
Alternatively `side_effect` can be an exception class or instance. In
this case the exception will be raised when the mock is called.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
_active_patches = set()
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, ClassTypes):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
# don't use a with here (backwards compatibility with Python 2.4)
extra_args = []
entered_patchers = []
# can't use try...except...finally because of Python 2.4
# compatibility
exc_info = tuple()
try:
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
if hasattr(func, 'func_code'):
# not in Python 3
patched.compat_co_firstlineno = getattr(
func, "compat_co_firstlineno",
func.func_code.co_firstlineno
)
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, ClassTypes):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not _callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and not hasattr(self.target, self.attribute):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.add(self)
return result
def stop(self):
"""Stop an active patch."""
self._active_patches.discard(self)
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs)
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
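# Example (illustrative; `SomeClass` and `method` are hypothetical names):
# patching a single attribute on an object that is already in hand, rather
# than importing it by string path:
#   @patch.object(SomeClass, 'method')
#   def test(mock_method):
#       SomeClass().method(3)
#       mock_method.assert_called_with(3)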
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) in (unicode, str):
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
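# Example (illustrative; 'package.module' and its attributes `thing` and
# `other` are hypothetical): passing DEFAULT asks `patch.multiple` to create
# the mocks, which are then handed to the decorated function by keyword:
#   @patch.multiple('package.module', thing=DEFAULT, other=DEFAULT)
#   def test(thing, other):
#       assert isinstance(thing, MagicMock)
#       assert isinstance(other, MagicMock)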
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
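# Example (illustrative sketch; 'package.module.ClassName' is a placeholder
# target): the same patcher works as a decorator and as a context manager:
#   @patch('package.module.ClassName')
#   def test(MockClass):
#       package.module.ClassName()
#       MockClass.assert_called_once_with()
#
#   with patch('package.module.ClassName') as MockClass:
#       instance = MockClass.return_value
#       instance.method.return_value = 'result'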
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, basestring):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, ClassTypes):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
"""Stop all active patches."""
for patch in list(_patch._active_patches):
patch.stop()
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
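# Example (illustrative): `start`/`stop` support patching across setUp and
# tearDown, and `patch.stopall` (wired up above) undoes anything still active:
#   patcher = patch('package.module.ClassName')   # placeholder target
#   MockClass = patcher.start()
#   try:
#       ...   # run the code under test
#   finally:
#       patch.stopall()   # instead of tracking each patcher.stop() by hand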
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
"divmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
)
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
extra = ''
if inPy3k:
extra = 'bool next '
else:
extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
'format', 'get', 'set', 'delete', 'reversed',
'missing', 'reduce', 'reduce_ex', 'getinitargs',
'getnewargs', 'getstate', 'setstate', 'getformat',
'setformat', 'repr', 'dir'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = set(
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__',
'__instancecheck__', '__subclasscheck__',
'__del__'
])
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
'__unicode__': lambda self: unicode(object.__str__(self)),
}
_return_values = {
'__lt__': NotImplemented,
'__gt__': NotImplemented,
'__le__': NotImplemented,
'__ge__': NotImplemented,
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__nonzero__': True,
'__oct__': '1',
'__hex__': '0x1',
'__long__': long(1),
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is other
return __eq__
def _get_ne(self):
def __ne__(other):
ret_val = self.__ne__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is not other
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
return_calculator = _calculate_return_value.get(name)
if return_calculator is not None:
try:
return_value = return_calculator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics()
def _mock_set_magics(self):
these_magics = _magics
if self._mock_methods is not None:
these_magics = _magics.intersection(self._mock_methods)
remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
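# Example output of the helper above (illustrative):
#   _format_call_signature('mock', (1, 2), {'a': 3})  # -> "mock(1, 2, a=3)"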
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name=None, parent=None, two=False,
from_kall=True):
name = ''
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, basestring):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, basestring):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, basestring):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
else:
# len 2
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, basestring):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
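# Example (illustrative): `call` records chained lookups and calls, and
# `call_list()` expands a chain into the individual calls it implies:
#   m = MagicMock()
#   m.factory(important=True).deliver()
#   m.mock_calls == call.factory(important=True).deliver().call_list()  # True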
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
Functions or methods being mocked will have their arguments checked
to ensure that they are called with the correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, ClassTypes)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
_kwargs = {}
_kwargs.update(kwargs)
Klass = MagicMock
if type(spec) in DescriptorTypes:
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
# allow a mock to actually be a function
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions other than
# AttributeError on being fetched?
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
try:
original = getattr(spec, entry)
except AttributeError:
continue
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent, **kwargs)
mock._mock_children[entry] = new
skipfirst = _must_skip(spec, entry, is_type)
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with _set_signature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
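# Example (illustrative; `SomeClass` is a hypothetical spec): the generated
# mock enforces the signatures of the object it mirrors:
#   mock_cls = create_autospec(SomeClass)
#   instance = mock_cls()            # the return value shares the spec
#   instance.existing_method(1, 2)   # ok if the real signature accepts this
#   instance.existing_method()       # TypeError on a signature mismatch
#   instance.no_such_method          # AttributeError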
def _must_skip(spec, entry, is_type):
if not isinstance(spec, ClassTypes):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
spec = spec.__class__
if not hasattr(spec, '__mro__'):
# old style class: can't have descriptors anyway
return is_type
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
return is_type
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
# unbound method
type(_ANY.__eq__),
)
FunctionAttributes = set([
'func_closure',
'func_code',
'func_defaults',
'func_dict',
'func_doc',
'func_globals',
'func_name',
])
file_spec = None
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read` method of the file handle to return.
This is an empty string by default.
"""
global file_spec
if file_spec is None:
# set on first use
if inPy3k:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
else:
file_spec = file
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
handle.__enter__.return_value = handle
handle.read.return_value = read_data
mock.return_value = handle
return mock
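# Example (illustrative): replacing the builtin `open` during a test. On
# Python 2 the patch target would be '__builtin__.open' instead:
#   m = mock_open(read_data='first line')
#   with patch('builtins.open', m, create=True):
#       with open('any/path') as h:      # the path is never touched
#           assert h.read() == 'first line'
#   m.assert_called_once_with('any/path')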
class PropertyMock(Mock):
"""
A mock intended to be used as a property, or other descriptor, on a class.
`PropertyMock` provides `__get__` and `__set__` methods so you can specify
a return value when it is fetched.
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val) | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2015_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> "_models.OperationListResult"
"""Lists all of the available operations from Microsoft.Insights provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationListResult, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2015_04_01.models.OperationListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-04-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('OperationListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/providers/microsoft.insights/operations'} # type: ignore | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
from django.conf import settings
from django.utils.unittest import skipUnless
from .base import SitemapTestsBase
class FlatpagesSitemapTests(SitemapTestsBase):
@skipUnless("django.contrib.flatpages" in settings.INSTALLED_APPS,
"django.contrib.flatpages app not installed.")
def test_flatpage_sitemap(self):
"Basic FlatPage sitemap test"
# Import FlatPage inside the test so that when django.contrib.flatpages
# is not installed we don't get problems trying to delete Site
# objects (FlatPage has an M2M to Site, Site.delete() tries to
# delete related objects, but the M2M table doesn't exist.)
from django.contrib.flatpages.models import FlatPage
public = FlatPage.objects.create(
url='/public/',
title='Public Page',
enable_comments=True,
registration_required=False,
)
public.sites.add(settings.SITE_ID)
private = FlatPage.objects.create(
url='/private/',
title='Private Page',
enable_comments=True,
registration_required=True
)
private.sites.add(settings.SITE_ID)
response = self.client.get('/flatpages/sitemap.xml')
# Public flatpage should be in the sitemap
self.assertContains(response, '<loc>%s%s</loc>' % (self.base_url, public.url))
# Private flatpage should not be in the sitemap
self.assertNotContains(response, '<loc>%s%s</loc>' % (self.base_url, private.url)) | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Broadcasting\Broadcasters;
use Psr\Log\LoggerInterface;
class LogBroadcaster extends Broadcaster
{
/**
* The logger implementation.
*
* @var \Psr\Log\LoggerInterface
*/
protected $logger;
/**
* Create a new broadcaster instance.
*
* @param \Psr\Log\LoggerInterface $logger
*/
public function __construct(LoggerInterface $logger)
{
$this->logger = $logger;
}
/**
* {@inheritdoc}
*/
public function auth($request)
{
//
}
/**
* {@inheritdoc}
*/
public function validAuthenticationResponse($request, $result)
{
//
}
/**
* {@inheritdoc}
*/
public function broadcast(array $channels, $event, array $payload = [])
{
$channels = implode(', ', $this->formatChannels($channels));
$payload = json_encode($payload, JSON_PRETTY_PRINT);
$this->logger->info('Broadcasting ['.$event.'] on channels ['.$channels.'] with payload:'.PHP_EOL.$payload);
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/Broadcasting/Broadcasters/LogBroadcaster.php |
import logging
import sys
from pip._vendor.contextlib2 import suppress
from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import Version
from pip._internal.exceptions import HashError, MetadataInconsistent
from pip._internal.network.lazy_wheel import (
HTTPRangeRequestUnsupported,
dist_from_wheel_url,
)
from pip._internal.req.constructors import (
install_req_from_editable,
install_req_from_line,
)
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import dist_is_editable, normalize_version_info
from pip._internal.utils.packaging import get_requires_python
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .base import Candidate, format_name
if MYPY_CHECK_RUNNING:
from typing import Any, FrozenSet, Iterable, Optional, Tuple, Union
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.pkg_resources import Distribution
from pip._internal.distributions import AbstractDistribution
from pip._internal.models.link import Link
from .base import Requirement
from .factory import Factory
BaseCandidate = Union[
"AlreadyInstalledCandidate",
"EditableCandidate",
"LinkCandidate",
]
logger = logging.getLogger(__name__)
def make_install_req_from_link(link, template):
# type: (Link, InstallRequirement) -> InstallRequirement
assert not template.editable, "template is editable"
if template.req:
line = str(template.req)
else:
line = link.url
ireq = install_req_from_line(
line,
user_supplied=template.user_supplied,
comes_from=template.comes_from,
use_pep517=template.use_pep517,
isolated=template.isolated,
constraint=template.constraint,
options=dict(
install_options=template.install_options,
global_options=template.global_options,
hashes=template.hash_options
),
)
ireq.original_link = template.original_link
ireq.link = link
return ireq
def make_install_req_from_editable(link, template):
# type: (Link, InstallRequirement) -> InstallRequirement
assert template.editable, "template not editable"
return install_req_from_editable(
link.url,
user_supplied=template.user_supplied,
comes_from=template.comes_from,
use_pep517=template.use_pep517,
isolated=template.isolated,
constraint=template.constraint,
options=dict(
install_options=template.install_options,
global_options=template.global_options,
hashes=template.hash_options
),
)
def make_install_req_from_dist(dist, template):
# type: (Distribution, InstallRequirement) -> InstallRequirement
project_name = canonicalize_name(dist.project_name)
if template.req:
line = str(template.req)
elif template.link:
line = "{} @ {}".format(project_name, template.link.url)
else:
line = "{}=={}".format(project_name, dist.parsed_version)
ireq = install_req_from_line(
line,
user_supplied=template.user_supplied,
comes_from=template.comes_from,
use_pep517=template.use_pep517,
isolated=template.isolated,
constraint=template.constraint,
options=dict(
install_options=template.install_options,
global_options=template.global_options,
hashes=template.hash_options
),
)
ireq.satisfied_by = dist
return ireq
class _InstallRequirementBackedCandidate(Candidate):
"""A candidate backed by an ``InstallRequirement``.
This represents a package request with the target not being already
in the environment, and needs to be fetched and installed. The backing
``InstallRequirement`` is responsible for most of the leg work; this
class exposes appropriate information to the resolver.
:param link: The link passed to the ``InstallRequirement``. The backing
``InstallRequirement`` will use this link to fetch the distribution.
:param source_link: The link this candidate "originates" from. This is
different from ``link`` when the link is found in the wheel cache.
``link`` would point to the wheel cache, while this points to the
found remote link (e.g. from pypi.org).
"""
is_installed = False
def __init__(
self,
link, # type: Link
source_link, # type: Link
ireq, # type: InstallRequirement
factory, # type: Factory
name=None, # type: Optional[str]
version=None, # type: Optional[_BaseVersion]
):
# type: (...) -> None
self._link = link
self._source_link = source_link
self._factory = factory
self._ireq = ireq
self._name = name
self._version = version
self._dist = None # type: Optional[Distribution]
self._prepared = False
def __repr__(self):
# type: () -> str
return "{class_name}({link!r})".format(
class_name=self.__class__.__name__,
link=str(self._link),
)
def __hash__(self):
# type: () -> int
return hash((self.__class__, self._link))
def __eq__(self, other):
# type: (Any) -> bool
if isinstance(other, self.__class__):
return self._link == other._link
return False
# Needed for Python 2, which does not implement this by default
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
@property
def source_link(self):
# type: () -> Optional[Link]
return self._source_link
@property
def name(self):
# type: () -> str
"""The normalised name of the project the candidate refers to"""
if self._name is None:
self._name = canonicalize_name(self.dist.project_name)
return self._name
@property
def version(self):
# type: () -> _BaseVersion
if self._version is None:
self._version = self.dist.parsed_version
return self._version
def format_for_error(self):
# type: () -> str
return "{} {} (from {})".format(
self.name,
self.version,
self._link.file_path if self._link.is_file else self._link
)
def _prepare_abstract_distribution(self):
# type: () -> AbstractDistribution
raise NotImplementedError("Override in subclass")
def _check_metadata_consistency(self):
# type: () -> None
"""Check for consistency of project name and version of dist."""
# TODO: (Longer term) Rather than abort, reject this candidate
# and backtrack. This would need resolvelib support.
dist = self._dist # type: Distribution
name = canonicalize_name(dist.project_name)
if self._name is not None and self._name != name:
raise MetadataInconsistent(self._ireq, "name", dist.project_name)
version = dist.parsed_version
if self._version is not None and self._version != version:
raise MetadataInconsistent(self._ireq, "version", dist.version)
def _prepare(self):
# type: () -> None
if self._prepared:
return
try:
abstract_dist = self._prepare_abstract_distribution()
except HashError as e:
e.req = self._ireq
raise
self._dist = abstract_dist.get_pkg_resources_distribution()
assert self._dist is not None, "Distribution already installed"
self._check_metadata_consistency()
self._prepared = True
def _fetch_metadata(self):
# type: () -> None
"""Fetch metadata, using lazy wheel if possible."""
preparer = self._factory.preparer
use_lazy_wheel = self._factory.use_lazy_wheel
remote_wheel = self._link.is_wheel and not self._link.is_file
if use_lazy_wheel and remote_wheel and not preparer.require_hashes:
assert self._name is not None
logger.info('Collecting %s', self._ireq.req or self._ireq)
# If HTTPRangeRequestUnsupported is raised, fallback silently.
with indent_log(), suppress(HTTPRangeRequestUnsupported):
logger.info(
'Obtaining dependency information from %s %s',
self._name, self._version,
)
url = self._link.url.split('#', 1)[0]
session = preparer.downloader._session
self._dist = dist_from_wheel_url(self._name, url, session)
self._check_metadata_consistency()
if self._dist is None:
self._prepare()
@property
def dist(self):
# type: () -> Distribution
if self._dist is None:
self._fetch_metadata()
return self._dist
def _get_requires_python_specifier(self):
# type: () -> Optional[SpecifierSet]
requires_python = get_requires_python(self.dist)
if requires_python is None:
return None
try:
spec = SpecifierSet(requires_python)
except InvalidSpecifier as e:
logger.warning(
"Package %r has an invalid Requires-Python: %s", self.name, e,
)
return None
return spec
def iter_dependencies(self, with_requires):
# type: (bool) -> Iterable[Optional[Requirement]]
if not with_requires:
return
for r in self.dist.requires():
yield self._factory.make_requirement_from_spec(str(r), self._ireq)
python_dep = self._factory.make_requires_python_requirement(
self._get_requires_python_specifier(),
)
if python_dep:
yield python_dep
def get_install_requirement(self):
# type: () -> Optional[InstallRequirement]
self._prepare()
return self._ireq
class LinkCandidate(_InstallRequirementBackedCandidate):
is_editable = False
def __init__(
self,
link, # type: Link
template, # type: InstallRequirement
factory, # type: Factory
name=None, # type: Optional[str]
version=None, # type: Optional[_BaseVersion]
):
# type: (...) -> None
source_link = link
cache_entry = factory.get_wheel_cache_entry(link, name)
if cache_entry is not None:
logger.debug("Using cached wheel link: %s", cache_entry.link)
link = cache_entry.link
ireq = make_install_req_from_link(link, template)
if (cache_entry is not None and
cache_entry.persistent and
template.link is template.original_link):
ireq.original_link_is_in_wheel_cache = True
super(LinkCandidate, self).__init__(
link=link,
source_link=source_link,
ireq=ireq,
factory=factory,
name=name,
version=version,
)
def _prepare_abstract_distribution(self):
# type: () -> AbstractDistribution
return self._factory.preparer.prepare_linked_requirement(
self._ireq, parallel_builds=True,
)
class EditableCandidate(_InstallRequirementBackedCandidate):
is_editable = True
def __init__(
self,
link, # type: Link
template, # type: InstallRequirement
factory, # type: Factory
name=None, # type: Optional[str]
version=None, # type: Optional[_BaseVersion]
):
# type: (...) -> None
super(EditableCandidate, self).__init__(
link=link,
source_link=link,
ireq=make_install_req_from_editable(link, template),
factory=factory,
name=name,
version=version,
)
def _prepare_abstract_distribution(self):
# type: () -> AbstractDistribution
return self._factory.preparer.prepare_editable_requirement(self._ireq)
class AlreadyInstalledCandidate(Candidate):
is_installed = True
source_link = None
def __init__(
self,
dist, # type: Distribution
template, # type: InstallRequirement
factory, # type: Factory
):
# type: (...) -> None
self.dist = dist
self._ireq = make_install_req_from_dist(dist, template)
self._factory = factory
# This is just logging some messages, so we can do it eagerly.
# The returned dist would be exactly the same as self.dist because we
# set satisfied_by in make_install_req_from_dist.
# TODO: Supply reason based on force_reinstall and upgrade_strategy.
skip_reason = "already satisfied"
factory.preparer.prepare_installed_requirement(self._ireq, skip_reason)
def __repr__(self):
# type: () -> str
return "{class_name}({distribution!r})".format(
class_name=self.__class__.__name__,
distribution=self.dist,
)
def __hash__(self):
# type: () -> int
return hash((self.__class__, self.name, self.version))
def __eq__(self, other):
# type: (Any) -> bool
if isinstance(other, self.__class__):
return self.name == other.name and self.version == other.version
return False
# Needed for Python 2, which does not implement this by default
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
@property
def name(self):
# type: () -> str
return canonicalize_name(self.dist.project_name)
@property
def version(self):
# type: () -> _BaseVersion
return self.dist.parsed_version
@property
def is_editable(self):
# type: () -> bool
return dist_is_editable(self.dist)
def format_for_error(self):
# type: () -> str
return "{} {} (Installed)".format(self.name, self.version)
def iter_dependencies(self, with_requires):
# type: (bool) -> Iterable[Optional[Requirement]]
if not with_requires:
return
for r in self.dist.requires():
yield self._factory.make_requirement_from_spec(str(r), self._ireq)
def get_install_requirement(self):
# type: () -> Optional[InstallRequirement]
return None
class ExtrasCandidate(Candidate):
"""A candidate that has 'extras', indicating additional dependencies.
Requirements can be for a project with dependencies, something like
foo[extra]. The extras don't affect the project/version being installed
directly, but indicate that we need additional dependencies. We model that
by having an artificial ExtrasCandidate that wraps the "base" candidate.
The ExtrasCandidate differs from the base in the following ways:
1. It has a unique name, of the form foo[extra]. This causes the resolver
to treat it as a separate node in the dependency graph.
2. When we're getting the candidate's dependencies,
a) We specify that we want the extra dependencies as well.
b) We add a dependency on the base candidate.
See below for why this is needed.
3. We return None for the underlying InstallRequirement, as the base
candidate will provide it, and we don't want to end up with duplicates.
The dependency on the base candidate is needed so that the resolver can't
decide that it should recommend foo[extra1] version 1.0 and foo[extra2]
version 2.0. Having those candidates depend on foo=1.0 and foo=2.0
respectively forces the resolver to recognise that this is a conflict.
"""
def __init__(
self,
base, # type: BaseCandidate
extras, # type: FrozenSet[str]
):
# type: (...) -> None
self.base = base
self.extras = extras
def __repr__(self):
# type: () -> str
return "{class_name}(base={base!r}, extras={extras!r})".format(
class_name=self.__class__.__name__,
base=self.base,
extras=self.extras,
)
def __hash__(self):
# type: () -> int
return hash((self.base, self.extras))
def __eq__(self, other):
# type: (Any) -> bool
if isinstance(other, self.__class__):
return self.base == other.base and self.extras == other.extras
return False
# Needed for Python 2, which does not implement this by default
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
@property
def name(self):
# type: () -> str
"""The normalised name of the project the candidate refers to"""
return format_name(self.base.name, self.extras)
@property
def version(self):
# type: () -> _BaseVersion
return self.base.version
def format_for_error(self):
# type: () -> str
return "{} [{}]".format(
self.base.format_for_error(),
", ".join(sorted(self.extras))
)
@property
def is_installed(self):
# type: () -> bool
return self.base.is_installed
@property
def is_editable(self):
# type: () -> bool
return self.base.is_editable
@property
def source_link(self):
# type: () -> Optional[Link]
return self.base.source_link
def iter_dependencies(self, with_requires):
# type: (bool) -> Iterable[Optional[Requirement]]
factory = self.base._factory
# Add a dependency on the exact base
# (See note 2b in the class docstring)
yield factory.make_requirement_from_candidate(self.base)
if not with_requires:
return
# The user may have specified extras that the candidate doesn't
# support. We ignore any unsupported extras here.
valid_extras = self.extras.intersection(self.base.dist.extras)
invalid_extras = self.extras.difference(self.base.dist.extras)
for extra in sorted(invalid_extras):
logger.warning(
"%s %s does not provide the extra '%s'",
self.base.name,
self.version,
extra
)
for r in self.base.dist.requires(valid_extras):
requirement = factory.make_requirement_from_spec(
str(r), self.base._ireq, valid_extras,
)
if requirement:
yield requirement
def get_install_requirement(self):
# type: () -> Optional[InstallRequirement]
# We don't return anything here, because we always
# depend on the base candidate, and we'll get the
# install requirement from that.
return None
class RequiresPythonCandidate(Candidate):
is_installed = False
source_link = None
def __init__(self, py_version_info):
# type: (Optional[Tuple[int, ...]]) -> None
if py_version_info is not None:
version_info = normalize_version_info(py_version_info)
else:
version_info = sys.version_info[:3]
self._version = Version(".".join(str(c) for c in version_info))
# We don't need to implement __eq__() and __ne__() since there is always
# only one RequiresPythonCandidate in a resolution, i.e. the host Python.
# The built-in object.__eq__() and object.__ne__() do exactly what we want.
@property
def name(self):
# type: () -> str
# Avoid conflicting with the PyPI package "Python".
return "<Python from Requires-Python>"
@property
def version(self):
# type: () -> _BaseVersion
return self._version
def format_for_error(self):
# type: () -> str
return "Python {}".format(self.version)
def iter_dependencies(self, with_requires):
# type: (bool) -> Iterable[Optional[Requirement]]
return ()
def get_install_requirement(self):
# type: () -> Optional[InstallRequirement]
return None | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class PixelTestsPage(page_module.Page):
def __init__(self, url, name, test_rect, revision, page_set):
super(PixelTestsPage, self).__init__(url=url, page_set=page_set, name=name)
self.user_agent_type = 'desktop'
self.test_rect = test_rect
self.revision = revision
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'domAutomationController._finished', timeout_in_seconds=30)
class PixelTestsPageSet(page_set_module.PageSet):
""" Some basic test cases for GPU. """
def __init__(self):
super(PixelTestsPageSet, self).__init__(
user_agent_type='desktop')
self.AddPage(PixelTestsPage(
url='file://../../data/gpu/pixel_canvas2d.html',
name='Pixel.Canvas2DRedBox',
test_rect=[0, 0, 300, 300],
revision=4,
page_set=self))
self.AddPage(PixelTestsPage(
url='file://../../data/gpu/pixel_css3d.html',
name='Pixel.CSS3DBlueBox',
test_rect=[0, 0, 300, 300],
revision=9,
page_set=self))
self.AddPage(PixelTestsPage(
url='file://../../data/gpu/pixel_webgl.html',
name='Pixel.WebGLGreenTriangle',
test_rect=[0, 0, 300, 300],
revision=8,
page_set=self)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates company comments.
To determine which companies exist, run get_all_companies.py.
Tags: CompanyService.updateCompanies
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Set the ID of the company to update.
COMPANY_ID = 'INSERT_COMPANY_ID_HERE'
def main(client, company_id):
# Initialize appropriate service.
company_service = client.GetService('CompanyService', version='v201302')
# Get company.
company = company_service.GetCompany(company_id)[0]
if company:
company['comment'] += ' Updated.'
# Update the companies on the server.
companies = company_service.UpdateCompanies([company])
# Display results.
if companies:
for company in companies:
print (('Company with ID \'%s\', name \'%s\', and comment \'%s\' '
'was updated.')
% (company['id'], company['name'], company['comment']))
else:
print 'No companies were updated.'
else:
print 'No companies found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, COMPANY_ID) | unknown | codeparrot/codeparrot-clean | ||
import sys
from boto.compat import json
from boto.exception import BotoServerError
def simple(e):
code = e.code
if code.endswith('Exception'):
# str.rstrip strips a *set* of characters, not a suffix, which would
# mangle names like 'ValidationException'; slice the suffix off instead.
code = code[:-len('Exception')]
try:
# Dynamically get the error class.
simple_e = getattr(sys.modules[__name__], code)(e)
except AttributeError:
# Return original exception on failure.
return e
return simple_e
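# Illustrative usage (an assumption, not from the original boto source):
# callers can narrow a generic BotoServerError to one of the classes below:
#   try:
#       beanstalk.create_application(app_name)
#   except BotoServerError as e:
#       raise simple(e)  # e.g. TooManyApplications instead of BotoServerError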
class SimpleException(BotoServerError):
def __init__(self, e):
super(SimpleException, self).__init__(e.status, e.reason, e.body)
self.error_message = self.message
def __repr__(self):
return self.__class__.__name__ + ': ' + self.error_message
def __str__(self):
return self.__class__.__name__ + ': ' + self.error_message
class ValidationError(SimpleException): pass
# Common beanstalk exceptions.
class IncompleteSignature(SimpleException): pass
class InternalFailure(SimpleException): pass
class InvalidAction(SimpleException): pass
class InvalidClientTokenId(SimpleException): pass
class InvalidParameterCombination(SimpleException): pass
class InvalidParameterValue(SimpleException): pass
class InvalidQueryParameter(SimpleException): pass
class MalformedQueryString(SimpleException): pass
class MissingAction(SimpleException): pass
class MissingAuthenticationToken(SimpleException): pass
class MissingParameter(SimpleException): pass
class OptInRequired(SimpleException): pass
class RequestExpired(SimpleException): pass
class ServiceUnavailable(SimpleException): pass
class Throttling(SimpleException): pass
# Action specific exceptions.
class TooManyApplications(SimpleException): pass
class InsufficientPrivileges(SimpleException): pass
class S3LocationNotInServiceRegion(SimpleException): pass
class TooManyApplicationVersions(SimpleException): pass
class TooManyConfigurationTemplates(SimpleException): pass
class TooManyEnvironments(SimpleException): pass
class S3SubscriptionRequired(SimpleException): pass
class TooManyBuckets(SimpleException): pass
class OperationInProgress(SimpleException): pass
class SourceBundleDeletion(SimpleException): pass | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from lxml import etree
from oslo_utils import timeutils
import webob
from cinder import context
from cinder import test
from cinder.tests.unit.api import fakes
from cinder import volume
PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9'
def fake_volume_get(*args, **kwargs):
return {
'id': 'fake',
'host': 'host001',
'status': 'available',
'size': 5,
'availability_zone': 'somewhere',
'created_at': timeutils.utcnow(),
'attach_status': None,
'display_name': 'anothervolume',
'display_description': 'Just another volume!',
'volume_type_id': None,
'snapshot_id': None,
'project_id': PROJECT_ID,
'migration_status': None,
'_name_id': 'fake2',
}
def fake_volume_get_all(*args, **kwargs):
return [fake_volume_get()]
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeTenantAttributeTest(test.TestCase):
def setUp(self):
super(VolumeTenantAttributeTest, self).setUp()
self.stubs.Set(volume.API, 'get', fake_volume_get)
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
self.UUID = uuid.uuid4()
def test_get_volume_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volume']
self.assertEqual(PROJECT_ID, vol['os-vol-tenant-attr:tenant_id'])
def test_get_volume_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volume']
self.assertNotIn('os-vol-tenant-attr:tenant_id', vol)
def test_list_detail_volumes_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertEqual(PROJECT_ID, vol[0]['os-vol-tenant-attr:tenant_id'])
def test_list_detail_volumes_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0])
def test_list_simple_volumes_no_tenant_id(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0])
def test_get_volume_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = etree.XML(res.body)
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v2}tenant_id')
self.assertEqual(PROJECT_ID, vol.get(tenant_key))
def test_list_volumes_detail_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v2}tenant_id')
self.assertEqual(PROJECT_ID, vol.get(tenant_key)) | unknown | codeparrot/codeparrot-clean | ||
#!/bin/bash
# Copyright IBM Corp. 2016, 2025
# SPDX-License-Identifier: BUSL-1.1
set -e
# All of these environment variables are required or an error will be returned.
[ "${GITHUB_TOKEN:?}" ]
[ "${PR_NUMBER:?}" ]
[ "${REPO:?}" ]
[ "${RUN_ID:?}" ]
# list of build jobs
[ "${ARTIFACTS:?}" ]
[ "${TEST:?}" ]
[ "${TEST_CONTAINERS:?}" ]
[ "${UI:?}" ]
# Build jobs
jobs=("artifacts:$ARTIFACTS" "test:$TEST" "test-containers:$TEST_CONTAINERS" "ui:$UI")
# Sometimes failed jobs can have a result of "cancelled". Handle both.
failed_jobs=()
for job in "${jobs[@]}";do
if [[ "$job" == *"failure"* || "$job" == *"cancelled"* ]]; then
failed_jobs+=("$job")
fi
done
# Create a comment body to set on the pull request which reports failed jobs with a url to the
# failed workflow.
if [ ${#failed_jobs[@]} -eq 0 ]; then
new_body="Build Results:
All builds succeeded! :white_check_mark:"
else
new_body="Build Results:
Build failed for these jobs: ${failed_jobs[*]}. Please refer to this workflow to learn more: https://github.com/hashicorp/vault/actions/runs/$RUN_ID"
fi
source ./.github/scripts/gh-comment.sh
update_or_create_comment "$REPO" "$PR_NUMBER" "Build Results:" "$new_body" | unknown | github | https://github.com/hashicorp/vault | .github/scripts/report-build-status.sh |
// Copyright 2023 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package backup
import (
"context"
"fmt"
"reflect"
"slices"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/backup/backuppb"
"github.com/cockroachdb/cockroach/pkg/backup/backuptestutils"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/funcdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/exprutil"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/testutils/jobutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestRestoreResolveOptionsForJobDescription tests that
// resolveOptionsForRestoreJobDescription handles every field in the
// RestoreOptions struct.
func TestRestoreResolveOptionsForJobDescription(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := tree.MakeSemaContext(nil /* resolver */)
s := cluster.MakeTestingClusterSettings()
exprEval := exprutil.MakeEvaluator(
"test", &sc, eval.NewTestingEvalContext(s),
)
// The input struct must have a non-zero value for every
// element of the struct.
input := tree.RestoreOptions{
SkipMissingFKs: true,
SkipMissingSequences: true,
SkipMissingSequenceOwners: true,
SkipMissingViews: true,
SkipMissingUDFs: true,
Detached: true,
SkipLocalitiesCheck: true,
AsTenant: tree.NewDString("test expr"),
ForceTenantID: tree.NewDInt(42),
SchemaOnly: true,
VerifyData: true,
UnsafeRestoreIncompatibleVersion: true,
ExecutionLocality: tree.NewDString("test expr"),
ExperimentalOnline: true,
ExperimentalCopy: true,
RemoveRegions: true,
IntoDB: tree.NewDString("test expr"),
NewDBName: tree.NewDString("test expr"),
DecryptionKMSURI: []tree.Expr{tree.NewDString("http://example.com")},
EncryptionPassphrase: tree.NewDString("test expr"),
}
ensureAllStructFieldsSet := func(s tree.RestoreOptions, name string) {
structType := reflect.TypeOf(s)
require.Equal(t, reflect.Struct, structType.Kind())
sv := reflect.ValueOf(s)
for i := 0; i < sv.NumField(); i++ {
field := sv.Field(i)
fieldName := structType.Field(i).Name
require.True(t, field.IsValid(), "RestoreOptions field %s in %s is not valid", fieldName, name)
require.False(t, field.IsZero(), "RestoreOptions field %s in %s is zero", fieldName, name)
}
}
ensureAllStructFieldsSet(input, "input")
output, err := resolveOptionsForRestoreJobDescription(
context.Background(),
exprEval,
input,
"into_db",
"newDBName",
)
require.NoError(t, err)
ensureAllStructFieldsSet(output, "output")
}
func TestBackupManifestVersionCompatibility(t *testing.T) {
defer leaktest.AfterTest(t)()
type testCase struct {
name string
backupVersion roachpb.Version
clusterVersion roachpb.Version
minimumSupportedVersion roachpb.Version
expectedError string
}
binaryVersion := roachpb.Version{Major: 23, Minor: 1}
tests := []testCase{
{
name: "same-version-restore",
backupVersion: roachpb.Version{Major: 23, Minor: 1},
clusterVersion: roachpb.Version{Major: 23, Minor: 1},
minimumSupportedVersion: roachpb.Version{Major: 22, Minor: 2},
},
{
name: "previous-version-restore",
backupVersion: roachpb.Version{Major: 22, Minor: 2},
clusterVersion: roachpb.Version{Major: 23, Minor: 1},
minimumSupportedVersion: roachpb.Version{Major: 22, Minor: 2},
},
{
name: "unfinalized-restore",
backupVersion: roachpb.Version{Major: 23, Minor: 1},
clusterVersion: roachpb.Version{Major: 22, Minor: 2},
minimumSupportedVersion: roachpb.Version{Major: 22, Minor: 2},
expectedError: "backup from version 23.1 is newer than current version 22.2",
},
{
name: "alpha-restore",
backupVersion: roachpb.Version{Major: 1000022, Minor: 2, Internal: 14},
clusterVersion: roachpb.Version{Major: 23, Minor: 1},
minimumSupportedVersion: roachpb.Version{Major: 22, Minor: 2},
expectedError: "backup from version 1000022.2-upgrading-to-1000023.1-step-014 is newer than current version 23.1",
},
{
name: "old-backup",
backupVersion: roachpb.Version{Major: 22, Minor: 1},
clusterVersion: roachpb.Version{Major: 23, Minor: 1},
minimumSupportedVersion: roachpb.Version{Major: 22, Minor: 2},
expectedError: "backup from version 22.1 is older than the minimum restorable version 22.2",
},
{
name: "legacy-version-backup",
backupVersion: roachpb.Version{},
clusterVersion: roachpb.Version{Major: 23, Minor: 1},
minimumSupportedVersion: roachpb.Version{Major: 22, Minor: 2},
expectedError: "the backup is from a version older than our minimum restorable version 22.2",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
settings := cluster.MakeTestingClusterSettingsWithVersions(binaryVersion, tc.minimumSupportedVersion, false)
require.NoError(t, clusterversion.Initialize(context.Background(), tc.clusterVersion, &settings.SV))
version := clusterversion.MakeVersionHandle(&settings.SV, binaryVersion, tc.minimumSupportedVersion)
manifest := []backuppb.BackupManifest{{ClusterVersion: tc.backupVersion}}
err := checkBackupManifestVersionCompatability(context.Background(), version, manifest, false /* unsafe */)
if tc.expectedError == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedError)
}
require.NoError(t, checkBackupManifestVersionCompatability(context.Background(), version, manifest, true /* unsafe */))
})
}
}
func TestAllocateDescriptorRewrites(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
opName := redact.SafeString("allocate-descriptor-rewrites")
s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
var defaultDB *dbdesc.Mutable
var db1 *dbdesc.Mutable
var db2 *dbdesc.Mutable
var schema1 *schemadesc.Mutable
var schema2 *schemadesc.Mutable
var table1 *tabledesc.Mutable
var table2 *tabledesc.Mutable
var type1 *typedesc.Mutable
var type2 *typedesc.Mutable
var type1array *typedesc.Mutable
var type2array *typedesc.Mutable
var func1 *funcdesc.Mutable
var func2 *funcdesc.Mutable
var planner sql.PlanHookState
srv := s.ApplicationLayer()
execCfg := srv.ExecutorConfig().(sql.ExecutorConfig)
setupPlanner := func() {
plannerAsInterface, cleanup := sql.NewInternalPlanner(
opName,
srv.DB().NewTxn(ctx, "test-allocate-descriptor-rewrite"),
username.NodeUserName(),
&sql.MemoryMetrics{},
&execCfg,
sql.NewInternalSessionData(ctx, execCfg.Settings, opName))
defer cleanup()
planner = plannerAsInterface.(sql.PlanHookState)
}
// This is a fairly expensive call. Individual tests should only call it
// when they specifically need it for cleanup, e.g. after dropping a
// database.
setup := func() {
query := `
DROP DATABASE IF EXISTS db1 cascade;
DROP DATABASE IF EXISTS db2 cascade;
DROP DATABASE IF EXISTS defaultdb cascade;
CREATE DATABASE db1;
CREATE DATABASE db2;
CREATE DATABASE defaultdb;
CREATE SCHEMA schema1;
CREATE SCHEMA schema2;
CREATE TABLE schema1.table1(id int);
CREATE TABLE schema1.table2(id int);
CREATE FUNCTION schema1.func1(a INT, b INT) RETURNS INT IMMUTABLE LEAKPROOF LANGUAGE SQL AS 'SELECT a + b';
CREATE FUNCTION schema1.func2(a INT, b INT) RETURNS INT IMMUTABLE LEAKPROOF LANGUAGE SQL AS 'SELECT a - b';
CREATE TYPE schema1.type1 AS (x INT, y INT);
CREATE TYPE schema1.type2 AS ENUM ('a', 'b');
`
for _, cmd := range strings.Split(query, "\n") {
_, err := db.ExecContext(ctx, cmd)
require.NoError(t, err)
}
setupPlanner()
txn := planner.InternalSQLTxn()
col := txn.Descriptors()
cat, err := col.GetAll(ctx, kvDB.NewTxn(ctx, "test-get-all"))
require.NoError(t, err)
sqlDescs := cat.OrderedDescriptors()
type nameAndType struct {
name string
objType string
}
asMutable := func(sqlDesc catalog.Descriptor) catalog.MutableDescriptor {
return sqlDesc.NewBuilder().BuildExistingMutable()
}
for _, sqlDesc := range sqlDescs {
name := sqlDesc.GetName()
objType := sqlDesc.GetObjectType()
nt := nameAndType{name: name, objType: string(objType)}
switch nt {
case nameAndType{name: "defaultdb", objType: "database"}:
defaultDB = asMutable(sqlDesc).(*dbdesc.Mutable)
case nameAndType{name: "db1", objType: "database"}:
db1 = asMutable(sqlDesc).(*dbdesc.Mutable)
case nameAndType{name: "db2", objType: "database"}:
db2 = asMutable(sqlDesc).(*dbdesc.Mutable)
case nameAndType{name: "schema1", objType: "schema"}:
schema1 = asMutable(sqlDesc).(*schemadesc.Mutable)
case nameAndType{name: "schema2", objType: "schema"}:
schema2 = asMutable(sqlDesc).(*schemadesc.Mutable)
case nameAndType{name: "table1", objType: "table"}:
table1 = asMutable(sqlDesc).(*tabledesc.Mutable)
case nameAndType{name: "table2", objType: "table"}:
table2 = asMutable(sqlDesc).(*tabledesc.Mutable)
case nameAndType{name: "type1", objType: "type"}:
type1 = asMutable(sqlDesc).(*typedesc.Mutable)
case nameAndType{name: "type2", objType: "type"}:
type2 = asMutable(sqlDesc).(*typedesc.Mutable)
case nameAndType{name: "_type1", objType: "type"}:
type1array = asMutable(sqlDesc).(*typedesc.Mutable)
case nameAndType{name: "_type2", objType: "type"}:
type2array = asMutable(sqlDesc).(*typedesc.Mutable)
case nameAndType{name: "func1", objType: "routine"}:
func1 = asMutable(sqlDesc).(*funcdesc.Mutable)
case nameAndType{name: "func2", objType: "routine"}:
func2 = asMutable(sqlDesc).(*funcdesc.Mutable)
}
}
}
setup()
validateSelfIDs := func(
rewrites jobspb.DescRewriteMap,
expected []catalog.Descriptor,
) error {
if len(rewrites) != len(expected) {
return errors.Newf("expected %d rewrites, got %d", len(expected), len(rewrites))
}
for _, desc := range expected {
rewrite, ok := rewrites[desc.GetID()]
if !ok {
return errors.Newf("no rewrite found for %v", desc)
}
if rewrite.ID == desc.GetID() {
return errors.Newf("expected new ID for %v", desc)
}
}
return nil
}
t.Run("allocateDescriptorRewrite", func(t *testing.T) {
t.Run("succeeds on empty input", func(t *testing.T) {
rewrites, _, err := allocateDescriptorRewrites(
ctx,
planner,
nil,
nil,
nil,
nil,
nil,
nil,
0,
tree.RestoreOptions{},
"",
"",
false)
require.NoError(t, err)
require.Equal(t, jobspb.DescRewriteMap{}, rewrites)
})
t.Run("allocates into existing db", func(t *testing.T) {
rewrites, _, err := allocateDescriptorRewrites(
ctx,
planner,
map[descpb.ID]*dbdesc.Mutable{
defaultDB.GetID(): defaultDB,
},
map[descpb.ID]*schemadesc.Mutable{
schema1.GetID(): schema1,
schema2.GetID(): schema2,
},
map[descpb.ID]*tabledesc.Mutable{
table1.GetID(): table1,
table2.GetID(): table2,
},
map[descpb.ID]*typedesc.Mutable{
type1.GetID(): type1,
type2.GetID(): type2,
type1array.GetID(): type1array,
type2array.GetID(): type2array,
},
nil,
nil,
0,
tree.RestoreOptions{},
db2.GetName(),
"",
false)
require.NoError(t, err)
// DB objects are not reallocated
require.NoError(t, validateSelfIDs(rewrites, []catalog.Descriptor{
schema1, schema2, table1, table2, type1, type2, type1array, type2array,
}))
// New objects' parent ID points to the ID of the target db (db2).
for _, rewrite := range rewrites {
require.Equal(t, db2.GetID(), rewrite.ParentID,
"expected rewrite to have db2 ID as parentID: %v", rewrite)
}
// New schema objects have no parent schema
for _, obj := range []catalog.Descriptor{
schema1, schema2,
} {
rewrite := rewrites[obj.GetID()]
require.Equalf(t, descpb.InvalidID, rewrite.ParentSchemaID,
"expected rewrite to have no parent schema, obj: %v, rewrite: %v", obj, rewrite)
}
// New non-schema objects point to the new ID of the new schema
newSchema1ID := rewrites[schema1.GetID()].ID
for _, obj := range []catalog.Descriptor{
table1, table2, type1, type2, type1array, type2array,
} {
rewrite := rewrites[obj.GetID()]
require.Equalf(t, newSchema1ID, rewrite.ParentSchemaID,
"expected rewrite to have new parent schema, obj: %v, rewrite: %v", obj, rewrite,
)
}
})
t.Run("allocates into new db", func(t *testing.T) {
rewrites, _, err := allocateDescriptorRewrites(
ctx,
planner,
map[descpb.ID]*dbdesc.Mutable{
defaultDB.GetID(): defaultDB,
},
map[descpb.ID]*schemadesc.Mutable{
schema1.GetID(): schema1,
schema2.GetID(): schema2,
},
map[descpb.ID]*tabledesc.Mutable{
table1.GetID(): table1,
table2.GetID(): table2,
},
map[descpb.ID]*typedesc.Mutable{
type1.GetID(): type1,
type2.GetID(): type2,
type1array.GetID(): type1array,
type2array.GetID(): type2array,
},
nil,
[]catalog.DatabaseDescriptor{
defaultDB,
},
0,
tree.RestoreOptions{},
"",
"db3",
false)
require.NoError(t, err)
require.NoError(t, validateSelfIDs(rewrites, []catalog.Descriptor{
defaultDB, schema1, schema2, table1, table2, type1, type2, type1array, type2array,
}))
defaultDBID := defaultDB.GetID()
require.Equal(t, descpb.InvalidID, rewrites[defaultDBID].ParentID)
db3ID := rewrites[defaultDBID].ID
// New objects' parent ID points to the ID of the target db (db3).
for id, rewrite := range rewrites {
if id == defaultDBID {
continue
}
require.Equal(t, db3ID, rewrite.ParentID,
"expected rewrite to have db3 ID as parentID: %v", rewrite)
}
// New schema objects have no parent schema
for _, obj := range []catalog.Descriptor{
schema1, schema2,
} {
rewrite := rewrites[obj.GetID()]
require.Equalf(t, descpb.InvalidID, rewrite.ParentSchemaID,
"expected rewrite to have no parent schema, obj: %v, rewrite: %v", obj, rewrite)
}
// New non-schema objects point to the new ID of the new schema
newSchema1ID := rewrites[schema1.GetID()].ID
for _, obj := range []catalog.Descriptor{
table1, table2, type1, type2, type1array, type2array,
} {
rewrite := rewrites[obj.GetID()]
require.Equalf(t, newSchema1ID, rewrite.ParentSchemaID,
"expected rewrite to have new parent schema, obj: %v, rewrite: %v", obj, rewrite,
)
}
})
t.Run("allocates functions into new db", func(t *testing.T) {
rewrites, _, err := allocateDescriptorRewrites(
ctx,
planner,
map[descpb.ID]*dbdesc.Mutable{
defaultDB.GetID(): defaultDB,
},
nil,
nil,
nil,
map[descpb.ID]*funcdesc.Mutable{
func1.GetID(): func1,
func2.GetID(): func2,
},
[]catalog.DatabaseDescriptor{defaultDB},
0,
tree.RestoreOptions{},
"",
"db3",
false)
require.NoError(t, err)
require.NoError(t, validateSelfIDs(rewrites, []catalog.Descriptor{
defaultDB,
func1,
func2,
}))
newDBID := rewrites[defaultDB.GetID()].ID
require.Equal(t, newDBID, rewrites[func1.GetID()].ParentID)
require.Equal(t, newDBID, rewrites[func2.GetID()].ParentID)
})
t.Run("allocates multiple dbs", func(t *testing.T) {
_, err := db.ExecContext(ctx, "DROP DATABASE IF EXISTS defaultdb")
require.NoError(t, err)
_, err = db.ExecContext(ctx, "DROP DATABASE IF EXISTS db1")
require.NoError(t, err)
_, err = db.ExecContext(ctx, "DROP DATABASE IF EXISTS db2")
require.NoError(t, err)
defer setup()
// Get a new plan state after dropping the DB.
setupPlanner()
rewrites, _, err := allocateDescriptorRewrites(
ctx,
planner,
map[descpb.ID]*dbdesc.Mutable{
defaultDB.GetID(): defaultDB,
db1.GetID(): db1,
db2.GetID(): db2,
},
map[descpb.ID]*schemadesc.Mutable{
schema1.GetID(): schema1,
schema2.GetID(): schema2,
},
map[descpb.ID]*tabledesc.Mutable{
table1.GetID(): table1,
table2.GetID(): table2,
},
map[descpb.ID]*typedesc.Mutable{
type1.GetID(): type1,
type2.GetID(): type2,
type1array.GetID(): type1array,
type2array.GetID(): type2array,
},
map[descpb.ID]*funcdesc.Mutable{
func1.GetID(): func1,
func2.GetID(): func2,
},
[]catalog.DatabaseDescriptor{defaultDB, db1, db2},
0,
tree.RestoreOptions{},
"",
"",
false)
require.NoError(t, err)
// DB objects are reallocated
require.NoError(t, validateSelfIDs(rewrites, []catalog.Descriptor{
defaultDB, db1, db2, schema1, schema2, table1, table2, type1, type2, type1array, type2array, func1, func2,
}))
oldDBIDs := []descpb.ID{defaultDB.GetID(), db1.GetID(), db2.GetID()}
newDefaultDBID := rewrites[defaultDB.GetID()].ID
for oldID, rewrite := range rewrites {
if slices.Contains(oldDBIDs, oldID) {
// This is a DB rewrite and has no parent.
require.Equal(t, descpb.InvalidID, rewrite.ParentID)
continue
}
// This is an object rewrite and its parent is defaultDB.
require.Equal(t, newDefaultDBID, rewrite.ParentID,
"expected rewrite to have new defaultDB ID as parentID: %v", rewrite)
}
// New schema objects have no parent schema
for _, obj := range []catalog.Descriptor{
schema1, schema2,
} {
rewrite := rewrites[obj.GetID()]
require.Equalf(t, descpb.InvalidID, rewrite.ParentSchemaID,
"expected rewrite to have no parent schema, descriptor: %v, rewrite: %v", obj, rewrite)
}
// New non-schema objects point to the new ID of the new schema
newSchema1ID := rewrites[schema1.GetID()].ID
for _, obj := range []catalog.Descriptor{
table1, table2, type1, type2, type1array, type2array, func1, func2,
} {
rewrite := rewrites[obj.GetID()]
require.Equalf(t, newSchema1ID, rewrite.ParentSchemaID,
"expected rewrite to have new parent schema, descriptor: %v, rewrite: %v", obj, rewrite,
)
}
})
t.Run("allocates system descriptors with setupTempDB", func(t *testing.T) {
namespaceTable := systemschema.NamespaceTable.
NewBuilder().BuildExistingMutable().(*tabledesc.Mutable)
usersTable := systemschema.UsersTable.
NewBuilder().BuildExistingMutable().(*tabledesc.Mutable)
rewrites, tempSysDBID, err := allocateDescriptorRewrites(
ctx,
planner,
nil,
nil,
map[descpb.ID]*tabledesc.Mutable{
namespaceTable.GetID(): namespaceTable,
usersTable.GetID(): usersTable,
},
nil,
nil,
nil,
0,
tree.RestoreOptions{},
"",
"",
true,
)
require.NoError(t, err)
require.NotEqual(t, descpb.InvalidID, tempSysDBID,
"tempSysDBID should be allocated when setupTempDB=true")
require.Len(t, rewrites, 2, "should have rewrites for both system tables")
for _, table := range []*tabledesc.Mutable{namespaceTable, usersTable} {
rewrite, ok := rewrites[table.GetID()]
require.True(t, ok, "no rewrite found for system table %s", table.GetName())
require.Equal(t, tempSysDBID, rewrite.ParentID,
"system table %s should have tempSysDBID as parent", table.GetName())
require.Equal(t, descpb.ID(keys.PublicSchemaIDForBackup), rewrite.ParentSchemaID,
"system table %s should have public schema as parent schema", table.GetName())
require.NotEqual(t, descpb.InvalidID, rewrite.ID,
"system table %s should be assigned new ID", table.GetName())
require.NotEqual(t, table.GetID(), rewrite.ID,
"system table %s should be assigned new ID different from original", table.GetName())
}
})
})
}
func TestRestoreWithBackupIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
_, sqlDB, _, cleanupFn := backuptestutils.StartBackupRestoreTestCluster(
t, singleNode, backuptestutils.WithInitFunc(InitManualReplication),
)
defer cleanupFn()
const classicColl = "nodelocal://1/classic"
const rhColl = "nodelocal://1/revision_history"
// Maps collection URIs to the backup IDs within them. Backup IDs are sorted
// in chronological order from oldest to newest.
var backupIDsByColl = make(map[string][]string)
// Maps collection URIs to the full backup subdirectory names within them.
// Subdirs are sorted in chronological order from oldest to newest.
var backupSubdirsByColl = make(map[string][]string)
// Timestamps for specific points in time during backup creation. See comment
// below for details.
classicTimes := make([]string, 4)
rhTimes := make([]string, 6)
// Create a set of backups to use for this test. There exist three chains
// spread across two collections:
//
// Collection 1
// 1. Classic backup chain with incrementals
// a. Full backup @ t0 (0 rows)
// b. Incremental backup @ t1 (1 row)
// c. Incremental backup @ t2 (2 rows)
// d. Incremental backup @ t3 (3 rows)
// 2. Single classic full backup whose end time is before the first chain's
// last incremental.
// a. Full backup @ t2 (2 rows)
//
// Collection 2
// 1. Revision history backup chain
// a. Full backup @ t2
// i. t0 (0 rows)
// ii. t1 (1 row)
// iii. t2 (2 rows)
// b. Incremental backup @ t4
// i. t3 (3 row)
// ii. t4 (4 rows)
{
// Collection 1
sqlDB.Exec(t, "SET SESSION use_backups_with_ids = true")
sqlDB.Exec(t, "CREATE TABLE foo (i INT)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&classicTimes[0])
sqlDB.Exec(
t, "BACKUP TABLE foo INTO $1 AS OF SYSTEM TIME $2::STRING",
classicColl, classicTimes[0],
)
sqlDB.Exec(t, "INSERT INTO foo VALUES (1)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&classicTimes[1])
sqlDB.Exec(
t, "BACKUP TABLE foo INTO LATEST IN $1 AS OF SYSTEM TIME $2::STRING",
classicColl, classicTimes[1],
)
sqlDB.Exec(t, "INSERT INTO foo VALUES (2)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&classicTimes[2])
sqlDB.Exec(
t, "BACKUP TABLE foo INTO LATEST IN $1 AS OF SYSTEM TIME $2::STRING",
classicColl, classicTimes[2],
)
sqlDB.Exec(t, `SET CLUSTER SETTING jobs.debug.pausepoints = 'backup.after.write_first_checkpoint'`)
var fullJobID jobspb.JobID
sqlDB.QueryRow(
t, "BACKUP TABLE FOO INTO $1 AS OF SYSTEM TIME $2::STRING WITH detached",
classicColl, classicTimes[2],
).Scan(&fullJobID)
jobutils.WaitForJobToPause(t, sqlDB, fullJobID)
sqlDB.Exec(t, `SET CLUSTER SETTING jobs.debug.pausepoints = ''`)
sqlDB.Exec(t, "INSERT INTO foo VALUES (3)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&classicTimes[3])
sqlDB.Exec(
t, "BACKUP TABLE foo INTO LATEST IN $1 AS OF SYSTEM TIME $2::STRING",
classicColl, classicTimes[3],
)
sqlDB.Exec(t, "RESUME JOB $1", fullJobID)
jobutils.WaitForJobToSucceed(t, sqlDB, fullJobID)
// Collection 2
sqlDB.Exec(t, "CREATE TABLE bar (i INT)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&rhTimes[0])
sqlDB.Exec(t, "INSERT INTO bar VALUES (1)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&rhTimes[1])
sqlDB.Exec(t, "INSERT INTO bar VALUES (2)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&rhTimes[2])
sqlDB.Exec(
t, "BACKUP TABLE bar INTO $1 AS OF SYSTEM TIME $2::STRING WITH revision_history",
rhColl, rhTimes[2],
)
sqlDB.Exec(t, "INSERT INTO bar VALUES (3)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&rhTimes[3])
sqlDB.Exec(t, "INSERT INTO bar VALUES (4)")
sqlDB.QueryRow(t, "SELECT now()").Scan(&rhTimes[4])
sqlDB.Exec(
t, "BACKUP TABLE bar INTO LATEST IN $1 AS OF SYSTEM TIME $2::STRING WITH revision_history",
rhColl, rhTimes[4],
)
sqlDB.QueryRow(t, "SELECT now()").Scan(&rhTimes[5]) // Out of revision-history bounds time
// Collect IDs from both collections. We append in reverse order just to
// make testing easier so that we start from the full backup and go forward
// in time.
for _, coll := range []string{classicColl, rhColl} {
var ids []string
rows := sqlDB.Query(t, fmt.Sprintf("SHOW BACKUPS IN '%s'", coll))
for rows.Next() {
var id string
var unused any
require.NoError(t, rows.Scan(&id, &unused, &unused))
ids = append([]string{id}, ids...)
}
backupIDsByColl[coll] = ids
}
// Collect subdirs from both collections.
sqlDB.Exec(t, "SET SESSION use_backups_with_ids = false")
for _, coll := range []string{classicColl, rhColl} {
var subdirs []string
rows := sqlDB.Query(t, fmt.Sprintf("SHOW BACKUPS IN '%s'", coll))
for rows.Next() {
var subdir string
require.NoError(t, rows.Scan(&subdir))
subdirs = append(subdirs, subdir)
}
backupSubdirsByColl[coll] = subdirs
}
sqlDB.Exec(t, "SET SESSION use_backups_with_ids = true")
}
testcases := []struct {
name string
collection string
token string
aost string
expectedRows int
expectedErr string
disableIDs bool
}{
{
name: "non-RH restore with ids/full backup",
collection: classicColl,
token: backupIDsByColl[classicColl][0],
expectedRows: 0,
},
{
name: "non-RH restore with ids/non-latest incremental",
collection: classicColl,
token: backupIDsByColl[classicColl][2],
expectedRows: 2,
},
{
name: "non-RH restore with ids/latest incremental",
collection: classicColl,
token: backupIDsByColl[classicColl][4],
expectedRows: 3,
},
{
name: "non-RH restore from latest",
collection: classicColl,
token: "LATEST",
expectedRows: 3,
},
{
name: "legacy/non-RH restore from latest",
collection: classicColl,
token: "LATEST",
expectedRows: 2,
disableIDs: true,
},
{
name: "legacy/non-RH restore from non-latest subdir",
collection: classicColl,
token: backupSubdirsByColl[classicColl][0],
expectedRows: 3,
disableIDs: true,
},
{
name: "RH restore with ids/time before full backup",
collection: rhColl,
token: backupIDsByColl[rhColl][0],
aost: rhTimes[1],
expectedRows: 1,
},
{
name: "RH restore with ids/time before incremental backup",
collection: rhColl,
token: backupIDsByColl[rhColl][1],
aost: rhTimes[3],
expectedRows: 3,
},
{
name: "RH restore before LATEST",
collection: rhColl,
token: "LATEST",
aost: rhTimes[3],
expectedRows: 3,
},
{
name: "legacy/RH restore before incremental backup",
collection: rhColl,
token: backupSubdirsByColl[rhColl][0],
aost: rhTimes[3],
expectedRows: 3,
disableIDs: true,
},
{
name: "legacy/RH restore before LATEST",
collection: rhColl,
token: "LATEST",
aost: rhTimes[3],
expectedRows: 3,
disableIDs: true,
},
{
name: "error/RH restore with time out of bounds of chain",
collection: rhColl,
token: backupIDsByColl[rhColl][1],
aost: rhTimes[5],
expectedErr: "does not cover the specified AS OF SYSTEM TIME",
},
{
name: "error/RH restore with time out of bounds of specified backup",
collection: rhColl,
token: backupIDsByColl[rhColl][1],
aost: rhTimes[1],
expectedErr: "does not cover the specified AS OF SYSTEM TIME",
},
{
name: "error/RH restore from non-RH backup",
collection: classicColl,
token: backupIDsByColl[classicColl][0],
aost: classicTimes[2],
expectedErr: "not a revision history backup and cannot be used for AS OF SYSTEM TIME restores",
},
{
name: "legacy/restore works on subdir",
collection: classicColl,
token: backupSubdirsByColl[classicColl][0],
expectedRows: 3,
},
{
name: "legacy/LATEST resolves to legacy path",
collection: classicColl,
token: "LATEST",
expectedRows: 2,
disableIDs: true,
},
{
name: "legacy/AOST restore works on subdir",
collection: classicColl,
token: backupSubdirsByColl[classicColl][0],
aost: classicTimes[2],
expectedRows: 2,
},
{
name: "legacy/AOST restore works on LATEST",
collection: classicColl,
token: "LATEST",
aost: classicTimes[2],
expectedRows: 2,
disableIDs: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
if tc.disableIDs {
sqlDB.Exec(t, "SET SESSION use_backups_with_ids = false")
defer sqlDB.Exec(t, "SET SESSION use_backups_with_ids = true")
}
sqlDB.Exec(t, "DROP TABLE IF EXISTS foo")
sqlDB.Exec(t, "DROP TABLE IF EXISTS bar")
var table string
if tc.collection == classicColl {
table = "foo"
} else {
table = "bar"
}
sqlQuery := fmt.Sprintf("RESTORE TABLE %s FROM '%s' IN '%s'", table, tc.token, tc.collection)
if tc.aost != "" {
sqlQuery += fmt.Sprintf(" AS OF SYSTEM TIME '%s'", tc.aost)
}
if tc.expectedErr == "" {
sqlDB.Exec(t, sqlQuery)
var rowCount int
sqlDB.QueryRow(t, fmt.Sprintf("SELECT count(*) FROM %s", table)).Scan(&rowCount)
require.Equal(t, tc.expectedRows, rowCount)
} else {
sqlDB.ExpectErr(t, tc.expectedErr, sqlQuery)
}
})
}
} | go | github | https://github.com/cockroachdb/cockroach | pkg/backup/restore_planning_test.go |
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_target_proxy
version_added: "2.4"
short_description: Create, Update or Destroy a Target_Proxy.
description:
- Create, Update or Destroy a Target_Proxy. See
U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
More details on the Target_Proxy API can be found at
U(https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies#resource-representations).
requirements:
- "python >= 2.6"
- "google-api-python-client >= 1.6.2"
- "google-auth >= 0.9.0"
- "google-auth-httplib2 >= 0.0.2"
notes:
- Currently only supports global HTTP proxy.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
target_proxy_name:
description:
- Name of the Target_Proxy.
required: true
target_proxy_type:
description:
- Type of Target_Proxy. HTTP, HTTPS or SSL. Only HTTP is currently supported.
required: true
url_map_name:
description:
- Name of the Url Map. Required if type is HTTP or HTTPS proxy.
required: false
'''
EXAMPLES = '''
- name: Create Minimum HTTP Target_Proxy
gcp_target_proxy:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
target_proxy_name: my-target_proxy
target_proxy_type: HTTP
url_map_name: my-url-map
state: present
'''
RETURN = '''
state:
description: state of the Target_Proxy
returned: Always.
type: str
sample: present
updated_target_proxy:
description: True if the target_proxy has been updated. Will not appear on
initial target_proxy creation.
returned: if the target_proxy has been updated.
type: bool
sample: true
target_proxy_name:
description: Name of the Target_Proxy
returned: Always
type: str
sample: my-target-proxy
target_proxy_type:
description: Type of Target_Proxy. One of HTTP, HTTPS or SSL.
returned: Always
type: str
sample: HTTP
target_proxy:
description: GCP Target_Proxy dictionary
returned: Always. Refer to GCP documentation for detailed field descriptions.
type: dict
sample: { "name": "my-target-proxy", "urlMap": "..." }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import get_google_api_client, GCPUtils
USER_AGENT_PRODUCT = 'ansible-target_proxy'
USER_AGENT_VERSION = '0.0.1'
def _build_target_proxy_dict(params, project_id=None):
"""
Reformat target proxy parameters from AnsibleModule into a GCP API dict.
:param params: Params from AnsibleModule object
:type params: ``dict``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: dictionary suitable for submission to the GCP TargetHttpProxy API.
:rtype: ``dict``
"""
url = ''
if project_id:
url = GCPUtils.build_googleapi_url(project_id)
gcp_dict = GCPUtils.params_to_gcp_dict(params, 'target_proxy_name')
if 'urlMap' in gcp_dict:
gcp_dict['urlMap'] = '%s/global/urlMaps/%s' % (url,
gcp_dict['urlMap'])
return gcp_dict
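# Illustrative example (assumed output shape, not from the original module):
# _build_target_proxy_dict({'target_proxy_name': 'my-proxy',
#                           'url_map': 'my-url-map'}, 'my-project')
# would yield something like:
#   {'name': 'my-proxy',
#    'urlMap': 'https://www.googleapis.com/compute/v1/projects/my-project'
#              '/global/urlMaps/my-url-map'}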
def get_target_http_proxy(client, name, project_id=None):
"""
Get a Target HTTP Proxy from GCP.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param name: Name of the Target Proxy.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: A dict resp from the respective GCP 'get' request.
:rtype: ``dict``
"""
req = client.targetHttpProxies().get(project=project_id,
targetHttpProxy=name)
return GCPUtils.execute_api_client_req(req, raise_404=False)
def create_target_http_proxy(client, params, project_id):
"""
Create a new Target_Proxy.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_target_proxy_dict(params, project_id)
try:
req = client.targetHttpProxies().insert(project=project_id,
body=gcp_dict)
return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
if not return_data:
return_data = get_target_http_proxy(client,
name=params['target_proxy_name'],
project_id=project_id)
return (True, return_data)
except:
raise
def delete_target_http_proxy(client, name, project_id):
"""
Delete a Target_Proxy.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param name: Name of the Target Proxy.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
try:
req = client.targetHttpProxies().delete(
project=project_id, targetHttpProxy=name)
return_data = GCPUtils.execute_api_client_req(req, client)
return (True, return_data)
except:
raise
def update_target_http_proxy(client, target_proxy, params, name, project_id):
"""
Update a HTTP Target_Proxy. Currently only the Url Map can be updated.
If the target_proxy has not changed, the update will not occur.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param target_proxy: Name of the Target Proxy.
:type target_proxy: ``dict``
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:param name: Name of the Target Proxy.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_target_proxy_dict(params, project_id)
GCPUtils.are_params_equal(target_proxy, gcp_dict)
if target_proxy['urlMap'] == gcp_dict['urlMap']:
return (False, 'no update necessary')
try:
req = client.targetHttpProxies().setUrlMap(project=project_id,
targetHttpProxy=name,
body={"urlMap": gcp_dict['urlMap']})
return_data = GCPUtils.execute_api_client_req(
req, client=client, raw=False)
return (True, return_data)
except:
raise
def main():
module = AnsibleModule(argument_spec=dict(
target_proxy_name=dict(required=True),
target_proxy_type=dict(required=True, choices=['HTTP']),
url_map_name=dict(required=False),
state=dict(required=True, choices=['absent', 'present']),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
pem_file=dict(),
credentials_file=dict(),
project_id=dict(), ), )
client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
params = {}
params['state'] = module.params.get('state')
params['target_proxy_name'] = module.params.get('target_proxy_name')
params['target_proxy_type'] = module.params.get('target_proxy_type')
params['url_map'] = module.params.get('url_map_name', None)
changed = False
json_output = {'state': params['state']}
target_proxy = get_target_http_proxy(client,
name=params['target_proxy_name'],
project_id=conn_params['project_id'])
if not target_proxy:
if params['state'] == 'absent':
# Doesn't exist in GCE, and state==absent.
changed = False
module.fail_json(
msg="Cannot delete unknown target_proxy: %s" %
(params['target_proxy_name']))
else:
# Create
changed, json_output['target_proxy'] = create_target_http_proxy(client,
params=params,
project_id=conn_params['project_id'])
elif params['state'] == 'absent':
# Delete
changed, json_output['target_proxy'] = delete_target_http_proxy(client,
name=params['target_proxy_name'],
project_id=conn_params['project_id'])
else:
changed, json_output['target_proxy'] = update_target_http_proxy(client,
target_proxy=target_proxy,
params=params,
name=params['target_proxy_name'],
project_id=conn_params['project_id'])
json_output['updated_target_proxy'] = changed
json_output['changed'] = changed
json_output.update(params)
module.exit_json(**json_output)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
import timeit
from concurrent.futures import ThreadPoolExecutor, wait
from .common import Benchmark, safe_import
with safe_import():
from scipy.signal import (lfilter, firwin, decimate, butter, sosfilt,
medfilt2d)
class Decimate(Benchmark):
param_names = ['q', 'ftype', 'zero_phase']
params = [
[2, 10, 30],
['iir', 'fir'],
[True, False]
]
def setup(self, q, ftype, zero_phase):
np.random.seed(123456)
sample_rate = 10000.
t = np.arange(int(1e6), dtype=np.float64) / sample_rate
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*4e3*t)
def time_decimate(self, q, ftype, zero_phase):
decimate(self.sig, q, ftype=ftype, zero_phase=zero_phase)
class Lfilter(Benchmark):
param_names = ['n_samples', 'numtaps']
params = [
[1e3, 50e3, 1e6],
[9, 23, 51]
]
def setup(self, n_samples, numtaps):
np.random.seed(125678)
sample_rate = 25000.
t = np.arange(n_samples, dtype=np.float64) / sample_rate
nyq_rate = sample_rate / 2.
cutoff_hz = 3000.0
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
self.coeff = firwin(numtaps, cutoff_hz/nyq_rate)
def time_lfilter(self, n_samples, numtaps):
lfilter(self.coeff, 1.0, self.sig)
class ParallelSosfilt(Benchmark):
timeout = 100
timer = timeit.default_timer
param_names = ['n_samples', 'threads']
params = [
[1e3, 10e3],
[1, 2, 4]
]
def setup(self, n_samples, threads):
self.filt = butter(8, 8e-6, "lowpass", output="sos")
self.data = np.arange(int(n_samples) * 3000).reshape(int(n_samples), 3000)
self.chunks = np.array_split(self.data, threads)
def time_sosfilt(self, n_samples, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
futures = []
for i in range(threads):
futures.append(pool.submit(sosfilt, self.filt, self.chunks[i]))
wait(futures)
class Sosfilt(Benchmark):
param_names = ['n_samples', 'order']
params = [
[1000, 1000000],
[6, 20]
]
def setup(self, n_samples, order):
self.sos = butter(order, [0.1575, 0.1625], 'band', output='sos')
self.y = np.random.RandomState(0).randn(n_samples)
def time_sosfilt_basic(self, n_samples, order):
sosfilt(self.sos, self.y)
class MedFilt2D(Benchmark):
param_names = ['threads']
params = [[1, 2, 4]]
def setup(self, threads):
np.random.seed(8176)
self.chunks = np.array_split(np.random.randn(250, 349), threads)
def _medfilt2d(self, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
wait({pool.submit(medfilt2d, chunk, 5) for chunk in self.chunks})
def time_medfilt2d(self, threads):
self._medfilt2d(threads)
def peakmem_medfilt2d(self, threads):
self._medfilt2d(threads) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <jordi.ballester@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "HR Payroll Account Operating Unit",
"version": "1.0",
"license": 'AGPL-3',
"author": "Eficent",
"category": "Generic Modules/Human Resources",
"depends": ["hr_payroll_account", "hr_contract_operating_unit"],
"description": """
HR Payroll Account Operating Unit
=================================
Adds a the operating unit to the account moves created by the payslip,
based on the employee's Operating Unit defined in the Contract.
""",
"data": [],
'demo': [],
'test': [
],
'installable': True,
} | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Inception V3 application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
class InceptionV3Test(test.TestCase):
def test_with_top(self):
model = keras.applications.InceptionV3(weights=None)
self.assertEqual(model.output_shape, (None, 1000))
def test_no_top(self):
model = keras.applications.InceptionV3(weights=None, include_top=False)
self.assertEqual(model.output_shape, (None, None, None, 2048))
def test_with_pooling(self):
model = keras.applications.InceptionV3(weights=None,
include_top=False,
pooling='avg')
self.assertEqual(model.output_shape, (None, 2048))
if __name__ == '__main__':
test.main() | unknown | codeparrot/codeparrot-clean | ||
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
import sys
from datetime import timedelta
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
# docker (Docker SDK for Python >= 2.0.0)
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py (Docker SDK for Python < 2.0.0)
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
try:
from requests.exceptions import RequestException
except ImportError:
# Either docker-py is no longer using requests, or docker-py isn't around either,
# or docker-py's dependency requests is missing. In any case, define an exception
# class RequestException so that our code doesn't break.
class RequestException(Exception):
pass
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = []
DOCKER_REQUIRED_TOGETHER = [
['client_cert', 'client_key']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
docker_version = None
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def is_image_name_id(name):
"""Check whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
def is_valid_tag(tag, allow_empty=False):
"""Check whether the given string is a valid docker tag name."""
if not tag:
return allow_empty
# See here ("Extended description") for a definition what tags can be:
# https://docs.docker.com/engine/reference/commandline/tag/
return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
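# Illustrative sketch (not part of the original module): how the two validators
# above behave on typical inputs. The sample values are assumptions.
def _demo_name_validation():  # pragma: no cover
    assert is_image_name_id('sha256:' + 'a' * 64)
    assert not is_image_name_id('ubuntu:20.04')
    assert is_valid_tag('v1.2-rc.3')
    assert not is_valid_tag('-leading-dash')  # must start with alphanumeric or '_'
    assert is_valid_tag('', allow_empty=True)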
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
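# Minimal usage sketch (illustrative only): tuples become lists and nested
# structures collapse to plain dicts/lists, so exit_json/fail_json can
# serialize them.
def _demo_sanitize_result():  # pragma: no cover
    nested = {'outer': ({'a': 1}, [2, (3, 4)])}
    assert sanitize_result(nested) == {'outer': [{'a': 1}, [2, [3, 4]]]}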
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def update_tls_hostname(result):
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
if ':' in parsed_url.netloc:
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
            result['tls_hostname'] = parsed_url.netloc
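# Worked example (the host value is an assumption): for 'tcp://192.0.2.10:2376'
# the netloc is '192.0.2.10:2376', so the port is stripped and the default
# tls_hostname becomes '192.0.2.10'.
def _demo_update_tls_hostname():  # pragma: no cover
    result = dict(tls_hostname=None, docker_host='tcp://192.0.2.10:2376')
    update_tls_hostname(result)
    assert result['tls_hostname'] == '192.0.2.10'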
def _get_tls_config(fail_function, **kwargs):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
def get_connect_params(auth, fail_function):
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
else:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = _get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
        # TLS with no certs and no host verification
tls_config = _get_tls_config(verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
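# Minimal sketch of the plain (no TLS) branch above; the auth values are
# assumptions mirroring the module defaults.
def _demo_get_connect_params():  # pragma: no cover
    auth = dict(docker_host=DEFAULT_DOCKER_HOST, tls=False, tls_verify=False,
                cert_path=None, key_path=None, cacert_path=None,
                tls_hostname=None, ssl_version=None, api_version='auto',
                timeout=DEFAULT_TIMEOUT_SECONDS)
    params = get_connect_params(auth, fail_function=None)
    assert params == dict(base_url=DEFAULT_DOCKER_HOST, version='auto',
                          timeout=DEFAULT_TIMEOUT_SECONDS)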
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
"Hint: if you do not need Python 2.6 support, try "
"`pip uninstall docker-py` instead, followed by `pip install docker`.")
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
"SDK for Python) installed together as they use the same namespace and cause a corrupt "
"installation. Please uninstall both packages, and re-install only the docker-py or docker "
"python module (for %s's Python %s). It is recommended to install the docker module if no "
"support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
"can leave the other module in a broken state." % (platform.node(), sys.executable))
if not HAS_DOCKER_PY:
if NEEDS_DOCKER_PY2:
msg = missing_required_lib("Docker SDK for Python: docker")
msg = msg + ", for example via `pip install docker`. The error was: %s"
else:
msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
self.fail(msg % HAS_DOCKER_ERROR)
if self.docker_py_version < LooseVersion(min_docker_version):
msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
if not NEEDS_DOCKER_PY2:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py) for non-Python-2.6 users.
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
            elif self.docker_py_version < LooseVersion('2.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
self.docker_api_version_str = self.version()['ApiVersion']
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.docker_api_version = LooseVersion(self.docker_api_version_str)
if min_docker_api_version is not None:
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value):
if param_value is not None:
# take module parameter value
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return env_value
# take the default
return default_value
@property
def auth_params(self):
# Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
self.log('Getting credentials')
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = self.module.params.get(key)
if self.module.params.get('use_tls'):
            # Support the use_tls option of docker_image.py. This option will be deprecated.
use_tls = self.module.params.get('use_tls')
if use_tls == 'encrypt':
params['tls'] = True
if use_tls == 'verify':
params['validate_certs'] = True
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto'),
cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS),
)
update_tls_hostname(result)
return result
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if 'docker_py_version' in data:
support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
elif not support_docker_py:
msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif self.docker_py_version < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is not None:
try:
self.log("Inspecting container Id %s" % result['Id'])
result = self.inspect_container(container=result['Id'])
self.log("Completed container inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
return result
def get_network(self, name=None, network_id=None):
'''
Lookup a network and return the inspection results.
'''
if name is None and network_id is None:
return None
result = None
if network_id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
result = self.inspect_network(network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image (by name and tag) and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if not images:
            # With API <= 1.20, images pulled from Docker Hub can appear under the name 'docker.io/<name>'
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
                    # Last case: the image may only be listed under the
                    # explicit 'docker.io/<repo>' name (#15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, image_id):
'''
Lookup an image (by ID) and return the inspection results.
'''
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
try:
inspection = self.inspect_image(image_id)
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
return inspection
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if (tags and lookup in tags) or (digests and lookup_digest in digests):
images = [image]
break
return images
def pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
def report_warnings(self, result, warnings_key=None):
'''
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
'''
if warnings_key is None:
warnings_key = ['Warnings']
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn('Docker warning: {0}'.format(warning))
elif isinstance(result, string_types) and result:
self.module.warn('Docker warning: {0}'.format(result))
def inspect_distribution(self, image, **kwargs):
'''
Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
since prior versions did not support accessing private repositories.
'''
if self.docker_py_version < LooseVersion('4.0.0'):
registry = auth.resolve_repository_name(image)[0]
header = auth.get_config_header(self, registry)
if header:
return self._result(self._get(
self._url('/distribution/{0}/json', image),
headers={'X-Registry-Auth': header}
), json=True)
return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
def compare_dict_allow_more_present(av, bv):
'''
Compare two dictionaries for whether every entry of the first is in the second.
'''
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
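# Usage sketch (illustrative only): the first dict may be a subset of the
# second, but a differing value or a missing key fails the comparison.
def _demo_compare_dict_allow_more_present():  # pragma: no cover
    assert compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})
    assert not compare_dict_allow_more_present({'a': 1, 'b': 2}, {'a': 1})
    assert not compare_dict_allow_more_present({'a': 1}, {'a': 2})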
def compare_generic(a, b, method, datatype):
'''
Compare values a and b as described by method and datatype.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``datatype == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
    - ``strict`` (only compare if really equal);
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``datatype`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
    - ``set(dict)``: for ``list``s, ``tuple``s or ``set``s where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
'''
if method == 'ignore':
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise, not equal for values, and equal
# if the other is empty for set/list/dict
if datatype == 'value':
return False
# For allow_more_present, allow a to be None
if method == 'allow_more_present' and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if datatype == 'value':
return a == b
elif datatype == 'list':
if method == 'strict':
return a == b
else:
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
elif datatype == 'dict':
if method == 'strict':
return a == b
else:
return compare_dict_allow_more_present(a, b)
elif datatype == 'set':
set_a = set(a)
set_b = set(b)
if method == 'strict':
return set_a == set_b
else:
return set_b >= set_a
elif datatype == 'set(dict)':
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == 'strict':
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
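# Illustrative examples of the method/datatype combinations documented above;
# all values are assumptions.
def _demo_compare_generic():  # pragma: no cover
    assert compare_generic([1, 2], [0, 1, 9, 2], 'allow_more_present', 'list')  # ordered subsequence
    assert not compare_generic([2, 1], [1, 2], 'allow_more_present', 'list')    # order matters for lists
    assert compare_generic([2, 1], [1, 2], 'strict', 'set')                     # order ignored for sets
    assert compare_generic([{'a': 1}], [{'a': 1, 'b': 2}], 'strict', 'set(dict)')
    assert compare_generic(None, [], 'strict', 'list')                          # None equals empty iterable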
class DifferenceTracker(object):
def __init__(self):
self._diff = []
def add(self, name, parameter=None, active=None):
self._diff.append(dict(
name=name,
parameter=parameter,
active=active,
))
def merge(self, other_tracker):
self._diff.extend(other_tracker._diff)
@property
def empty(self):
return len(self._diff) == 0
def get_before_after(self):
'''
Return texts ``before`` and ``after``.
'''
before = dict()
after = dict()
for item in self._diff:
before[item['name']] = item['active']
after[item['name']] = item['parameter']
return before, after
def has_difference_for(self, name):
'''
        Returns whether a difference exists for the given name.
'''
return any(diff for diff in self._diff if diff['name'] == name)
def get_legacy_docker_container_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = []
for entry in self._diff:
item = dict()
item[entry['name']] = dict(
parameter=entry['parameter'],
container=entry['active'],
)
result.append(item)
return result
def get_legacy_docker_diffs(self):
'''
        Return the names of the differing options (legacy format).
'''
result = [entry['name'] for entry in self._diff]
return result
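# Usage sketch (illustrative only) for the tracker above; the option name and
# values are assumptions.
def _demo_difference_tracker():  # pragma: no cover
    tracker = DifferenceTracker()
    tracker.add('memory', parameter='256m', active='128m')
    assert not tracker.empty
    assert tracker.has_difference_for('memory')
    assert tracker.get_before_after() == ({'memory': '128m'}, {'memory': '256m'})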
def clean_dict_booleans_for_docker_api(data):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters).
'''
result = dict()
if data is not None:
for k, v in data.items():
if v is True:
v = 'true'
elif v is False:
v = 'false'
else:
v = str(v)
result[str(k)] = v
return result
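# Worked example (keys and values are assumptions): booleans become the
# lowercase strings Go expects, and everything else is stringified.
def _demo_clean_dict_booleans():  # pragma: no cover
    options = {'attachable': True, 'internal': False, 'mtu': 1450}
    expected = {'attachable': 'true', 'internal': 'false', 'mtu': '1450'}
    assert clean_dict_booleans_for_docker_api(options) == expected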
def convert_duration_to_nanosecond(time_str):
"""
Return time duration in nanosecond.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
r'((?P<minutes>\d+)m(?!s))?'
r'((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?'
r'((?P<microseconds>\d+)us)?)$'
)
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
parts = parts.groupdict()
time_params = {}
for (name, value) in parts.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) * 10 ** 3
return time_in_nanoseconds
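# Worked examples for the duration grammar above (values are assumptions):
# '1m30s' -> 90 s -> 90 * 10**9 ns; '500ms' -> 500 * 10**6 ns.
def _demo_convert_duration():  # pragma: no cover
    assert convert_duration_to_nanosecond('1m30s') == 90 * 10 ** 9
    assert convert_duration_to_nanosecond('500ms') == 500 * 10 ** 6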
def parse_healthcheck(healthcheck):
"""
Return dictionary of healthcheck parameters and boolean if
healthcheck defined in image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get('test')):
return None, None
result = dict()
# All supported healthcheck parameters
options = dict(
test='test',
interval='interval',
timeout='timeout',
start_period='start_period',
retries='retries'
)
duration_options = ['interval', 'timeout', 'start_period']
for (key, value) in options.items():
if value in healthcheck:
if healthcheck.get(value) is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value in duration_options:
time = convert_duration_to_nanosecond(healthcheck.get(value))
if time:
result[key] = time
elif healthcheck.get(value):
result[key] = healthcheck.get(value)
if key == 'test':
if isinstance(result[key], (tuple, list)):
result[key] = [str(e) for e in result[key]]
else:
result[key] = ['CMD-SHELL', str(result[key])]
elif key == 'retries':
try:
result[key] = int(result[key])
except ValueError:
raise ValueError(
'Cannot parse number of retries for healthcheck. '
'Expected an integer, got "{0}".'.format(result[key])
)
if result['test'] == ['NONE']:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
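# Usage sketch (illustrative only): a string test is wrapped in CMD-SHELL,
# durations are converted to nanoseconds, and test=['NONE'] signals that the
# image's healthcheck should be disabled.
def _demo_parse_healthcheck():  # pragma: no cover
    result, disabled = parse_healthcheck(dict(test='curl -f http://localhost/',
                                              interval='30s', retries=3))
    assert result['test'] == ['CMD-SHELL', 'curl -f http://localhost/']
    assert result['interval'] == 30 * 10 ** 9
    assert result['retries'] == 3 and disabled is False
    assert parse_healthcheck(dict(test=['NONE'])) == (None, True)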
def omit_none_from_dict(d):
"""
Return a copy of the dictionary with all keys with value None omitted.
"""
return dict((k, v) for (k, v) in d.items() if v is not None) | unknown | codeparrot/codeparrot-clean | ||
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'b', # 0x6c
'd', # 0x6d
'f', # 0x6e
'm', # 0x6f
'n', # 0x70
'p', # 0x71
'r', # 0x72
'r', # 0x73
's', # 0x74
't', # 0x75
'z', # 0x76
'g', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'p', # 0x7d
'', # 0x7e
'', # 0x7f
'b', # 0x80
'd', # 0x81
'f', # 0x82
'g', # 0x83
'k', # 0x84
'l', # 0x85
'm', # 0x86
'n', # 0x87
'p', # 0x88
'r', # 0x89
's', # 0x8a
'', # 0x8b
'v', # 0x8c
'x', # 0x8d
'z', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
) | unknown | codeparrot/codeparrot-clean | ||
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"errors"
"fmt"
"maps"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/wlog"
)
// histogramRecord combines both RefHistogramSample and RefFloatHistogramSample
// to simplify the WAL replay.
type histogramRecord struct {
ref chunks.HeadSeriesRef
t int64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
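// Illustrative only: exactly one of the two histogram pointers is set per
// record, which is how the replay code below picks the append path.
func _demoHistogramRecordKind(rec histogramRecord) string {
	if rec.h != nil {
		return "integer histogram"
	}
	return "float histogram"
}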
type seriesRefSet struct {
refs map[chunks.HeadSeriesRef]struct{}
mtx sync.Mutex
}
func (s *seriesRefSet) merge(other map[chunks.HeadSeriesRef]struct{}) {
s.mtx.Lock()
defer s.mtx.Unlock()
maps.Copy(s.refs, other)
}
func (s *seriesRefSet) count() int {
s.mtx.Lock()
defer s.mtx.Unlock()
return len(s.refs)
}
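// Minimal sketch (not part of the replay path) of how the workers use
// seriesRefSet: each worker merges the refs it could not resolve, and the
// shared map deduplicates refs reported by several workers.
func _demoSeriesRefSet() int {
	set := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{})}
	set.merge(map[chunks.HeadSeriesRef]struct{}{1: {}, 2: {}})
	set.merge(map[chunks.HeadSeriesRef]struct{}{2: {}, 3: {}})
	return set.count() // 3: ref 2 is counted once
}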
func counterAddNonZero(v *prometheus.CounterVec, value float64, lvs ...string) {
if value > 0 {
v.WithLabelValues(lvs...).Add(value)
}
}
func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
// Track number of missing series records that were referenced by other records.
unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}}
// Track number of different records that referenced a series we don't know about
// for error reporting.
var unknownSampleRefs atomic.Uint64
var unknownExemplarRefs atomic.Uint64
var unknownHistogramRefs atomic.Uint64
var unknownMetadataRefs atomic.Uint64
var unknownTombstoneRefs atomic.Uint64
// Track number of series records that had overlapping m-map chunks.
var mmapOverlappingChunks atomic.Uint64
// Start workers that each process samples for a partition of the series ID space.
var (
wg sync.WaitGroup
concurrency = h.opts.WALReplayConcurrency
processors = make([]walSubsetProcessor, concurrency)
exemplarsInput chan record.RefExemplar
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
decoded = make(chan any, 10)
decodeErr, seriesCreationErr error
)
defer func() {
// For CorruptionErr ensure to terminate all workers before exiting.
_, ok := err.(*wlog.CorruptionErr)
if ok || seriesCreationErr != nil {
for i := range concurrency {
processors[i].closeAndDrain()
}
close(exemplarsInput)
wg.Wait()
}
}()
wg.Add(concurrency)
for i := range concurrency {
processors[i].setup()
go func(wp *walSubsetProcessor) {
missingSeries, unknownSamples, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks)
unknownSeriesRefs.merge(missingSeries)
unknownSampleRefs.Add(unknownSamples)
mmapOverlappingChunks.Add(overlapping)
unknownHistogramRefs.Add(unknownHistograms)
wg.Done()
}(&processors[i])
}
wg.Add(1)
exemplarsInput = make(chan record.RefExemplar, 300)
go func(input <-chan record.RefExemplar) {
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
var err error
defer wg.Done()
for e := range input {
ms := h.series.getByID(e.Ref)
if ms == nil {
unknownExemplarRefs.Inc()
missingSeries[e.Ref] = struct{}{}
continue
}
			// At the moment the only possible error here is an out-of-order exemplar, which we
			// shouldn't see when replaying the WAL, so log a warning if we do.
err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) {
h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err)
}
}
unknownSeriesRefs.merge(missingSeries)
}(exemplarsInput)
go func() {
defer close(decoded)
var err error
dec := record.NewDecoder(syms, h.logger)
for r.Next() {
switch dec.Type(r.Record()) {
case record.Series:
series := h.wlReplaySeriesPool.Get()[:0]
series, err = dec.Series(r.Record(), series)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode series: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- series
case record.Samples:
samples := h.wlReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(r.Record(), samples)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- samples
case record.Tombstones:
tstones := h.wlReplaytStonesPool.Get()[:0]
tstones, err = dec.Tombstones(r.Record(), tstones)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode tombstones: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- tstones
case record.Exemplars:
exemplars := h.wlReplayExemplarsPool.Get()[:0]
exemplars, err = dec.Exemplars(r.Record(), exemplars)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode exemplars: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- exemplars
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
hists := h.wlReplayHistogramsPool.Get()[:0]
hists, err = dec.HistogramSamples(r.Record(), hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- hists
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
hists := h.wlReplayFloatHistogramsPool.Get()[:0]
hists, err = dec.FloatHistogramSamples(r.Record(), hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode float histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- hists
case record.Metadata:
meta := h.wlReplayMetadataPool.Get()[:0]
				meta, err = dec.Metadata(r.Record(), meta)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode metadata: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- meta
default:
// Noop.
}
}
}()
// The records are always replayed from the oldest to the newest.
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
Outer:
for d := range decoded {
switch v := d.(type) {
case []record.RefSeries:
for _, walSeries := range v {
mSeries, created, err := h.getOrCreateWithOptionalID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels, false)
if err != nil {
seriesCreationErr = err
break Outer
}
if chunks.HeadSeriesRef(h.lastSeriesID.Load()) < walSeries.Ref {
h.lastSeriesID.Store(uint64(walSeries.Ref))
}
if !created {
multiRef[walSeries.Ref] = mSeries.ref
}
idx := uint64(mSeries.ref) % uint64(concurrency)
processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries}
}
h.wlReplaySeriesPool.Put(v)
case []record.RefSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
// This is a sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
for i := range concurrency {
if len(shards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplaySamplesPool.Put(v)
case []tombstones.Stone:
// Tombstone records will be fairly rare, so not trying to optimise the allocations here.
deleteSeriesShards := make([][]chunks.HeadSeriesRef, concurrency)
for _, s := range v {
if len(s.Intervals) == 1 && s.Intervals[0].Mint == math.MinInt64 && s.Intervals[0].Maxt == math.MaxInt64 {
// This series was fully deleted at this point. This record is only done for stale series at the moment.
mod := uint64(s.Ref) % uint64(concurrency)
deleteSeriesShards[mod] = append(deleteSeriesShards[mod], chunks.HeadSeriesRef(s.Ref))
// If the series is with a different reference, try deleting that.
if r, ok := multiRef[chunks.HeadSeriesRef(s.Ref)]; ok {
mod := uint64(r) % uint64(concurrency)
deleteSeriesShards[mod] = append(deleteSeriesShards[mod], r)
}
continue
}
for _, itv := range s.Intervals {
if itv.Maxt < h.minValidTime.Load() {
continue
}
if r, ok := multiRef[chunks.HeadSeriesRef(s.Ref)]; ok {
// This is a tombstone for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(chunks.HeadSeriesRef(s.Ref), itv.Maxt)
s.Ref = storage.SeriesRef(r)
}
if m := h.series.getByID(chunks.HeadSeriesRef(s.Ref)); m == nil {
unknownTombstoneRefs.Inc()
missingSeries[chunks.HeadSeriesRef(s.Ref)] = struct{}{}
continue
}
h.tombstones.AddInterval(s.Ref, itv)
}
}
for i := range concurrency {
if len(deleteSeriesShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{deletedSeriesRefs: deleteSeriesShards[i]}
deleteSeriesShards[i] = nil
}
}
h.wlReplaytStonesPool.Put(v)
case []record.RefExemplar:
for _, e := range v {
if e.T < h.minValidTime.Load() {
continue
}
if r, ok := multiRef[e.Ref]; ok {
// This is an exemplar for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(e.Ref, e.T)
e.Ref = r
}
exemplarsInput <- e
}
h.wlReplayExemplarsPool.Put(v)
case []record.RefHistogramSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
// This is a histogram sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayHistogramsPool.Put(v)
case []record.RefFloatHistogramSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
// This is a float histogram sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayFloatHistogramsPool.Put(v)
case []record.RefMetadata:
for _, m := range v {
if r, ok := multiRef[m.Ref]; ok {
m.Ref = r
}
s := h.series.getByID(m.Ref)
if s == nil {
unknownMetadataRefs.Inc()
missingSeries[m.Ref] = struct{}{}
continue
}
s.meta = &metadata.Metadata{
Type: record.ToMetricType(m.Type),
Unit: m.Unit,
Help: m.Help,
}
}
h.wlReplayMetadataPool.Put(v)
default:
panic(fmt.Errorf("unexpected decoded type: %T", d))
}
}
unknownSeriesRefs.merge(missingSeries)
if decodeErr != nil {
return decodeErr
}
if seriesCreationErr != nil {
// Drain the channel to unblock the goroutine.
for range decoded {
}
return seriesCreationErr
}
// Signal termination to each worker and wait for it to close its output channel.
for i := range concurrency {
processors[i].closeAndDrain()
}
close(exemplarsInput)
wg.Wait()
if err := r.Err(); err != nil {
return fmt.Errorf("read records: %w", err)
}
if unknownSampleRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load()+unknownTombstoneRefs.Load() > 0 {
h.logger.Warn(
"Unknown series references",
"series", unknownSeriesRefs.count(),
"samples", unknownSampleRefs.Load(),
"exemplars", unknownExemplarRefs.Load(),
"histograms", unknownHistogramRefs.Load(),
"metadata", unknownMetadataRefs.Load(),
"tombstones", unknownTombstoneRefs.Load(),
)
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownSeriesRefs.count()), "series")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownSampleRefs.Load()), "samples")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownExemplarRefs.Load()), "exemplars")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownMetadataRefs.Load()), "metadata")
counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownTombstoneRefs.Load()), "tombstones")
}
if count := mmapOverlappingChunks.Load(); count > 0 {
h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count)
}
return nil
}
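// Illustrative sketch of the two dispatch rules used in loadWAL above: records
// are cut into batches of at most 5000 samples, and each sample is routed to
// the worker owning its series ref modulo the worker count, so any one series
// is always handled by the same goroutine.
func _demoDispatch(samples []record.RefSample, concurrency int) [][]record.RefSample {
	shards := make([][]record.RefSample, concurrency)
	for len(samples) > 0 {
		m := min(len(samples), 5000)
		for _, s := range samples[:m] {
			i := uint64(s.Ref) % uint64(concurrency)
			shards[i] = append(shards[i], s)
		}
		samples = samples[m:]
	}
	return shards
}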
// resetSeriesWithMMappedChunks is only used during the WAL replay.
func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) {
if mSeries.ref != walSeriesRef {
// Checking if the new m-mapped chunks overlap with the already existing ones.
if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 {
if overlapsClosedInterval(
mSeries.mmappedChunks[0].minTime,
mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
mmc[0].minTime,
mmc[len(mmc)-1].maxTime,
) {
h.logger.Debug(
"M-mapped chunks overlap on a duplicate series record",
"series", mSeries.labels().String(),
"oldref", mSeries.ref,
"oldmint", mSeries.mmappedChunks[0].minTime,
"oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
"newref", walSeriesRef,
"newmint", mmc[0].minTime,
"newmaxt", mmc[len(mmc)-1].maxTime,
)
overlapped = true
}
}
}
h.metrics.chunksCreated.Add(float64(len(mmc) + len(oooMmc)))
h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks)))
if mSeries.ooo != nil {
h.metrics.chunksRemoved.Add(float64(len(mSeries.ooo.oooMmappedChunks)))
h.metrics.chunks.Sub(float64(len(mSeries.ooo.oooMmappedChunks)))
}
mSeries.mmappedChunks = mmc
if len(oooMmc) == 0 {
mSeries.ooo = nil
} else {
if mSeries.ooo == nil {
mSeries.ooo = &memSeriesOOOFields{}
}
*mSeries.ooo = memSeriesOOOFields{oooMmappedChunks: oooMmc}
}
// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
if len(mmc) == 0 {
mSeries.mmMaxTime = math.MinInt64
} else {
mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
}
if len(oooMmc) != 0 {
// Mint and maxt can be in any chunk, they are not sorted.
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
for _, ch := range oooMmc {
if ch.minTime < mint {
mint = ch.minTime
}
if ch.maxTime > maxt {
maxt = ch.maxTime
}
}
h.updateMinOOOMaxOOOTime(mint, maxt)
}
// Any samples replayed till now would already be compacted. Resetting the head chunk.
mSeries.nextAt = 0
mSeries.headChunks = nil
mSeries.app = nil
return overlapped
}
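// Worked example (values are assumptions) of the overlap check used above:
// existing m-mapped chunks covering [100, 200] overlap replacement chunks
// covering [150, 250], so such a duplicate series record would be counted.
func _demoOverlap() bool {
	return overlapsClosedInterval(100, 200, 150, 250) // true
}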
type walSubsetProcessor struct {
input chan walSubsetProcessorInputItem
output chan []record.RefSample
histogramsOutput chan []histogramRecord
}
type walSubsetProcessorInputItem struct {
samples []record.RefSample
histogramSamples []histogramRecord
existingSeries *memSeries
walSeriesRef chunks.HeadSeriesRef
deletedSeriesRefs []chunks.HeadSeriesRef
}
func (wp *walSubsetProcessor) setup() {
wp.input = make(chan walSubsetProcessorInputItem, 300)
wp.output = make(chan []record.RefSample, 300)
wp.histogramsOutput = make(chan []histogramRecord, 300)
}
func (wp *walSubsetProcessor) closeAndDrain() {
close(wp.input)
for range wp.output {
}
for range wp.histogramsOutput {
}
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
func (wp *walSubsetProcessor) reuseBuf() []record.RefSample {
select {
case buf := <-wp.output:
return buf[:0]
default:
}
return nil
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
func (wp *walSubsetProcessor) reuseHistogramBuf() []histogramRecord {
select {
case buf := <-wp.histogramsOutput:
return buf[:0]
default:
}
return nil
}
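// Sketch of the buffer-recycling pattern behind reuseBuf/reuseHistogramBuf:
// finished slices come back on a channel, and the producer takes one with a
// non-blocking receive, falling back to nil (append then allocates fresh).
func _demoReuse(pool chan []record.RefSample) []record.RefSample {
	select {
	case buf := <-pool:
		return buf[:0] // keep capacity, drop contents
	default:
		return nil
	}
}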
// processWALSamples adds the samples it receives to the head and passes
// the buffer received to an output channel for reuse.
// Samples before the minValidTime timestamp are discarded.
func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (map[chunks.HeadSeriesRef]struct{}, uint64, uint64, uint64) {
defer close(wp.output)
defer close(wp.histogramsOutput)
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
var unknownSampleRefs, unknownHistogramRefs, mmapOverlappingChunks uint64
minValidTime := h.minValidTime.Load()
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
appendChunkOpts := chunkOpts{
chunkDiskMapper: h.chunkDiskMapper,
chunkRange: h.chunkRange.Load(),
samplesPerChunk: h.opts.SamplesPerChunk,
}
for in := range wp.input {
if in.existingSeries != nil {
mmc := mmappedChunks[in.walSeriesRef]
oooMmc := oooMmappedChunks[in.walSeriesRef]
if h.resetSeriesWithMMappedChunks(in.existingSeries, mmc, oooMmc, in.walSeriesRef) {
mmapOverlappingChunks++
}
continue
}
for _, s := range in.samples {
ms := h.series.getByID(s.Ref)
if ms == nil {
unknownSampleRefs++
missingSeries[s.Ref] = struct{}{}
continue
}
if s.T <= ms.mmMaxTime {
continue
}
if !value.IsStaleNaN(ms.lastValue) && value.IsStaleNaN(s.V) {
h.numStaleSeries.Inc()
}
if value.IsStaleNaN(ms.lastValue) && !value.IsStaleNaN(s.V) {
h.numStaleSeries.Dec()
}
if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
_ = ms.mmapChunks(h.chunkDiskMapper)
}
if s.T > maxt {
maxt = s.T
}
if s.T < mint {
mint = s.T
}
}
select {
case wp.output <- in.samples:
default:
}
for _, s := range in.histogramSamples {
if s.t < minValidTime {
continue
}
ms := h.series.getByID(s.ref)
if ms == nil {
unknownHistogramRefs++
missingSeries[s.ref] = struct{}{}
continue
}
if s.t <= ms.mmMaxTime {
continue
}
var chunkCreated, newlyStale, staleToNonStale bool
if s.h != nil {
newlyStale = value.IsStaleNaN(s.h.Sum)
if ms.lastHistogramValue != nil {
newlyStale = newlyStale && !value.IsStaleNaN(ms.lastHistogramValue.Sum)
staleToNonStale = value.IsStaleNaN(ms.lastHistogramValue.Sum) && !value.IsStaleNaN(s.h.Sum)
}
_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts)
} else {
newlyStale = value.IsStaleNaN(s.fh.Sum)
if ms.lastFloatHistogramValue != nil {
newlyStale = newlyStale && !value.IsStaleNaN(ms.lastFloatHistogramValue.Sum)
staleToNonStale = value.IsStaleNaN(ms.lastFloatHistogramValue.Sum) && !value.IsStaleNaN(s.fh.Sum)
}
_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts)
}
if newlyStale {
h.numStaleSeries.Inc()
}
if staleToNonStale {
h.numStaleSeries.Dec()
}
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
}
if s.t > maxt {
maxt = s.t
}
if s.t < mint {
mint = s.t
}
}
select {
case wp.histogramsOutput <- in.histogramSamples:
default:
}
if len(in.deletedSeriesRefs) > 0 {
h.deleteSeriesByID(in.deletedSeriesRefs)
}
}
h.updateMinMaxTime(mint, maxt)
return missingSeries, unknownSampleRefs, unknownHistogramRefs, mmapOverlappingChunks
}
func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) {
// Track number of missing series records that were referenced by other records.
unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}}
// Track number of samples, histogram samples, and m-map markers that referenced a series we don't know about
// for error reporting.
var unknownSampleRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64
lastSeq, lastOff := lastMmapRef.Unpack()
// Start workers that each process samples for a partition of the series ID space.
var (
wg sync.WaitGroup
concurrency = h.opts.WALReplayConcurrency
processors = make([]wblSubsetProcessor, concurrency)
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
decodedCh = make(chan any, 10)
decodeErr error
)
defer func() {
// For CorruptionErr ensure to terminate all workers before exiting.
// We also wrap it to identify OOO WBL corruption.
_, ok := err.(*wlog.CorruptionErr)
if ok {
err = &errLoadWbl{err: err}
for i := range concurrency {
processors[i].closeAndDrain()
}
wg.Wait()
}
}()
wg.Add(concurrency)
for i := range concurrency {
processors[i].setup()
go func(wp *wblSubsetProcessor) {
missingSeries, unknownSamples, unknownHistograms := wp.processWBLSamples(h)
unknownSeriesRefs.merge(missingSeries)
unknownSampleRefs.Add(unknownSamples)
unknownHistogramRefs.Add(unknownHistograms)
wg.Done()
}(&processors[i])
}
go func() {
defer close(decodedCh)
dec := record.NewDecoder(syms, h.logger)
for r.Next() {
var err error
rec := r.Record()
switch dec.Type(rec) {
case record.Samples:
samples := h.wlReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(rec, samples)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode samples: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- samples
case record.MmapMarkers:
markers := h.wlReplayMmapMarkersPool.Get()[:0]
markers, err = dec.MmapMarkers(rec, markers)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode mmap markers: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- markers
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
hists := h.wlReplayHistogramsPool.Get()[:0]
hists, err = dec.HistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- hists
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
hists := h.wlReplayFloatHistogramsPool.Get()[:0]
hists, err = dec.FloatHistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode float histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- hists
default:
// Noop.
}
}
}()
// The records are always replayed from the oldest to the newest.
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
for d := range decodedCh {
switch v := d.(type) {
case []record.RefSample:
samples := v
// We split up the samples into parts of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
for i := range concurrency {
if len(shards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplaySamplesPool.Put(v)
case []record.RefMmapMarker:
markers := v
for _, rm := range markers {
seq, off := rm.MmapRef.Unpack()
if seq > lastSeq || (seq == lastSeq && off > lastOff) {
// This m-map chunk from markers was not present during
// the load of mmapped chunks that happened in the head
// initialization.
continue
}
if r, ok := multiRef[rm.Ref]; ok {
rm.Ref = r
}
ms := h.series.getByID(rm.Ref)
if ms == nil {
mmapMarkerUnknownRefs.Inc()
missingSeries[rm.Ref] = struct{}{}
continue
}
idx := uint64(ms.ref) % uint64(concurrency)
processors[idx].input <- wblSubsetProcessorInputItem{mmappedSeries: ms}
}
case []record.RefHistogramSample:
samples := v
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
			// cause thousands of very large in-flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayHistogramsPool.Put(v)
case []record.RefFloatHistogramSample:
samples := v
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
			// cause thousands of very large in-flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
h.wlReplayFloatHistogramsPool.Put(v)
default:
panic(fmt.Errorf("unexpected decodedCh type: %T", d))
}
}
unknownSeriesRefs.merge(missingSeries)
if decodeErr != nil {
return decodeErr
}
// Signal termination to each worker and wait for it to close its output channel.
for i := range concurrency {
processors[i].closeAndDrain()
}
wg.Wait()
if err := r.Err(); err != nil {
return fmt.Errorf("read records: %w", err)
}
if unknownSampleRefs.Load()+unknownHistogramRefs.Load()+mmapMarkerUnknownRefs.Load() > 0 {
h.logger.Warn(
"Unknown series references for ooo WAL replay",
"series", unknownSeriesRefs.count(),
"samples", unknownSampleRefs.Load(),
"histograms", unknownHistogramRefs.Load(),
"mmap_markers", mmapMarkerUnknownRefs.Load(),
)
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownSeriesRefs.count()), "series")
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownSampleRefs.Load()), "samples")
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms")
counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(mmapMarkerUnknownRefs.Load()), "mmap_markers")
}
return nil
}
type errLoadWbl struct {
err error
}
func (e errLoadWbl) Error() string {
return e.err.Error()
}
func (e errLoadWbl) Cause() error {
return e.err
}
func (e errLoadWbl) Unwrap() error {
return e.err
}
type wblSubsetProcessor struct {
input chan wblSubsetProcessorInputItem
output chan []record.RefSample
histogramsOutput chan []histogramRecord
}
type wblSubsetProcessorInputItem struct {
mmappedSeries *memSeries
samples []record.RefSample
histogramSamples []histogramRecord
}
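// setup initializes the processor's channels. The buffer size of 300 bounds
// the number of in-flight batches per worker.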
func (wp *wblSubsetProcessor) setup() {
wp.output = make(chan []record.RefSample, 300)
wp.histogramsOutput = make(chan []histogramRecord, 300)
wp.input = make(chan wblSubsetProcessorInputItem, 300)
}
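// closeAndDrain closes the input channel to signal the worker to stop, then
// drains the output channels until the worker closes them on exit.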
func (wp *wblSubsetProcessor) closeAndDrain() {
close(wp.input)
for range wp.output {
}
for range wp.histogramsOutput {
}
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample {
select {
case buf := <-wp.output:
return buf[:0]
default:
}
return nil
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
func (wp *wblSubsetProcessor) reuseHistogramBuf() []histogramRecord {
select {
case buf := <-wp.histogramsOutput:
return buf[:0]
default:
}
return nil
}
// processWBLSamples adds the samples it receives to the head, passes the
// buffers received back on output channels for reuse, and reports the series
// references it could not resolve.
func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (map[chunks.HeadSeriesRef]struct{}, uint64, uint64) {
defer close(wp.output)
defer close(wp.histogramsOutput)
missingSeries := make(map[chunks.HeadSeriesRef]struct{})
var unknownSampleRefs, unknownHistogramRefs uint64
oooCapMax := h.opts.OutOfOrderCapMax.Load()
// We don't check for minValidTime for ooo samples.
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
for in := range wp.input {
if in.mmappedSeries != nil && in.mmappedSeries.ooo != nil {
			// All samples up to now have been m-mapped, hence clear out the headChunk.
			// If some samples slipped through and went into m-map chunks because of
			// changed chunk size parameters, we do not handle that here.
			// TODO(codesome): see if there is a way to avoid duplicate m-map chunks if
			// the size of the ooo chunk was reduced between restarts.
in.mmappedSeries.ooo.oooHeadChunk = nil
continue
}
for _, s := range in.samples {
ms := h.series.getByID(s.Ref)
if ms == nil {
unknownSampleRefs++
missingSeries[s.Ref] = struct{}{}
continue
}
ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger)
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
}
if ok {
if s.T < mint {
mint = s.T
}
if s.T > maxt {
maxt = s.T
}
}
}
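		// Return the sample buffer for reuse if there is room in the output
		// channel; otherwise drop it for garbage collection.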
select {
case wp.output <- in.samples:
default:
}
for _, s := range in.histogramSamples {
ms := h.series.getByID(s.ref)
if ms == nil {
unknownHistogramRefs++
missingSeries[s.ref] = struct{}{}
continue
}
var chunkCreated bool
var ok bool
if s.h != nil {
ok, chunkCreated, _ = ms.insert(s.t, 0, s.h, nil, h.chunkDiskMapper, oooCapMax, h.logger)
} else {
ok, chunkCreated, _ = ms.insert(s.t, 0, nil, s.fh, h.chunkDiskMapper, oooCapMax, h.logger)
}
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
}
if ok {
if s.t > maxt {
maxt = s.t
}
if s.t < mint {
mint = s.t
}
}
}
select {
case wp.histogramsOutput <- in.histogramSamples:
default:
}
}
h.updateMinOOOMaxOOOTime(mint, maxt)
return missingSeries, unknownSampleRefs, unknownHistogramRefs
}
const (
chunkSnapshotRecordTypeSeries uint8 = 1
chunkSnapshotRecordTypeTombstones uint8 = 2
chunkSnapshotRecordTypeExemplars uint8 = 3
)
type chunkSnapshotRecord struct {
ref chunks.HeadSeriesRef
lset labels.Labels
mc *memChunk
lastValue float64
lastHistogramValue *histogram.Histogram
lastFloatHistogramValue *histogram.FloatHistogram
}
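// encodeToSnapshotRecord appends a snapshot record for this series to b,
// containing its labels and, if present, the open head chunk and last value.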
func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(chunkSnapshotRecordTypeSeries)
buf.PutBE64(uint64(s.ref))
record.EncodeLabels(&buf, s.labels())
buf.PutBE64int64(0) // Backwards-compatibility; was chunkRange but now unused.
s.Lock()
if s.headChunks == nil {
buf.PutUvarint(0)
} else {
enc := s.headChunks.chunk.Encoding()
buf.PutUvarint(1)
buf.PutBE64int64(s.headChunks.minTime)
buf.PutBE64int64(s.headChunks.maxTime)
buf.PutByte(byte(enc))
buf.PutUvarintBytes(s.headChunks.chunk.Bytes())
switch enc {
case chunkenc.EncXOR:
			// Backwards compatibility for the old sampleBuf, which held the last 4 samples.
for range 3 {
buf.PutBE64int64(0)
buf.PutBEFloat64(0)
}
buf.PutBE64int64(0)
buf.PutBEFloat64(s.lastValue)
case chunkenc.EncHistogram:
record.EncodeHistogram(&buf, s.lastHistogramValue)
		default: // chunkenc.EncFloatHistogram.
record.EncodeFloatHistogram(&buf, s.lastFloatHistogramValue)
}
}
s.Unlock()
return buf.Get()
}
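// decodeSeriesFromChunkSnapshot decodes a single series record written by
// encodeToSnapshotRecord.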
func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapshotRecord, err error) {
dec := encoding.Decbuf{B: b}
if flag := dec.Byte(); flag != chunkSnapshotRecordTypeSeries {
return csr, fmt.Errorf("invalid record type %x", flag)
}
csr.ref = chunks.HeadSeriesRef(dec.Be64())
// The label set written to the disk is already sorted.
// TODO: figure out why DecodeLabels calls Sort(), and perhaps remove it.
csr.lset = d.DecodeLabels(&dec)
_ = dec.Be64int64() // Was chunkRange but now unused.
if dec.Uvarint() == 0 {
return csr, err
}
csr.mc = &memChunk{}
csr.mc.minTime = dec.Be64int64()
csr.mc.maxTime = dec.Be64int64()
enc := chunkenc.Encoding(dec.Byte())
	// The underlying bytes get reused later, so make a copy.
chunkBytes := dec.UvarintBytes()
chunkBytesCopy := make([]byte, len(chunkBytes))
copy(chunkBytesCopy, chunkBytes)
chk, err := chunkenc.FromData(enc, chunkBytesCopy)
if err != nil {
return csr, fmt.Errorf("chunk from data: %w", err)
}
csr.mc.chunk = chk
switch enc {
case chunkenc.EncXOR:
		// Backwards compatibility for the old sampleBuf, which held the last 4 samples.
for range 3 {
_ = dec.Be64int64()
_ = dec.Be64Float64()
}
_ = dec.Be64int64()
csr.lastValue = dec.Be64Float64()
case chunkenc.EncHistogram:
csr.lastHistogramValue = &histogram.Histogram{}
record.DecodeHistogram(&dec, csr.lastHistogramValue)
	default: // chunkenc.EncFloatHistogram.
csr.lastFloatHistogramValue = &histogram.FloatHistogram{}
record.DecodeFloatHistogram(&dec, csr.lastFloatHistogramValue)
}
err = dec.Err()
if err != nil && len(dec.B) > 0 {
err = fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return csr, err
}
func encodeTombstonesToSnapshotRecord(tr tombstones.Reader) ([]byte, error) {
buf := encoding.Encbuf{}
buf.PutByte(chunkSnapshotRecordTypeTombstones)
b, err := tombstones.Encode(tr)
if err != nil {
return nil, fmt.Errorf("encode tombstones: %w", err)
}
buf.PutUvarintBytes(b)
return buf.Get(), nil
}
func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) {
dec := encoding.Decbuf{B: b}
if flag := dec.Byte(); flag != chunkSnapshotRecordTypeTombstones {
return nil, fmt.Errorf("invalid record type %x", flag)
}
tr, err := tombstones.Decode(dec.UvarintBytes())
if err != nil {
return tr, fmt.Errorf("decode tombstones: %w", err)
}
return tr, nil
}
const chunkSnapshotPrefix = "chunk_snapshot."
// ChunkSnapshot creates a snapshot of all the series and tombstones in the head.
// It deletes the old chunk snapshots if the chunk snapshot creation is successful.
//
// The chunk snapshot is stored in a directory named chunk_snapshot.N.M and is written
// using the WAL package. N is the last WAL segment present during snapshotting and
// M is the offset in segment N up to which data was written.
//
// The snapshot first contains all series (each in individual records and not sorted), followed by
// tombstones (a single record), and finally exemplars (>= 1 record). Exemplars are in the order they
// were written to the circular buffer.
func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
if h.wal == nil {
		// If we are not storing any WAL, it does not make sense to take a snapshot either.
h.logger.Warn("skipping chunk snapshotting as WAL is disabled")
return &ChunkSnapshotStats{}, nil
}
h.chunkSnapshotMtx.Lock()
defer h.chunkSnapshotMtx.Unlock()
stats := &ChunkSnapshotStats{}
wlast, woffset, err := h.wal.LastSegmentAndOffset()
if err != nil && !errors.Is(err, record.ErrNotFound) {
return stats, fmt.Errorf("get last wal segment and offset: %w", err)
}
_, cslast, csoffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot)
if err != nil && !errors.Is(err, record.ErrNotFound) {
return stats, fmt.Errorf("find last chunk snapshot: %w", err)
}
if wlast == cslast && woffset == csoffset {
// Nothing has been written to the WAL/Head since the last snapshot.
return stats, nil
}
snapshotName := chunkSnapshotDir(wlast, woffset)
cpdir := filepath.Join(h.opts.ChunkDirRoot, snapshotName)
cpdirtmp := cpdir + ".tmp"
stats.Dir = cpdir
if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
return stats, fmt.Errorf("create chunk snapshot dir: %w", err)
}
cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType())
if err != nil {
return stats, fmt.Errorf("open chunk snapshot: %w", err)
}
// Ensures that an early return caused by an error doesn't leave any tmp files.
defer func() {
cp.Close()
os.RemoveAll(cpdirtmp)
}()
var (
buf []byte
recs [][]byte
)
// Add all series to the snapshot.
stripeSize := h.series.size
for i := range stripeSize {
h.series.locks[i].RLock()
for _, s := range h.series.series[i] {
start := len(buf)
buf = s.encodeToSnapshotRecord(buf)
if len(buf[start:]) == 0 {
continue // All contents discarded.
}
recs = append(recs, buf[start:])
// Flush records in 10 MB increments.
if len(buf) > 10*1024*1024 {
if err := cp.Log(recs...); err != nil {
h.series.locks[i].RUnlock()
return stats, fmt.Errorf("flush records: %w", err)
}
buf, recs = buf[:0], recs[:0]
}
}
stats.TotalSeries += len(h.series.series[i])
h.series.locks[i].RUnlock()
}
// Add tombstones to the snapshot.
tombstonesReader, err := h.Tombstones()
if err != nil {
return stats, fmt.Errorf("get tombstones: %w", err)
}
rec, err := encodeTombstonesToSnapshotRecord(tombstonesReader)
if err != nil {
return stats, fmt.Errorf("encode tombstones: %w", err)
}
recs = append(recs, rec)
// Flush remaining series records and tombstones.
if err := cp.Log(recs...); err != nil {
return stats, fmt.Errorf("flush records: %w", err)
}
buf = buf[:0]
// Add exemplars in the snapshot.
	// We log in batches, with each record holding up to 10000 exemplars.
// Assuming 100 bytes (overestimate) per exemplar, that's ~1MB.
maxExemplarsPerRecord := 10000
batch := make([]record.RefExemplar, 0, maxExemplarsPerRecord)
enc := record.Encoder{}
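	// flushExemplars encodes the pending batch as a single snapshot record,
	// logs it, and resets the batch.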
flushExemplars := func() error {
if len(batch) == 0 {
return nil
}
buf = buf[:0]
encbuf := encoding.Encbuf{B: buf}
encbuf.PutByte(chunkSnapshotRecordTypeExemplars)
enc.EncodeExemplarsIntoBuffer(batch, &encbuf)
if err := cp.Log(encbuf.Get()); err != nil {
return fmt.Errorf("log exemplars: %w", err)
}
buf, batch = buf[:0], batch[:0]
return nil
}
err = h.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
if len(batch) >= maxExemplarsPerRecord {
if err := flushExemplars(); err != nil {
return fmt.Errorf("flush exemplars: %w", err)
}
}
ms := h.series.getByHash(seriesLabels.Hash(), seriesLabels)
if ms == nil {
		// It is possible that the exemplar refers to an old series. We discard such exemplars.
return nil
}
batch = append(batch, record.RefExemplar{
Ref: ms.ref,
T: e.Ts,
V: e.Value,
Labels: e.Labels,
})
return nil
})
if err != nil {
return stats, fmt.Errorf("iterate exemplars: %w", err)
}
// Flush remaining exemplars.
if err := flushExemplars(); err != nil {
return stats, fmt.Errorf("flush exemplars at the end: %w", err)
}
if err := cp.Close(); err != nil {
return stats, fmt.Errorf("close chunk snapshot: %w", err)
}
if err := fileutil.Replace(cpdirtmp, cpdir); err != nil {
return stats, fmt.Errorf("rename chunk snapshot directory: %w", err)
}
if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, wlast, woffset); err != nil {
// Leftover old chunk snapshots do not cause problems down the line beyond
// occupying disk space.
// They will just be ignored since a higher chunk snapshot exists.
h.logger.Error("delete old chunk snapshots", "err", err)
}
return stats, nil
}
func chunkSnapshotDir(wlast, woffset int) string {
return fmt.Sprintf(chunkSnapshotPrefix+"%06d.%010d", wlast, woffset)
}
func (h *Head) performChunkSnapshot() error {
h.logger.Info("creating chunk snapshot")
startTime := time.Now()
stats, err := h.ChunkSnapshot()
elapsed := time.Since(startTime)
if err != nil {
return fmt.Errorf("chunk snapshot: %w", err)
}
h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir)
return nil
}
// ChunkSnapshotStats returns stats about a created chunk snapshot.
type ChunkSnapshotStats struct {
TotalSeries int
Dir string
}
// LastChunkSnapshot returns the directory name, WAL segment index, and offset of the most recent chunk snapshot.
// If dir does not contain any chunk snapshots, ErrNotFound is returned.
func LastChunkSnapshot(dir string) (string, int, int, error) {
files, err := os.ReadDir(dir)
if err != nil {
return "", 0, 0, err
}
maxIdx, maxOffset := -1, -1
maxFileName := ""
for i := range files {
fi := files[i]
if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
continue
}
if !fi.IsDir() {
return "", 0, 0, fmt.Errorf("chunk snapshot %s is not a directory", fi.Name())
}
splits := strings.Split(fi.Name()[len(chunkSnapshotPrefix):], ".")
if len(splits) != 2 {
			// Chunk snapshot is not in the right format; we do not care about it.
continue
}
idx, err := strconv.Atoi(splits[0])
if err != nil {
continue
}
offset, err := strconv.Atoi(splits[1])
if err != nil {
continue
}
if idx > maxIdx || (idx == maxIdx && offset > maxOffset) {
maxIdx, maxOffset = idx, offset
maxFileName = filepath.Join(dir, fi.Name())
}
}
if maxFileName == "" {
return "", 0, 0, record.ErrNotFound
}
return maxFileName, maxIdx, maxOffset, nil
}
// DeleteChunkSnapshots deletes all chunk snapshots in a directory below the given index and offset.
func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
files, err := os.ReadDir(dir)
if err != nil {
return err
}
var errs []error
for _, fi := range files {
if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
continue
}
splits := strings.Split(fi.Name()[len(chunkSnapshotPrefix):], ".")
if len(splits) != 2 {
continue
}
idx, err := strconv.Atoi(splits[0])
if err != nil {
continue
}
offset, err := strconv.Atoi(splits[1])
if err != nil {
continue
}
if idx < maxIndex || (idx == maxIndex && offset < maxOffset) {
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
errs = append(errs, err)
}
}
}
return errors.Join(errs...)
}
// loadChunkSnapshot replays the chunk snapshot and restores the Head state from it. If an error is returned,
// it is the caller's responsibility to clear the contents of the Head.
func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSeries, error) {
dir, snapIdx, snapOffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot)
if err != nil {
if errors.Is(err, record.ErrNotFound) {
return snapIdx, snapOffset, nil, nil
}
return snapIdx, snapOffset, nil, fmt.Errorf("find last chunk snapshot: %w", err)
}
start := time.Now()
sr, err := wlog.NewSegmentsReader(dir)
if err != nil {
return snapIdx, snapOffset, nil, fmt.Errorf("open chunk snapshot: %w", err)
}
defer func() {
if err := sr.Close(); err != nil {
h.logger.Warn("error while closing the wal segments reader", "err", err)
}
}()
var (
numSeries = 0
unknownRefs = int64(0)
concurrency = h.opts.WALReplayConcurrency
wg sync.WaitGroup
recordChan = make(chan chunkSnapshotRecord, 5*concurrency)
shardedRefSeries = make([]map[chunks.HeadSeriesRef]*memSeries, concurrency)
errChan = make(chan error, concurrency)
refSeries map[chunks.HeadSeriesRef]*memSeries
exemplarBuf []record.RefExemplar
syms = labels.NewSymbolTable() // New table for the whole snapshot.
dec = record.NewDecoder(syms, h.logger)
)
wg.Add(concurrency)
for i := range concurrency {
go func(idx int, rc <-chan chunkSnapshotRecord) {
defer wg.Done()
defer func() {
// If there was an error, drain the channel
// to unblock the main thread.
for range rc {
}
}()
shardedRefSeries[idx] = make(map[chunks.HeadSeriesRef]*memSeries)
localRefSeries := shardedRefSeries[idx]
for csr := range rc {
series, _, err := h.getOrCreateWithOptionalID(csr.ref, csr.lset.Hash(), csr.lset, false)
if err != nil {
errChan <- err
return
}
localRefSeries[csr.ref] = series
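				// Raise h.lastSeriesID to at least this series ID using a CAS loop,
				// so IDs assigned after replay do not collide with snapshotted series.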
for {
seriesID := uint64(series.ref)
lastSeriesID := h.lastSeriesID.Load()
if lastSeriesID >= seriesID || h.lastSeriesID.CompareAndSwap(lastSeriesID, seriesID) {
break
}
}
if csr.mc == nil {
continue
}
series.nextAt = csr.mc.maxTime // This will create a new chunk on append.
series.headChunks = csr.mc
series.lastValue = csr.lastValue
series.lastHistogramValue = csr.lastHistogramValue
series.lastFloatHistogramValue = csr.lastFloatHistogramValue
if value.IsStaleNaN(series.lastValue) ||
(series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) ||
(series.lastFloatHistogramValue != nil && value.IsStaleNaN(series.lastFloatHistogramValue.Sum)) {
h.numStaleSeries.Inc()
}
app, err := series.headChunks.chunk.Appender()
if err != nil {
errChan <- err
return
}
series.app = app
h.updateMinMaxTime(csr.mc.minTime, csr.mc.maxTime)
}
}(i, recordChan)
}
r := wlog.NewReader(sr)
var loopErr error
Outer:
for r.Next() {
select {
case err := <-errChan:
errChan <- err
break Outer
default:
}
rec := r.Record()
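		// The first byte of every snapshot record identifies its type.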
switch rec[0] {
case chunkSnapshotRecordTypeSeries:
numSeries++
csr, err := decodeSeriesFromChunkSnapshot(&dec, rec)
if err != nil {
loopErr = fmt.Errorf("decode series record: %w", err)
break Outer
}
recordChan <- csr
case chunkSnapshotRecordTypeTombstones:
tr, err := decodeTombstonesSnapshotRecord(rec)
if err != nil {
loopErr = fmt.Errorf("decode tombstones: %w", err)
break Outer
}
if err = tr.Iter(func(ref storage.SeriesRef, ivs tombstones.Intervals) error {
h.tombstones.AddInterval(ref, ivs...)
return nil
}); err != nil {
loopErr = fmt.Errorf("iterate tombstones: %w", err)
break Outer
}
case chunkSnapshotRecordTypeExemplars:
			// Exemplars are at the end of the snapshot, so all series are loaded at this point.
if len(refSeries) == 0 {
close(recordChan)
wg.Wait()
refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries)
for _, shard := range shardedRefSeries {
maps.Copy(refSeries, shard)
}
}
if !h.opts.EnableExemplarStorage || h.opts.MaxExemplars.Load() <= 0 {
// Exemplar storage is disabled.
continue Outer
}
decbuf := encoding.Decbuf{B: rec[1:]}
exemplarBuf = exemplarBuf[:0]
exemplarBuf, err = dec.ExemplarsFromBuffer(&decbuf, exemplarBuf)
if err != nil {
loopErr = fmt.Errorf("exemplars from buffer: %w", err)
break Outer
}
for _, e := range exemplarBuf {
ms, ok := refSeries[e.Ref]
if !ok {
unknownRefs++
continue
}
if err := h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{
Labels: e.Labels,
Value: e.V,
Ts: e.T,
}); err != nil {
loopErr = fmt.Errorf("add exemplar: %w", err)
break Outer
}
}
default:
			// This is a record type we don't understand. It is either an old format from an
			// earlier version, or a new format and the code was rolled back to an older version.
loopErr = fmt.Errorf("unsupported snapshot record type 0b%b", rec[0])
break Outer
}
}
if len(refSeries) == 0 {
close(recordChan)
wg.Wait()
}
close(errChan)
var errs []error
if loopErr != nil {
errs = append(errs, fmt.Errorf("decode loop: %w", loopErr))
}
for err := range errChan {
errs = append(errs, fmt.Errorf("record processing: %w", err))
}
if err := errors.Join(errs...); err != nil {
return -1, -1, nil, err
}
if err := r.Err(); err != nil {
return -1, -1, nil, fmt.Errorf("read records: %w", err)
}
if len(refSeries) == 0 {
// We had no exemplar record, so we have to build the map here.
refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries)
for _, shard := range shardedRefSeries {
maps.Copy(refSeries, shard)
}
}
elapsed := time.Since(start)
h.logger.Info("chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String())
if unknownRefs > 0 {
h.logger.Warn("unknown series references during chunk snapshot replay", "count", unknownRefs)
}
return snapIdx, snapOffset, refSeries, nil
} | go | github | https://github.com/prometheus/prometheus | tsdb/head_wal.go |
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/imx7ulp-pcc-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale i.MX7ULP Peripheral Clock Control (PCC) modules Clock Controller
maintainers:
- A.s. Dong <aisheng.dong@nxp.com>
description: |
i.MX7ULP Clock functions are under joint control of the System
Clock Generation (SCG) modules, Peripheral Clock Control (PCC)
  modules, and Core Mode Controller (CMC)1 blocks.
The clocking scheme provides clear separation between M4 domain
and A7 domain. Except for a few clock sources shared between two
domains, such as the System Oscillator clock, the Slow IRC (SIRC),
  and the Fast IRC clock (FIRCLK), clock sources and clock
management are separated and contained within each domain.
M4 clock management consists of SCG0, PCC0, PCC1, and CMC0 modules.
A7 clock management consists of SCG1, PCC2, PCC3, and CMC1 modules.
  Note: this binding doc is only for the A7 clock domain.
The Peripheral Clock Control (PCC) is responsible for clock selection,
optional division and clock gating mode for peripherals in their
  respective power domain.
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell.
See include/dt-bindings/clock/imx7ulp-clock.h for the full list of
i.MX7ULP clock IDs of each module.
properties:
compatible:
enum:
- fsl,imx7ulp-pcc2
- fsl,imx7ulp-pcc3
reg:
maxItems: 1
'#clock-cells':
const: 1
clocks:
items:
- description: nic1 bus clock
- description: nic1 clock
- description: ddr clock
- description: apll pfd2
- description: apll pfd1
- description: apll pfd0
- description: usb pll
- description: system osc bus clock
- description: fast internal reference clock bus
- description: rtc osc
- description: system pll bus clock
clock-names:
items:
- const: nic1_bus_clk
- const: nic1_clk
- const: ddr_clk
- const: apll_pfd2
- const: apll_pfd1
- const: apll_pfd0
- const: upll
- const: sosc_bus_clk
- const: firc_bus_clk
- const: rosc
- const: spll_bus_clk
required:
- compatible
- reg
- '#clock-cells'
- clocks
- clock-names
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/imx7ulp-clock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
clock-controller@403f0000 {
compatible = "fsl,imx7ulp-pcc2";
reg = <0x403f0000 0x10000>;
#clock-cells = <1>;
clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>,
<&scg1 IMX7ULP_CLK_NIC1_DIV>,
<&scg1 IMX7ULP_CLK_DDR_DIV>,
<&scg1 IMX7ULP_CLK_APLL_PFD2>,
<&scg1 IMX7ULP_CLK_APLL_PFD1>,
<&scg1 IMX7ULP_CLK_APLL_PFD0>,
<&scg1 IMX7ULP_CLK_UPLL>,
<&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>,
<&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>,
<&scg1 IMX7ULP_CLK_ROSC>,
<&scg1 IMX7ULP_CLK_SPLL_BUS_CLK>;
clock-names = "nic1_bus_clk", "nic1_clk", "ddr_clk",
"apll_pfd2", "apll_pfd1", "apll_pfd0",
"upll", "sosc_bus_clk", "firc_bus_clk",
"rosc", "spll_bus_clk";
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/imx7ulp-pcc-clock.yaml |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ScaleTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ScaleTriLBijectorTest(test.TestCase):
"""Tests the correctness of the ScaleTriL bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testComputesCorrectValues(self):
shift = 1.61803398875
x = np.float32(np.array([-1, .5, 2]))
y = np.float32(np.array([[np.exp(2) + shift, 0.],
[.5, np.exp(-1) + shift]]))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Exp(),
diag_shift=shift)
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
@test_util.run_in_graph_and_eager_modes
def testInvertible(self):
# Generate random inputs from an unconstrained space, with
# event size 6 to specify 3x3 triangular matrices.
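    # (Event size 6 = 3*(3+1)/2, the number of entries in a 3x3
    # lower-triangular matrix.)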
batch_shape = [2, 1]
    x = np.float32(self._rng.randn(*(batch_shape + [6])))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Softplus(),
diag_shift=3.14159)
y = self.evaluate(b.forward(x))
self.assertAllEqual(y.shape, batch_shape + [3, 3])
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(fldj, -ildj)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package providercache
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/http/httptest"
"path/filepath"
"strings"
"testing"
"github.com/apparentlymart/go-versions/versions"
"github.com/apparentlymart/go-versions/versions/constraints"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
svchost "github.com/hashicorp/terraform-svchost"
"github.com/hashicorp/terraform-svchost/disco"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/depsfile"
"github.com/hashicorp/terraform/internal/getproviders"
)
func TestEnsureProviderVersions(t *testing.T) {
// This is a sort of hybrid between table-driven and imperative-style
// testing, because the overall sequence of steps is the same for all
// of the test cases but the setup and verification have enough different
// permutations that it ends up being more concise to express them as
// normal code.
type Test struct {
Source getproviders.Source
Prepare func(*testing.T, *Installer, *Dir)
LockFile string
Reqs getproviders.Requirements
Mode InstallMode
Check func(*testing.T, *Dir, *depsfile.Locks)
WantErr string
WantEvents func(*Installer, *Dir) map[addrs.Provider][]*testInstallerEventLogItem
}
// noProvider is just the zero value of addrs.Provider, which we're
// using in this test as the key for installer events that are not
// specific to a particular provider.
var noProvider addrs.Provider
beepProvider := addrs.MustParseProviderSourceString("example.com/foo/beep")
beepProviderDir := getproviders.PackageLocalDir("testdata/beep-provider")
fakePlatform := getproviders.Platform{OS: "bleep", Arch: "bloop"}
wrongPlatform := getproviders.Platform{OS: "wrong", Arch: "wrong"}
beepProviderHash := getproviders.HashScheme1.New("2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=")
terraformProvider := addrs.MustParseProviderSourceString("terraform.io/builtin/terraform")
	// Test a provider whose registry listing includes an invalid semver
	// version, so that we can verify how the installer behaves when the
	// registry returns one.
signedProviderPkg := createTestProvider(t, "invalidsemver", "1.2.0")
services, baseDir, closeFn := testServices(t, signedProviderPkg)
defer closeFn()
wantVersion := getproviders.MustParseVersion("1.2.0")
invalidSemverProvider := addrs.MustParseProviderSourceString("example.com/awesomesauce/invalidsemver")
invalidSemverProviderDir := getproviders.PackageHTTPURL(fmt.Sprintf("%s/pkg/awesomesauce/invalidsemver_1.2.0.zip", baseDir))
h1Hash := getproviders.HashScheme1.New("z+Ic+uNhBTO9zbYfr9tAfhYw4R+rxOq8n7ivKlCOgPI=")
zipHash := getproviders.HashSchemeZip.New("d595832bf433c79cd90b447872d250be213d443080120899e965ab2b2cce0d4b")
tests := map[string]Test{
		// The registry returns a mix of valid and invalid semver versions,
		// but we only care about the valid ones.
"success: registry response with mix of valid and invalid semver versions": {
Source: getproviders.NewRegistrySource(services),
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
invalidSemverProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(invalidSemverProvider)
wantLock := depsfile.NewProviderLock(
invalidSemverProvider,
wantVersion,
getproviders.MustParseVersionConstraints(">= 1.0.0"),
[]getproviders.Hash{h1Hash, zipHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(invalidSemverProvider)
wantEntry := &CachedProvider{
Provider: invalidSemverProvider,
Version: getproviders.MustParseVersion("1.2.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/awesomesauce/invalidsemver/1.2.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
invalidSemverProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
invalidSemverProvider: getproviders.NewPackageAuthenticationResult(3, signedProviderPkg.keyID),
},
},
},
invalidSemverProvider: {
{
Event: "QueryPackagesBegin",
Provider: invalidSemverProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: invalidSemverProvider,
Args: "1.2.0",
},
{
Event: "FetchPackageMeta",
Provider: invalidSemverProvider,
Args: "1.2.0",
},
{
Event: "FetchPackageBegin",
Provider: invalidSemverProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"1.2.0", invalidSemverProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: invalidSemverProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"1.2.0",
[]getproviders.Hash{h1Hash},
[]getproviders.Hash{zipHash},
nil,
},
},
{
Event: "FetchPackageSuccess",
Provider: invalidSemverProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"1.2.0",
filepath.Join(dir.BasePath(), "example.com/awesomesauce/invalidsemver/1.2.0/bleep_bloop"),
"self-signed",
},
},
},
}
},
},
"error: no valid version matches the constraint": {
Source: getproviders.NewRegistrySource(services),
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
invalidSemverProvider: getproviders.MustParseVersionConstraints(">= 1.5.0"),
},
WantErr: `example.com/awesomesauce/invalidsemver: no available releases match the given constraints >= 1.5.0`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
invalidSemverProvider: getproviders.MustParseVersionConstraints(">= 1.5.0"),
},
},
},
invalidSemverProvider: {
{
Event: "QueryPackagesBegin",
Provider: invalidSemverProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.5.0", false},
},
{
Event: "QueryPackagesFailure",
Provider: invalidSemverProvider,
Args: "no available releases match the given constraints >= 1.5.0",
},
},
}
},
},
"no dependencies": {
Mode: InstallNewProvidersOnly,
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 0 {
t.Errorf("unexpected cache directory entries\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 0 {
t.Errorf("unexpected provider lock entries\n%s", spew.Sdump(allLocked))
}
},
WantEvents: func(*Installer, *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints(nil),
},
},
}
},
},
"successful initial install of one provider": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{beepProviderHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"2.1.0", beepProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
nil,
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"successful initial install of one provider through a cold global cache": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
globalCacheDirPath := tmpDir(t)
globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform)
inst.SetGlobalCacheDir(globalCacheDir)
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{beepProviderHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"2.1.0", beepProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
nil,
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"successful initial install of one provider through a warm global cache but without a lock file entry": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
globalCacheDirPath := tmpDir(t)
globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform)
_, err := globalCacheDir.InstallPackage(
context.Background(),
getproviders.PackageMeta{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
nil,
)
if err != nil {
t.Fatalf("failed to populate global cache: %s", err)
}
inst.SetGlobalCacheDir(globalCacheDir)
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{beepProviderHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
// Existing cache entry is ineligible for linking because
// we have no lock file checksums to compare it to.
// Instead, we install from upstream and lock with
// whatever checksums we learn in that process.
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{
"2.1.0",
beepProviderDir,
},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
nil,
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"successful initial install of one provider through a warm global cache and correct locked checksum": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
# The existing cache entry is valid only if it matches a
# checksum already recorded in the lock file.
provider "example.com/foo/beep" {
version = "2.1.0"
constraints = ">= 1.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
globalCacheDirPath := tmpDir(t)
globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform)
_, err := globalCacheDir.InstallPackage(
context.Background(),
getproviders.PackageMeta{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
nil,
)
if err != nil {
t.Fatalf("failed to populate global cache: %s", err)
}
inst.SetGlobalCacheDir(globalCacheDir)
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{beepProviderHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "LinkFromCacheBegin",
Provider: beepProvider,
Args: struct {
Version string
CacheRoot string
}{
"2.1.0",
inst.globalCacheDir.BasePath(),
},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
},
},
{
Event: "LinkFromCacheSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"),
},
},
},
}
},
},
"successful initial install of one provider through a warm global cache with an incompatible checksum": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
# This is approximating the awkward situation where the lock
# file was populated by someone who installed from a location
				# other than the origin registry and so the set of checksums
# is incomplete. In this case we can't prove that our cache
# entry is valid and so we silently ignore the cache entry
# and try to install from upstream anyway, in the hope that
# this will give us an opportunity to access the origin
# registry and get a checksum that works for the current
# platform.
provider "example.com/foo/beep" {
version = "2.1.0"
constraints = ">= 1.0.0"
hashes = [
# NOTE: This is the correct checksum for the
# beepProviderDir package, but we're going to
# intentionally install from a different directory
# below so that the entry in the cache will not
# match this checksum.
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
// This is another "beep provider" package directory that
// has a different checksum than the one in beepProviderDir.
// We're mimicking the situation where the lock file was
// originally built from beepProviderDir but the local system
// is running on a different platform and so its existing
// cache entry doesn't match the checksum.
beepProviderOtherPlatformDir := getproviders.PackageLocalDir("testdata/beep-provider-other-platform")
globalCacheDirPath := tmpDir(t)
globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform)
_, err := globalCacheDir.InstallPackage(
context.Background(),
getproviders.PackageMeta{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderOtherPlatformDir,
},
nil,
)
if err != nil {
t.Fatalf("failed to populate global cache: %s", err)
}
inst.SetGlobalCacheDir(globalCacheDir)
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{beepProviderHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{
"2.1.0",
beepProviderDir,
},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"successful initial install of one provider through a warm global cache without a lock file entry but allowing the cache to break the lock file": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
# (intentionally empty)
`,
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
globalCacheDirPath := tmpDir(t)
globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform)
_, err := globalCacheDir.InstallPackage(
context.Background(),
getproviders.PackageMeta{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
nil,
)
if err != nil {
t.Fatalf("failed to populate global cache: %s", err)
}
inst.SetGlobalCacheDir(globalCacheDir)
inst.SetGlobalCacheDirMayBreakDependencyLockFile(true)
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{beepProviderHash},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "LinkFromCacheBegin",
Provider: beepProvider,
Args: struct {
Version string
CacheRoot string
}{
"2.1.0",
inst.globalCacheDir.BasePath(),
},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
nil,
},
},
{
Event: "LinkFromCacheSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"),
},
},
},
}
},
},
"failing install of one provider through a warm global cache with an incorrect locked checksum while allowing the cache to break the lock file": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
# The existing cache entry is valid only if it matches a
# checksum already recorded in the lock file, but this
# test is overriding that rule using a special setting.
provider "example.com/foo/beep" {
version = "2.1.0"
constraints = ">= 1.0.0"
hashes = [
"h1:wrong-not-matchy",
]
}
`,
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
globalCacheDirPath := tmpDir(t)
globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform)
_, err := globalCacheDir.InstallPackage(
context.Background(),
getproviders.PackageMeta{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
nil,
)
if err != nil {
t.Fatalf("failed to populate global cache: %s", err)
}
inst.SetGlobalCacheDir(globalCacheDir)
inst.SetGlobalCacheDirMayBreakDependencyLockFile(true)
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 0 {
t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
// The lock file entry hasn't changed because the cache
// entry didn't match the existing lock file entry.
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 1.0.0"),
[]getproviders.Hash{"h1:wrong-not-matchy"},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
// The provider wasn't installed into the local cache directory
// because that would make the local cache mismatch the
// lock file.
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := (*CachedProvider)(nil)
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantErr: `doesn't match any of the checksums`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "LinkFromCacheBegin",
Provider: beepProvider,
Args: struct {
Version string
CacheRoot string
}{
"2.1.0",
inst.globalCacheDir.BasePath(),
},
},
{
Event: "LinkFromCacheFailure",
Provider: beepProvider,
Args: struct {
Version string
Error string
}{
"2.1.0",
fmt.Sprintf(
"the provider cache at %s has a copy of example.com/foo/beep 2.1.0 that doesn't match any of the checksums recorded in the dependency lock file",
dir.BasePath(),
),
},
},
},
}
},
},
"successful reinstall of one previously-locked provider": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "2.0.0"
constraints = ">= 2.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.0.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.0.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "2.0.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"2.0.0", beepProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.0.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"2.0.0",
filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"skipped install of one previously-locked and installed provider": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "2.0.0"
constraints = ">= 2.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
_, err := dir.InstallPackage(
context.Background(),
getproviders.PackageMeta{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
nil,
)
if err != nil {
t.Fatalf("installation to the test dir failed: %s", err)
}
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.0.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.0.0",
},
{
Event: "ProviderAlreadyInstalled",
Provider: beepProvider,
Args: versions.Version{Major: 2, Minor: 0, Patch: 0},
},
},
}
},
},
"successful upgrade of one previously-locked provider": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "2.0.0"
constraints = ">= 2.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Mode: InstallUpgrades,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("2.1.0"),
getproviders.MustParseVersionConstraints(">= 2.0.0"),
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.1.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "2.1.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"2.1.0", beepProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"2.1.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
nil,
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"2.1.0",
filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"successful install of a built-in provider": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{},
nil,
),
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
inst.SetBuiltInProviderTypes([]string{"terraform"})
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
terraformProvider: nil,
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
// Built-in providers are neither included in the cache
// directory nor mentioned in the lock file, because they
// are compiled directly into the Terraform executable.
if allCached := dir.AllAvailablePackages(); len(allCached) != 0 {
t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 0 {
t.Errorf("wrong number of provider lock entries; want none\n%s", spew.Sdump(allLocked))
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
terraformProvider: constraints.IntersectionSpec(nil),
},
},
},
terraformProvider: {
{
Event: "BuiltInProviderAvailable",
Provider: terraformProvider,
},
},
}
},
},
"remove no-longer-needed provider from lock file": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "1.0.0"
constraints = ">= 1.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
provider "example.com/foo/obsolete" {
version = "2.0.0"
constraints = ">= 2.0.0"
hashes = [
"no:irrelevant",
]
}
`,
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("1.0.0"),
getproviders.MustParseVersionConstraints(">= 1.0.0"),
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
// Note: intentionally no entries for example.com/foo/obsolete
// here, because it's no longer needed and therefore not
// installed.
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"1.0.0", beepProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"1.0.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"1.0.0",
filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
"failed install of a non-existing built-in provider": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{},
nil,
),
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
// NOTE: We're intentionally not calling
// inst.SetBuiltInProviderTypes to make the "terraform"
// built-in provider available here, so requests for it
// should fail.
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
terraformProvider: nil,
},
WantErr: `some providers could not be installed:
- terraform.io/builtin/terraform: this Terraform release has no built-in provider named "terraform"`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
terraformProvider: constraints.IntersectionSpec(nil),
},
},
},
terraformProvider: {
{
Event: "BuiltInProviderFailure",
Provider: terraformProvider,
Args: `this Terraform release has no built-in provider named "terraform"`,
},
},
}
},
},
"failed install when a built-in provider has a version constraint": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{},
nil,
),
Prepare: func(t *testing.T, inst *Installer, dir *Dir) {
inst.SetBuiltInProviderTypes([]string{"terraform"})
},
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
terraformProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
WantErr: `some providers could not be installed:
- terraform.io/builtin/terraform: built-in providers do not support explicit version constraints`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
terraformProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
},
terraformProvider: {
{
Event: "BuiltInProviderFailure",
Provider: terraformProvider,
Args: `built-in providers do not support explicit version constraints`,
},
},
}
},
},
"locked version is excluded by new version constraint": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "1.0.0"
constraints = ">= 1.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 0 {
t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("1.0.0"),
getproviders.MustParseVersionConstraints(">= 1.0.0"),
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
},
WantErr: `some providers could not be installed:
- example.com/foo/beep: locked provider example.com/foo/beep 1.0.0 does not match configured version constraint >= 2.0.0; must use terraform init -upgrade to allow selection of new versions`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", true},
},
{
Event: "QueryPackagesFailure",
Provider: beepProvider,
Args: `locked provider example.com/foo/beep 1.0.0 does not match configured version constraint >= 2.0.0; must use terraform init -upgrade to allow selection of new versions`,
},
},
}
},
},
"locked version is no longer available": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("2.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "1.2.0"
constraints = ">= 1.0.0"
hashes = [
"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=",
]
}
`,
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 0 {
t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("1.2.0"),
getproviders.MustParseVersionConstraints(">= 1.0.0"),
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
},
WantErr: `some providers could not be installed:
- example.com/foo/beep: the previously-selected version 1.2.0 is no longer available`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.0.0", true},
},
{
Event: "QueryPackagesFailure",
Provider: beepProvider,
Args: `the previously-selected version 1.2.0 is no longer available`,
},
},
}
},
},
"no versions match the version constraint": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
WantErr: `some providers could not be installed:
- example.com/foo/beep: no available releases match the given constraints >= 2.0.0`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 2.0.0", false},
},
{
Event: "QueryPackagesFailure",
Provider: beepProvider,
Args: `no available releases match the given constraints >= 2.0.0`,
},
},
}
},
},
"version exists but doesn't support the current platform": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: wrongPlatform,
Location: beepProviderDir,
},
},
nil,
),
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
WantErr: `some providers could not be installed:
- example.com/foo/beep: provider example.com/foo/beep 1.0.0 is not available for bleep_bloop`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.0.0", false},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageFailure",
Provider: beepProvider,
Args: struct {
Version string
Error string
}{
"1.0.0",
"provider example.com/foo/beep 1.0.0 is not available for bleep_bloop",
},
},
},
}
},
},
"available package doesn't match locked hash": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "1.0.0"
constraints = ">= 1.0.0"
hashes = [
"h1:does-not-match",
]
}
`,
Mode: InstallNewProvidersOnly,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
WantErr: `some providers could not be installed:
- example.com/foo/beep: the local package for example.com/foo/beep 1.0.0 doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https://developer.hashicorp.com/terraform/language/files/dependency-lock#checksum-verification`,
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"1.0.0", beepProviderDir},
},
{
Event: "FetchPackageFailure",
Provider: beepProvider,
Args: struct {
Version string
Error string
}{
"1.0.0",
`the local package for example.com/foo/beep 1.0.0 doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https://developer.hashicorp.com/terraform/language/files/dependency-lock#checksum-verification`,
},
},
},
}
},
},
"force mode ignores hashes": {
Source: getproviders.NewMockSource(
[]getproviders.PackageMeta{
{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
TargetPlatform: fakePlatform,
Location: beepProviderDir,
},
},
nil,
),
LockFile: `
provider "example.com/foo/beep" {
version = "1.0.0"
constraints = ">= 1.0.0"
hashes = [
"h1:does-not-match",
]
}
`,
Mode: InstallNewProvidersForce,
Reqs: getproviders.Requirements{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) {
if allCached := dir.AllAvailablePackages(); len(allCached) != 1 {
t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached))
}
if allLocked := locks.AllProviders(); len(allLocked) != 1 {
t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked))
}
gotLock := locks.Provider(beepProvider)
wantLock := depsfile.NewProviderLock(
beepProvider,
getproviders.MustParseVersion("1.0.0"),
getproviders.MustParseVersionConstraints(">= 1.0.0"),
[]getproviders.Hash{beepProviderHash, "h1:does-not-match"},
)
if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong lock entry\n%s", diff)
}
gotEntry := dir.ProviderLatestVersion(beepProvider)
wantEntry := &CachedProvider{
Provider: beepProvider,
Version: getproviders.MustParseVersion("1.0.0"),
PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"),
}
if diff := cmp.Diff(wantEntry, gotEntry); diff != "" {
t.Errorf("wrong cache entry\n%s", diff)
}
},
WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem {
return map[addrs.Provider][]*testInstallerEventLogItem{
noProvider: {
{
Event: "PendingProviders",
Args: map[addrs.Provider]getproviders.VersionConstraints{
beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"),
},
},
{
Event: "ProvidersFetched",
Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{
beepProvider: nil,
},
},
},
beepProvider: {
{
Event: "QueryPackagesBegin",
Provider: beepProvider,
Args: struct {
Constraints string
Locked bool
}{">= 1.0.0", true},
},
{
Event: "QueryPackagesSuccess",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageMeta",
Provider: beepProvider,
Args: "1.0.0",
},
{
Event: "FetchPackageBegin",
Provider: beepProvider,
Args: struct {
Version string
Location getproviders.PackageLocation
}{"1.0.0", beepProviderDir},
},
{
Event: "ProvidersLockUpdated",
Provider: beepProvider,
Args: struct {
Version string
Local []getproviders.Hash
Signed []getproviders.Hash
Prior []getproviders.Hash
}{
"1.0.0",
[]getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="},
nil,
[]getproviders.Hash{"h1:does-not-match"},
},
},
{
Event: "FetchPackageSuccess",
Provider: beepProvider,
Args: struct {
Version string
LocalDir string
AuthResult string
}{
"1.0.0",
filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"),
"unauthenticated",
},
},
},
}
},
},
}
ctx := context.Background()
for name, test := range tests {
t.Run(name, func(t *testing.T) {
if test.Check == nil && test.WantEvents == nil && test.WantErr == "" {
t.Fatalf("invalid test: must set at least one of Check, WantEvents, or WantErr")
}
outputDir := NewDirWithPlatform(tmpDir(t), fakePlatform)
source := test.Source
if source == nil {
source = getproviders.NewMockSource(nil, nil)
}
inst := NewInstaller(outputDir, source)
if test.Prepare != nil {
test.Prepare(t, inst, outputDir)
}
locks, lockDiags := depsfile.LoadLocksFromBytes([]byte(test.LockFile), "test.lock.hcl")
if lockDiags.HasErrors() {
t.Fatalf("invalid lock file: %s", lockDiags.Err().Error())
}
providerEvents := make(map[addrs.Provider][]*testInstallerEventLogItem)
eventsCh := make(chan *testInstallerEventLogItem)
var newLocks *depsfile.Locks
var instErr error
go func(ch chan *testInstallerEventLogItem) {
events := installerLogEventsForTests(ch)
ctx := events.OnContext(ctx)
newLocks, instErr = inst.EnsureProviderVersions(ctx, locks, test.Reqs, test.Mode)
close(eventsCh) // exits the event loop below
}(eventsCh)
for evt := range eventsCh {
// We do the event collection in the main goroutine, rather than
// running the installer itself in the main goroutine, so that
// we can safely t.Log in here without violating the testing.T
// usage rules.
if evt.Provider == (addrs.Provider{}) {
t.Logf("%s(%s)", evt.Event, spew.Sdump(evt.Args))
} else {
t.Logf("%s: %s(%s)", evt.Provider, evt.Event, spew.Sdump(evt.Args))
}
providerEvents[evt.Provider] = append(providerEvents[evt.Provider], evt)
}
if test.WantErr != "" {
if instErr == nil {
t.Errorf("succeeded; want error\nwant: %s", test.WantErr)
} else if got, want := instErr.Error(), test.WantErr; !strings.Contains(got, want) {
t.Errorf("wrong error\ngot: %s\nwant substring: %s", got, want)
}
} else if instErr != nil {
t.Errorf("unexpected error\ngot: %s", instErr.Error())
}
if test.Check != nil {
test.Check(t, outputDir, newLocks)
}
if test.WantEvents != nil {
wantEvents := test.WantEvents(inst, outputDir)
if diff := cmp.Diff(wantEvents, providerEvents, cmp.AllowUnexported(getproviders.PackageAuthenticationResult{})); diff != "" {
t.Errorf("wrong installer events\n%s", diff)
}
}
})
}
}
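// The goroutine-plus-channel pattern used above generalizes beyond this
// test: the worker sends log items on a channel and closes it when done,
// while the test goroutine ranges over the channel so that every t.Log call
// stays on the main goroutine. A minimal sketch of that shape (hypothetical
// helper, not part of the original file):
func exampleCollectEvents(t *testing.T, produce func(chan<- *testInstallerEventLogItem)) []*testInstallerEventLogItem {
	ch := make(chan *testInstallerEventLogItem)
	go func() {
		produce(ch)
		close(ch) // unblocks the range loop below once the producer is done
	}()
	var got []*testInstallerEventLogItem
	for evt := range ch {
		t.Logf("%s", evt.Event) // safe: we're still on the test goroutine
		got = append(got, evt)
	}
	return got
}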
func TestEnsureProviderVersions_local_source(t *testing.T) {
// create filesystem source using the test provider cache dir
source := getproviders.NewFilesystemMirrorSource("testdata/cachedir")
// create a temporary workdir
tmpDirPath := t.TempDir()
// set up the installer using the temporary directory and filesystem source
platform := getproviders.Platform{OS: "linux", Arch: "amd64"}
dir := NewDirWithPlatform(tmpDirPath, platform)
installer := NewInstaller(dir, source)
tests := map[string]struct {
provider string
version string
wantHash getproviders.Hash // getproviders.NilHash if not expected to be installed
err string
}{
"install-unpacked": {
provider: "null",
version: "2.0.0",
wantHash: getproviders.HashScheme1.New("qjsREM4DqEWECD43FcPqddZ9oxCG+IaMTxvWPciS05g="),
},
"invalid-zip-file": {
provider: "null",
version: "2.1.0",
wantHash: getproviders.NilHash,
err: "zip: not a valid zip file",
},
"version-constraint-unmet": {
provider: "null",
version: "2.2.0",
wantHash: getproviders.NilHash,
err: "no available releases match the given constraints 2.2.0",
},
"missing-executable": {
provider: "missing/executable",
version: "2.0.0",
wantHash: getproviders.NilHash, // installation fails for a provider with no executable
err: "provider binary not found: could not find executable file starting with terraform-provider-executable",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
ctx := context.TODO()
provider := addrs.MustParseProviderSourceString(test.provider)
versionConstraint := getproviders.MustParseVersionConstraints(test.version)
version := getproviders.MustParseVersion(test.version)
reqs := getproviders.Requirements{
provider: versionConstraint,
}
newLocks, err := installer.EnsureProviderVersions(ctx, depsfile.NewLocks(), reqs, InstallNewProvidersOnly)
gotProviderLocks := newLocks.AllProviders()
wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{
provider: depsfile.NewProviderLock(
provider,
version,
getproviders.MustParseVersionConstraints("= 2.0.0"),
[]getproviders.Hash{
test.wantHash,
},
),
}
if test.wantHash == getproviders.NilHash {
wantProviderLocks = map[addrs.Provider]*depsfile.ProviderLock{}
}
if diff := cmp.Diff(wantProviderLocks, gotProviderLocks, depsfile.ProviderLockComparer); diff != "" {
t.Errorf("wrong provider locks selected\n%s", diff)
}
if test.err == "" && err == nil {
return
}
switch err := err.(type) {
case InstallerError:
providerError, ok := err.ProviderErrors[provider]
if !ok {
t.Fatalf("did not get error for provider %s", provider)
}
if got := providerError.Error(); got != test.err {
t.Fatalf("wrong result\ngot: %s\nwant: %s\n", got, test.err)
}
default:
t.Fatalf("wrong error type. Expected InstallerError, got %T", err)
}
})
}
}
// This test only verifies protocol errors and does not attempt a successful
// installation (at the time of writing, the test files aren't signed so the
// signature verification fails); that's left to the e2e tests.
func TestEnsureProviderVersions_protocol_errors(t *testing.T) {
signedProviderPkg := createTestProvider(t, "happycloud", "1.2.0")
services, _, close := testServices(t, signedProviderPkg)
source := getproviders.NewRegistrySource(services)
defer close()
// create a temporary workdir
tmpDirPath := t.TempDir()
version0 := getproviders.MustParseVersionConstraints("0.1.0") // supports protocol version 1.0
version1 := getproviders.MustParseVersion("1.2.0") // this is the expected result in tests with a match
version2 := getproviders.MustParseVersionConstraints("2.0") // supports protocol version 99
// set up the installer using the temporary directory and mock source
platform := getproviders.Platform{OS: "gameboy", Arch: "lr35902"}
dir := NewDirWithPlatform(tmpDirPath, platform)
installer := NewInstaller(dir, source)
tests := map[string]struct {
provider addrs.Provider
inputVersion getproviders.VersionConstraints
wantVersion getproviders.Version
}{
"too old": {
addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"),
version0,
version1,
},
"too new": {
addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"),
version2,
version1,
},
"unsupported": {
addrs.MustParseProviderSourceString("example.com/weaksauce/unsupported-protocol"),
version0,
getproviders.UnspecifiedVersion,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
reqs := getproviders.Requirements{
test.provider: test.inputVersion,
}
ctx := context.TODO()
_, err := installer.EnsureProviderVersions(ctx, depsfile.NewLocks(), reqs, InstallNewProvidersOnly)
switch err := err.(type) {
case nil:
t.Fatalf("expected error, got success")
case InstallerError:
providerError, ok := err.ProviderErrors[test.provider]
if !ok {
t.Fatalf("did not get error for provider %s", test.provider)
}
switch providerError := providerError.(type) {
case getproviders.ErrProtocolNotSupported:
if !providerError.Suggestion.Same(test.wantVersion) {
t.Fatalf("wrong result\ngot: %s\nwant: %s\n", providerError.Suggestion, test.wantVersion)
}
default:
t.Fatalf("wrong error type. Expected ErrProtocolNotSupported, got %T", err)
}
default:
t.Fatalf("wrong error type. Expected InstallerError, got %T", err)
}
})
}
}
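// A hedged sketch (hypothetical helper, not part of the original file) of
// the error-unwrapping pattern exercised above: a failed installation
// surfaces as an InstallerError whose ProviderErrors map may contain a
// getproviders.ErrProtocolNotSupported carrying a suggested fallback version.
func exampleSuggestedVersion(err error, provider addrs.Provider) (getproviders.Version, bool) {
	instErr, ok := err.(InstallerError)
	if !ok {
		return getproviders.UnspecifiedVersion, false
	}
	// Indexing a missing map key yields a nil error, which fails the assertion.
	protoErr, ok := instErr.ProviderErrors[provider].(getproviders.ErrProtocolNotSupported)
	if !ok {
		return getproviders.UnspecifiedVersion, false
	}
	return protoErr.Suggestion, true
}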
// testServices starts up a local HTTP server running a fake provider registry
// service and returns a service discovery object pre-configured to consider
// the host "example.com" to be served by the fake registry service.
//
// The returned discovery object also knows the hostname "not.example.com"
// which does not have a provider registry at all and "too-new.example.com"
// which has a "providers.v99" service that is inoperable but could be useful
// to test the error reporting for detecting an unsupported protocol version.
// It also knows the hostname "fails.example.com", which refers to an endpoint
// that doesn't correctly speak HTTP, to simulate a protocol error.
//
// The final return value is a cleanup function to call at the end of a test
// function to shut down the test server. After you call that function, the
// discovery object becomes useless.
func testServices(t *testing.T, signedProviderPkg *providerPkg) (services *disco.Disco, baseURL string, cleanup func()) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fakeRegistryHandler(signedProviderPkg, w, r)
}))
services = disco.New()
services.ForceHostServices(svchost.Hostname("example.com"), map[string]interface{}{
"providers.v1": server.URL + "/providers/v1/",
})
services.ForceHostServices(svchost.Hostname("not.example.com"), map[string]interface{}{})
services.ForceHostServices(svchost.Hostname("too-new.example.com"), map[string]interface{}{
// This service doesn't actually work; it's here only to be
// detected as "too new" by the discovery logic.
"providers.v99": server.URL + "/providers/v99/",
})
services.ForceHostServices(svchost.Hostname("fails.example.com"), map[string]interface{}{
"providers.v1": server.URL + "/fails-immediately/",
})
// We'll also permit registry.terraform.io here just because it's our
// default and has some unique features that are not allowed on any other
// hostname. It behaves the same as example.com; prefer example.com unless
// you're testing something specific to the default registry, so that most
// tests stay hostname-agnostic.
services.ForceHostServices(svchost.Hostname("registry.terraform.io"), map[string]interface{}{
"providers.v1": server.URL + "/providers/v1/",
})
return services, server.URL, func() {
server.Close()
}
}
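// A minimal usage sketch (hypothetical helper, not part of the original
// file): a test consuming testServices would build a registry source against
// the fake "example.com" host and register the returned cleanup function.
func exampleRegistrySource(t *testing.T, pkg *providerPkg) getproviders.Source {
	services, _, cleanup := testServices(t, pkg)
	t.Cleanup(cleanup) // shut the fake registry down when the test finishes
	return getproviders.NewRegistrySource(services)
}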
func fakeRegistryHandler(providerPackage *providerPkg, resp http.ResponseWriter, req *http.Request) {
path := req.URL.EscapedPath()
if strings.HasPrefix(path, "/fails-immediately/") {
// Here we take over the socket and just close it immediately, to
// simulate one possible way a server might not be an HTTP server.
hijacker, ok := resp.(http.Hijacker)
if !ok {
// Not hijackable, so we'll just fail normally.
// If this happens, tests relying on this will fail.
resp.WriteHeader(500)
resp.Write([]byte(`cannot hijack`))
return
}
conn, _, err := hijacker.Hijack()
if err != nil {
resp.WriteHeader(500)
resp.Write([]byte(`hijack failed`))
return
}
conn.Close()
return
}
if strings.HasPrefix(path, "/pkg/") {
switch path {
case fmt.Sprintf("/pkg/awesomesauce/%s.zip", providerPackage.filePrefix):
resp.Write(providerPackage.zipContent)
case fmt.Sprintf("/pkg/awesomesauce/%s_SHA256SUMS", providerPackage.filePrefix):
resp.Write(providerPackage.shaSumContent)
case fmt.Sprintf("/pkg/awesomesauce/%s_SHA256SUMS.sig", providerPackage.filePrefix):
resp.Write(providerPackage.sigContent)
default:
resp.WriteHeader(404)
resp.Write([]byte("unknown package file download"))
}
return
}
if !strings.HasPrefix(path, "/providers/v1/") {
resp.WriteHeader(404)
resp.Write([]byte(`not a provider registry endpoint`))
return
}
pathParts := strings.Split(path, "/")[3:]
if len(pathParts) < 2 {
resp.WriteHeader(404)
resp.Write([]byte(`unexpected number of path parts`))
return
}
log.Printf("[TRACE] fake provider registry request for %#v", pathParts)
if len(pathParts) == 2 {
switch pathParts[0] + "/" + pathParts[1] {
case "-/legacy":
// NOTE: This legacy lookup endpoint is specific to
// registry.terraform.io and not expected to work on any other
// registry host.
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write([]byte(`{"namespace":"legacycorp"}`))
default:
resp.WriteHeader(404)
resp.Write([]byte(`unknown namespace or provider type for direct lookup`))
}
}
if len(pathParts) < 3 {
resp.WriteHeader(404)
resp.Write([]byte(`unexpected number of path parts`))
return
}
if pathParts[2] == "versions" {
if len(pathParts) != 3 {
resp.WriteHeader(404)
resp.Write([]byte(`extraneous path parts`))
return
}
switch pathParts[0] + "/" + pathParts[1] {
case "awesomesauce/happycloud":
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
// Note that these version numbers are intentionally misordered
// so we can test that the client-side code places them in the
// correct order (lowest precedence first).
resp.Write([]byte(`{"versions":[{"version":"0.1.0","protocols":["1.0"]},{"version":"2.0.0","protocols":["99.0"]},{"version":"1.2.0","protocols":["5.0"]}, {"version":"1.0.0","protocols":["5.0"]}]}`))
case "awesomesauce/invalidsemver":
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
// This response includes an invalid semver version to test that the client properly ignores it
resp.Write([]byte(`{"versions":[{"version":"1.2.0","protocols":["5.0"]},{"version":"2.0.not-a-semver","protocols":["5.0"]},{"version":"1.0.0","protocols":["5.0"]}]}`))
case "weaksauce/unsupported-protocol":
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write([]byte(`{"versions":[{"version":"0.1.0","protocols":["0.1"]}]}`))
case "weaksauce/no-versions":
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write([]byte(`{"versions":[]}`))
default:
resp.WriteHeader(404)
resp.Write([]byte(`unknown namespace or provider type`))
}
return
}
if len(pathParts) == 6 && pathParts[3] == "download" {
switch fmt.Sprintf("%s/%s", pathParts[0], pathParts[1]) {
case "awesomesauce/happycloud":
if pathParts[4] == "nonexist" {
resp.WriteHeader(404)
resp.Write([]byte(`unsupported OS`))
return
}
version := pathParts[2]
body := map[string]interface{}{
"protocols": []string{"99.0"},
"os": pathParts[4],
"arch": pathParts[5],
"filename": "happycloud_" + version + ".zip",
"shasum": "000000000000000000000000000000000000000000000000000000000000f00d",
"download_url": "/pkg/awesomesauce/happycloud_" + version + ".zip",
"shasums_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS",
"shasums_signature_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS.sig",
"signing_keys": map[string]interface{}{
"gpg_public_keys": []map[string]interface{}{
{
"ascii_armor": getproviders.HashicorpPublicKey,
},
},
},
}
enc, err := json.Marshal(body)
if err != nil {
	resp.WriteHeader(500)
	resp.Write([]byte("failed to encode body"))
	return // don't fall through and also write a 200 header below
}
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write(enc)
case "awesomesauce/invalidsemver":
version := pathParts[2]
fileSha := bytes.Split(providerPackage.shaSumContent, []byte(" "))
if len(fileSha) < 2 {
	resp.WriteHeader(500)
	resp.Write([]byte("invalid split size: failed to encode body"))
	return // the SHA256SUMS content is malformed, so there's no shasum to report
}
body := map[string]any{
"protocols": []string{"5.0"},
"os": pathParts[4],
"arch": pathParts[5],
"filename": fmt.Sprintf("%s_%s.zip", pathParts[1], version),
"shasum": string(fileSha[0]),
"download_url": fmt.Sprintf("/pkg/awesomesauce/%s_%s.zip", pathParts[1], version),
"shasums_url": fmt.Sprintf("/pkg/awesomesauce/%s_%s_SHA256SUMS", pathParts[1], version),
"shasums_signature_url": fmt.Sprintf("/pkg/awesomesauce/%s_%s_SHA256SUMS.sig", pathParts[1], version),
"signing_keys": map[string]any{
"gpg_public_keys": []map[string]any{
{
"ascii_armor": providerPackage.key.String(),
},
},
},
}
enc, err := json.Marshal(body)
if err != nil {
	resp.WriteHeader(500)
	resp.Write([]byte("failed to encode body"))
	return // don't fall through and also write a 200 header below
}
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write(enc)
case "weaksauce/unsupported-protocol":
var protocols []string
version := pathParts[2]
switch version {
case "0.1.0":
protocols = []string{"1.0"}
case "2.0.0":
protocols = []string{"99.0"}
default:
protocols = []string{"5.0"}
}
body := map[string]interface{}{
"protocols": protocols,
"os": pathParts[4],
"arch": pathParts[5],
"filename": "happycloud_" + version + ".zip",
"shasum": "000000000000000000000000000000000000000000000000000000000000f00d",
"download_url": "/pkg/awesomesauce/happycloud_" + version + ".zip",
"shasums_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS",
"shasums_signature_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS.sig",
"signing_keys": map[string]interface{}{
"gpg_public_keys": []map[string]interface{}{
{
"ascii_armor": getproviders.HashicorpPublicKey,
},
},
},
}
enc, err := json.Marshal(body)
if err != nil {
	resp.WriteHeader(500)
	resp.Write([]byte("failed to encode body"))
	return // don't fall through and also write a 200 header below
}
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write(enc)
default:
resp.WriteHeader(404)
resp.Write([]byte(`unknown namespace/provider/version/architecture`))
}
return
}
resp.WriteHeader(404)
resp.Write([]byte(`unrecognized path scheme`))
}
// In order to be able to compare the recorded temp dir paths, we need to
// normalize the path to match what the installer would report.
func tmpDir(t *testing.T) string {
unlinked, err := filepath.EvalSymlinks(t.TempDir())
if err != nil {
t.Fatal(err)
}
return filepath.Clean(unlinked)
} | go | github | https://github.com/hashicorp/terraform | internal/providercache/installer_test.go |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-18 23:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('event', '0023_auto_20160819_0709'),
]
operations = [
migrations.AlterField(
model_name='event',
name='region',
field=models.CharField(choices=[('Mie', '三重県'), ('Okinawa', '沖縄県'), ('Toyama', '富山県'), ('Hokkaido', '北海道'), ('Shizuoka', '静岡県'), ('Yamanashi', '山梨県'), ('Tokushima', '徳島県'), ('Shiga', '滋賀県'), ('Tottori', '鳥取県'), ('Shimane', '島根県'), ('Gunnma', '群馬県'), ('Aichi', '愛知県'), ('Miyagi', '宮城県'), ('Tochigi', '栃木県'), ('Fukushima', '福島県'), ('Miyazaki', '宮崎県'), ('Yamaguchi', '山口県'), ('Yamagata', '山形県'), ('Fukuoka', '福岡県'), ('Saga', '佐賀県'), ('Kanagawa', '神奈川県'), ('Ishikawa', '石川県'), ('Nara', '奈良県'), ('Ooita', '大分県'), ('Ibaraki', '茨城県'), ('Kouchi', '高知県'), ('Tokyo', '東京都'), ('Fukui', '福井県'), ('Hyogo', '兵庫県'), ('Kyoto', '京都府'), ('Kumamoto', '熊本県'), ('Nagano', '長野県'), ('Gifu', '岐阜県'), ('Akita', '秋田県'), ('Kagoshima', '鹿児島県'), ('Osaka', '大阪府'), ('Wakayama', '和歌山県'), ('Kagawa', '香川県'), ('Chiba', '千葉県'), ('Aomori', '青森県'), ('Iwate', '岩手県'), ('Hiroshima', '広島県'), ('Niigata', '新潟県'), ('Nagasaki', '長崎県'), ('Okayama', '岡山県'), ('Saitama', '埼玉県'), ('Ehime', '愛媛県')], max_length=10),
),
] | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/compiler/argumentsUsedInClassFieldInitializerOrStaticInitializationBlock.ts] ////
//// [argumentsUsedInClassFieldInitializerOrStaticInitializationBlock.ts]
function A() {
return class T {
a = arguments
}
}
function A1() {
return new class T {
a = arguments
}
}
function B() {
return class T {
a = { b: arguments }
}
}
function B1() {
return new class T {
a = { b: arguments }
}
}
function C() {
return class T {
a = function () { arguments }
}
}
function D() {
return class T {
a = () => arguments // should error
}
}
function D1() {
return class T {
a = () => {
arguments; // should error
const b = () => {
return arguments; // should error
}
function f() {
return arguments; // ok
}
}
}
}
function D2() {
return class {
constructor() {
arguments; // ok
}
get foo() {
return arguments; // ok
}
set foo(foo: any) {
arguments; // ok
}
bar() {
arguments; // ok
}
[Symbol.iterator]() {
arguments; // ok
}
}
}
function D3() {
return class T {
static {
arguments; // should error
while(1) {
arguments // should error
}
}
}
}
function D4() {
return class T {
static {
function f() {
arguments; // ok
}
}
}
}
function D5() {
return class T {
a = (() => { return arguments; })() // should error
}
}
function D6() {
return class T {
a = (x = arguments) => {} // should error
}
}
function D7() {
return class T {
a(x = arguments){ // ok
}
}
}
//// [argumentsUsedInClassFieldInitializerOrStaticInitializationBlock.js]
"use strict";
function A() {
return class T {
constructor() {
this.a = arguments;
}
};
}
function A1() {
return new class T {
constructor() {
this.a = arguments;
}
};
}
function B() {
return class T {
constructor() {
this.a = { b: arguments };
}
};
}
function B1() {
return new class T {
constructor() {
this.a = { b: arguments };
}
};
}
function C() {
return class T {
constructor() {
this.a = function () { arguments; };
}
};
}
function D() {
return class T {
constructor() {
this.a = () => arguments; // should error
}
};
}
function D1() {
return class T {
constructor() {
this.a = () => {
arguments; // should error
const b = () => {
return arguments; // should error
};
function f() {
return arguments; // ok
}
};
}
};
}
function D2() {
return class {
constructor() {
arguments; // ok
}
get foo() {
return arguments; // ok
}
set foo(foo) {
arguments; // ok
}
bar() {
arguments; // ok
}
[Symbol.iterator]() {
arguments; // ok
}
};
}
function D3() {
var _a;
return _a = class T {
},
(() => {
arguments; // should error
while (1) {
arguments; // should error
}
})(),
_a;
}
function D4() {
var _a;
return _a = class T {
},
(() => {
function f() {
arguments; // ok
}
})(),
_a;
}
function D5() {
return class T {
constructor() {
this.a = (() => { return arguments; })(); // should error
}
};
}
function D6() {
return class T {
constructor() {
this.a = (x = arguments) => { }; // should error
}
};
}
function D7() {
return class T {
a(x = arguments) {
}
};
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/argumentsUsedInClassFieldInitializerOrStaticInitializationBlock.js |
#!/usr/bin/env bash
source ../collection/setup.sh
set -eux
export ANSIBLE_DEPRECATION_WARNINGS=1
export ANSIBLE_COLLECTIONS_PATH="${WORK_DIR}"
export ANSIBLE_STRATEGY=ns.col.external
output="$(ansible localhost -m debug 2>&1 | tee -a /dev/stderr)"
if [[ "${output}" != *"Use of strategy plugins not included in ansible.builtin"* ]]; then
echo 'ERROR: Did not find deprecation warning for removal of strategy plugins'
exit 1
fi | unknown | github | https://github.com/ansible/ansible | test/integration/targets/strategy-external/runme.sh |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package planfile
import (
"archive/zip"
"fmt"
"os"
"time"
"github.com/hashicorp/terraform/internal/configs/configload"
"github.com/hashicorp/terraform/internal/depsfile"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states/statefile"
)
type CreateArgs struct {
// ConfigSnapshot is a snapshot of the configuration that the plan
// was created from.
ConfigSnapshot *configload.Snapshot
// PreviousRunStateFile is a representation of the state snapshot we used
// as the original input when creating this plan, containing the same
// information as recorded at the end of the previous apply except for
// upgrading managed resource instance data to the provider's latest
// schema versions.
PreviousRunStateFile *statefile.File
// BaseStateFile is a representation of the state snapshot we used to
// create the plan, which is the result of asking the providers to refresh
// all previously-stored objects to match the current situation in the
// remote system. (If this plan was created with refreshing disabled,
// this should be the same as PreviousRunStateFile.)
StateFile *statefile.File
// Plan records the plan itself, which is the main artifact inside a
// saved plan file.
Plan *plans.Plan
// DependencyLocks records the dependency lock information that we
// checked prior to creating the plan, so we can make sure that all of the
// same dependencies are still available when applying the plan.
DependencyLocks *depsfile.Locks
}
// Create creates a new plan file with the given filename, overwriting any
// file that might already exist there.
//
// A plan file contains both a snapshot of the configuration and of the latest
// state file in addition to the plan itself, so that Terraform can detect
// if the world has changed since the plan was created and thus refuse to
// apply it.
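//
// A hypothetical caller might look like the following (illustrative only;
// the argument values come from wherever the surrounding command built its
// plan):
//
//	err := Create("saved.tfplan", CreateArgs{
//		ConfigSnapshot:       configSnapshot,
//		PreviousRunStateFile: prevRunStateFile,
//		StateFile:            stateFile,
//		Plan:                 plan,
//		DependencyLocks:      locks,
//	})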
func Create(filename string, args CreateArgs) error {
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
zw := zip.NewWriter(f)
defer zw.Close()
// tfplan file
{
w, err := zw.CreateHeader(&zip.FileHeader{
Name: tfplanFilename,
Method: zip.Deflate,
Modified: time.Now(),
})
if err != nil {
return fmt.Errorf("failed to create tfplan file: %s", err)
}
err = writeTfplan(args.Plan, w)
if err != nil {
return fmt.Errorf("failed to write plan: %s", err)
}
}
// tfstate file
{
w, err := zw.CreateHeader(&zip.FileHeader{
Name: tfstateFilename,
Method: zip.Deflate,
Modified: time.Now(),
})
if err != nil {
return fmt.Errorf("failed to create embedded tfstate file: %s", err)
}
err = statefile.Write(args.StateFile, w)
if err != nil {
return fmt.Errorf("failed to write state snapshot: %s", err)
}
}
// tfstate-prev file
{
w, err := zw.CreateHeader(&zip.FileHeader{
Name: tfstatePreviousFilename,
Method: zip.Deflate,
Modified: time.Now(),
})
if err != nil {
return fmt.Errorf("failed to create embedded tfstate-prev file: %s", err)
}
err = statefile.Write(args.PreviousRunStateFile, w)
if err != nil {
return fmt.Errorf("failed to write previous state snapshot: %s", err)
}
}
// tfconfig directory
{
err := writeConfigSnapshot(args.ConfigSnapshot, zw)
if err != nil {
return fmt.Errorf("failed to write config snapshot: %s", err)
}
}
// .terraform.lock.hcl file, containing dependency lock information
if args.DependencyLocks != nil { // (this was a later addition, so not all callers set it, but main callers should)
src, diags := depsfile.SaveLocksToBytes(args.DependencyLocks)
if diags.HasErrors() {
return fmt.Errorf("failed to write embedded dependency lock file: %s", diags.Err().Error())
}
w, err := zw.CreateHeader(&zip.FileHeader{
Name: dependencyLocksFilename,
Method: zip.Deflate,
Modified: time.Now(),
})
if err != nil {
return fmt.Errorf("failed to create embedded dependency lock file: %s", err)
}
_, err = w.Write(src)
if err != nil {
return fmt.Errorf("failed to write embedded dependency lock file: %s", err)
}
}
return nil
} | go | github | https://github.com/hashicorp/terraform | internal/plans/planfile/writer.go |
//// [tests/cases/compiler/assignToFn.ts] ////
//// [assignToFn.ts]
namespace M {
interface I {
f(n:number):boolean;
}
var x:I={ f:function(n) { return true; } };
x.f="hello";
}
//// [assignToFn.js]
"use strict";
var M;
(function (M) {
var x = { f: function (n) { return true; } };
x.f = "hello";
})(M || (M = {})); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/assignToFn.js |
"""
Tests for the stats.mstats module (support for masked arrays)
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from common_tests import check_named_results
from numpy.testing import TestCase, run_module_suite
from numpy.testing.decorators import skipif
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_raises, assert_array_equal)
class TestMquantiles(TestCase):
def test_mquantiles_limit_keyword(self):
# Regression test for Trac ticket #867
data = np.array([[6., 7., 1.],
[47., 15., 2.],
[49., 36., 3.],
[15., 39., 4.],
[42., 40., -999.],
[41., 41., -999.],
[7., -999., -999.],
[39., -999., -999.],
[43., -999., -999.],
[40., -999., -999.],
[36., -999., -999.]])
desired = [[19.2, 14.6, 1.45],
[40.0, 37.5, 2.5],
[42.8, 40.05, 3.55]]
quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
assert_almost_equal(quants, desired)
class TestGMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual = mstats.gmean(a)
desired = np.power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
assert_(not isinstance(desired1, ma.MaskedArray))
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual = mstats.gmean(a)
desired = np.power(1*2*3,1./3.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
@skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
def test_1D_float96(self):
a = ma.array((1,2,3,4), mask=(0,0,0,1))
actual_dt = mstats.gmean(a, dtype=np.float96)
desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)
assert_almost_equal(actual_dt, desired_dt, decimal=14)
assert_(actual_dt.dtype == desired_dt.dtype)
def test_2D(self):
a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
actual = mstats.gmean(a)
desired = np.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
desired1 = mstats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
actual = mstats.gmean(a, -1)
desired = ma.array((np.power(1*2*3*4,1./4.),
np.power(2*3,1./2.),
np.power(1*4,1./2.)))
assert_array_almost_equal(actual, desired, decimal=14)
class TestHMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual = mstats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.hmean(ma.array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual = mstats.hmean(a)
desired = 3. / (1./1 + 1./2 + 1./3)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
@skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
def test_1D_float96(self):
a = ma.array((1,2,3,4), mask=(0,0,0,1))
actual_dt = mstats.hmean(a, dtype=np.float96)
desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3),
dtype=np.float96)
assert_almost_equal(actual_dt, desired_dt, decimal=14)
assert_(actual_dt.dtype == desired_dt.dtype)
def test_2D(self):
a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
actual = mstats.hmean(a)
desired = ma.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
actual1 = mstats.hmean(a,axis=-1)
desired = (4./(1/1.+1/2.+1/3.+1/4.),
2./(1/2.+1/3.),
2./(1/1.+1/4.)
)
assert_array_almost_equal(actual1, desired, decimal=14)
class TestRanking(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def test_ranking(self):
x = ma.array([0,1,1,1,2,3,4,5,5,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,3,3,5,6,7,8.5,8.5,10])
x[[3,4]] = masked
assert_almost_equal(mstats.rankdata(x),
[1,2.5,2.5,0,0,4,5,6.5,6.5,8])
assert_almost_equal(mstats.rankdata(x, use_missing=True),
[1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
x = ma.array([0,1,5,1,2,4,3,5,1,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,8.5,3,5,7,6,8.5,3,10])
x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
assert_almost_equal(mstats.rankdata(x),
[[1,3,3,3,5], [6,7,8.5,8.5,10]])
assert_almost_equal(mstats.rankdata(x, axis=1),
[[1,3,3,3,5], [1,2,3.5,3.5,5]])
assert_almost_equal(mstats.rankdata(x,axis=0),
[[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr(TestCase):
def test_pearsonr(self):
# Tests some computations of Pearson's r
x = ma.arange(10)
with warnings.catch_warnings():
# The tests in this context are edge cases, with perfect
# correlation or anticorrelation, or totally masked data.
# None of these should trigger a RuntimeWarning.
warnings.simplefilter("error", RuntimeWarning)
assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
x = ma.array(x, mask=True)
pr = mstats.pearsonr(x, x)
assert_(pr[0] is masked)
assert_(pr[1] is masked)
x1 = ma.array([-1.0, 0.0, 1.0])
y1 = ma.array([0, 0, 3])
r, p = mstats.pearsonr(x1, y1)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
# (x2, y2) have the same unmasked data as (x1, y1).
mask = [False, False, False, True]
x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
y2 = ma.array([0, 0, 3, -1], mask=mask)
r, p = mstats.pearsonr(x2, y2)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
def test_spearmanr(self):
# Tests some computations of Spearman's rho
(x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
(x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
# test for namedtuple attributes
res = mstats.spearmanr(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kendalltau(self):
# Tests some computations of Kendall's tau
x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
[+0.3333333,0.4969059])
assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
[-0.5477226,0.2785987])
#
x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20,
10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27,
25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
result = mstats.kendalltau(x,y)
assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
# test for namedtuple attributes
res = mstats.kendalltau(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kendalltau_seasonal(self):
# Tests the seasonal Kendall tau.
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
output = mstats.kendalltau_seasonal(x)
assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
assert_almost_equal(output['seasonal p-value'].round(2),
[0.18,0.53,0.20,0.04])
def test_pointbiserial(self):
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1,-1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
# test for namedtuple attributes
res = mstats.pointbiserialr(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestTrimming(TestCase):
def test_trim(self):
a = ma.arange(10)
assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
a = ma.arange(10)
assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
[None,None,None,3,4,5,6,7,None,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
[None,1,2,3,4,5,6,7,None,None])
a = ma.arange(12)
a[[0,-1]] = a[5] = masked
assert_equal(mstats.trim(a, (2,8)),
[None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
x = ma.arange(100).reshape(10, 10)
expected = [1]*10 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx._mask.T.ravel(), expected)
# same as above, but with an extra masked row inserted
x = ma.arange(110).reshape(11, 10)
x[1] = masked
expected = [1]*20 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx.T._mask.ravel(), expected)
def test_trim_old(self):
x = ma.arange(100)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x,tail='r').count(), 80)
x[50:70] = masked
trimx = mstats.trimboth(x)
assert_equal(trimx.count(), 48)
assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
x._mask = nomask
x.shape = (10,10)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x).count(), 80)
def test_trimmedmean(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
def test_trimmed_stde(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
def test_winsorization(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
21551.4, 1)
data[5] = masked
winsorized = mstats.winsorize(data)
assert_equal(winsorized.mask, data.mask)
class TestMoments(TestCase):
# Comparison numbers are found using R v.1.5.1
# note that length(testcase) = 4
# testmathworks comes from documentation for the
# Statistics Toolbox for Matlab and can be found at both
# http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
# http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
# Note that both test cases came from here.
testcase = [1,2,3,4]
testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
np.nan])
testcase_2d = ma.array(
np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
[0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
[0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
[0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
[0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
mask=np.array([[True, False, False, True, False],
[True, True, True, False, True],
[False, False, False, False, False],
[True, True, True, True, True],
[False, False, True, False, False]], dtype=bool))
def test_moment(self):
y = mstats.moment(self.testcase,1)
assert_almost_equal(y,0.0,10)
y = mstats.moment(self.testcase,2)
assert_almost_equal(y,1.25)
y = mstats.moment(self.testcase,3)
assert_almost_equal(y,0.0)
y = mstats.moment(self.testcase,4)
assert_almost_equal(y,2.5625)
def test_variation(self):
y = mstats.variation(self.testcase)
assert_almost_equal(y,0.44721359549996, 10)
def test_skewness(self):
y = mstats.skew(self.testmathworks)
assert_almost_equal(y,-0.29322304336607,10)
y = mstats.skew(self.testmathworks,bias=0)
assert_almost_equal(y,-0.437111105023940,10)
y = mstats.skew(self.testcase)
assert_almost_equal(y,0.0,10)
def test_kurtosis(self):
# Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
# for compatibility with Matlab)
y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
assert_almost_equal(y, 2.1658856802973,10)
# Note that MATLAB has confusing docs for the following case
# kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
# kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
# The MATLAB docs imply that both should give Fisher's
y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0)
assert_almost_equal(y, 3.663542721189047,10)
y = mstats.kurtosis(self.testcase,0,0)
assert_almost_equal(y,1.64)
# test that kurtosis works on multidimensional masked arrays
correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
-1.26979517952]),
mask=np.array([False, False, False, True,
False], dtype=bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
correct_2d)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
correct_2d_bias_corrected = ma.array(
np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
mask=np.array([False, False, False, True, False], dtype=bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
bias=False),
correct_2d_bias_corrected)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row, bias=False),
correct_2d_bias_corrected[i])
# Check consistency between stats and mstats implementations
assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
stats.kurtosis(self.testcase_2d[2, :]),
nulp=4)
def test_mode(self):
a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
a2 = np.reshape(a1, (3,5))
a3 = np.array([1,2,3,4,5,6])
a4 = np.reshape(a3, (3,2))
ma1 = ma.masked_where(ma.array(a1) > 2, a1)
ma2 = ma.masked_where(a2 > 2, a2)
ma3 = ma.masked_where(a3 < 2, a3)
ma4 = ma.masked_where(ma.array(a4) < 2, a4)
assert_equal(mstats.mode(a1, axis=None), (3,4))
assert_equal(mstats.mode(a1, axis=0), (3,4))
assert_equal(mstats.mode(ma1, axis=None), (0,3))
assert_equal(mstats.mode(a2, axis=None), (3,4))
assert_equal(mstats.mode(ma2, axis=None), (0,3))
assert_equal(mstats.mode(a3, axis=None), (1,1))
assert_equal(mstats.mode(ma3, axis=None), (2,1))
assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))
a1_res = mstats.mode(a1, axis=None)
# test for namedtuple attributes
attributes = ('mode', 'count')
check_named_results(a1_res, attributes, ma=True)
def test_mode_modifies_input(self):
# regression test for gh-6428: mode(..., axis=None) may not modify
# the input array
im = np.zeros((100, 100))
im[:50, :] += 1
im[:, :50] += 1
cp = im.copy()
a = mstats.mode(im, None)
assert_equal(im, cp)
class TestPercentile(TestCase):
def setUp(self):
self.a1 = [3,4,5,10,-3,-5,6]
self.a2 = [3,-6,-2,8,7,4,2,1]
self.a3 = [3.,4,5,10,-3,-5,-6,7.0]
def test_percentile(self):
x = np.arange(8) * 0.5
assert_equal(mstats.scoreatpercentile(x, 0), 0.)
assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
assert_equal(mstats.scoreatpercentile(x, 50), 1.75)
def test_2D(self):
x = ma.array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
assert_equal(mstats.scoreatpercentile(x,50), [1,1,1])
class TestVariability(TestCase):
""" Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
"""
testcase = ma.fix_invalid([1,2,3,4,np.nan])
def test_signaltonoise(self):
# This is not in R, so used:
# mean(testcase, axis=0) / (sqrt(var(testcase)*3/4))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
y = mstats.signaltonoise(self.testcase)
assert_almost_equal(y, 2.236067977)
def test_sem(self):
# This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
y = mstats.sem(self.testcase)
assert_almost_equal(y, 0.6454972244)
n = self.testcase.count()
assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
mstats.sem(self.testcase, ddof=2))
def test_zmap(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zmap(self.testcase, self.testcase)
desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired_unmaskedvals,
y.data[y.mask == False], decimal=12)
def test_zscore(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zscore(self.testcase)
desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999, np.nan])
assert_almost_equal(desired, y, decimal=12)
class TestMisc(TestCase):
def test_obrientransform(self):
args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
[6]+[7]*2+[8]*4+[9]*9+[10]*16]
result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
[10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),
result,4)
def test_kstwosamp(self):
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
(winter,spring,summer,fall) = x.T
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4),
(0.1818,0.9892))
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4),
(0.1469,0.7734))
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4),
(0.1818,0.6744))
def test_friedmanchisq(self):
# No missing values
args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
[7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
[6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
result = mstats.friedmanchisquare(*args)
assert_almost_equal(result[0], 10.4737, 4)
assert_almost_equal(result[1], 0.005317, 6)
# Missing values
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x)
result = mstats.friedmanchisquare(*x)
assert_almost_equal(result[0], 2.0156, 4)
assert_almost_equal(result[1], 0.5692, 4)
# test for namedtuple attributes
attributes = ('statistic', 'pvalue')
check_named_results(result, attributes, ma=True)
def test_regress_simple():
# Regress a line with sinusoidal noise. Test for #1273.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y)
assert_almost_equal(slope, 0.19644990055858422)
assert_almost_equal(intercept, 10.211269918932341)
# test for namedtuple attributes
res = mstats.linregress(x, y)
attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
check_named_results(res, attributes, ma=True)
def test_theilslopes():
# Test for basic slope and intercept.
slope, intercept, lower, upper = mstats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test for correct masking.
y = np.ma.array([0,1,100,1], mask=[False, False, True, False])
slope, intercept, lower, upper = mstats.theilslopes(y)
assert_almost_equal(slope, 1./3)
assert_almost_equal(intercept, 2./3)
# Test of confidence intervals from example in Sen (1968).
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
def test_plotting_positions():
# Regression test for #1256
pos = mstats.plotting_positions(np.arange(3), 0, 0)
assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
def test_vs_nonmasked(self):
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_array_almost_equal(mstats.normaltest(x),
stats.normaltest(x))
assert_array_almost_equal(mstats.skewtest(x),
stats.skewtest(x))
assert_array_almost_equal(mstats.kurtosistest(x),
stats.kurtosistest(x))
funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
x = [1, 2, 3, 4]
for func, mfunc in zip(funcs, mfuncs):
assert_raises(ValueError, func, x)
assert_raises(ValueError, mfunc, x)
def test_axis_None(self):
# Test axis=None (equal to axis=0 for 1-D input)
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
assert_allclose(mstats.kurtosistest(x, axis=None),
mstats.kurtosistest(x))
def test_maskedarray_input(self):
# Add some masked values, test result doesn't change
x = np.array((-2,-1,0,1,2,3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10],
mask=np.r_[True, [False] * x.size, True])
assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
def test_nd_input(self):
x = np.array((-2,-1,0,1,2,3)*4)**2
x_2d = np.vstack([x] * 2).T
for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
res_1d = func(x)
res_2d = func(x_2d)
assert_allclose(res_2d[0], [res_1d[0]] * 2)
assert_allclose(res_2d[1], [res_1d[1]] * 2)
def test_normaltest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.normaltest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kurtosistest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.kurtosistest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestFOneway():
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = mstats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestMannwhitneyu():
def test_result_attributes(self):
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
res = mstats.mannwhitneyu(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestKruskal():
def test_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = mstats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
#TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_fully_masked(self):
np.random.seed(1234567)
outcome = ma.masked_array(np.random.randn(3, 2),
mask=[[1, 1, 1], [0, 0, 0]])
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_array_equal(mstats.ttest_rel(outcome[:, 0], outcome[:, 1]),
(np.nan, np.nan))
assert_array_equal(mstats.ttest_rel([np.nan, np.nan], [1.0, 2.0]),
(np.nan, np.nan))
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_invalid_input_size(self):
assert_raises(ValueError, mstats.ttest_rel,
np.arange(10), np.arange(11))
x = np.arange(24)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
def test_empty(self):
res1 = mstats.ttest_rel([], [])
assert_(np.all(np.isnan(res1)))
def test_zero_division(self):
        t, p = mstats.ttest_rel([0, 0, 0], [1, 1, 1])
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_rel([0, 0, 0], [0, 0, 0]),
                           (np.nan, np.nan))
class TestTtest_ind():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
# Check equal_var
res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
assert_allclose(res4, res5)
res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
assert_allclose(res4, res5)
def test_fully_masked(self):
np.random.seed(1234567)
outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_array_equal(mstats.ttest_ind(outcome[:, 0], outcome[:, 1]),
(np.nan, np.nan))
assert_array_equal(mstats.ttest_ind([np.nan, np.nan], [1.0, 2.0]),
(np.nan, np.nan))
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_empty(self):
res1 = mstats.ttest_ind([], [])
assert_(np.all(np.isnan(res1)))
def test_zero_division(self):
t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_equal((np.abs(t), p), (np.inf, 0))
assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0]),
(np.nan, np.nan))
t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_equal((np.abs(t), p), (np.inf, 0))
assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
equal_var=False),
(np.nan, np.nan))
class TestTtest_1samp():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], 1)
res2 = mstats.ttest_1samp(outcome[:, 0], 1)
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_fully_masked(self):
np.random.seed(1234567)
outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_array_equal(mstats.ttest_1samp(outcome, 0.0),
(np.nan, np.nan))
assert_array_equal(mstats.ttest_1samp((np.nan, np.nan), 0.0),
(np.nan, np.nan))
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_1samp(outcome[:, 0], 1)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_empty(self):
res1 = mstats.ttest_1samp([], 1)
assert_(np.all(np.isnan(res1)))
def test_zero_division(self):
t, p = mstats.ttest_1samp([0, 0, 0], 1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
assert_equal((np.abs(t), p), (np.inf, 0))
assert_array_equal(mstats.ttest_1samp([0, 0, 0], 0),
(np.nan, np.nan))
class TestCompareWithStats(TestCase):
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these tests,
but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
# This routine generates numpy arrays and corresponding masked arrays
# with the same data, but additional masked values
np.random.seed(1234567)
x = np.random.randn(n)
y = x + np.random.randn(n)
xm = np.ones(len(x) + 5) * 1e16
ym = np.ones(len(y) + 5) * 1e16
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
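        # Builds (n, nx)-shaped versions of the 1-D samples above; the five
        # extra all-NaN rows in xm/ym become masked values.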
x = np.ones((n, nx)) * np.nan
y = np.ones((n, nx)) * np.nan
xm = np.ones((n+5, nx)) * np.nan
ym = np.ones((n+5, nx)) * np.nan
for i in range(nx):
x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.linregress(x, y)
res2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(res1), np.asarray(res2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_signaltonoise(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.signaltonoise(x)
rm = stats.mstats.signaltonoise(xm)
assert_almost_equal(r, rm, 10)
r = stats.signaltonoise(y)
rm = stats.mstats.signaltonoise(ym)
assert_almost_equal(r, rm, 10)
def test_betai(self):
np.random.seed(12345)
for i in range(10):
a = np.random.rand() * 5.
b = np.random.rand() * 200.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
assert_equal(stats.betai(a, b, 0.), 0.)
assert_equal(stats.betai(a, b, 1.), 1.)
assert_equal(stats.mstats.betai(a, b, 0.), 0.)
assert_equal(stats.mstats.betai(a, b, 1.), 1.)
x = np.random.rand()
assert_almost_equal(stats.betai(a, b, x),
stats.mstats.betai(a, b, x), decimal=13)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
#reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
#validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
#compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5,4)
am = np.ma.array(a)
r = stats.sem(a,ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_describe_result_attributes(self):
actual = mstats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes, ma=True)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
assert_almost_equal(stats.tmax(x, upperlimit=3.),
stats.mstats.tmax(xm, upperlimit=3.), 10)
assert_almost_equal(stats.tmax(y, upperlimit=3.),
stats.mstats.tmax(ym, upperlimit=3.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x),stats.mstats.tmin(xm))
assert_equal(stats.tmin(y),stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x,lowerlimit=-1.),
stats.mstats.tmin(xm,lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y,lowerlimit=-1.),
stats.mstats.tmin(ym,lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x,y)
zm = stats.mstats.zmap(xm,ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(np.sort(b), bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14)
assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14)
assert_almost_equal(stats.tsem(x,limits=(-2.,2.)),
stats.mstats.tsem(xm,limits=(-2.,2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r[0], rm[0], rtol=1e-15)
                # TODO: this check is not performed because it is a known
                # issue that mstats returns a slightly different p-value.
                # What is a bit strange is that other tests, such as
                # test_maskedarray_input, don't fail!
#~ assert_almost_equal(r[1], rm[1])
def test_skewtest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.skewtest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
x = np.random.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_equal(r[0][0],rm[0][0])
assert_equal(r[0][1],rm[0][1])
def test_normaltest(self):
np.seterr(over='raise')
for n in self.get_n():
if n > 8:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float')
tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
x_orig, xm_orig = x.copy(), xm.copy()
r = stats.find_repeats(x)
rm = stats.mstats.find_repeats(xm)
assert_equal(r, rm)
assert_equal(x, x_orig)
assert_equal(xm, xm_orig)
# This crazy behavior is expected by count_tied_groups, but is not
# in the docstring...
_, counts = stats.mstats.find_repeats([])
assert_equal(counts, np.array(0, dtype=np.intp))
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
if __name__ == "__main__":
run_module_suite() | unknown | codeparrot/codeparrot-clean | ||
/*
* alloc.c - specialized allocator for internal objects
*
* Copyright (C) 2006 Linus Torvalds
*
* The standard malloc/free wastes too much space for objects, partly because
* it maintains all the allocation infrastructure, but even more because it ends
* up with maximal alignment because it doesn't know what the object alignment
* for the new allocation is.
*/
#include "git-compat-util.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "repository.h"
#include "tag.h"
#include "alloc.h"
#define BLOCKING 1024
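/*
 * A union of all object types, used by alloc_object_node() so that an
 * object whose concrete type is not yet known gets enough room to later
 * be used as any of them.
 */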
union any_object {
struct object object;
struct blob blob;
struct tree tree;
struct commit commit;
struct tag tag;
};
struct alloc_state {
int nr; /* number of nodes left in current allocation */
void *p; /* first free node in current allocation */
/* bookkeeping of allocations */
void **slabs;
int slab_nr, slab_alloc;
};
struct alloc_state *alloc_state_alloc(void)
{
return xcalloc(1, sizeof(struct alloc_state));
}
void alloc_state_free_and_null(struct alloc_state **s_)
{
struct alloc_state *s = *s_;
if (!s)
return;
while (s->slab_nr > 0) {
s->slab_nr--;
free(s->slabs[s->slab_nr]);
}
FREE_AND_NULL(s->slabs);
FREE_AND_NULL(*s_);
}
static inline void *alloc_node(struct alloc_state *s, size_t node_size)
{
void *ret;
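	/*
	 * Out of nodes in the current slab: grab a fresh slab of BLOCKING
	 * nodes and remember it so alloc_state_free_and_null() can free it.
	 */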
if (!s->nr) {
s->nr = BLOCKING;
s->p = xmalloc(BLOCKING * node_size);
ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc);
s->slabs[s->slab_nr++] = s->p;
}
s->nr--;
ret = s->p;
s->p = (char *)s->p + node_size;
memset(ret, 0, node_size);
return ret;
}
void *alloc_blob_node(struct repository *r)
{
struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob));
b->object.type = OBJ_BLOB;
return b;
}
void *alloc_tree_node(struct repository *r)
{
struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree));
t->object.type = OBJ_TREE;
return t;
}
void *alloc_tag_node(struct repository *r)
{
struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag));
t->object.type = OBJ_TAG;
return t;
}
void *alloc_object_node(struct repository *r)
{
struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object));
obj->type = OBJ_NONE;
return obj;
}
/*
* The returned count is to be used as an index into commit slabs,
* that are *NOT* maintained per repository, and that is why a single
* global counter is used.
*/
static unsigned int alloc_commit_index(void)
{
static unsigned int parsed_commits_count;
return parsed_commits_count++;
}
void init_commit_node(struct commit *c)
{
c->object.type = OBJ_COMMIT;
c->index = alloc_commit_index();
}
void *alloc_commit_node(struct repository *r)
{
struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
init_commit_node(c);
return c;
} | c | github | https://github.com/git/git | alloc.c |
import os
import json
import shutil
from oeqa.core.utils.test import getCaseFile, getCaseMethod
def get_package_manager(d, root_path):
"""
Returns an OE package manager that can install packages in root_path.
"""
from oe.package_manager import RpmPM, OpkgPM, DpkgPM
pkg_class = d.getVar("IMAGE_PKGTYPE")
if pkg_class == "rpm":
pm = RpmPM(d,
root_path,
d.getVar('TARGET_VENDOR'))
pm.create_configs()
elif pkg_class == "ipk":
pm = OpkgPM(d,
root_path,
d.getVar("IPKGCONF_TARGET"),
d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
elif pkg_class == "deb":
pm = DpkgPM(d,
root_path,
d.getVar('PACKAGE_ARCHS'),
d.getVar('DPKG_ARCH'))
pm.write_index()
pm.update()
return pm
def find_packages_to_extract(test_suite):
"""
    Returns the packages that need to be extracted for the runtime tests.
"""
from oeqa.core.utils.test import getSuiteCasesFiles
needed_packages = {}
files = getSuiteCasesFiles(test_suite)
for f in set(files):
json_file = _get_json_file(f)
if json_file:
needed_packages.update(_get_needed_packages(json_file))
return needed_packages
def _get_json_file(module_path):
"""
    Returns the path of the JSON file for a module, or an empty string if it doesn't exist.
"""
json_file = '%s.json' % module_path.rsplit('.', 1)[0]
if os.path.isfile(module_path) and os.path.isfile(json_file):
return json_file
else:
return ''
def _get_needed_packages(json_file, test=None):
"""
Returns a dict with needed packages based on a JSON file.
If a test is specified it will return the dict just for that test.
"""
needed_packages = {}
with open(json_file) as f:
test_packages = json.load(f)
for key,value in test_packages.items():
needed_packages[key] = value
if test:
if test in needed_packages:
needed_packages = needed_packages[test]
else:
needed_packages = {}
return needed_packages
def extract_packages(d, needed_packages):
"""
Extract packages that will be needed during runtime.
"""
import bb
import oe.path
extracted_path = d.getVar('TEST_EXTRACTED_DIR')
for key,value in needed_packages.items():
packages = ()
if isinstance(value, dict):
packages = (value, )
elif isinstance(value, list):
packages = value
else:
bb.fatal('Failed to process needed packages for %s; '
'Value must be a dict or list' % key)
for package in packages:
pkg = package['pkg']
rm = package.get('rm', False)
extract = package.get('extract', True)
if extract:
#logger.debug(1, 'Extracting %s' % pkg)
dst_dir = os.path.join(extracted_path, pkg)
# Same package used for more than one test,
# don't need to extract again.
if os.path.exists(dst_dir):
continue
# Extract package and copy it to TEST_EXTRACTED_DIR
pkg_dir = _extract_in_tmpdir(d, pkg)
oe.path.copytree(pkg_dir, dst_dir)
shutil.rmtree(pkg_dir)
else:
#logger.debug(1, 'Copying %s' % pkg)
_copy_package(d, pkg)
def _extract_in_tmpdir(d, pkg):
""""
Returns path to a temp directory where the package was
extracted without dependencies.
"""
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(d.getVar('TEST_INSTALL_TMP_DIR'), pkg)
pm = get_package_manager(d, pkg_path)
extract_dir = pm.extract(pkg)
shutil.rmtree(pkg_path)
return extract_dir
def _copy_package(d, pkg):
"""
Copy the RPM, DEB or IPK package to dst_dir
"""
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(d.getVar('TEST_INSTALL_TMP_DIR'), pkg)
dst_dir = d.getVar('TEST_PACKAGED_DIR')
pm = get_package_manager(d, pkg_path)
pkg_info = pm.package_info(pkg)
file_path = pkg_info[pkg]['filepath']
shutil.copy2(file_path, dst_dir)
shutil.rmtree(pkg_path)
def install_package(test_case):
"""
    Installs a package on the DUT if required.
"""
needed_packages = test_needs_package(test_case)
if needed_packages:
_install_uninstall_packages(needed_packages, test_case, True)
def uninstall_package(test_case):
"""
    Uninstalls a package on the DUT if required.
"""
needed_packages = test_needs_package(test_case)
if needed_packages:
_install_uninstall_packages(needed_packages, test_case, False)
def test_needs_package(test_case):
"""
    Checks if a test case requires packages to be installed/uninstalled.
"""
test_file = getCaseFile(test_case)
json_file = _get_json_file(test_file)
if json_file:
test_method = getCaseMethod(test_case)
needed_packages = _get_needed_packages(json_file, test_method)
if needed_packages:
return needed_packages
return None
def _install_uninstall_packages(needed_packages, test_case, install=True):
"""
Install/Uninstall packages in the DUT without using a package manager
"""
if isinstance(needed_packages, dict):
packages = [needed_packages]
elif isinstance(needed_packages, list):
packages = needed_packages
for package in packages:
pkg = package['pkg']
rm = package.get('rm', False)
extract = package.get('extract', True)
src_dir = os.path.join(test_case.tc.extract_dir, pkg)
# Install package
if install and extract:
test_case.tc.target.copyDirTo(src_dir, '/')
# Uninstall package
elif not install and rm:
test_case.tc.target.deleteDirStructure(src_dir, '/') | unknown | codeparrot/codeparrot-clean | ||
import calendar
import glob
import gzip
import math
import os.path
import re
import pandas as pd
def _parse_raw(fp, start_tstamp, end_tstamp):
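    """
    Parse an open raw collectl file into (hardware, data): hardware holds
    the CPU count and memory size read from the file header, and data maps
    each timestamp within [start_tstamp, end_tstamp] to its cpu/disk/mem/net
    samples.
    """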
import progressbar
widgets = [
os.path.basename(fp.name), ': ',
progressbar.Bar(marker='-', left='[', right=']'), ' ',
progressbar.Percentage(), ' ', progressbar.ETA(),
]
# We don't know what the file's uncompressed size will wind up being,
# so take an educated guess and ignore the AssertionError later on
# if it winds up being bigger than we guess.
bar = progressbar.ProgressBar(
widgets=widgets, maxval=os.path.getsize(fp.name) * 15)
bar.start()
bar.update(0)
tstamp = 0
hardware = {}
data = {}
for line in fp:
matches = re.search(r'^>>> (\d+).\d+ <<<', line)
if matches:
try:
bar.update(fp.tell())
except AssertionError:
pass
tstamp = int(matches.group(1))
            if start_tstamp <= tstamp <= end_tstamp:
data[tstamp] = {
'disk': {},
'mem': {},
'net': {},
'proc': {},
}
continue
if line.startswith('# SubSys: '):
matches = re.search(r'\sNumCPUs: (\d+)\s+', line)
if matches:
hardware['num_cpus'] = int(matches.group(1))
continue
if line.startswith('# Kernel: '):
matches = re.search(r'\sMemory: (\d+)\s+kB', line)
if matches:
hardware['memory'] = int(math.ceil(float(matches.group(1)) / math.pow(1024.0, 2.0)))
continue
if (tstamp < start_tstamp) or (tstamp > end_tstamp):
continue
if line.startswith('cpu '):
# Don't know what the last two fields are, but they
# always seem to be 0, and collectl doesn't parse them
# in formatit::dataAnalyze().
(title, user, nice, sys, idle, wait, irq,
soft, steal) = line.split()[:9]
data[tstamp]['cpu'] = {
'user': user,
'nice': nice,
'sys': sys,
'idle': idle,
'wait': wait,
'irq': irq,
'soft': soft,
'steal': steal,
}
elif line.startswith('disk '):
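            # Field layout follows /proc/diskstats.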
(title, major, minor, node,
num_reads, reads_merged, sectors_read, msec_spent_reading,
num_writes, writes_merged, sectors_written, msec_spent_writing,
iops_in_progress, msec_spent_on_iops,
weighted_msec_spent_on_iops) = line.split()
data[tstamp]['disk'][node] = {
'num_reads': num_reads,
'reads_merged': reads_merged,
'sectors_read': sectors_read,
'msec_spent_reading': msec_spent_reading,
'num_writes': num_writes,
'writes_merged': writes_merged,
'sectors_written': sectors_written,
'msec_spent_writing': msec_spent_writing,
'iops_in_progress': iops_in_progress,
'msec_spent_on_iops': msec_spent_on_iops,
'weighted_msec_spent_on_iops': weighted_msec_spent_on_iops,
}
elif line.startswith('Net '):
# Older kernel versions don't have whitespace after
# the interface colon:
#
# Net eth0:70627391
#
# unlike newer kernels:
#
# Net eth0: 415699541
line = re.sub(r'^(Net\s+[^:]+):', r'\1: ', line)
(title, iface,
rbyte, rpkt, rerr, rdrop, rfifo,
rframe, rcomp, rmulti,
tbyte, tpkt, terr, tdrop, tfifo,
tcoll, tcarrier, tcomp) = line.split()
iface = iface.replace(':', '')
data[tstamp]['net'][iface] = {
'rbyte': rbyte,
'rpkt': rpkt,
'rerr': rerr,
'rdrop': rdrop,
'rfifo': rfifo,
'rframe': rframe,
'rcomp': rcomp,
'rmulti': rmulti,
'tbyte': tbyte,
'tpkt': tpkt,
'terr': terr,
'tdrop': tdrop,
'tfifo': tfifo,
'tcoll': tcoll,
'tcarrier': tcarrier,
'tcomp': tcomp,
}
elif line.startswith('MemTotal:'):
title, amount, unit = line.split()
data[tstamp]['mem']['total'] = amount
elif line.startswith('MemFree:'):
title, amount, unit = line.split()
data[tstamp]['mem']['free'] = amount
elif line.startswith('Buffers:'):
title, amount, unit = line.split()
data[tstamp]['mem']['buffers'] = amount
elif line.startswith('Cached:'):
title, amount, unit = line.split()
data[tstamp]['mem']['cached'] = amount
# We don't currently do anything with process data,
# so don't bother parsing it.
elif False and line.startswith('proc:'):
title_pid, rest = line.split(None, 1)
title, pid = title_pid.split(':')
if pid not in data[tstamp]['proc']:
data[tstamp]['proc'][pid] = {}
if rest.startswith('cmd '):
title, cmd = rest.split(None, 1)
data[tstamp]['proc'][pid]['cmd'] = cmd
elif rest.startswith('io read_bytes: '):
value = rest.split(':')[1].strip()
data[tstamp]['proc'][pid]['read_bytes'] = value
elif rest.startswith('io write_bytes: '):
value = rest.split(':')[1].strip()
data[tstamp]['proc'][pid]['write_bytes'] = value
bar.finish()
return hardware, data
class _CollectlGunzip(gzip.GzipFile):
"""collectl writes data to its files incrementally, and doesn't
add a CRC to the end until it rotates the log. Ignore the CRC
errors; they're innocuous in this case.
"""
def _read_eof(self):
return
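# A minimal sketch (not from the original code, illustrative path) of why
# _read_eof is a no-op: gzip.GzipFile normally raises on the missing CRC
# trailer that collectl only writes at rotation, so a live log can still
# be streamed:
#
#     fp = _CollectlGunzip('/var/log/collectl/host.raw.gz', 'r')
#     for line in fp:
#         pass  # no CRC failure even if the file is mid-write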
def load_collectl(pattern, start_time, end_time):
"""Read data from collectl data files into a pandas DataFrame.
:pattern: Absolute path to raw collectl files
"""
start_tstamp = calendar.timegm(start_time.utctimetuple())
end_tstamp = calendar.timegm(end_time.utctimetuple())
cols = []
rows = []
for path in glob.glob(pattern):
hardware, raw = _parse_raw(
_CollectlGunzip(path, 'r'), start_tstamp, end_tstamp)
if not cols:
instances = {
'disk': set(),
'net': set(),
'proc': set(),
}
for tstamp, sample in raw.iteritems():
for group, items in sample.iteritems():
if group == 'disk':
instances['disk'] = instances['disk'].union(
items.keys())
elif group == 'net':
instances['net'] = instances['net'].union(
items.keys())
elif group == 'proc':
instances['proc'] = instances['proc'].union(
items.keys())
cols = ['tstamp']
cols.extend([
'cpu_{}'.format(var)
for var
in ['user', 'nice', 'sys', 'idle', 'wait',
'irq', 'soft', 'steal']
])
for node in instances['disk']:
cols.extend([
'{}_{}'.format(node, var)
for var
in ['num_reads', 'reads_merged',
'sectors_read', 'msec_spent_reading',
'num_writes', 'writes_merged',
'sectors_written', 'msec_spent_writing',
'iops_in_progress', 'msec_spent_on_iops',
'weighted_msec_spent_on_iops']
])
cols.extend([
'mem_{}'.format(var)
for var
in ['total', 'free', 'buffers', 'cached']
])
for iface in instances['net']:
cols.extend([
'{}_{}'.format(iface, var)
for var
in ['rbyte', 'rpkt', 'rerr', 'rdrop',
'rfifo', 'rframe', 'rcomp', 'rmulti',
'tbyte', 'tpkt', 'terr', 'tdrop',
'tfifo', 'tcoll', 'tcarrier', 'tcomp']
])
for pid in instances['proc']:
cols.extend([
'{}_{}'.format(pid, var)
for var
in ['name', 'read_bytes', 'write_bytes']
])
for tstamp, sample in raw.iteritems():
if ('cpu' not in sample or
'disk' not in sample or
'mem' not in sample):
# Skip incomplete samples; there might be a truncated
# sample on the end of the file.
continue
values = [tstamp]
values.extend([
sample['cpu']['user'], sample['cpu']['nice'],
sample['cpu']['sys'], sample['cpu']['idle'],
sample['cpu']['wait'], sample['cpu']['irq'],
sample['cpu']['soft'], sample['cpu']['steal'],
])
for node in instances['disk']:
data = sample['disk'].get(node, {})
values.extend([
data.get('num_reads', 0),
data.get('reads_merged', 0),
data.get('sectors_read', 0),
data.get('msec_spent_reading', 0),
data.get('num_writes', 0),
data.get('writes_merged', 0),
data.get('sectors_written', 0),
data.get('msec_spent_writing', 0),
data.get('iops_in_progress', 0),
data.get('msec_spent_on_iops', 0),
data.get('weighted_msec_spent_on_iops', 0),
])
values.extend([
sample['mem']['total'], sample['mem']['free'],
sample['mem']['buffers'], sample['mem']['cached'],
])
for iface in instances['net']:
data = sample['net'].get(iface, {})
values.extend([
data.get('rbyte', 0), data.get('rpkt', 0),
data.get('rerr', 0), data.get('rdrop', 0),
data.get('rfifo', 0), data.get('rframe', 0),
data.get('rcomp', 0), data.get('rmulti', 0),
data.get('tbyte', 0), data.get('tpkt', 0),
data.get('terr', 0), data.get('tdrop', 0),
data.get('tfifo', 0), data.get('tcoll', 0),
data.get('tcarrier', 0), data.get('tcomp', 0),
])
if 'proc' in sample:
for pid in instances['proc']:
data = sample['proc'].get(pid, {})
values.extend([
data.get('cmd', ''),
data.get('read_bytes', 0),
data.get('write_bytes', 0),
])
rows.append(values)
if len(rows) == 0:
return pd.DataFrame(columns=cols), {}
df = pd.DataFrame(rows, columns=cols)
df = df.convert_objects(convert_numeric=True)
df['tstamp'] = df['tstamp'].astype('datetime64[s]')
df.set_index('tstamp', inplace=True)
df = df.tz_localize('UTC')
return df, hardware | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResourceProviderCommonOperations:
"""ResourceProviderCommonOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2019_03_22.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_subscription_quota(
self,
**kwargs
) -> "_models.UserSubscriptionQuotaListResult":
"""Get the number of iot hubs in the subscription.
Get the number of free and paid iot hubs in the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserSubscriptionQuotaListResult, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2019_03_22.models.UserSubscriptionQuotaListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserSubscriptionQuotaListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
accept = "application/json"
# Construct URL
url = self.get_subscription_quota.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('UserSubscriptionQuotaListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
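# Usage sketch (illustrative; client construction elided, and the
# operation-group attribute name is assumed from the class name):
#
#     quotas = await client.resource_provider_common.get_subscription_quota()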
get_subscription_quota.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/usages'} # type: ignore | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/compiler/collisionSuperAndParameter1.ts] ////
//// [collisionSuperAndParameter1.ts]
class Foo {
}
class Foo2 extends Foo {
x() {
var lambda = (_super: number) => { // Error
}
}
}
//// [collisionSuperAndParameter1.js]
"use strict";
class Foo {
}
class Foo2 extends Foo {
x() {
var lambda = (_super) => {
};
}
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/collisionSuperAndParameter1.js |
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module extends the standard logging functionality with the option to
specify a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
# NOTE(flaper87): Pls, remove when graduating this module
# from the incubator.
from nova.openstack.common.strutils import mask_password # noqa
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
"keystonemiddleware=WARN", "routes.middleware=WARN",
"stevedore=WARN"]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
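# For example (illustrative values): log_file='svc.log' alone is used
# as-is; with log_dir='/var/log' it resolves to '/var/log/svc.log';
# log_dir alone falls back to '<log_dir>/<binary name>.log'.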
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def isEnabledFor(self, level):
if _PY26:
# This method was added in python 2.7 (and it does the exact
# same logic, so we need to do the exact same logic so that
# python 2.6 has this capability as well).
return self.logger.isEnabledFor(level)
else:
return super(BaseLoggerAdapter, self).isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
if six.PY3:
# In Python 3, the code fails because the 'manager' attribute
# cannot be found when using a LoggerAdapter as the
# underlying logger. Work around this issue.
self._logger.manager = self._logger.logger.manager
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(msg, six.text_type):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
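# Usage sketch (illustrative): JSON-formatted records can be produced by
# attaching this formatter to any standard handler:
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(JSONFormatter())
#     getLogger('api').logger.addHandler(handler)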
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except (moves.configparser.Error, KeyError) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
default_log_levels=None):
# Just in case the caller is not setting the
# default_log_levels. This is insurance because
# we introduced the default_log_levels parameter
# later in a backwards-incompatible change
if default_log_levels is not None:
cfg.set_defaults(
log_opts,
default_log_levels=default_log_levels)
if logging_context_format_string is not None:
cfg.set_defaults(
log_opts,
logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
try:
handler = importutils.import_object(
"nova.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
except ImportError:
handler = importutils.import_object(
"oslo.messaging.notify.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
if CONF.use_syslog:
try:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(facility=facility)
else:
syslog = logging.handlers.SysLogHandler(facility=facility)
log_root.addHandler(syslog)
except socket.error:
log_root.error('Unable to add syslog handler. Verify that syslog '
'is running.')
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
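# Usage sketch (illustrative): the lazy variant defers creating the real
# logger until the first call, which is useful at import time:
#
#     LOG = getLazyLogger(__name__)
#     LOG.warning('fallback path taken')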
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
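# Usage sketch (assumption: the consumer only needs a file-like `write`,
# e.g. the `log` argument of eventlet.wsgi.server):
#
#     wsgi_log = WritableLogger(getLogger('wsgi'), logging.DEBUG)
#     wsgi_log.write('GET /healthz 200\n')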
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg)) | unknown | codeparrot/codeparrot-clean | ||
#! /usr/bin/env python
# ------------------------------------------------------------------------------
# imports
import os
import sys
def processMRNA (originalFileName):
finalFileName = getFinalFileName (originalFileName)
if not os.path.exists(originalFileName):
sys.exit("error: cannot find file: " + originalFileName)
else:
print "Preparing mRNA Data file: %s --> %s " % (originalFileName, finalFileName)
os.system(BIN_DIR + "preprocess-mrna.py " + originalFileName + " > " + finalFileName)
def processCNA (originalFileName):
rae_original_file = originalFileName
rae_final_file = getFinalFileName (rae_original_file)
if not os.path.exists(rae_original_file):
sys.exit("error: cannot find " + rae_original_file + ".")
else:
print "Preparing CNA RAE file: %s --> %s " % (rae_original_file, rae_final_file)
os.system(BIN_DIR + "preprocess-rae.py " + rae_original_file + " > " + rae_final_file)
def getFinalFileName (originalFileName):
return originalFileName.replace("data_", "processed_")
def convertCaseFile (caseInFile, caseOutFile):
print "Converting case file: %s --> %s." % (caseInFile, caseOutFile)
in_file = open (caseInFile)
out_file = open (caseOutFile, "w")
for line in in_file:
if line.startswith("case_list_ids:"):
line = line.replace ("case_list_ids:", "")
parts = line.split()
for part in parts:
print >> out_file, part.strip()
in_file.close()
out_file.close()
# ------------------------------------------------------------------------------
# check for cgds environment var
CGDS_HOME = "CGDS_HOME"
PORTAL_DATA_HOME = "PORTAL_DATA_HOME";
cgds_home_found = 0
for key in os.environ.keys():
if key == CGDS_HOME:
cgds_home_found = 1
CGDS_HOME = os.environ[key]
if key == PORTAL_DATA_HOME:
PORTAL_DATA_HOME = os.environ[key]
if not cgds_home_found:
sys.exit("error: " + CGDS_HOME + " environment variable needs to be set")
# ------------------------------------------------------------------------------
# some globals/constants
BIN_DIR = CGDS_HOME + "/scripts/ovarian/"
DATA_DIR = PORTAL_DATA_HOME + "/ovarian/"
GENE2ACCESSION = DATA_DIR + "gene2accession-cooked.txt"
# ------------------------------------------------------------------------------
# pre-process CNA file
#processCNA (DATA_DIR + "data_CNA.txt");
#processCNA (DATA_DIR + "data_CNA_GISTIC_all.txt");
#processCNA (DATA_DIR + "data_CNA_GISTIC_focal.txt");
# ------------------------------------------------------------------------------
# pre-process mrna expression files
#processMRNA (DATA_DIR + "data_mRNA_unified.txt")
#processMRNA (DATA_DIR + "data_mRNA_unified_outliers.txt")
#processMRNA (DATA_DIR + "data_mRNA_unified_ZbyNormals.txt")
#processMRNA (DATA_DIR + "data_mRNA_unified_ZbyTumors.txt")
#processMRNA (DATA_DIR + "data_mRNA_median.txt")
#processMRNA (DATA_DIR + "data_mRNA_median_outliers.txt")
#processMRNA (DATA_DIR + "data_mRNA_median_ZbyNormals.txt")
#processMRNA (DATA_DIR + "data_mRNA_median_ZbyTumors.txt")
# pre-process the necessary case lists required for the mutation data
# for this, we need cases_all_list.txt and cases_sequenced_with_normal.txt
convertCaseFile (DATA_DIR + "case_lists/cases_all.txt", DATA_DIR + "processed_all_cases.txt")
convertCaseFile (DATA_DIR + "case_lists/cases_sequenced.txt", DATA_DIR + "processed_sequenced_cases.txt") | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2025-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.testing.resttestclient.contextconfig;
import org.junit.jupiter.api.BeforeEach;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import org.springframework.test.web.servlet.client.RestTestClient;
import org.springframework.web.context.WebApplicationContext;
@SpringJUnitConfig(WebConfig.class) // Specify the configuration to load
public class RestClientContextTests {
RestTestClient client;
@BeforeEach
void setUp(WebApplicationContext context) { // Inject the configuration
// Create the `RestTestClient`
client = RestTestClient.bindToApplicationContext(context).build();
}
} | java | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/java/org/springframework/docs/testing/resttestclient/contextconfig/RestClientContextTests.java |
#!/usr/bin/env python
from .Rule import Rule
class CopyRule(Rule):
def __init__(self, hist, ruleHist, srcID, srcField, dstID, dstField, typ, ptype, content):
Rule.__init__(self, hist, ruleHist, srcID, srcField, dstID, dstField)
self.typ = typ
self.ptype = ptype
if 'Comp' in self.typ:
content = list(set(content))
self.content = content
def __str__(self):
return 'ID {!s} {!s} {!s} {!s} {!s}'.format(self.hist, self.srcID, self.srcField, self.dstID, self.dstField)
def __repr__(self):
return '{!r} {!r} {!r} {!r} {!r} {!r} {!r}'.format(self.hist, self.srcID, self.srcField, self.dstID,
self.dstField, self.ptype, self.content)
def toFile(self):
if 'Seq' in self.typ:
return 'RULE transition:{0} srcID:{1} srcField:{2} dstField:{3} type:SeqRule\ndiff:{4}\n' \
.format(self.ruleHist.toFile(), self.srcID, self.srcField, self.dstField, self.content)
elif 'Part' in self.typ:
if self.ptype == 'SUFFIX':
ptype = 'COPY_THE_SUFFIX'
elif self.ptype == 'PREFIX':
ptype = 'COPY_THE_PREFIX'
return 'RULE transition:{0} srcID:{1} srcField:{2} dstField:{3} type:CopyPartialRule\nptype:{4} sep:{5}\n' \
.format(self.ruleHist.toFile(), self.srcID, self.srcField, self.dstField, ptype, self.content)
elif 'Comp' in self.typ:
# rest = ','.join(list(set(self.content)))
rest = ','.join(self.content)
if self.ptype == 'SUFFIX':
ptype = 'COPY_AS_SUFFIX'
elif self.ptype == 'PREFIX':
ptype = 'COPY_AS_PREFIX'
return 'RULE transition:{0} srcID:{1} srcField:{2} dstField:{3} type:CopyCompleteRule\n' \
'ptype:{4} rest:{5}\n'\
.format(self.ruleHist.toFile(), self.srcID, self.srcField, self.dstField, ptype, rest) | unknown | codeparrot/codeparrot-clean | ||
"""
Unit tests for the stem.connection.connect function.
"""
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from mock import Mock, patch
except ImportError:
from unittest.mock import Mock, patch
import stem
import stem.connection
import stem.socket
class TestConnect(unittest.TestCase):
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.util.system.is_running')
@patch('os.path.exists', Mock(return_value = True))
@patch('stem.socket.ControlSocketFile', Mock(side_effect = stem.SocketError('failed')))
@patch('stem.socket.ControlPort', Mock(side_effect = stem.SocketError('failed')))
@patch('stem.connection._connect_auth', Mock())
def test_failure_with_the_default_endpoint(self, is_running_mock, stdout_mock):
is_running_mock.return_value = False
self._assert_connect_fails_with({}, stdout_mock, "Unable to connect to tor. Are you sure it's running?")
is_running_mock.return_value = True
self._assert_connect_fails_with({}, stdout_mock, "Unable to connect to tor. Maybe it's running without a ControlPort?")
@patch('sys.stdout', new_callable = StringIO)
@patch('os.path.exists')
@patch('stem.util.system.is_running', Mock(return_value = True))
@patch('stem.socket.ControlSocketFile', Mock(side_effect = stem.SocketError('failed')))
@patch('stem.socket.ControlPort', Mock(side_effect = stem.SocketError('failed')))
@patch('stem.connection._connect_auth', Mock())
def test_failure_with_a_custom_endpoint(self, path_exists_mock, stdout_mock):
path_exists_mock.return_value = True
self._assert_connect_fails_with({'control_port': ('127.0.0.1', 80), 'control_socket': None}, stdout_mock, "Unable to connect to 127.0.0.1:80: failed")
self._assert_connect_fails_with({'control_port': None, 'control_socket': '/tmp/my_socket'}, stdout_mock, "Unable to connect to '/tmp/my_socket': failed")
path_exists_mock.return_value = False
self._assert_connect_fails_with({'control_port': ('127.0.0.1', 80), 'control_socket': None}, stdout_mock, "Unable to connect to 127.0.0.1:80: failed")
self._assert_connect_fails_with({'control_port': None, 'control_socket': '/tmp/my_socket'}, stdout_mock, "The socket file you specified (/tmp/my_socket) doesn't exist")
@patch('stem.socket.ControlPort')
@patch('os.path.exists', Mock(return_value = False))
@patch('stem.connection._connect_auth', Mock())
def test_getting_a_control_port(self, port_mock):
stem.connection.connect()
port_mock.assert_called_once_with('127.0.0.1', 9051)
port_mock.reset_mock()
stem.connection.connect(control_port = ('255.0.0.10', 80), control_socket = None)
port_mock.assert_called_once_with('255.0.0.10', 80)
@patch('stem.socket.ControlSocketFile')
@patch('os.path.exists', Mock(return_value = True))
@patch('stem.connection._connect_auth', Mock())
def test_getting_a_control_socket(self, socket_mock):
stem.connection.connect()
socket_mock.assert_called_once_with('/var/run/tor/control')
socket_mock.reset_mock()
stem.connection.connect(control_port = None, control_socket = '/tmp/my_socket')
socket_mock.assert_called_once_with('/tmp/my_socket')
def _assert_connect_fails_with(self, args, stdout_mock, msg):
result = stem.connection.connect(**args)
if result is not None:
self.fail()
# Python 3.x seems to have an oddity where StringIO has prefixed null
# characters (\x00) after we call truncate(). This could be addressed
# a couple ways...
#
# * Don't use a stdout mock more than once.
# * Strip the null characters.
#
# Opting for the second (which is admittedly a hack) so the tests are a
# little nicer.
stdout_output = stdout_mock.getvalue()
stdout_mock.truncate(0)
self.assertEqual(msg, stdout_output.strip().lstrip('\x00'))
@patch('stem.connection.authenticate')
def test_auth_success(self, authenticate_mock):
control_socket = Mock()
stem.connection._connect_auth(control_socket, None, False, None, None)
authenticate_mock.assert_called_with(control_socket, None, None)
authenticate_mock.reset_mock()
stem.connection._connect_auth(control_socket, 's3krit!!!', False, '/my/chroot', None)
authenticate_mock.assert_called_with(control_socket, 's3krit!!!', '/my/chroot')
@patch('getpass.getpass')
@patch('stem.connection.authenticate')
def test_auth_success_with_password_prompt(self, authenticate_mock, getpass_mock):
control_socket = Mock()
def authenticate_mock_func(controller, password, *args):
if password is None:
raise stem.connection.MissingPassword('no password')
elif password == 'my_password':
return None # success
else:
raise ValueError('Unexpected authenticate_mock input: %s' % password)
authenticate_mock.side_effect = authenticate_mock_func
getpass_mock.return_value = 'my_password'
stem.connection._connect_auth(control_socket, None, True, None, None)
authenticate_mock.assert_any_call(control_socket, None, None)
authenticate_mock.assert_any_call(control_socket, 'my_password', None)
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.connection.authenticate')
def test_auth_failure(self, authenticate_mock, stdout_mock):
control_socket = stem.socket.ControlPort(connect = False)
authenticate_mock.side_effect = stem.connection.IncorrectSocketType('unable to connect to socket')
self._assert_authenticate_fails_with(control_socket, stdout_mock, 'Please check in your torrc that 9051 is the ControlPort.')
control_socket = stem.socket.ControlSocketFile(connect = False)
self._assert_authenticate_fails_with(control_socket, stdout_mock, 'Are you sure the interface you specified belongs to')
authenticate_mock.side_effect = stem.connection.UnrecognizedAuthMethods('unable to connect', ['telepathy'])
self._assert_authenticate_fails_with(control_socket, stdout_mock, 'Tor is using a type of authentication we do not recognize...\n\n telepathy')
authenticate_mock.side_effect = stem.connection.IncorrectPassword('password rejected')
self._assert_authenticate_fails_with(control_socket, stdout_mock, 'Incorrect password')
authenticate_mock.side_effect = stem.connection.UnreadableCookieFile('permission denied', '/tmp/my_cookie', False)
self._assert_authenticate_fails_with(control_socket, stdout_mock, "We were unable to read tor's authentication cookie...\n\n Path: /tmp/my_cookie\n Issue: permission denied")
authenticate_mock.side_effect = stem.connection.OpenAuthRejected('crazy failure')
self._assert_authenticate_fails_with(control_socket, stdout_mock, 'Unable to authenticate: crazy failure')
def _assert_authenticate_fails_with(self, control_socket, stdout_mock, msg):
result = stem.connection._connect_auth(control_socket, None, False, None, None)
if result is not None:
self.fail() # _connect_auth() was successful
stdout_output = stdout_mock.getvalue()
stdout_mock.truncate(0)
if msg not in stdout_output:
self.fail("Expected...\n\n%s\n\n... which couldn't be found in...\n\n%s" % (msg, stdout_output)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using the build tool default.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('copies.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('copies.gyp', chdir='relocate/src')
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
test.built_file_must_match('copies-out/file2',
'file2 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file3',
'file3 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file4',
'file4 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/subdir/file5',
'file5 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/subdir/file6',
'file6 contents\n',
chdir='relocate/src')
test.pass_test() | unknown | codeparrot/codeparrot-clean | ||
export default function SectionSeparator() {
return <hr className="mb-24 border-accent-2 mt-28" />;
} | typescript | github | https://github.com/vercel/next.js | examples/cms-webiny/components/section-separator.tsx |
"""Functional tests for model.py"""
# Some Notes:
#
# * We don't really have any agreed-upon requirements about what __repr__
# should print, but I'm fairly certain I hit an argument mismatch at
# some point, which is definitely wrong. The test_repr methods are there just
# to make sure it isn't throwing an exception.
from hil.model import Node, Nic, Project, Headnode, Hnic, Network, \
NetworkingAction, Metadata
from hil import config
from hil.test_common import fresh_database, config_testsuite, ModelTest, \
fail_on_log_warnings
import pytest
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
@pytest.fixture
def configure():
"""Configure HIL."""
config_testsuite()
config.load_extensions()
fresh_database = pytest.fixture(fresh_database)
pytestmark = pytest.mark.usefixtures('configure', 'fresh_database')
class TestNic(ModelTest):
"""ModelTest for Nic objects."""
def sample_obj(self):
from hil.ext.obm.ipmi import Ipmi
return Nic(Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm")),
'ipmi', '00:11:22:33:44:55')
class TestNode(ModelTest):
"""ModelTest for Node objects."""
def sample_obj(self):
from hil.ext.obm.ipmi import Ipmi
return Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm"))
class TestProject(ModelTest):
"""ModelTest for Project objects."""
def sample_obj(self):
return Project('manhattan')
class TestHeadnode(ModelTest):
"""ModelTest for Headnode objects."""
def sample_obj(self):
return Headnode(Project('anvil-nextgen'),
'hn-example', 'base-headnode')
class TestHnic(ModelTest):
"""ModelTest for Hnic objects."""
def sample_obj(self):
return Hnic(Headnode(Project('anvil-nextgen'),
'hn-0', 'base-headnode'),
'storage')
class TestNetwork(ModelTest):
"""ModelTest for Network objects."""
def sample_obj(self):
pj = Project('anvil-nextgen')
return Network(pj, [pj], True, '102', 'hammernet')
class TestMetadata(ModelTest):
"""ModelTest for Metadata objects."""
def sample_obj(self):
from hil.ext.obm.ipmi import Ipmi
node = Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm"))
return Metadata('EK', 'pk', node)
class TestNetworkingAction(ModelTest):
"""ModelTest for NetworkingAction objects."""
def sample_obj(self):
from hil.ext.obm.ipmi import Ipmi
nic = Nic(Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm")),
'ipmi', '00:11:22:33:44:55')
project = Project('anvil-nextgen')
network = Network(project, [project], True, '102', 'hammernet')
return NetworkingAction(nic=nic,
new_network=network,
channel='null') | unknown | codeparrot/codeparrot-clean | ||
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'quotes',
'USER': 'postgres',
'PASSWORD': 'passgrespost',
'HOST': 'localhost',
'PORT': '5433',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2014 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.graph;
import com.google.common.collect.Ordering;
import java.util.Arrays;
import java.util.Collection;
import org.jspecify.annotations.NullUnmarked;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/** Tests for a directed {@link StandardMutableNetwork} allowing self-loops. */
@AndroidIncompatible
@RunWith(Parameterized.class)
@NullUnmarked
public class StandardMutableDirectedNetworkTest extends AbstractStandardDirectedNetworkTest {
@Parameters(name = "allowsSelfLoops={0}, allowsParallelEdges={1}, nodeOrder={2}, edgeOrder={3}")
public static Collection<Object[]> parameters() {
ElementOrder<?> naturalElementOrder = ElementOrder.sorted(Ordering.natural());
return Arrays.asList(
new Object[][] {
{false, false, ElementOrder.insertion(), ElementOrder.insertion()},
{true, false, ElementOrder.insertion(), ElementOrder.insertion()},
{false, false, naturalElementOrder, naturalElementOrder},
{true, true, ElementOrder.insertion(), ElementOrder.insertion()},
});
}
private final boolean allowsSelfLoops;
private final boolean allowsParallelEdges;
private final ElementOrder<Integer> nodeOrder;
private final ElementOrder<String> edgeOrder;
public StandardMutableDirectedNetworkTest(
boolean allowsSelfLoops,
boolean allowsParallelEdges,
ElementOrder<Integer> nodeOrder,
ElementOrder<String> edgeOrder) {
this.allowsSelfLoops = allowsSelfLoops;
this.allowsParallelEdges = allowsParallelEdges;
this.nodeOrder = nodeOrder;
this.edgeOrder = edgeOrder;
}
@Override
MutableNetwork<Integer, String> createGraph() {
return NetworkBuilder.directed()
.allowsSelfLoops(allowsSelfLoops)
.allowsParallelEdges(allowsParallelEdges)
.nodeOrder(nodeOrder)
.edgeOrder(edgeOrder)
.build();
}
@Override
void addNode(Integer n) {
networkAsMutableNetwork.addNode(n);
}
@Override
void addEdge(Integer n1, Integer n2, String e) {
networkAsMutableNetwork.addEdge(n1, n2, e);
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/graph/StandardMutableDirectedNetworkTest.java |
from django.apps import AppConfig
from django.contrib.staticfiles.checks import check_finders, check_storages
from django.core import checks
from django.utils.translation import gettext_lazy as _
class StaticFilesConfig(AppConfig):
name = "django.contrib.staticfiles"
verbose_name = _("Static Files")
ignore_patterns = ["CVS", ".*", "*~"]
def ready(self):
checks.register(check_finders, checks.Tags.staticfiles)
checks.register(check_storages, checks.Tags.staticfiles) | python | github | https://github.com/django/django | django/contrib/staticfiles/apps.py |
#!/bin/sh
test_description='magic pathspec tests using git-log'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
test_expect_success 'setup' '
test_commit initial &&
test_tick &&
git commit --allow-empty -m empty &&
mkdir sub
'
test_expect_success '"git log :/" should not be ambiguous' '
git log :/
'
test_expect_success '"git log :/a" should be ambiguous (applied both rev and worktree)' '
: >a &&
test_must_fail git log :/a 2>error &&
test_grep ambiguous error
'
test_expect_success '"git log :/a -- " should not be ambiguous' '
git log :/a --
'
test_expect_success '"git log :/detached -- " should find a commit only in HEAD' '
test_when_finished "git checkout main" &&
git checkout --detach &&
test_commit --no-tag detached &&
test_commit --no-tag something-else &&
git log :/detached --
'
test_expect_success '"git log :/detached -- " should not find an orphaned commit' '
test_must_fail git log :/detached --
'
test_expect_success '"git log :/detached -- " should find HEAD only of own worktree' '
git worktree add other-tree HEAD &&
git -C other-tree checkout --detach &&
test_tick &&
git -C other-tree commit --allow-empty -m other-detached &&
git -C other-tree log :/other-detached -- &&
test_must_fail git log :/other-detached --
'
test_expect_success '"git log -- :/a" should not be ambiguous' '
git log -- :/a
'
test_expect_success '"git log :/any/path/" should not segfault' '
test_must_fail git log :/any/path/
'
# This differs from the ":/a" check above in that :/in looks like a pathspec,
# but doesn't match an actual file.
test_expect_success '"git log :/in" should not be ambiguous' '
git log :/in
'
test_expect_success '"git log :" should be ambiguous' '
test_must_fail git log : 2>error &&
test_grep ambiguous error
'
test_expect_success 'git log -- :' '
git log -- :
'
test_expect_success 'git log HEAD -- :/' '
initial=$(git rev-parse --short HEAD^) &&
cat >expected <<-EOF &&
$initial initial
EOF
(cd sub && git log --oneline HEAD -- :/ >../actual) &&
test_cmp expected actual
'
test_expect_success '"git log :^sub" is not ambiguous' '
git log :^sub
'
test_expect_success '"git log :^does-not-exist" does not match anything' '
test_must_fail git log :^does-not-exist
'
test_expect_success '"git log :!" behaves the same as :^' '
git log :!sub &&
test_must_fail git log :!does-not-exist
'
test_expect_success '"git log :(exclude)sub" is not ambiguous' '
git log ":(exclude)sub"
'
test_expect_success '"git log :(exclude)sub --" must resolve as an object' '
test_must_fail git log ":(exclude)sub" --
'
test_expect_success '"git log :(unknown-magic) complains of bogus magic' '
test_must_fail git log ":(unknown-magic)" 2>error &&
test_grep pathspec.magic error
'
test_expect_success 'command line pathspec parsing for "git log"' '
git reset --hard &&
>a &&
git add a &&
git commit -m "add an empty a" --allow-empty &&
echo 1 >a &&
git commit -a -m "update a to 1" &&
git checkout HEAD^ &&
echo 2 >a &&
git commit -a -m "update a to 2" &&
test_must_fail git merge main &&
git add a &&
git log --merge -- a
'
test_expect_success 'tree_entry_interesting does not match past submodule boundaries' '
test_when_finished "rm -rf repo submodule" &&
test_config_global protocol.file.allow always &&
git init submodule &&
test_commit -C submodule initial &&
git init repo &&
>"repo/[bracket]" &&
git -C repo add "[bracket]" &&
test_tick &&
git -C repo commit -m bracket &&
git -C repo rev-list HEAD -- "[bracket]" >expect &&
git -C repo submodule add ../submodule &&
test_tick &&
git -C repo commit -m submodule &&
git -C repo rev-list HEAD -- "[bracket]" >actual &&
test_cmp expect actual
'
test_done | unknown | github | https://github.com/git/git | t/t4208-log-magic-pathspec.sh |
# frozen_string_literal: true
module Psych
class Omap < ::Hash
end
end | ruby | github | https://github.com/ruby/ruby | ext/psych/lib/psych/omap.rb |
import elasticsearch
from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
from django.utils.html import strip_tags
from django.utils.text import unescape_entities
from elasticsearch.helpers import streaming_bulk
from elasticsearch_dsl import DocType, Long, Nested, Object, String, analysis
from elasticsearch_dsl.connections import connections
from .models import Document, document_url
class SearchPaginator(Paginator):
"""
A better paginator for search results
The normal Paginator does a .count() query and then a slice. Since ES
results contain the total number of results, we can take an optimistic
slice and then adjust the count.
"""
def validate_number(self, number):
"""
Validates the given 1-based page number.
This class overrides the default behavior and ignores the upper bound.
"""
try:
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
"""
Returns a page object.
This class overrides the default behavior and ignores "orphans" and
assigns the count from the ES result to the Paginator.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
# Force the search to evaluate and then attach the count. We want to
# avoid an extra useless query even if there are no results, so we
# directly fetch the count from hits.
result = self.object_list[bottom:top].execute()
page = Page(result.hits, number, self)
# Update the `_count`.
self._count = page.object_list.total
# Also store the aggregations, if any.
if hasattr(result, 'aggregations'):
page.aggregations = result.aggregations
# Now that we have the count validate that the page number isn't higher
# than the possible number of pages and adjust accordingly.
if number > self.num_pages:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
return page
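# Minimal usage sketch (illustrative names; assumes an elasticsearch-dsl
# ``Search`` object, e.g. one built from the DocType defined further below):
#
#   search = DocumentDocType.search().query('match', content='middleware')
#   paginator = SearchPaginator(search, per_page=10)
#   page = paginator.page(2)  # one ES query: slice [10:20], count from hits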
class ImprovedDocType(DocType):
@classmethod
def index_all(cls, index_name, using=None, **kwargs):
def actions_generator():
for obj in cls.index_queryset().iterator():
elastic_data = cls.from_django(obj).to_dict(include_meta=True)
elastic_data['_index'] = index_name
yield elastic_data
client = connections.get_connection(using or cls._doc_type.using)
cls.init(index_name)
for ok, item in streaming_bulk(client, actions_generator(), chunk_size=100, **kwargs):
yield ok, item
@classmethod
def index_queryset(cls):
return cls.model._default_manager.all()
@classmethod
def index_object(cls, obj):
return cls.from_django(obj).save()
@classmethod
def unindex_object(cls, obj):
return cls.get(id=obj.pk).delete()
@classmethod
def from_django(cls, obj):
raise NotImplementedError('You must define a from_django classmethod '
'to map ORM object fields to ES fields')
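# Minimal bulk-indexing sketch (illustrative index name; the concrete DocType
# is defined further below):
#
#   for ok, item in DocumentDocType.index_all('docs-20160101'):
#       if not ok:
#           print('failed to index %r' % (item,))
#   DocumentDocType.alias_to_main_index('docs-20160101')
# Register 'path_hierarchy' and 'whitespace' as builtin tokenizer names so the
# thin Tokenizer subclasses below resolve against them (presumably a workaround
# for the elasticsearch-dsl version in use at the time).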
analysis.Tokenizer._builtins = analysis.TOKENIZERS = frozenset((
'keyword', 'standard', 'path_hierarchy', 'whitespace'
))
class PathHierarchyTokenizer(analysis.Tokenizer):
name = 'path_hierarchy'
class WhitespaceTokenizer(analysis.Tokenizer):
name = 'whitespace'
path_analyzer = analysis.CustomAnalyzer('path',
tokenizer='path_hierarchy',
filter=['lowercase'])
lower_whitespace_analyzer = analysis.analyzer('lower_whitespace',
tokenizer='whitespace',
filter=['lowercase', 'stop'],
char_filter=['html_strip'])
class DocumentDocType(ImprovedDocType):
"""
The main documentation doc type to be used for searching.
It stores a bit of meta data so we don't have to hit the db
when rendering search results.
The search view will be using the 'lang' and 'version' fields
of the document's release to filter the search results, depending
which was found in the URL.
The breadcrumbs are shown under the search result title.
"""
model = Document
id = Long()
title = String(analyzer=lower_whitespace_analyzer, boost=1.2)
path = String(index='no', analyzer=path_analyzer)
content = String(analyzer=lower_whitespace_analyzer)
content_raw = String(index_options='offsets')
release = Object(properties={
'id': Long(),
'version': String(index='not_analyzed'),
'lang': String(index='not_analyzed'),
})
breadcrumbs = Nested(properties={
'title': String(index='not_analyzed'),
'path': String(index='not_analyzed'),
})
class Meta:
index = 'docs'
doc_type = 'document'
@classmethod
def alias_to_main_index(cls, index_name, using=None):
"""
Alias `index_name` to 'docs' (`cls._doc_type.index`).
"""
body = {'actions': [{'add': {'index': index_name, 'alias': cls._doc_type.index}}]}
client = connections.get_connection(using or cls._doc_type.using)
client.indices.refresh(index=index_name)
try:
old_index_name = list(client.indices.get_alias('docs').keys())[0]
except elasticsearch.exceptions.NotFoundError:
old_index_name = None
else:
body['actions'].append({'remove': {'index': old_index_name, 'alias': cls._doc_type.index}})
client.indices.update_aliases(body=body)
# Delete the old index that was aliased to 'docs'.
if old_index_name:
client.indices.delete(old_index_name)
@classmethod
def index_queryset(cls):
qs = super(DocumentDocType, cls).index_queryset()
return (
# don't index the module pages since source code is hard to
# combine with full text search
qs.exclude(path__startswith='_modules')
# not the crazy big flattened index of the CBVs
.exclude(path__startswith='ref/class-based-views/flattened-index')
.select_related('release'))
@classmethod
def from_django(cls, obj):
        # turn HTML entities back into unicode characters and remove
        # all HTML tags, i.e. the "plain text" version of the document
        raw_body = strip_tags(unescape_entities(obj.body).replace(u'¶', ''))
doc = cls(path=obj.path,
title=obj.title,
content=obj.body,
content_raw=raw_body,
meta={'id': obj.id})
doc.release = {
'id': obj.release.id,
'lang': obj.release.lang,
'version': obj.release.version,
}
breadcrumbs = []
for breadcrumb in cls.model.objects.breadcrumbs(obj):
breadcrumbs.append({
'title': breadcrumb.title,
'path': breadcrumb.path,
})
doc.breadcrumbs = breadcrumbs
return doc
def get_absolute_url(self):
return document_url(self) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@author: Maribel Acosta
@author: Fabian Floeck
@author: Michael Ruster
'''
import csv
import re
import functions.TextPostProcessing as TextPostProcessing
import functions.WarningTemplates as WarningTemplates
import BlockTimeCalculation
from datetime import datetime
def writeAllRevisions(order, revisions, blocks, pageName = None):
""" Writes the revisions to disk. Don't pass a pageName if you want to
process deletion discussions. The pageName is used for user warnings to
determine the admonished user. """
    assert (not pageName) ^ (not blocks), '[E] Illegal configuration: exactly one of pageName and blocks must be set.'
for (revisionId, vandalism) in order:
if not(vandalism):
revision = revisions[revisionId]
text = extractCurrentRevisionsText(revision, blocks)
if not pageName:
writeDeletionDiscussion(text, revision, blocks)
else:
writeUserWarning(text, revision, pageName)
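# Usage sketch (illustrative; `order`, `revisions` and `blocks` come from the
# surrounding WikiWho run):
#
#   writeAllRevisions(order, revisions, blocks)                   # AfD pages
#   writeAllRevisions(order, revisions, [], 'User talk:SomeUser') # user warnings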
def extractCurrentRevisionsText(revision, blocks):
""" Iterates over the revision's text and extracts all text that has been
introduced in this revision as a list. """
textList = []
for hash_paragraph in revision.ordered_paragraphs:
para = revision.paragraphs[hash_paragraph]
paragraph = para[-1]
for hash_sentence in paragraph.ordered_sentences:
sentence = paragraph.sentences[hash_sentence][-1]
            # compare by value: identity (`is`) checks on ints only work reliably for interned small ints
            textList.extend([word.value for word in sentence.words if word.revision == revision.wikipedia_id])
return TextPostProcessing.merge(textList)
def writeDeletionDiscussion(text, revision, blocks):
""" Writes this deletion discussion to disk. Text is this revision's text.
Said text will be cleaned so that markup is removed. It will append to this
directory's 'deletionRevisions.csv'. Make sure, this script has writing
access. The CSV will have the following columns:
timestamp | contrib ID | contrib name | rev ID | text | seconds to block
Seconds to block is the time in seconds until the user who is author of this
revision got blocked.
Revisions of anonymous users will be ignored as IP addresses are not unique.
Likewise, bots will be ignored as we are interested in human communication.
The list of bots can be retrieved from running WikiWho w/o the Bot removal
and executing the bash command (replacing the space delimiter with a tab):
grep 'Bot ' deletionRevisions.csv | cut -f 3 -d ' ' | sort | uniq
For our dump, we detected 55 bots with SineBot being the most active. """
# we will not process anonymous users or bots:
if not revision.contributor_id or \
revision.contributor_name.endswith('Bot'):
return
text = TextPostProcessing.clean(text, revision.contributor_name)
text = removeAfDText(text)
# only print a line when this revision introduced new text:
if text.strip():
secondsToBlock = BlockTimeCalculation.calculateSecondsUntilNextBlock(blocks, revision.contributor_name, revision.timestamp)
print("[I] Writing authorship for revision %s to disk." % revision.wikipedia_id)
with open('deletionRevisions.csv', 'a', newline='') as csvFile:
writer = csv.writer(csvFile, delimiter='\t',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow([revision.timestamp,
revision.contributor_id,
revision.contributor_name,
revision.wikipedia_id,
text,
secondsToBlock])
# calculate the templates once:
templatesRe = WarningTemplates.mergeTemplatesRe(WarningTemplates.vandalism,
WarningTemplates.disruptive,
WarningTemplates.agf,
WarningTemplates.harass,
WarningTemplates.npa)
def writeUserWarning(text, revision, pageName):
""" Writes user warnings in a block log format into 'userWarnings.csv'. The
columns look as follows:
    timestamp | blocked user name | rev ID | warning | issuer ID | issuer name """
assert pageName.startswith('User talk:'), '[E] Revision is not a user page:"%s"' % pageName
blockedUserName = pageName[10:]
for templateRe in templatesRe:
matchedTemplate = templateRe.search(text)
if matchedTemplate:
matchedWarning = matchedTemplate.group(1)
print('[I] Writing admonished user "%s" with warning "%s" to disk.' % (blockedUserName, matchedWarning))
with open('userWarnings.csv', 'a', newline='') as csvFile:
writer = csv.writer(csvFile, delimiter='\t',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow([revision.timestamp,
blockedUserName,
revision.wikipedia_id,
matchedWarning,
revision.contributor_id,
revision.contributor_name])
break
#===============================================================================
# The following method uses regular expressions. The expressions are compiled
# before the method definition so that they are compiled only once:
#===============================================================================
# The following list of regular expressions has been built by analysing the most
# frequently made posts in AfDs.
afdTemplates = [ re.compile('(wikipediadeletionprocess )?(relistingdiscussions )?((wp)?relist )?(this afd is being )?relisted to generate a (clearer consensus|more thorough discussion so (a clearer |that )?(consensus|a decision) may (usefully )?be reached)( br| emsp| please add new discussion below this notice thanks)?'),
re.compile('(was proposed for deletion )?this page is an archive of (the discussion (about |surrounding ))?the proposed deletion (of the (article below|page entitled( \w)*) )?this page is (no longer live|kept as an historic record)'),
re.compile('this page is now preserved as an archive of the debate and like (some )?other (delete |vfd )?(sub)?pages is no longer live subsequent comments on the issue the deletion or (on )?the decisionmaking process should be placed on the relevant live pages please do not edit this page'),
re.compile('this page is an archive of the proposed deletion of the article below further comments should be made on the (appropriate discussion page such as the )?articles talk page (if it exists )?or (on a votes for undeletion nomination|after the end of this archived section)'),
re.compile('note this debate has been added to the .*?deletion list of .*?deletions( ron)?'),
re.compile('preceding wikipediasignatures (unsigned|undated)? comment (was )?added( at ?)?( by)?'),
re.compile('remove this template when closing this afd'),
re.compile('this afd nomination was incomplete (missing step )?it is listed now'),
re.compile('this afd nomination was wikipediaarticlesfordeletion howtolistpagesfordeletion orphaned listing now'),
re.compile('further comments should be made on the articles talk page rather than here so that this page is preserved as an historic record (br )?'),
re.compile('no further edits should be made to (this )?page'),
re.compile('the result( of the debate)? was'),
re.compile('(the (above discussion|following discussion)|this page) is ((now )?preserved as an archive of the debate( and (like other delete pages )?is no longer live)?|an archived debate of the proposed deletion of the article( below)?)'),
re.compile('(please do not modify it )?subsequent comments (on the issue the deletion or on the decisionmaking process )?should be (made|placed) on the (appropriate|relevant) (discussion|live) page(s)?( such as the articles talk page or (o|i)n an?)?') ]
spacesRe = re.compile(r' {2,}')
def removeAfDText(text):
""" Although templates can be put into text without them expanding, it is
advised against doing so. Therefore, templates are not marked as such but
instead the dumps contain the templates text next to actual content. We try
our best to remove the most frequently used templates using regular
expressions. These were build after sorting the AfD posts by frequency.
"""
for templateRe in afdTemplates:
text = templateRe.sub("", text)
# Remove leftover consecutive spaces that could appear after applying res:
text = spacesRe.sub(' ', text)
return text | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
from utils import mathfont
import fontforge
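# Each font generated below gives exactly one of the OpenType MATH limit
# constants a nonzero value (all others are zeroed), so a test consuming the
# font can isolate the effect of that single constant.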
nArySumCodePoint = 0x2211 # largeop operator
v = 3 * mathfont.em
f = mathfont.create("limits-lowerlimitbaselinedropmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = v
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 11 * mathfont.em
f = mathfont.create("limits-lowerlimitgapmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = v
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 5 * mathfont.em
f = mathfont.create("limits-upperlimitbaselinerisemin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = v
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 7 * mathfont.em
f = mathfont.create("limits-upperlimitgapmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = v
mathfont.save(f) | unknown | codeparrot/codeparrot-clean | ||
"""
cfg.py
Simulation configuration for M1 model (using NetPyNE)
Contributors: salvadordura@gmail.com
"""
from netpyne import specs
import pickle
cfg = specs.SimConfig()
#------------------------------------------------------------------------------
#
# SIMULATION CONFIGURATION
#
#------------------------------------------------------------------------------
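# A cfg like this is typically paired with a netParams specification and run
# with something like (illustrative; the file names are assumptions):
#
#   from netpyne import sim
#   simConfig, netParams = sim.readCmdLineArgs(simConfigDefault='cfg.py',
#                                              netParamsDefault='netParams.py')
#   sim.createSimulateAnalyze(netParams=netParams, simConfig=simConfig)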
#------------------------------------------------------------------------------
# Run parameters
#------------------------------------------------------------------------------
cfg.duration = 0.5*1e3
cfg.dt = 0.05
cfg.seeds = {'conn': 4321, 'stim': 1234, 'loc': 4321}
cfg.hParams = {'celsius': 34, 'v_init': -80}
cfg.verbose = 0
cfg.createNEURONObj = True
cfg.createPyStruct = True
cfg.cvode_active = False
cfg.cvode_atol = 1e-6
cfg.cache_efficient = True
cfg.printRunTime = 0.1
cfg.includeParamsLabel = False
cfg.printPopAvgRates = True
cfg.checkErrors = False
#------------------------------------------------------------------------------
# Recording
#------------------------------------------------------------------------------
allpops = ['IT2','PV2','SOM2','IT4','IT5A','PV5A','SOM5A','IT5B','PT5B','PV5B','SOM5B','IT6','CT6','PV6','SOM6']
cfg.recordTraces = {'V_soma': {'sec':'soma', 'loc':0.5, 'var':'v'}}
cfg.recordStim = False
cfg.recordTime = False
cfg.recordStep = 0.1
#------------------------------------------------------------------------------
# Saving
#------------------------------------------------------------------------------
cfg.simLabel = 'M1detailed'
cfg.saveFolder = '.'
cfg.savePickle = False
cfg.saveJson = True
cfg.saveDataInclude = ['simData', 'simConfig', 'netParams']#, 'net']
cfg.backupCfgFile = None #['cfg.py', 'backupcfg/']
cfg.gatherOnlySimData = False
cfg.saveCellSecs = True
cfg.saveCellConns = True
#------------------------------------------------------------------------------
# Analysis and plotting
#------------------------------------------------------------------------------
with open('cells/popColors.pkl', 'rb') as fileObj: popColors = pickle.load(fileObj)['popColors']
cfg.analysis['plotTraces'] = {'include': [('IT5A',0), ('PT5B',0)], 'timeRange': [0,500], 'oneFigPer': 'cell', 'figSize': (10,4), 'saveFig': True, 'showFig': False}
cfg.analysis['plotRaster'] = {'include': allpops, 'saveFig': True, 'showFig': False, 'labels': 'overlay', 'popRates': True, 'orderInverse': True,
'timeRange': [0,500], 'popColors': popColors, 'figSize': (6,6), 'lw': 0.3, 'markerSize':10, 'marker': '.', 'dpi': 300}
cfg.analysis['plotSpikeHist'] = {'include': ['IT2','IT4','IT5A','IT5B','PT5B','IT6','CT6'], 'timeRange': [0,500],
'saveFig': True, 'showFig': False, 'popColors': popColors, 'figSize': (10,4), 'dpi': 300}
cfg.analysis['plotConn'] = {'includePre': ['IT2','IT4','IT5A','IT5B','PT5B','IT6','CT6'], 'includePost': ['IT2','IT4','IT5A','IT5B','PT5B','IT6','CT6'], 'feature': 'strength', 'figSize': (10,10), 'groupBy': 'pop', \
'graphType': 'matrix', 'synOrConn': 'conn', 'synMech': None, 'saveData': None, 'saveFig': 1, 'showFig': 0}
#------------------------------------------------------------------------------
# Cells
#------------------------------------------------------------------------------
cfg.cellmod = {'IT2': 'HH_reduced',
'IT4': 'HH_reduced',
'IT5A': 'HH_full',
'IT5B': 'HH_reduced',
'PT5B': 'HH_full',
'IT6': 'HH_reduced',
'CT6': 'HH_reduced'}
cfg.ihModel = 'migliore' # ih model
cfg.ihGbar = 1.0 # multiplicative factor for ih gbar in PT cells
cfg.ihGbarZD = None # multiplicative factor for ih gbar in PT cells under ZD (ih blocker) condition
cfg.ihGbarBasal = 1.0 # 0.1 # multiplicative factor for ih gbar in PT cells
cfg.ihlkc = 0.2 # ih leak param (used in Migliore)
cfg.ihlkcBasal = 1.0
cfg.ihlkcBelowSoma = 0.01
cfg.ihlke = -86 # ih leak param (used in Migliore)
cfg.ihSlope = 14*2
cfg.removeNa = False # simulate TTX; set gnabar=0
cfg.somaNa = 5
cfg.dendNa = 0.3
cfg.axonNa = 7
cfg.axonRa = 0.005
cfg.gpas = 0.5 # multiplicative factor for pas g in PT cells
cfg.epas = 0.9 # multiplicative factor for pas e in PT cells
#------------------------------------------------------------------------------
# Synapses
#------------------------------------------------------------------------------
cfg.synWeightFractionEE = [0.5, 0.5] # E->E AMPA to NMDA ratio
cfg.synWeightFractionEI = [0.5, 0.5] # E->I AMPA to NMDA ratio
cfg.synWeightFractionSOME = [0.9, 0.1] # SOM -> E GABAASlow to GABAB ratio
cfg.synsperconn = {'HH_full': 5, 'HH_reduced': 1, 'HH_simple': 1}
cfg.AMPATau2Factor = 1.0
#------------------------------------------------------------------------------
# Network
#------------------------------------------------------------------------------
cfg.singleCellPops = 0 # Create pops with 1 single cell (to debug)
cfg.weightNormThreshold = 4.0 # weight normalization factor threshold
cfg.addConn = 1
cfg.scale = 1.0
cfg.sizeY = 1350.0
cfg.sizeX = 400.0
cfg.sizeZ = 400.0
cfg.scaleDensity = 0.01
cfg.EEGain = 1.0
cfg.EIGain = 1.0
cfg.IEGain = 1.0
cfg.IIGain = 1.0
cfg.IEdisynapticBias = None # increase prob of I->Ey conns if Ex->I and Ex->Ey exist
#------------------------------------------------------------------------------
## E->I gains
cfg.EPVGain = 1.0
cfg.ESOMGain = 1.0
#------------------------------------------------------------------------------
## I->E gains
cfg.PVEGain = 1.0
cfg.SOMEGain = 1.0
#------------------------------------------------------------------------------
## I->I gains
cfg.PVSOMGain = None #0.25
cfg.SOMPVGain = None #0.25
cfg.PVPVGain = None # 0.75
cfg.SOMSOMGain = None #0.75
#------------------------------------------------------------------------------
## I->E/I layer weights (L2/3+4, L5, L6)
cfg.IEweights = [1.2, 1.2, 1.2]
cfg.IIweights = [0.8, 1.2, 0.8]
cfg.IPTGain = 1.0
cfg.IFullGain = 1.0
#------------------------------------------------------------------------------
# Subcellular distribution
#------------------------------------------------------------------------------
cfg.addSubConn = 1
#------------------------------------------------------------------------------
# Long range inputs
#------------------------------------------------------------------------------
cfg.addLongConn = 1
cfg.numCellsLong = 10 # num of cells per population
cfg.noiseLong = 1.0 # firing rate random noise
cfg.delayLong = 5.0 # (ms)
cfg.weightLong = 0.5*2 # corresponds to unitary connection somatic EPSP (mV)
cfg.startLong = 0 # start at 0 ms
cfg.ratesLong = {'TPO': [0,20], 'TVL': [0,20], 'S1': [0,20], 'S2': [0,20], 'cM1': [0,20], 'M2': [0,20], 'OC': [0,20]}
## input pulses
cfg.addPulses = 0
cfg.pulse = {'pop': 'None', 'start': 1000, 'end': 1200, 'rate': 20, 'noise': 0.5}
#cfg.pulse2 = {'pop': 'None', 'start': 1000, 'end': 1200, 'rate': 20, 'noise': 0.5}
#------------------------------------------------------------------------------
# Current inputs
#------------------------------------------------------------------------------
cfg.addIClamp = 0
cfg.IClamp1 = {'pop': 'IT5B', 'sec': 'soma', 'loc': 0.5, 'start': 0, 'dur': 1000, 'amp': 0.50}
#------------------------------------------------------------------------------
# NetStim inputs
#------------------------------------------------------------------------------
cfg.addNetStim = 0
cfg.NetStim1 = {'pop': 'IT2', 'ynorm':[0,1], 'sec': 'soma', 'loc': 0.5, 'synMech': ['AMPA'], 'synMechWeightFactor': [1.0],
'start': 500, 'interval': 1000.0/60.0, 'noise': 0.0, 'number': 60.0, 'weight': 30.0, 'delay': 0} | unknown | codeparrot/codeparrot-clean | ||
from discord.ext import commands
import discord.utils
from __main__ import settings
#
# This is a modified version of checks.py, originally made by Rapptz
#
# https://github.com/Rapptz
# https://github.com/Rapptz/RoboDanny/tree/async
#
def is_owner_check(ctx):
return ctx.message.author.id == settings.owner
def is_owner():
return commands.check(is_owner_check)
# The permission system of the bot operates on a "just works" basis.
# You have permissions and the bot has permissions. If you meet the permissions
# required to execute the command (and the bot does as well) then it goes through
# and you can execute the command.
# If these checks fail, then there are two fallbacks.
# A role with the name of Bot Mod and a role with the name of Bot Admin.
# Having these roles gives you access to certain commands without actually having
# the permissions required for them.
# Of course, the owner will always be able to execute commands.
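# Minimal usage sketch (hypothetical cog command; not part of this module):
#
#   @commands.command(pass_context=True)
#   @mod_or_permissions(manage_messages=True)
#   async def purge(self, ctx, limit: int):
#       ...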
def check_permissions(ctx, perms):
if is_owner_check(ctx):
return True
elif not perms:
return False
ch = ctx.message.channel
author = ctx.message.author
resolved = ch.permissions_for(author)
return all(getattr(resolved, name, None) == value for name, value in perms.items())
def role_or_permissions(ctx, check, **perms):
if check_permissions(ctx, perms):
return True
ch = ctx.message.channel
author = ctx.message.author
if ch.is_private:
return False # can't have roles in PMs
role = discord.utils.find(check, author.roles)
return role is not None
def mod_or_permissions(**perms):
def predicate(ctx):
server = ctx.message.server
mod_role = settings.get_server_mod(server).lower()
admin_role = settings.get_server_admin(server).lower()
return role_or_permissions(ctx, lambda r: r.name.lower() in (mod_role,admin_role), **perms)
return commands.check(predicate)
def admin_or_permissions(**perms):
def predicate(ctx):
server = ctx.message.server
admin_role = settings.get_server_admin(server)
return role_or_permissions(ctx, lambda r: r.name.lower() == admin_role.lower(), **perms)
return commands.check(predicate)
def serverowner_or_permissions(**perms):
def predicate(ctx):
if ctx.message.server is None:
return False
server = ctx.message.server
owner = server.owner
if ctx.message.author.id == owner.id:
return True
return check_permissions(ctx,perms)
return commands.check(predicate)
def serverowner():
return serverowner_or_permissions()
def admin():
return admin_or_permissions()
def mod():
return mod_or_permissions() | unknown | codeparrot/codeparrot-clean |