code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package e2etest
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/hashicorp/terraform/internal/e2e"
"github.com/hashicorp/terraform/internal/getproviders"
)
// TestProviderProtocols verifies that Terraform can execute provider plugins
// with both supported protocol versions.
func TestProviderProtocols(t *testing.T) {
if !canRunGoBuild {
// We're running in a separate-build-then-run context, so we can't
// currently execute this test which depends on being able to build
// new executable at runtime.
//
// (See the comment on canRunGoBuild's declaration for more information.)
t.Skip("can't run without building a new provider executable")
}
t.Parallel()
tf := e2e.NewBinary(t, terraformBin, "testdata/provider-plugin")
// In order to do a decent end-to-end test for this case we will need a real
// enough provider plugin to try to run and make sure we are able to
// actually run it. Here will build the simple and simple6 (built with
// protocol v6) providers.
simple6Provider := filepath.Join(tf.WorkDir(), "terraform-provider-simple6")
simple6ProviderExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provider-simple-v6/main", simple6Provider)
simpleProvider := filepath.Join(tf.WorkDir(), "terraform-provider-simple")
simpleProviderExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provider-simple/main", simpleProvider)
// Move the provider binaries into a directory that we will point terraform
// to using the -plugin-dir cli flag.
platform := getproviders.CurrentPlatform.String()
hashiDir := "cache/registry.terraform.io/hashicorp/"
if err := os.MkdirAll(tf.Path(hashiDir, "simple6/0.0.1/", platform), os.ModePerm); err != nil {
t.Fatal(err)
}
if err := os.Rename(simple6ProviderExe, tf.Path(hashiDir, "simple6/0.0.1/", platform, "terraform-provider-simple6")); err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(tf.Path(hashiDir, "simple/0.0.1/", platform), os.ModePerm); err != nil {
t.Fatal(err)
}
if err := os.Rename(simpleProviderExe, tf.Path(hashiDir, "simple/0.0.1/", platform, "terraform-provider-simple")); err != nil {
t.Fatal(err)
}
//// INIT
_, stderr, err := tf.Run("init", "-plugin-dir=cache")
if err != nil {
t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr)
}
//// PLAN
_, stderr, err = tf.Run("plan", "-out=tfplan")
if err != nil {
t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr)
}
//// APPLY
stdout, stderr, err := tf.Run("apply", "tfplan")
if err != nil {
t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr)
}
if !strings.Contains(stdout, "Apply complete! Resources: 2 added, 0 changed, 0 destroyed.") {
t.Fatalf("wrong output:\nstdout:%s\nstderr%s", stdout, stderr)
}
/// DESTROY
stdout, stderr, err = tf.Run("destroy", "-auto-approve")
if err != nil {
t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr)
}
if !strings.Contains(stdout, "Resources: 2 destroyed") {
t.Fatalf("wrong destroy output\nstdout:%s\nstderr:%s", stdout, stderr)
}
} | go | github | https://github.com/hashicorp/terraform | internal/command/e2etest/provider_plugin_test.go |
# -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from odoo.tests.common import TransactionCase
class TestCrmLead(TransactionCase):
    """Exercise the computed prescription fields on the demo medical lead."""

    def setUp(self):
        super(TestCrmLead, self).setUp()
        ref = self.env.ref
        # Demo lead with two linked prescription orders.
        self.crm_lead_1 = ref(
            'sale_crm_medical_prescription.crm_lead_medical_lead_1')
        self.rx_order_9 = ref(
            'sale_crm_medical_prescription.'
            'medical_prescription_prescription_order_9')
        self.rx_order_10 = ref(
            'sale_crm_medical_prescription.'
            'medical_prescription_prescription_order_10')

    def test_compute_prescription_order(self):
        """ Test prescription orders properly calculated """
        for order in (self.rx_order_9, self.rx_order_10):
            self.assertIn(order, self.crm_lead_1.prescription_order_ids)

    def test_compute_patient_ids(self):
        """ Test patient ids properly calculated """
        expected_patient = self.env.ref(
            'sale_crm_medical_prescription.'
            'medical_patient_patient_10')
        self.assertIn(expected_patient, self.crm_lead_1.patient_ids)

    def test_compute_is_prescription(self):
        """ Test is_prescription set to True """
        self.assertTrue(self.crm_lead_1.is_prescription)
/* Generated from russian.sbl by Snowball 3.0.0 - https://snowballstem.org/ */
#include "stem_UTF_8_russian.h"
#include <stddef.h>
#include "snowball_runtime.h"
/*
 * Stemmer-private state: the shared SN_env base followed by the two
 * region markers computed once per word by r_mark_regions:
 *   i_pV - start of RV (position just after the first vowel)
 *   i_p2 - start of R2 (standard Snowball region definition)
 */
struct SN_local {
    struct SN_env z;
    int i_p2;
    int i_pV;
};
typedef struct SN_local SN_local;
#ifdef __cplusplus
extern "C" {
#endif
extern int russian_UTF_8_stem(struct SN_env * z);
#ifdef __cplusplus
}
#endif
/* Forward declarations of the generated routines (one per Snowball rule). */
static int r_tidy_up(struct SN_env * z);
static int r_derivational(struct SN_env * z);
static int r_noun(struct SN_env * z);
static int r_verb(struct SN_env * z);
static int r_reflexive(struct SN_env * z);
static int r_adjectival(struct SN_env * z);
static int r_adjective(struct SN_env * z);
static int r_perfective_gerund(struct SN_env * z);
static int r_R2(struct SN_env * z);
static int r_mark_regions(struct SN_env * z);
/*
 * UTF-8 encoded Cyrillic fragments used in equality tests / replacements:
 * s_0/s_2/s_4 = "а", s_1/s_3/s_5 = "я", s_6..s_8 = "н",
 * s_9 = "ё", s_10 = "е", s_11 = "и".
 */
static const symbol s_0[] = { 0xD0, 0xB0 };
static const symbol s_1[] = { 0xD1, 0x8F };
static const symbol s_2[] = { 0xD0, 0xB0 };
static const symbol s_3[] = { 0xD1, 0x8F };
static const symbol s_4[] = { 0xD0, 0xB0 };
static const symbol s_5[] = { 0xD1, 0x8F };
static const symbol s_6[] = { 0xD0, 0xBD };
static const symbol s_7[] = { 0xD0, 0xBD };
static const symbol s_8[] = { 0xD0, 0xBD };
static const symbol s_9[] = { 0xD1, 0x91 };
static const symbol s_10[] = { 0xD0, 0xB5 };
static const symbol s_11[] = { 0xD0, 0xB8 };
/*
 * a_0 (used by r_perfective_gerund): perfective gerund endings
 * вшись/ывшись/ившись, в/ыв/ив, вши/ывши/ивши.
 * among fields: { byte length, text, substring link (negative offsets
 * tie an entry to the earlier entry it extends -- generated, see
 * snowball_runtime), rule number returned by find_among_b, callback }.
 * Rule 1 entries additionally require a preceding "а"/"я" (checked in
 * the calling routine); rule 2 entries are unconditional.
 */
static const symbol s_0_0[10] = { 0xD0, 0xB2, 0xD1, 0x88, 0xD0, 0xB8, 0xD1, 0x81, 0xD1, 0x8C };
static const symbol s_0_1[12] = { 0xD1, 0x8B, 0xD0, 0xB2, 0xD1, 0x88, 0xD0, 0xB8, 0xD1, 0x81, 0xD1, 0x8C };
static const symbol s_0_2[12] = { 0xD0, 0xB8, 0xD0, 0xB2, 0xD1, 0x88, 0xD0, 0xB8, 0xD1, 0x81, 0xD1, 0x8C };
static const symbol s_0_3[2] = { 0xD0, 0xB2 };
static const symbol s_0_4[4] = { 0xD1, 0x8B, 0xD0, 0xB2 };
static const symbol s_0_5[4] = { 0xD0, 0xB8, 0xD0, 0xB2 };
static const symbol s_0_6[6] = { 0xD0, 0xB2, 0xD1, 0x88, 0xD0, 0xB8 };
static const symbol s_0_7[8] = { 0xD1, 0x8B, 0xD0, 0xB2, 0xD1, 0x88, 0xD0, 0xB8 };
static const symbol s_0_8[8] = { 0xD0, 0xB8, 0xD0, 0xB2, 0xD1, 0x88, 0xD0, 0xB8 };
static const struct among a_0[9] = {
    { 10, s_0_0, 0, 1, 0},
    { 12, s_0_1, -1, 2, 0},
    { 12, s_0_2, -2, 2, 0},
    { 2, s_0_3, 0, 1, 0},
    { 4, s_0_4, -1, 2, 0},
    { 4, s_0_5, -2, 2, 0},
    { 6, s_0_6, 0, 1, 0},
    { 8, s_0_7, -1, 2, 0},
    { 8, s_0_8, -2, 2, 0}
};
/* a_1 (used by r_adjective): adjective endings, e.g. ему/ому, ых/их,
 * ую/юю, ею/ою, яя/ая, ые/ее/ие/ое, ыми/ими, ый/ей/ий/ой, ым/ем/им/ом,
 * его/ого. All are deleted unconditionally within RV. */
static const symbol s_1_0[6] = { 0xD0, 0xB5, 0xD0, 0xBC, 0xD1, 0x83 };
static const symbol s_1_1[6] = { 0xD0, 0xBE, 0xD0, 0xBC, 0xD1, 0x83 };
static const symbol s_1_2[4] = { 0xD1, 0x8B, 0xD1, 0x85 };
static const symbol s_1_3[4] = { 0xD0, 0xB8, 0xD1, 0x85 };
static const symbol s_1_4[4] = { 0xD1, 0x83, 0xD1, 0x8E };
static const symbol s_1_5[4] = { 0xD1, 0x8E, 0xD1, 0x8E };
static const symbol s_1_6[4] = { 0xD0, 0xB5, 0xD1, 0x8E };
static const symbol s_1_7[4] = { 0xD0, 0xBE, 0xD1, 0x8E };
static const symbol s_1_8[4] = { 0xD1, 0x8F, 0xD1, 0x8F };
static const symbol s_1_9[4] = { 0xD0, 0xB0, 0xD1, 0x8F };
static const symbol s_1_10[4] = { 0xD1, 0x8B, 0xD0, 0xB5 };
static const symbol s_1_11[4] = { 0xD0, 0xB5, 0xD0, 0xB5 };
static const symbol s_1_12[4] = { 0xD0, 0xB8, 0xD0, 0xB5 };
static const symbol s_1_13[4] = { 0xD0, 0xBE, 0xD0, 0xB5 };
static const symbol s_1_14[6] = { 0xD1, 0x8B, 0xD0, 0xBC, 0xD0, 0xB8 };
static const symbol s_1_15[6] = { 0xD0, 0xB8, 0xD0, 0xBC, 0xD0, 0xB8 };
static const symbol s_1_16[4] = { 0xD1, 0x8B, 0xD0, 0xB9 };
static const symbol s_1_17[4] = { 0xD0, 0xB5, 0xD0, 0xB9 };
static const symbol s_1_18[4] = { 0xD0, 0xB8, 0xD0, 0xB9 };
static const symbol s_1_19[4] = { 0xD0, 0xBE, 0xD0, 0xB9 };
static const symbol s_1_20[4] = { 0xD1, 0x8B, 0xD0, 0xBC };
static const symbol s_1_21[4] = { 0xD0, 0xB5, 0xD0, 0xBC };
static const symbol s_1_22[4] = { 0xD0, 0xB8, 0xD0, 0xBC };
static const symbol s_1_23[4] = { 0xD0, 0xBE, 0xD0, 0xBC };
static const symbol s_1_24[6] = { 0xD0, 0xB5, 0xD0, 0xB3, 0xD0, 0xBE };
static const symbol s_1_25[6] = { 0xD0, 0xBE, 0xD0, 0xB3, 0xD0, 0xBE };
static const struct among a_1[26] = {
    { 6, s_1_0, 0, 1, 0},
    { 6, s_1_1, 0, 1, 0},
    { 4, s_1_2, 0, 1, 0},
    { 4, s_1_3, 0, 1, 0},
    { 4, s_1_4, 0, 1, 0},
    { 4, s_1_5, 0, 1, 0},
    { 4, s_1_6, 0, 1, 0},
    { 4, s_1_7, 0, 1, 0},
    { 4, s_1_8, 0, 1, 0},
    { 4, s_1_9, 0, 1, 0},
    { 4, s_1_10, 0, 1, 0},
    { 4, s_1_11, 0, 1, 0},
    { 4, s_1_12, 0, 1, 0},
    { 4, s_1_13, 0, 1, 0},
    { 6, s_1_14, 0, 1, 0},
    { 6, s_1_15, 0, 1, 0},
    { 4, s_1_16, 0, 1, 0},
    { 4, s_1_17, 0, 1, 0},
    { 4, s_1_18, 0, 1, 0},
    { 4, s_1_19, 0, 1, 0},
    { 4, s_1_20, 0, 1, 0},
    { 4, s_1_21, 0, 1, 0},
    { 4, s_1_22, 0, 1, 0},
    { 4, s_1_23, 0, 1, 0},
    { 6, s_1_24, 0, 1, 0},
    { 6, s_1_25, 0, 1, 0}
};
/* a_2 (used by r_adjectival): participle endings вш/ывш/ивш, щ/ющ/ующ,
 * ем, нн. Rule 1 entries require a preceding "а"/"я". */
static const symbol s_2_0[4] = { 0xD0, 0xB2, 0xD1, 0x88 };
static const symbol s_2_1[6] = { 0xD1, 0x8B, 0xD0, 0xB2, 0xD1, 0x88 };
static const symbol s_2_2[6] = { 0xD0, 0xB8, 0xD0, 0xB2, 0xD1, 0x88 };
static const symbol s_2_3[2] = { 0xD1, 0x89 };
static const symbol s_2_4[4] = { 0xD1, 0x8E, 0xD1, 0x89 };
static const symbol s_2_5[6] = { 0xD1, 0x83, 0xD1, 0x8E, 0xD1, 0x89 };
static const symbol s_2_6[4] = { 0xD0, 0xB5, 0xD0, 0xBC };
static const symbol s_2_7[4] = { 0xD0, 0xBD, 0xD0, 0xBD };
static const struct among a_2[8] = {
    { 4, s_2_0, 0, 1, 0},
    { 6, s_2_1, -1, 2, 0},
    { 6, s_2_2, -2, 2, 0},
    { 2, s_2_3, 0, 1, 0},
    { 4, s_2_4, -1, 1, 0},
    { 6, s_2_5, -1, 2, 0},
    { 4, s_2_6, 0, 1, 0},
    { 4, s_2_7, 0, 1, 0}
};
/* a_3 (used by r_reflexive): reflexive endings сь, ся. */
static const symbol s_3_0[4] = { 0xD1, 0x81, 0xD1, 0x8C };
static const symbol s_3_1[4] = { 0xD1, 0x81, 0xD1, 0x8F };
static const struct among a_3[2] = {
    { 4, s_3_0, 0, 1, 0},
    { 4, s_3_1, 0, 1, 0}
};
/* a_4 (used by r_verb): verb endings. Rule 1 entries require a
 * preceding "а"/"я"; rule 2 entries are deleted unconditionally. */
static const symbol s_4_0[4] = { 0xD1, 0x8B, 0xD1, 0x82 };
static const symbol s_4_1[4] = { 0xD1, 0x8E, 0xD1, 0x82 };
static const symbol s_4_2[6] = { 0xD1, 0x83, 0xD1, 0x8E, 0xD1, 0x82 };
static const symbol s_4_3[4] = { 0xD1, 0x8F, 0xD1, 0x82 };
static const symbol s_4_4[4] = { 0xD0, 0xB5, 0xD1, 0x82 };
static const symbol s_4_5[6] = { 0xD1, 0x83, 0xD0, 0xB5, 0xD1, 0x82 };
static const symbol s_4_6[4] = { 0xD0, 0xB8, 0xD1, 0x82 };
static const symbol s_4_7[4] = { 0xD0, 0xBD, 0xD1, 0x8B };
static const symbol s_4_8[6] = { 0xD0, 0xB5, 0xD0, 0xBD, 0xD1, 0x8B };
static const symbol s_4_9[4] = { 0xD1, 0x82, 0xD1, 0x8C };
static const symbol s_4_10[6] = { 0xD1, 0x8B, 0xD1, 0x82, 0xD1, 0x8C };
static const symbol s_4_11[6] = { 0xD0, 0xB8, 0xD1, 0x82, 0xD1, 0x8C };
static const symbol s_4_12[6] = { 0xD0, 0xB5, 0xD1, 0x88, 0xD1, 0x8C };
static const symbol s_4_13[6] = { 0xD0, 0xB8, 0xD1, 0x88, 0xD1, 0x8C };
static const symbol s_4_14[2] = { 0xD1, 0x8E };
static const symbol s_4_15[4] = { 0xD1, 0x83, 0xD1, 0x8E };
static const symbol s_4_16[4] = { 0xD0, 0xBB, 0xD0, 0xB0 };
static const symbol s_4_17[6] = { 0xD1, 0x8B, 0xD0, 0xBB, 0xD0, 0xB0 };
static const symbol s_4_18[6] = { 0xD0, 0xB8, 0xD0, 0xBB, 0xD0, 0xB0 };
static const symbol s_4_19[4] = { 0xD0, 0xBD, 0xD0, 0xB0 };
static const symbol s_4_20[6] = { 0xD0, 0xB5, 0xD0, 0xBD, 0xD0, 0xB0 };
static const symbol s_4_21[6] = { 0xD0, 0xB5, 0xD1, 0x82, 0xD0, 0xB5 };
static const symbol s_4_22[6] = { 0xD0, 0xB8, 0xD1, 0x82, 0xD0, 0xB5 };
static const symbol s_4_23[6] = { 0xD0, 0xB9, 0xD1, 0x82, 0xD0, 0xB5 };
static const symbol s_4_24[8] = { 0xD1, 0x83, 0xD0, 0xB9, 0xD1, 0x82, 0xD0, 0xB5 };
static const symbol s_4_25[8] = { 0xD0, 0xB5, 0xD0, 0xB9, 0xD1, 0x82, 0xD0, 0xB5 };
static const symbol s_4_26[4] = { 0xD0, 0xBB, 0xD0, 0xB8 };
static const symbol s_4_27[6] = { 0xD1, 0x8B, 0xD0, 0xBB, 0xD0, 0xB8 };
static const symbol s_4_28[6] = { 0xD0, 0xB8, 0xD0, 0xBB, 0xD0, 0xB8 };
static const symbol s_4_29[2] = { 0xD0, 0xB9 };
static const symbol s_4_30[4] = { 0xD1, 0x83, 0xD0, 0xB9 };
static const symbol s_4_31[4] = { 0xD0, 0xB5, 0xD0, 0xB9 };
static const symbol s_4_32[2] = { 0xD0, 0xBB };
static const symbol s_4_33[4] = { 0xD1, 0x8B, 0xD0, 0xBB };
static const symbol s_4_34[4] = { 0xD0, 0xB8, 0xD0, 0xBB };
static const symbol s_4_35[4] = { 0xD1, 0x8B, 0xD0, 0xBC };
static const symbol s_4_36[4] = { 0xD0, 0xB5, 0xD0, 0xBC };
static const symbol s_4_37[4] = { 0xD0, 0xB8, 0xD0, 0xBC };
static const symbol s_4_38[2] = { 0xD0, 0xBD };
static const symbol s_4_39[4] = { 0xD0, 0xB5, 0xD0, 0xBD };
static const symbol s_4_40[4] = { 0xD0, 0xBB, 0xD0, 0xBE };
static const symbol s_4_41[6] = { 0xD1, 0x8B, 0xD0, 0xBB, 0xD0, 0xBE };
static const symbol s_4_42[6] = { 0xD0, 0xB8, 0xD0, 0xBB, 0xD0, 0xBE };
static const symbol s_4_43[4] = { 0xD0, 0xBD, 0xD0, 0xBE };
static const symbol s_4_44[6] = { 0xD0, 0xB5, 0xD0, 0xBD, 0xD0, 0xBE };
static const symbol s_4_45[6] = { 0xD0, 0xBD, 0xD0, 0xBD, 0xD0, 0xBE };
static const struct among a_4[46] = {
    { 4, s_4_0, 0, 2, 0},
    { 4, s_4_1, 0, 1, 0},
    { 6, s_4_2, -1, 2, 0},
    { 4, s_4_3, 0, 2, 0},
    { 4, s_4_4, 0, 1, 0},
    { 6, s_4_5, -1, 2, 0},
    { 4, s_4_6, 0, 2, 0},
    { 4, s_4_7, 0, 1, 0},
    { 6, s_4_8, -1, 2, 0},
    { 4, s_4_9, 0, 1, 0},
    { 6, s_4_10, -1, 2, 0},
    { 6, s_4_11, -2, 2, 0},
    { 6, s_4_12, 0, 1, 0},
    { 6, s_4_13, 0, 2, 0},
    { 2, s_4_14, 0, 2, 0},
    { 4, s_4_15, -1, 2, 0},
    { 4, s_4_16, 0, 1, 0},
    { 6, s_4_17, -1, 2, 0},
    { 6, s_4_18, -2, 2, 0},
    { 4, s_4_19, 0, 1, 0},
    { 6, s_4_20, -1, 2, 0},
    { 6, s_4_21, 0, 1, 0},
    { 6, s_4_22, 0, 2, 0},
    { 6, s_4_23, 0, 1, 0},
    { 8, s_4_24, -1, 2, 0},
    { 8, s_4_25, -2, 2, 0},
    { 4, s_4_26, 0, 1, 0},
    { 6, s_4_27, -1, 2, 0},
    { 6, s_4_28, -2, 2, 0},
    { 2, s_4_29, 0, 1, 0},
    { 4, s_4_30, -1, 2, 0},
    { 4, s_4_31, -2, 2, 0},
    { 2, s_4_32, 0, 1, 0},
    { 4, s_4_33, -1, 2, 0},
    { 4, s_4_34, -2, 2, 0},
    { 4, s_4_35, 0, 2, 0},
    { 4, s_4_36, 0, 1, 0},
    { 4, s_4_37, 0, 2, 0},
    { 2, s_4_38, 0, 1, 0},
    { 4, s_4_39, -1, 2, 0},
    { 4, s_4_40, 0, 1, 0},
    { 6, s_4_41, -1, 2, 0},
    { 6, s_4_42, -2, 2, 0},
    { 4, s_4_43, 0, 1, 0},
    { 6, s_4_44, -1, 2, 0},
    { 6, s_4_45, -2, 1, 0}
};
/* a_5 (used by r_noun): noun endings, deleted unconditionally in RV. */
static const symbol s_5_0[2] = { 0xD1, 0x83 };
static const symbol s_5_1[4] = { 0xD1, 0x8F, 0xD1, 0x85 };
static const symbol s_5_2[6] = { 0xD0, 0xB8, 0xD1, 0x8F, 0xD1, 0x85 };
static const symbol s_5_3[4] = { 0xD0, 0xB0, 0xD1, 0x85 };
static const symbol s_5_4[2] = { 0xD1, 0x8B };
static const symbol s_5_5[2] = { 0xD1, 0x8C };
static const symbol s_5_6[2] = { 0xD1, 0x8E };
static const symbol s_5_7[4] = { 0xD1, 0x8C, 0xD1, 0x8E };
static const symbol s_5_8[4] = { 0xD0, 0xB8, 0xD1, 0x8E };
static const symbol s_5_9[2] = { 0xD1, 0x8F };
static const symbol s_5_10[4] = { 0xD1, 0x8C, 0xD1, 0x8F };
static const symbol s_5_11[4] = { 0xD0, 0xB8, 0xD1, 0x8F };
static const symbol s_5_12[2] = { 0xD0, 0xB0 };
static const symbol s_5_13[4] = { 0xD0, 0xB5, 0xD0, 0xB2 };
static const symbol s_5_14[4] = { 0xD0, 0xBE, 0xD0, 0xB2 };
static const symbol s_5_15[2] = { 0xD0, 0xB5 };
static const symbol s_5_16[4] = { 0xD1, 0x8C, 0xD0, 0xB5 };
static const symbol s_5_17[4] = { 0xD0, 0xB8, 0xD0, 0xB5 };
static const symbol s_5_18[2] = { 0xD0, 0xB8 };
static const symbol s_5_19[4] = { 0xD0, 0xB5, 0xD0, 0xB8 };
static const symbol s_5_20[4] = { 0xD0, 0xB8, 0xD0, 0xB8 };
static const symbol s_5_21[6] = { 0xD1, 0x8F, 0xD0, 0xBC, 0xD0, 0xB8 };
static const symbol s_5_22[8] = { 0xD0, 0xB8, 0xD1, 0x8F, 0xD0, 0xBC, 0xD0, 0xB8 };
static const symbol s_5_23[6] = { 0xD0, 0xB0, 0xD0, 0xBC, 0xD0, 0xB8 };
static const symbol s_5_24[2] = { 0xD0, 0xB9 };
static const symbol s_5_25[4] = { 0xD0, 0xB5, 0xD0, 0xB9 };
static const symbol s_5_26[6] = { 0xD0, 0xB8, 0xD0, 0xB5, 0xD0, 0xB9 };
static const symbol s_5_27[4] = { 0xD0, 0xB8, 0xD0, 0xB9 };
static const symbol s_5_28[4] = { 0xD0, 0xBE, 0xD0, 0xB9 };
static const symbol s_5_29[4] = { 0xD1, 0x8F, 0xD0, 0xBC };
static const symbol s_5_30[6] = { 0xD0, 0xB8, 0xD1, 0x8F, 0xD0, 0xBC };
static const symbol s_5_31[4] = { 0xD0, 0xB0, 0xD0, 0xBC };
static const symbol s_5_32[4] = { 0xD0, 0xB5, 0xD0, 0xBC };
static const symbol s_5_33[6] = { 0xD0, 0xB8, 0xD0, 0xB5, 0xD0, 0xBC };
static const symbol s_5_34[4] = { 0xD0, 0xBE, 0xD0, 0xBC };
static const symbol s_5_35[2] = { 0xD0, 0xBE };
static const struct among a_5[36] = {
    { 2, s_5_0, 0, 1, 0},
    { 4, s_5_1, 0, 1, 0},
    { 6, s_5_2, -1, 1, 0},
    { 4, s_5_3, 0, 1, 0},
    { 2, s_5_4, 0, 1, 0},
    { 2, s_5_5, 0, 1, 0},
    { 2, s_5_6, 0, 1, 0},
    { 4, s_5_7, -1, 1, 0},
    { 4, s_5_8, -2, 1, 0},
    { 2, s_5_9, 0, 1, 0},
    { 4, s_5_10, -1, 1, 0},
    { 4, s_5_11, -2, 1, 0},
    { 2, s_5_12, 0, 1, 0},
    { 4, s_5_13, 0, 1, 0},
    { 4, s_5_14, 0, 1, 0},
    { 2, s_5_15, 0, 1, 0},
    { 4, s_5_16, -1, 1, 0},
    { 4, s_5_17, -2, 1, 0},
    { 2, s_5_18, 0, 1, 0},
    { 4, s_5_19, -1, 1, 0},
    { 4, s_5_20, -2, 1, 0},
    { 6, s_5_21, -3, 1, 0},
    { 8, s_5_22, -1, 1, 0},
    { 6, s_5_23, -5, 1, 0},
    { 2, s_5_24, 0, 1, 0},
    { 4, s_5_25, -1, 1, 0},
    { 6, s_5_26, -1, 1, 0},
    { 4, s_5_27, -3, 1, 0},
    { 4, s_5_28, -4, 1, 0},
    { 4, s_5_29, 0, 1, 0},
    { 6, s_5_30, -1, 1, 0},
    { 4, s_5_31, 0, 1, 0},
    { 4, s_5_32, 0, 1, 0},
    { 6, s_5_33, -1, 1, 0},
    { 4, s_5_34, 0, 1, 0},
    { 2, s_5_35, 0, 1, 0}
};
/* a_6 (used by r_derivational): derivational suffixes ост, ость. */
static const symbol s_6_0[6] = { 0xD0, 0xBE, 0xD1, 0x81, 0xD1, 0x82 };
static const symbol s_6_1[8] = { 0xD0, 0xBE, 0xD1, 0x81, 0xD1, 0x82, 0xD1, 0x8C };
static const struct among a_6[2] = {
    { 6, s_6_0, 0, 1, 0},
    { 8, s_6_1, 0, 1, 0}
};
/* a_7 (used by r_tidy_up): ейш, ь, ейше, н (superlative, soft sign,
 * double-н clean-up). */
static const symbol s_7_0[6] = { 0xD0, 0xB5, 0xD0, 0xB9, 0xD1, 0x88 };
static const symbol s_7_1[2] = { 0xD1, 0x8C };
static const symbol s_7_2[8] = { 0xD0, 0xB5, 0xD0, 0xB9, 0xD1, 0x88, 0xD0, 0xB5 };
static const symbol s_7_3[2] = { 0xD0, 0xBD };
static const struct among a_7[4] = {
    { 6, s_7_0, 0, 1, 0},
    { 2, s_7_1, 0, 3, 0},
    { 8, s_7_2, 0, 1, 0},
    { 2, s_7_3, 0, 2, 0}
};
/* Bit map of the Russian vowels (а е и о у ы э ю я) over the code-point
 * range U+0430..U+044F, as used by the in/out_grouping_U calls. */
static const unsigned char g_v[] = { 33, 65, 8, 232 };
/*
 * mark_regions: compute the RV and R2 markers for the current word,
 * following the standard Snowball region definitions:
 *   pV starts just after the first vowel;
 *   p2 starts just after the first non-vowel that follows a vowel
 *   inside R1.
 * Both default to the end of the word when the pattern is absent.
 * Always succeeds; the cursor is restored afterwards.
 */
static int r_mark_regions(struct SN_env * z) {
    ((SN_local *)z)->i_pV = z->l;  /* default: empty RV */
    ((SN_local *)z)->i_p2 = z->l;  /* default: empty R2 */
    {
        int v_1 = z->c;
        {
            /* advance past the first vowel */
            int ret = out_grouping_U(z, g_v, 1072, 1103, 1);
            if (ret < 0) goto lab0;
            z->c += ret;
        }
        ((SN_local *)z)->i_pV = z->c;
        {
            /* advance past the following non-vowel */
            int ret = in_grouping_U(z, g_v, 1072, 1103, 1);
            if (ret < 0) goto lab0;
            z->c += ret;
        }
        {
            /* advance past the next vowel */
            int ret = out_grouping_U(z, g_v, 1072, 1103, 1);
            if (ret < 0) goto lab0;
            z->c += ret;
        }
        {
            /* advance past the next non-vowel: R2 starts here */
            int ret = in_grouping_U(z, g_v, 1072, 1103, 1);
            if (ret < 0) goto lab0;
            z->c += ret;
        }
        ((SN_local *)z)->i_p2 = z->c;
    lab0:
        z->c = v_1;  /* the regions are markers only; restore the cursor */
    }
    return 1;
}
/* R2: succeed iff the current (backward) cursor lies inside region R2. */
static int r_R2(struct SN_env * z) {
    return ((SN_local *)z)->i_p2 <= z->c;
}
/*
 * perfective_gerund: delete a perfective gerund ending from a_0.
 * Rule 1 endings (в, вши, вшись) apply only after "а" or "я";
 * rule 2 endings (the ыв/ив forms) are deleted unconditionally.
 */
static int r_perfective_gerund(struct SN_env * z) {
    int among_var;
    z->ket = z->c;
    among_var = find_among_b(z, a_0, 9, 0);
    if (!among_var) return 0;
    z->bra = z->c;
    switch (among_var) {
        case 1:
            /* require a preceding "а" or "я" */
            do {
                int v_1 = z->l - z->c;
                if (!(eq_s_b(z, 2, s_0))) goto lab0;
                break;
            lab0:
                z->c = z->l - v_1;
                if (!(eq_s_b(z, 2, s_1))) return 0;
            } while (0);
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 2:
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
    }
    return 1;
}
/* adjective: delete any adjective ending listed in a_1. */
static int r_adjective(struct SN_env * z) {
    z->ket = z->c;
    if (!find_among_b(z, a_1, 26, 0)) return 0;
    z->bra = z->c;
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/*
 * adjectival: delete an adjective ending, then optionally a preceding
 * participle ending from a_2 (rule 1 participles also require "а"/"я"
 * before them). Fails only if the adjective step fails.
 */
static int r_adjectival(struct SN_env * z) {
    int among_var;
    {
        int ret = r_adjective(z);
        if (ret <= 0) return ret;
    }
    {
        /* optional participle deletion */
        int v_1 = z->l - z->c;
        z->ket = z->c;
        among_var = find_among_b(z, a_2, 8, 0);
        if (!among_var) { z->c = z->l - v_1; goto lab0; }
        z->bra = z->c;
        switch (among_var) {
            case 1:
                /* require a preceding "а" or "я" */
                do {
                    int v_2 = z->l - z->c;
                    if (!(eq_s_b(z, 2, s_2))) goto lab1;
                    break;
                lab1:
                    z->c = z->l - v_2;
                    if (!(eq_s_b(z, 2, s_3))) { z->c = z->l - v_1; goto lab0; }
                } while (0);
                {
                    int ret = slice_del(z);
                    if (ret < 0) return ret;
                }
                break;
            case 2:
                {
                    int ret = slice_del(z);
                    if (ret < 0) return ret;
                }
                break;
        }
    lab0:
        ;
    }
    return 1;
}
/*
 * reflexive: delete a reflexive ending ("сь" or "ся").
 * The byte test is a generated fast pre-check: 140 (0x8C) and 143
 * (0x8F) are the final UTF-8 bytes of "ь" and "я" respectively.
 */
static int r_reflexive(struct SN_env * z) {
    z->ket = z->c;
    if (z->c - 3 <= z->lb || (z->p[z->c - 1] != 140 && z->p[z->c - 1] != 143)) return 0;
    if (!find_among_b(z, a_3, 2, 0)) return 0;
    z->bra = z->c;
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/*
 * verb: delete a verb ending from a_4. Rule 1 endings require a
 * preceding "а" or "я"; rule 2 endings are deleted unconditionally.
 */
static int r_verb(struct SN_env * z) {
    int among_var;
    z->ket = z->c;
    among_var = find_among_b(z, a_4, 46, 0);
    if (!among_var) return 0;
    z->bra = z->c;
    switch (among_var) {
        case 1:
            /* require a preceding "а" or "я" */
            do {
                int v_1 = z->l - z->c;
                if (!(eq_s_b(z, 2, s_4))) goto lab0;
                break;
            lab0:
                z->c = z->l - v_1;
                if (!(eq_s_b(z, 2, s_5))) return 0;
            } while (0);
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 2:
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
    }
    return 1;
}
/* noun: delete any noun ending listed in a_5. */
static int r_noun(struct SN_env * z) {
    z->ket = z->c;
    if (!find_among_b(z, a_5, 36, 0)) return 0;
    z->bra = z->c;
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/*
 * derivational: delete "ост"/"ость" when the suffix lies inside R2.
 * The byte test is a fast pre-check: 130 (0x82) and 140 (0x8C) are the
 * final UTF-8 bytes of "т" and "ь" respectively.
 */
static int r_derivational(struct SN_env * z) {
    z->ket = z->c;
    if (z->c - 5 <= z->lb || (z->p[z->c - 1] != 130 && z->p[z->c - 1] != 140)) return 0;
    if (!find_among_b(z, a_6, 2, 0)) return 0;
    z->bra = z->c;
    {
        int ret = r_R2(z);
        if (ret <= 0) return ret;
    }
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/*
 * tidy_up: final clean-up --
 *   rule 1: drop superlative "ейш"/"ейше", then undouble a preceding
 *           "нн" to "н" (the superlative is still removed even when
 *           the subsequent "нн" test fails and 0 is returned);
 *   rule 2: undouble a trailing "нн" to "н";
 *   rule 3: drop a trailing soft sign "ь".
 */
static int r_tidy_up(struct SN_env * z) {
    int among_var;
    z->ket = z->c;
    among_var = find_among_b(z, a_7, 4, 0);
    if (!among_var) return 0;
    z->bra = z->c;
    switch (among_var) {
        case 1:
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            /* undouble "нн": keep one "н", delete the other */
            z->ket = z->c;
            if (!(eq_s_b(z, 2, s_6))) return 0;
            z->bra = z->c;
            if (!(eq_s_b(z, 2, s_7))) return 0;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 2:
            /* matched a final "н": require another "н" before it */
            if (!(eq_s_b(z, 2, s_8))) return 0;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 3:
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
    }
    return 1;
}
/*
 * Entry point: stem the word currently held in z.
 * Pipeline: normalise every "ё" to "е"; compute the RV/R2 regions;
 * then, working backwards and restricted to RV: remove a perfective
 * gerund OR (an optional reflexive ending followed by one of
 * adjectival / verb / noun); drop a trailing "и"; remove a
 * derivational suffix (checked against R2); apply the final tidy-up.
 */
extern int russian_UTF_8_stem(struct SN_env * z) {
    {
        /* do repeat ( goto ['ё'] <- 'е' ): replace every "ё" with "е" */
        int v_1 = z->c;
        while (1) {
            int v_2 = z->c;
            while (1) {
                int v_3 = z->c;
                z->bra = z->c;
                if (!(eq_s(z, 2, s_9))) goto lab2;
                z->ket = z->c;
                z->c = v_3;
                break;
            lab2:
                z->c = v_3;
                {
                    int ret = skip_utf8(z->p, z->c, z->l, 1);
                    if (ret < 0) goto lab1;  /* end of word: stop scanning */
                    z->c = ret;
                }
            }
            {
                int ret = slice_from_s(z, 2, s_10);  /* "ё" -> "е" */
                if (ret < 0) return ret;
            }
            continue;
        lab1:
            z->c = v_2;
            break;
        }
        z->c = v_1;
    }
    {
        int ret = r_mark_regions(z);
        if (ret < 0) return ret;
    }
    z->lb = z->c; z->c = z->l;  /* switch to backward (suffix) mode */
    {
        int v_4;
        /* restrict all suffix removal to the RV region */
        if (z->c < ((SN_local *)z)->i_pV) return 0;
        v_4 = z->lb; z->lb = ((SN_local *)z)->i_pV;
        {
            /* step 1: perfective gerund, or else an optional reflexive
             * ending followed by one of adjectival / verb / noun */
            int v_5 = z->l - z->c;
            do {
                int v_6 = z->l - z->c;
                {
                    int ret = r_perfective_gerund(z);
                    if (ret == 0) goto lab4;
                    if (ret < 0) return ret;
                }
                break;
            lab4:
                z->c = z->l - v_6;
                {
                    /* optional reflexive ending */
                    int v_7 = z->l - z->c;
                    {
                        int ret = r_reflexive(z);
                        if (ret == 0) { z->c = z->l - v_7; goto lab5; }
                        if (ret < 0) return ret;
                    }
                lab5:
                    ;
                }
                do {
                    int v_8 = z->l - z->c;
                    {
                        int ret = r_adjectival(z);
                        if (ret == 0) goto lab6;
                        if (ret < 0) return ret;
                    }
                    break;
                lab6:
                    z->c = z->l - v_8;
                    {
                        int ret = r_verb(z);
                        if (ret == 0) goto lab7;
                        if (ret < 0) return ret;
                    }
                    break;
                lab7:
                    z->c = z->l - v_8;
                    {
                        int ret = r_noun(z);
                        if (ret == 0) goto lab3;
                        if (ret < 0) return ret;
                    }
                } while (0);
            } while (0);
        lab3:
            z->c = z->l - v_5;
        }
        {
            /* step 2: drop a trailing "и" if present */
            int v_9 = z->l - z->c;
            z->ket = z->c;
            if (!(eq_s_b(z, 2, s_11))) { z->c = z->l - v_9; goto lab8; }
            z->bra = z->c;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
        lab8:
            ;
        }
        {
            /* step 3: derivational suffix (verified against R2) */
            int v_10 = z->l - z->c;
            {
                int ret = r_derivational(z);
                if (ret < 0) return ret;
            }
            z->c = z->l - v_10;
        }
        {
            /* step 4: tidy up superlatives, double "н" and soft sign */
            int v_11 = z->l - z->c;
            {
                int ret = r_tidy_up(z);
                if (ret < 0) return ret;
            }
            z->c = z->l - v_11;
        }
        z->lb = v_4;  /* restore the backward limit */
    }
    z->c = z->lb;
    return 1;
}
/* Allocate a stemmer environment with both region markers zeroed. */
extern struct SN_env * russian_UTF_8_create_env(void) {
    struct SN_env * z = SN_new_env(sizeof(SN_local));
    if (z) {
        ((SN_local *)z)->i_p2 = 0;
        ((SN_local *)z)->i_pV = 0;
    }
    return z;
}
/* Release an environment created by russian_UTF_8_create_env. */
extern void russian_UTF_8_close_env(struct SN_env * z) {
    SN_delete_env(z);
}
"""Support for Habitica sensors."""
from datetime import timedelta
import logging
from homeassistant.components import habitica
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Minimum interval between Habitica API refreshes; enforced by the
# @Throttle decorator on HabitipyData.update(), so all sensors share
# one rate-limited fetch.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the habitica platform."""
    # Sensors are created only through discovery by the habitica component.
    if discovery_info is None:
        return

    name = discovery_info[habitica.CONF_NAME]
    updater = HabitipyData(hass.data[habitica.DOMAIN][name])
    # Prime the cache so the first entity update has data available.
    await updater.update()

    entities = []
    for sensor_name in discovery_info[habitica.CONF_SENSORS]:
        entities.append(HabitipySensor(name, sensor_name, updater))
    async_add_devices(entities, True)
class HabitipyData:
    """Habitica API user data cache."""

    def __init__(self, api):
        """Habitica API user data cache."""
        # habitipy client bound to one configured Habitica account.
        self.api = api
        # Latest user payload from the API; None until the first update().
        self.data = None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def update(self):
        """Get a new fix from Habitica servers."""
        # NOTE(review): @Throttle presumably short-circuits calls made
        # within MIN_TIME_BETWEEN_UPDATES, leaving self.data unchanged
        # for those calls -- confirm against the decorator's contract.
        self.data = await self.api.user.get()
class HabitipySensor(Entity):
    """A generic Habitica sensor."""

    def __init__(self, name, sensor_name, updater):
        """Initialize a generic Habitica sensor."""
        self._name = name
        self._sensor_name = sensor_name
        # Static metadata (path into the user payload, icon, unit).
        self._sensor_type = habitica.SENSORS_TYPES[sensor_name]
        self._state = None
        self._updater = updater

    async def async_update(self):
        """Update Condition and Forecast."""
        await self._updater.update()
        # Walk the configured key path down into the cached user payload.
        value = self._updater.data
        for key in self._sensor_type.path:
            value = value[key]
        self._state = value

    @property
    def name(self):
        """Return the name of the sensor."""
        return "_".join((habitica.DOMAIN, self._name, self._sensor_name))

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._sensor_type.icon

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._sensor_type.unit
---
navigation_title: "Update By Query API"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update.html#update-api-example
applies_to:
stack: all
---
# Update by query API examples
The [Update by query API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) updates all documents that match a specified query, enabling bulk modification of the document source or metadata via a script.
You can learn how to:
- [Run basic update-by-query operations](#run-basic-updates)
- [Modify documents using scripts or ingest pipelines](#update-the-document)
- [Throttle update operations](#change-throttling-for-a-request)
- [Parallelize updates using manual slicing](#slice-manually)
- [Automate slicing for better performance](#use-automatic-slicing)
- [Apply mapping changes to existing documents](#pick-up-a-new-property)
## Run basic updates
The simplest usage of `_update_by_query` just performs an update on every document in the data stream or index without changing the source. This is useful to [pick up a new property](#pick-up-a-new-property) or some other online mapping change.
To update selected documents, specify a query in the request body:
```console
POST my-index-000001/_update_by_query?conflicts=proceed
{
"query": { <1>
"term": {
"user.id": "kimchy"
}
}
}
```
% TEST[setup:my_index]
1. The query must be passed as a value to the `query` key, in the same way as the [Search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search). You can also use the `q` parameter in the same way as the search API.
### Target multiple indices
Update documents in multiple data streams or indices:
```console
POST my-index-000001,my-index-000002/_update_by_query
```
% TEST[s/^/PUT my-index-000001\nPUT my-index-000002\n/]
### Filter by routing
Limit the update by query operation to shards that match a particular routing value:
```console
POST my-index-000001/_update_by_query?routing=1
```
% TEST[setup:my_index]
### Change batch size
By default update by query uses scroll batches of 1000. You can change the batch size with the `scroll_size` parameter:
```console
POST my-index-000001/_update_by_query?scroll_size=100
```
% TEST[setup:my_index]
## Update the document
Update a document using a unique attribute:
```console
POST my-index-000001/_update_by_query
{
"query": {
"term": {
"user.id": "kimchy"
}
},
"max_docs": 1
}
```
% TEST[setup:my_index]
### Update the document source
Update by query supports scripts to update the document source. For example, the following request increments the `count` field for all documents with a `user.id` of `kimchy` in `my-index-000001`:
<!--
```console
PUT my-index-000001/_create/1
{
"user": {
"id": "kimchy"
},
"count": 1
}
```
-->
```console
POST my-index-000001/_update_by_query
{
"script": {
"source": "ctx._source.count++",
"lang": "painless"
},
"query": {
"term": {
"user.id": "kimchy"
}
}
}
```
% TEST[continued]
Note that `conflicts=proceed` is not specified in this example. In this case, a version conflict should halt the process so you can handle the failure.
As with the [Update API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update), you can set `ctx.op` to change the operation that is performed:
`noop`
: Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes.
The update by query operation skips updating the document and increments the `noop` counter.
`delete`
: Set `ctx.op = "delete"` if your script decides that the document should be deleted.
The update by query operation deletes the document and increments the `deleted` counter.
Update by query only supports `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error.
This API only enables you to modify the source of matching documents; you cannot move them.
### Update documents using an ingest pipeline
Update by query can use the [ingest pipelines](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md) feature by specifying a `pipeline`:
```console
PUT _ingest/pipeline/set-foo
{
"description" : "sets foo",
"processors" : [ {
"set" : {
"field": "foo",
"value": "bar"
}
} ]
}
POST my-index-000001/_update_by_query?pipeline=set-foo
```
% TEST[setup:my_index]
### Get the status of update by query operations
You can fetch the status of all running update by query requests with the [Task API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks):
```console
GET _tasks?detailed=true&actions=*byquery
```
% TEST[skip:No tasks to retrieve]
The response looks like:
```console-result
{
"nodes" : {
"r1A2WoRbTwKZ516z6NEs5A" : {
"name" : "r1A2WoR",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
"attributes" : {
"testattr" : "test",
"portsfile" : "true"
},
"tasks" : {
"r1A2WoRbTwKZ516z6NEs5A:36619" : {
"node" : "r1A2WoRbTwKZ516z6NEs5A",
"id" : 36619,
"type" : "transport",
"action" : "indices:data/write/update/byquery",
"status" : { <1>
"total" : 6154,
"updated" : 3500,
"created" : 0,
"deleted" : 0,
"batches" : 4,
"version_conflicts" : 0,
"noops" : 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0
},
"description" : ""
}
}
}
}
}
```
1. This object contains the actual status. It is just like the response JSON with the important addition of the `total` field. `total` is the total number of operations that the update by query operation expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request will finish when their sum is equal to the `total` field.
With the task id you can look up the task directly. The following example retrieves information about task `r1A2WoRbTwKZ516z6NEs5A:36619`:
```console
GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619
```
% TEST[catch:missing]
The advantage of this API is that it integrates with `wait_for_completion=false` to transparently return the status of completed tasks. If the task is completed and `wait_for_completion=false` was set on it, then it'll come back with a `results` or an `error` field. The cost of this feature is the document that `wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to you to delete that document.
### Cancel an update by query operation
Any update by query can be cancelled using the [Cancel API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks):
```console
POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
```
The task ID can be found using the [Task API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).
Cancellation should happen quickly but might take a few seconds. The task status API above will continue to list the update by query task until this task checks that it has been cancelled and terminates itself.
## Change throttling for a request
The value of `requests_per_second` can be changed on a running update by query using the `_rethrottle` API:
```console
POST _update_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
```
The task ID can be found using the [Task API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).
Just like when setting it on the `_update_by_query` API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query will take effect after completing the current batch. This prevents scroll timeouts.
## Slice manually
Slice an update by query manually by providing a slice id and total number of slices to each request:
```console
POST my-index-000001/_update_by_query
{
"slice": {
"id": 0,
"max": 2
},
"script": {
"source": "ctx._source['extra'] = 'test'"
}
}
POST my-index-000001/_update_by_query
{
"slice": {
"id": 1,
"max": 2
},
"script": {
"source": "ctx._source['extra'] = 'test'"
}
}
```
% TEST[setup:my_index_big]
Which you can verify works with:
```console
GET _refresh
POST my-index-000001/_search?size=0&q=extra:test&filter_path=hits.total
```
% TEST[continued]
Which results in a sensible `total` like this one:
```console-result
{
"hits": {
"total": {
"value": 120,
"relation": "eq"
}
}
}
```
## Use automatic slicing
You can also let update by query automatically parallelize using [slice-scroll](paginate-search-results.md#slice-scroll) to slice on `_id`. Use `slices` to specify the number of slices to use:
```console
POST my-index-000001/_update_by_query?refresh&slices=5
{
"script": {
"source": "ctx._source['extra'] = 'test'"
}
}
```
% TEST[setup:my_index_big]
Which you also can verify works with:
```console
POST my-index-000001/_search?size=0&q=extra:test&filter_path=hits.total
```
% TEST[continued]
Which results in a sensible `total` like this one:
```console-result
{
"hits": {
"total": {
"value": 120,
"relation": "eq"
}
}
}
```
Setting `slices` to `auto` will let Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
Adding `slices` to `_update_by_query` just automates the manual process used in the section above, creating sub-requests which means it has some quirks:
- You can see these requests in the [Tasks APIs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query). These sub-requests are "child" tasks of the task for the request with `slices`.
- Fetching the status of the task for the request with `slices` only contains the status of completed slices.
- These sub-requests are individually addressable for things like cancellation and rethrottling.
- Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
- Canceling the request with `slices` will cancel each sub-request.
- Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
- Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
- Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.
## Pick up a new property
Say you created an index without dynamic mapping, filled it with data, and then added a mapping value to pick up more fields from the data:
```console
PUT test
{
"mappings": {
"dynamic": false, <1>
"properties": {
"text": {"type": "text"}
}
}
}
POST test/_doc?refresh
{
"text": "words words",
"flag": "bar"
}
POST test/_doc?refresh
{
"text": "words words",
"flag": "foo"
}
PUT test/_mapping <2>
{
"properties": {
"text": {"type": "text"},
"flag": {"type": "text", "analyzer": "keyword"}
}
}
```
1. This means that new fields won't be indexed, just stored in `_source`.
2. This updates the mapping to add the new `flag` field. To pick up the new field you have to reindex all documents with it.
Searching for the data won't find anything:
```console
POST test/_search?filter_path=hits.total
{
"query": {
"match": {
"flag": "foo"
}
}
}
```
% TEST[continued]
```console-result
{
"hits" : {
"total": {
"value": 0,
"relation": "eq"
}
}
}
```
But you can issue an `_update_by_query` request to pick up the new mapping:
```console
POST test/_update_by_query?refresh&conflicts=proceed
POST test/_search?filter_path=hits.total
{
"query": {
"match": {
"flag": "foo"
}
}
}
```
% TEST[continued]
```console-result
{
"hits" : {
"total": {
"value": 1,
"relation": "eq"
}
}
}
```
You can do the exact same thing when adding a field to a multifield. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/elasticsearch/rest-apis/update-by-query-api.md |
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v1beta1",
"metadata": {
"name": "v0alpha1.status-history-thresholds-mappings.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 15116,
"links": [],
"panels": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 0,
"y": 0
},
"id": 11,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,0"
}
],
"title": "default",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 10
},
{
"color": "#EAB839",
"value": 20
},
{
"color": "#6ED0E0",
"value": 30
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 6,
"y": 0
},
"id": 2,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "5,10,20,30,40"
}
],
"title": "default absolute thresholds",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 10
},
{
"color": "#EAB839",
"value": 20
},
{
"color": "#6ED0E0",
"value": 30
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 12,
"y": 0
},
"id": 8,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "5,10,20,30,40"
}
],
"title": "default percentage thresholds",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "A-series"
},
"properties": [
{
"id": "thresholds",
"value": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 10
},
{
"color": "#EAB839",
"value": 20
},
{
"color": "#6ED0E0",
"value": 30
}
]
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 6,
"x": 18,
"y": 0
},
"id": 3,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "5,10,20,30,40"
}
],
"title": "override thresholds",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [
{
"options": {
"from": 0,
"result": {
"color": "green",
"index": 0
},
"to": 9.9999
},
"type": "range"
},
{
"options": {
"from": 10,
"result": {
"color": "yellow",
"index": 1
},
"to": 14.9999
},
"type": "range"
},
{
"options": {
"from": 15,
"result": {
"color": "red",
"index": 2
},
"to": 24.9999
},
"type": "range"
},
{
"options": {
"from": 25,
"result": {
"color": "blue",
"index": 3
},
"to": 100000
},
"type": "range"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 0,
"y": 9
},
"id": 6,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.47,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "5,10,20,30,40"
}
],
"title": "default value mappings",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "A-series"
},
"properties": [
{
"id": "mappings",
"value": [
{
"options": {
"from": 0,
"result": {
"color": "green",
"index": 0
},
"to": 9.9999
},
"type": "range"
},
{
"options": {
"from": 10,
"result": {
"color": "yellow",
"index": 1
},
"to": 14.9999
},
"type": "range"
},
{
"options": {
"from": 15,
"result": {
"color": "red",
"index": 2
},
"to": 24.9999
},
"type": "range"
},
{
"options": {
"from": 25,
"result": {
"color": "blue",
"index": 3
},
"to": 100000
},
"type": "range"
}
]
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 6,
"x": 6,
"y": 9
},
"id": 7,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "5,10,20,30,40"
}
],
"title": "override value mappings",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 12,
"y": 9
},
"id": 5,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"fields\": [\n {\n \"name\": \"time\",\n \"type\": \"time\",\n \"typeInfo\": {\n \"frame\": \"time\",\n \"nullable\": true\n },\n \"config\": {}\n },\n {\n \"name\": \"value\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"int64\",\n \"nullable\": true\n },\n \"config\": {\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n },\n {\n \"color\": \"#EAB839\",\n \"value\": 10\n },\n {\n \"color\": \"red\",\n \"value\": 15\n },\n {\n \"color\": \"#6ED0E0\",\n \"value\": 25\n }\n ]\n }\n }\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1674732835000,\n 1674736435000,\n 1674740035000,\n 1674743635000\n ],\n [\n 5,\n 10,\n 20,\n 30\n ]\n ]\n }\n }\n]",
"refId": "A",
"scenarioId": "raw_frame"
}
],
"title": "field thresholds from data",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 18,
"y": 9
},
"id": 9,
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "5,10,20,30,40"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 30,
"min": 0.01,
"noise": 30,
"refId": "B",
"scenarioId": "random_walk",
"startValue": 1
}
],
"title": "threshold from random walk",
"transformations": [
{
"id": "configFromData",
"options": {
"configRefId": "B",
"mappings": [
{
"fieldName": "B-series",
"handlerKey": "threshold1"
}
]
}
}
],
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"fieldMinMax": false,
"mappings": [
{
"options": {
"match": "null",
"result": {
"color": "purple",
"index": 0,
"text": "null"
}
},
"type": "special"
},
{
"options": {
"match": "nan",
"result": {
"color": "red",
"index": 1,
"text": "NaN"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 0,
"y": 18
},
"id": 12,
"options": {
"alignValue": "center",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"fields\": [\n {\n \"name\": \"time\",\n \"type\": \"time\",\n \"typeInfo\": {\n \"frame\": \"time\",\n \"nullable\": true\n },\n \"config\": {}\n },\n {\n \"name\": \"value\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"int64\",\n \"nullable\": true\n },\n \"config\": {\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n }\n ]\n }\n }\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1674732835000,\n 1674736435000,\n 1674740035000,\n 1674743635000,\n 1674747235000\n ],\n [\n 5,\n null,\n 20,\n null,\n 40\n ]\n ],\n \"entities\": [null, { \"NaN\": [3]}]\n }\n }\n]",
"refId": "A",
"scenarioId": "raw_frame"
}
],
"title": "special null | NaN value mapping from data",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"fieldMinMax": false,
"mappings": [
{
"options": {
"match": "null+nan",
"result": {
"color": "super-light-red",
"index": 0,
"text": "null + NaN"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 6,
"y": 18
},
"id": 13,
"options": {
"alignValue": "center",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"fields\": [\n {\n \"name\": \"time\",\n \"type\": \"time\",\n \"typeInfo\": {\n \"frame\": \"time\",\n \"nullable\": true\n },\n \"config\": {}\n },\n {\n \"name\": \"value\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"int64\",\n \"nullable\": true\n },\n \"config\": {\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n }\n ]\n }\n }\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1674732835000,\n 1674736435000,\n 1674740035000,\n 1674743635000,\n 1674747235000\n ],\n [\n 5,\n null,\n 20,\n null,\n 40\n ]\n ],\n \"entities\": [null, { \"NaN\": [1]}]\n }\n }\n]",
"refId": "A",
"scenarioId": "raw_frame"
}
],
"title": "special null + NaN value mapping from data",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"fieldMinMax": false,
"mappings": [
{
"options": {
"match": "false",
"result": {
"color": "red",
"index": 0
}
},
"type": "special"
},
{
"options": {
"match": "null+nan",
"result": {
"color": "blue",
"index": 1,
"text": "null + NaN"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 12,
"y": 18
},
"id": 14,
"options": {
"alignValue": "center",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"fields\": [\n {\n \"name\": \"time\",\n \"type\": \"time\",\n \"typeInfo\": {\n \"frame\": \"time\",\n \"nullable\": true\n },\n \"config\": {}\n },\n {\n \"name\": \"value\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"int64\",\n \"nullable\": true\n },\n \"config\": {\n \"thresholds\": {\n \"mode\": \"absolute\",\n \"steps\": [\n {\n \"color\": \"green\",\n \"value\": null\n }\n ]\n }\n }\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1674732835000,\n 1674736435000,\n 1674740035000,\n 1674743635000,\n 1674747235000,\n 1674750835000,\n 1674754235000,\n 1674757835000\n ],\n [\n null,\n null,\n false,\n true,\n true,\n false,\n true,\n null\n ]\n ],\n \"entities\": [null, { \"NaN\": [0], \"Undefined\": [1]}]\n }\n }\n]",
"refId": "A",
"scenarioId": "raw_frame"
}
],
"title": "boolean values from data",
"type": "status-history"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
},
"fieldMinMax": false,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 18,
"y": 18
},
"id": 15,
"options": {
"alignValue": "center",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0-pre",
"targets": [
{
"refId": "A",
"scenarioId": "random_walk",
"seriesCount": 0
}
],
"title": "no data",
"type": "status-history"
}
],
"preload": false,
"refresh": "",
"schemaVersion": 42,
"tags": [
"gdev",
"panel-tests",
"state-timeline",
"graph-ng"
],
"templating": {
"list": []
},
"time": {
"from": "2023-01-26T11:29:47.180Z",
"to": "2023-01-26T16:29:39.205Z"
},
"timepicker": {},
"timezone": "utc",
"title": "StatusHistory - Thresholds \u0026 Mappings",
"uid": "a2f4ad9e-3b44-4624-8067-35f31be5d309",
"weekStart": ""
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-status-history/v0alpha1.status-history-thresholds-mappings.v42.v1beta1.json |
"""
Test Display Options
"""
__RCSID__ = "$Id$"
import unittest
import thread
from DIRAC.FrameworkSystem.private.standardLogging.test.TestLoggingBase import Test_Logging, gLogger, cleaningLog
class Test_DisplayOptions(Test_Logging):
"""
Test the creation of subloggers and their properties
"""
    def setUp(self):
        super(Test_DisplayOptions, self).setUp()
        # Scratch log file used by the file-backend tests; (re)create it empty
        # so each test starts from a clean file.
        self.filename = '/tmp/logtmp.log'
        with open(self.filename, "w"):
            pass
def test_00setShowHeaders(self):
"""
Set the headers
"""
gLogger.showHeaders(False)
gLogger.notice('message', 'varmessage')
self.assertEqual("message varmessage\n", self.buffer.getvalue())
self.buffer.truncate(0)
gLogger.showHeaders(True)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.buffer.truncate(0)
def test_01setShowThreadIDs(self):
"""
Set the thread ID
"""
gLogger.showThreadIDs(False)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.buffer.truncate(0)
gLogger.showThreadIDs(True)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertIn(str(thread.get_ident()), logstring1)
self.buffer.truncate(0)
def test_02setShowThreadIDsHeaders(self):
"""
Create a subsubsublogger and create a logrecord
"""
gLogger.showHeaders(False)
gLogger.showThreadIDs(False)
gLogger.notice('message')
self.assertEqual("message\n", self.buffer.getvalue())
self.buffer.truncate(0)
gLogger.showHeaders(False)
gLogger.showThreadIDs(True)
gLogger.notice('message')
self.assertEqual("message\n", self.buffer.getvalue())
self.buffer.truncate(0)
gLogger.showHeaders(True)
gLogger.showThreadIDs(False)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.buffer.truncate(0)
gLogger.showHeaders(True)
gLogger.showThreadIDs(True)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertIn(str(thread.get_ident()), logstring1)
self.buffer.truncate(0)
def test_03setSubLogShowHeaders(self):
"""
Create a sublogger and set it its own Header option.
"""
sublog = gLogger.getSubLogger('sublog')
sublog.setLevel('notice')
sublog.showHeaders(False)
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublogNOTICE:message\n")
def test_04SubLogShowHeadersChange(self):
"""
Create a sublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog2')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
self.assertEqual(self.buffer.getvalue(), "message\n")
def test_05setSubLoggLoggerShowHeaders(self):
"""
Create a sublogger, set its Header option and the Header option of the gLogger.
Show that its Header option do not follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog3')
sublog.setLevel('notice')
sublog.showHeaders(False)
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(True)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog3NOTICE:message\n")
def test_06setSubLoggLoggerShowHeadersInverse(self):
"""
Create a sublogger, set the Header option of the gLogger and its Header option.
Show that the gLogger Header option do not follow the change of its child Header option.
"""
sublog = gLogger.getSubLogger('sublog4')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(True)
sublog.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog4NOTICE:message\n")
def test_07subLogShowHeadersChange(self):
"""
Create a subsublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog5')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\nmessage\n")
self.assertEqual(self.buffer.getvalue(), "message\n")
def test_07subLogShowHeadersChangeSetSubLogger(self):
"""
Create a subsublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog6')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.showHeaders(False)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\nmessage\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog6/subsublogNOTICE:message\n")
def test_09subLogShowHeadersChangeSetSubLogger(self):
"""
Create a subsublogger and set its Header option and show that
its Header option do not follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog7')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackends(['file'], {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.showHeaders(False)
subsublog.showHeaders(True)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertIn("UTC Framework/sublog7/subsublog NOTICE: message\nmessage\n", message)
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog7/subsublogNOTICE:message\n")
def test_10gLoggerShowHeadersChange2Times(self):
"""
Create a sublogger with a file backend and change the Header option of gLogger 2 times
in order to verify the propagation.
"""
sublog = gLogger.getSubLogger('sublog8')
sublog.registerBackends(['file'], {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual("message\n", message)
gLogger.showHeaders(True)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertIn("UTC Framework/sublog8 NOTICE: message\n", message)
if __name__ == '__main__':
  # Assemble and run the suite explicitly so the runner verbosity is set.
  loader = unittest.defaultTestLoader
  runner = unittest.TextTestRunner(verbosity=2)
  testResult = runner.run(loader.loadTestsFromTestCase(Test_DisplayOptions))
import binascii
import struct
from django.forms import ValidationError
from .const import (
GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL,
STRUCT_SIZE,
)
def pack(structure, data):
    """
    Pack the values in ``data`` using the little-endian struct format
    ``structure`` and return the result as an upper-case hex byte string.
    """
    packed = struct.pack('<%s' % structure, *data)
    return binascii.hexlify(packed).upper()
def unpack(structure, data):
    """
    Decode the hexlified binary string ``data`` according to the
    little-endian struct format ``structure``; return a tuple of values.
    """
    raw = binascii.unhexlify(data)
    return struct.unpack('<%s' % structure, raw)
def chunk(data, index):
    """
    Return a two-tuple splitting ``data`` at position ``index``:
    the first ``index`` items, then the remainder.
    """
    head = data[:index]
    tail = data[index:]
    return head, tail
def get_pgraster_srid(data):
    """
    Return the SRID encoded in a PostGIS raster hex string, or None when
    no data is given.
    """
    if data is None:
        return
    # Hex characters 106-114 of the header hold the little-endian SRID;
    # see the POSTGIS_HEADER_STRUCTURE constant in the const module for
    # the full header layout.
    srid_hex = data[106:114]
    return unpack('i', srid_hex)[0]
def from_pgraster(data):
    """
    Convert a PostGIS raster HEX string into a GDALRaster-compatible dict.
    Returns None when ``data`` is None. Raises ValidationError when the
    bands do not all share the same pixeltype (a GDAL requirement).
    """
    if data is None:
        return
    # Split raster header from data; the header occupies the first 122
    # hex characters (61 bytes, see POSTGIS_HEADER_STRUCTURE).
    header, data = chunk(data, 122)
    header = unpack(POSTGIS_HEADER_STRUCTURE, header)
    # Parse band data
    bands = []
    pixeltypes = []
    while data:
        # Get pixel type for this band (one byte = two hex chars)
        pixeltype, data = chunk(data, 2)
        pixeltype = unpack('B', pixeltype)[0]
        # Subtract nodata byte from band nodata value if it exists;
        # values >= 64 mean the band carries a meaningful nodata value.
        has_nodata = pixeltype >= 64
        if has_nodata:
            pixeltype -= 64
        # Convert datatype from PostGIS to GDAL & get pack type and size
        # (pack_size is in hex characters, hence the factor of two).
        pixeltype = POSTGIS_TO_GDAL[pixeltype]
        pack_type = GDAL_TO_STRUCT[pixeltype]
        pack_size = 2 * STRUCT_SIZE[pack_type]
        # Parse band nodata value. The nodata value is part of the
        # PGRaster string even if the nodata flag is True, so it always
        # has to be chunked off the data string.
        nodata, data = chunk(data, pack_size)
        nodata = unpack(pack_type, nodata)[0]
        # Chunk and unpack band data (pack size times nr of pixels,
        # where header[10] is the width and header[11] the height).
        band, data = chunk(data, pack_size * header[10] * header[11])
        band_result = {'data': binascii.unhexlify(band)}
        # If the nodata flag is True, set the nodata value.
        if has_nodata:
            band_result['nodata_value'] = nodata
        # Append band data to band list
        bands.append(band_result)
        # Store pixeltype of this band in pixeltypes array
        pixeltypes.append(pixeltype)
    # Check that all bands have the same pixeltype.
    # This is required by GDAL. PostGIS rasters could have different pixeltypes
    # for bands of the same raster.
    if len(set(pixeltypes)) != 1:
        raise ValidationError("Band pixeltypes are not all equal.")
    # Header fields: [3:5] scale, [5:7] origin, [7:9] skew, [9] srid,
    # [10]/[11] width/height.
    return {
        'srid': int(header[9]),
        'width': header[10], 'height': header[11],
        'datatype': pixeltypes[0],
        'origin': (header[5], header[6]),
        'scale': (header[3], header[4]),
        'skew': (header[7], header[8]),
        'bands': bands,
    }
def to_pgraster(rast):
    """
    Convert a GDALRaster into the PostGIS Raster hex-string format.
    Returns None for a null/empty input; otherwise a str of upper-case
    hex characters ready to be sent to the database.
    """
    # Return if the raster is null
    if rast is None or rast == '':
        return
    # Prepare the raster header data as a tuple. The first two numbers are
    # the endianness and the PostGIS Raster Version, both are fixed by
    # PostGIS at the moment.
    rasterheader = (
        1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
        rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
        rast.srs.srid, rast.width, rast.height,
    )
    # Hexlify raster header
    result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
    for band in rast.bands:
        # The PostGIS raster band header has exactly two elements, a 8BUI byte
        # and the nodata value.
        #
        # The 8BUI stores both the PostGIS pixel data type and a nodata flag.
        # It is composed as the datatype integer plus 64 as a flag for existing
        # nodata values:
        # 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAG (0 or 64)
        #
        # For example, if the byte value is 71, then the datatype is
        # 71-64 = 7 (32BSI) and the nodata value is True.
        structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
        # Get band pixel type in PostGIS notation
        pixeltype = GDAL_TO_POSTGIS[band.datatype()]
        # Set the nodata flag
        if band.nodata_value is not None:
            pixeltype += 64
        # Pack band header (nodata defaults to 0 when the band has none;
        # the receiver ignores it because the flag bit is unset).
        bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
        # Hexlify band data
        band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper()
        # Add packed header and band data to result
        result += bandheader + band_data_hex
    # Cast raster to string before passing it to the DB
    return result.decode()
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_
#define ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_
#include <atomic>
#include <cstdint>
#include <functional>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/thread_annotations.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cordz_functions.h"
#include "absl/strings/internal/cordz_handle.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
// CordzInfo tracks a profiled Cord. Each of these objects can be in two places.
// If a Cord is alive, the CordzInfo will be in the global_cordz_infos map, and
// can also be retrieved via the linked list starting with
// global_cordz_infos_head and continued via the cordz_info_next() method. When
// a Cord has reached the end of its lifespan, the CordzInfo object will be
// migrated out of the global_cordz_infos list and the global_cordz_infos_map,
// and will either be deleted or appended to the global_delete_queue. If it is
// placed on the global_delete_queue, the CordzInfo object will be cleaned in
// the destructor of a CordzSampleToken object.
class ABSL_LOCKABLE CordzInfo : public CordzHandle {
 public:
  using MethodIdentifier = CordzUpdateTracker::MethodIdentifier;
  // TrackCord creates a CordzInfo instance which tracks important metrics of
  // a sampled cord, and stores the created CordzInfo instance into `cord'. All
  // CordzInfo instances are placed in a global list which is used to discover
  // and snapshot all actively tracked cords. Callers are responsible for
  // calling UntrackCord() before the tracked Cord instance is deleted, or to
  // stop tracking the sampled Cord. Callers are also responsible for guarding
  // changes to the 'tree' value of a Cord (InlineData.tree) through the Lock()
  // and Unlock() calls. Any change resulting in a new tree value for the cord
  // requires a call to SetCordRep() before the old tree has been unreffed
  // and/or deleted. `method` identifies the Cord public API method initiating
  // the cord to be sampled.
  // Requires `cord` to hold a tree, and `cord.cordz_info()` to be null.
  static void TrackCord(InlineData& cord, MethodIdentifier method);
  // Identical to TrackCord(), except that this function fills the
  // `parent_stack` and `parent_method` properties of the returned CordzInfo
  // instance from the provided `src` instance if `src` is sampled.
  // This function should be used for sampling 'copy constructed' and 'copy
  // assigned' cords. This function allows 'cord` to be already sampled, in
  // which case the CordzInfo will be newly created from `src`.
  static void TrackCord(InlineData& cord, const InlineData& src,
                        MethodIdentifier method);
  // Maybe sample the cord identified by 'cord' for method 'method'.
  // Uses `cordz_should_profile` to randomly pick cords to be sampled, and if
  // so, invokes `TrackCord` to start sampling `cord`.
  static void MaybeTrackCord(InlineData& cord, MethodIdentifier method);
  // Maybe sample the cord identified by 'cord' for method 'method'.
  // `src` identifies a 'parent' cord which is assigned to `cord`, typically the
  // input cord for a copy constructor, or an assign method such as `operator=`
  // `cord` will be sampled if (and only if) `src` is sampled.
  // If `cord` is currently being sampled and `src` is not being sampled, then
  // this function will stop sampling the cord and reset the cord's cordz_info.
  //
  // Previously this function defined that `cord` will be sampled if either
  // `src` is sampled, or if `cord` is randomly picked for sampling. However,
  // this can cause issues, as there may be paths where some cord is assigned an
  // indirect copy of it's own value. As such a 'string of copies' would then
  // remain sampled (`src.is_profiled`), then assigning such a cord back to
  // 'itself' creates a cycle where the cord will converge to 'always sampled`.
  //
  // For example:
  //
  //   Cord x;
  //   for (...) {
  //     // Copy ctor --> y.is_profiled := x.is_profiled | random(...)
  //     Cord y = x;
  //     ...
  //     // Assign x = y --> x.is_profiled = y.is_profiled | random(...)
  //     //              ==> x.is_profiled |= random(...)
  //     //              ==> x converges to 'always profiled'
  //     x = y;
  //   }
  static void MaybeTrackCord(InlineData& cord, const InlineData& src,
                             MethodIdentifier method);
  // Stops tracking changes for a sampled cord, and deletes the provided info.
  // This function must be called before the sampled cord instance is deleted,
  // and before the root cordrep of the sampled cord is unreffed.
  // This function may extend the lifetime of the cordrep in cases where the
  // CordInfo instance is being held by a concurrent collection thread.
  void Untrack();
  // Invokes UntrackCord() on `info` if `info` is not null.
  static void MaybeUntrackCord(CordzInfo* info);
  // Not copyable or default-constructible: instances are created solely
  // through TrackCord() and owned by the global tracking list.
  CordzInfo() = delete;
  CordzInfo(const CordzInfo&) = delete;
  CordzInfo& operator=(const CordzInfo&) = delete;
  // Retrieves the oldest existing CordzInfo.
  static CordzInfo* Head(const CordzSnapshot& snapshot)
      ABSL_NO_THREAD_SAFETY_ANALYSIS;
  // Retrieves the next oldest existing CordzInfo older than 'this' instance.
  CordzInfo* Next(const CordzSnapshot& snapshot) const
      ABSL_NO_THREAD_SAFETY_ANALYSIS;
  // Locks this instance for the update identified by `method`.
  // Increases the count for `method` in `update_tracker`.
  void Lock(MethodIdentifier method) ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_);
  // Unlocks this instance. If the contained `rep` has been set to null
  // indicating the Cord has been cleared or is otherwise no longer sampled,
  // then this method will delete this CordzInfo instance.
  void Unlock() ABSL_UNLOCK_FUNCTION(mutex_);
  // Asserts that this CordzInfo instance is locked.
  void AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_);
  // Updates the `rep` property of this instance. This methods is invoked by
  // Cord logic each time the root node of a sampled Cord changes, and before
  // the old root reference count is deleted. This guarantees that collection
  // code can always safely take a reference on the tracked cord.
  // Requires a lock to be held through the `Lock()` method.
  // TODO(b/117940323): annotate with ABSL_EXCLUSIVE_LOCKS_REQUIRED once all
  // Cord code is in a state where this can be proven true by the compiler.
  void SetCordRep(CordRep* rep);
  // Returns the current `rep` property of this instance with a reference
  // added, or null if this instance represents a cord that has since been
  // deleted or untracked.
  CordRep* RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_);
  // Returns the current value of `rep_` for testing purposes only.
  CordRep* GetCordRepForTesting() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
    return rep_;
  }
  // Sets the current value of `rep_` for testing purposes only.
  void SetCordRepForTesting(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS {
    rep_ = rep;
  }
  // Returns the stack trace for where the cord was first sampled. Cords are
  // potentially sampled when they promote from an inlined cord to a tree or
  // ring representation, which is not necessarily the location where the cord
  // was first created. Some cords are created as inlined cords, and only as
  // data is added do they become a non-inlined cord. However, typically the
  // location represents reasonably well where the cord is 'created'.
  absl::Span<void* const> GetStack() const;
  // Returns the stack trace for a sampled cord's 'parent stack trace'. This
  // value may be set if the cord is sampled (promoted) after being created
  // from, or being assigned the value of an existing (sampled) cord.
  absl::Span<void* const> GetParentStack() const;
  // Retrieves the CordzStatistics associated with this Cord. The statistics
  // are only updated when a Cord goes through a mutation, such as an Append
  // or RemovePrefix.
  CordzStatistics GetCordzStatistics() const;
 private:
  using SpinLock = absl::base_internal::SpinLock;
  using SpinLockHolder = ::absl::base_internal::SpinLockHolder;
  // Global cordz info list. CordzInfo stores a pointer to the global list
  // instance to harden against ODR violations.
  struct List {
    constexpr explicit List(absl::ConstInitType)
        : mutex(absl::kConstInit,
                absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
    SpinLock mutex;
    std::atomic<CordzInfo*> head ABSL_GUARDED_BY(mutex){nullptr};
  };
  // Maximum number of frames captured for `stack_` and `parent_stack_`.
  static constexpr size_t kMaxStackDepth = 64;
  explicit CordzInfo(CordRep* rep, const CordzInfo* src,
                     MethodIdentifier method);
  ~CordzInfo() override;
  // Sets `rep_` without holding a lock.
  void UnsafeSetCordRep(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS;
  // Inserts this instance into the global tracking list.
  void Track();
  // Returns the parent method from `src`, which is either `parent_method_` or
  // `method_` depending on `parent_method_` being kUnknown.
  // Returns kUnknown if `src` is null.
  static MethodIdentifier GetParentMethod(const CordzInfo* src);
  // Fills the provided stack from `src`, copying either `parent_stack_` or
  // `stack_` depending on `parent_stack_` being empty, returning the size of
  // the parent stack.
  // Returns 0 if `src` is null.
  static size_t FillParentStack(const CordzInfo* src, void** stack);
  // Debug-build check that this instance references the one true global
  // list; catches ODR violations across translation units.
  void ODRCheck() const {
#ifndef NDEBUG
    ABSL_RAW_CHECK(list_ == &global_list_, "ODR violation in Cord");
#endif
  }
  // Non-inlined implementation of `MaybeTrackCord`, which is executed if
  // either `src` is sampled or `cord` is sampled, and either untracks or
  // tracks `cord` as documented per `MaybeTrackCord`.
  static void MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
                                 MethodIdentifier method);
  // The single global list of tracked CordzInfo instances.
  ABSL_CONST_INIT static List global_list_;
  List* const list_ = &global_list_;
  // ci_prev_ and ci_next_ require the global list mutex to be held.
  // Unfortunately we can't use thread annotations such that the thread safety
  // analysis understands that list_ and global_list_ are one and the same.
  std::atomic<CordzInfo*> ci_prev_{nullptr};
  std::atomic<CordzInfo*> ci_next_{nullptr};
  // Guards `rep_`; held via Lock()/Unlock() across cord tree mutations.
  mutable absl::Mutex mutex_;
  // Root of the sampled cord's tree; null once cleared or untracked.
  CordRep* rep_ ABSL_GUARDED_BY(mutex_);
  // Captured stack traces (see GetStack() / GetParentStack()) and their
  // actual depths; fixed-size storage, set once at construction.
  void* stack_[kMaxStackDepth];
  void* parent_stack_[kMaxStackDepth];
  const size_t stack_depth_;
  const size_t parent_stack_depth_;
  // The Cord API methods through which this cord (and its parent, if any)
  // became sampled.
  const MethodIdentifier method_;
  const MethodIdentifier parent_method_;
  CordzUpdateTracker update_tracker_;
  const absl::Time create_time_;
};
// Fast path for sampling: only the (rare) randomly-selected case calls out
// to the non-inlined TrackCord().
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
    InlineData& cord, MethodIdentifier method) {
  if (ABSL_PREDICT_FALSE(cordz_should_profile())) {
    TrackCord(cord, method);
  }
}
// Fast path for copy/assign sampling: only enters the non-inlined impl when
// either `cord` or `src` is already sampled.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
    InlineData& cord, const InlineData& src, MethodIdentifier method) {
  if (ABSL_PREDICT_FALSE(InlineData::is_either_profiled(cord, src))) {
    MaybeTrackCordImpl(cord, src, method);
  }
}
// Ends tracking for `info` when it is non-null; a null `info` (the common,
// unsampled case) is a no-op.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeUntrackCord(
    CordzInfo* info) {
  if (ABSL_PREDICT_FALSE(info != nullptr)) {
    info->Untrack();
  }
}
// Debug-only assertion that `mutex_` is held; compiles to nothing in
// optimized (NDEBUG) builds.
inline void CordzInfo::AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_) {
#ifndef NDEBUG
  mutex_.AssertHeld();
#endif
}
// Replaces the tracked tree root. Callers must already hold the lock via
// Lock(); AssertHeld() verifies that in debug builds.
inline void CordzInfo::SetCordRep(CordRep* rep) {
  AssertHeld();
  rep_ = rep;
}
// Lock-free variant of SetCordRep(); caller is responsible for exclusivity.
inline void CordzInfo::UnsafeSetCordRep(CordRep* rep) { rep_ = rep; }
// Takes a reference on the tracked tree under `mutex_`. Returns null when
// the cord has already been cleared or untracked.
inline CordRep* CordzInfo::RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_) {
  MutexLock lock(&mutex_);
  if (rep_ == nullptr) {
    return nullptr;
  }
  return CordRep::Ref(rep_);
}
} // namespace cord_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ | c | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/absl/strings/internal/cordz_info.h |
# Demo script: estimate a spectral density with the Welch method and
# compare the estimate against the analytical Cauchy spectral model.
import openturns as ot
from math import exp  # NOTE(review): unused here; candidate for removal
from matplotlib import pyplot as plt
from openturns.viewer import View
# Create the time grid
# In the context of the spectral estimate or Fourier transform use,
# we use data blocks with size of form 2^p
tMin = 0.
timeStep = 0.1
size = pow(2, 12)
myTimeGrid = ot.RegularGrid(tMin, timeStep, size)
# We fix the parameter of the Cauchy model
amplitude = [5]
scale = [3]
model = ot.ExponentialCauchy(scale, amplitude)
myNormalProcess = ot.SpectralNormalProcess(model, myTimeGrid)
# Get a time series or a sample of time series
#myTimeSeries = myNormalProcess.getRealization()
mySample = myNormalProcess.getSample(1000)
mySegmentNumber = 10
myOverlapSize = 0.3
# Build a spectral model factory (Welch estimator with a Hanning window)
myFactory = ot.WelchFactory(ot.Hanning(), mySegmentNumber, myOverlapSize)
# Estimation on a TimeSeries or on a ProcessSample
#myEstimatedModel_TS = myFactory.build(myTimeSeries)
myEstimatedModel_PS = myFactory.build(mySample)
# Change the filtering window (has no effect on the estimate already built)
myFactory.setFilteringWindows(ot.Hamming())
# Get the FFT algorithm
myFFT = myFactory.getFFTAlgorithm()
# Get the frequencyGrid
frequencyGrid = myEstimatedModel_PS.getFrequencyGrid()
# With the model, we want to compare values
# We compare values computed with theoretical values
plotSample = ot.NumericalSample(frequencyGrid.getN(), 3)
# Loop of comparison ==> data are saved in plotSample
# Columns: 0 = frequency, 1 = estimated density, 2 = theoretical density
for k in range(frequencyGrid.getN()):
    freq = frequencyGrid.getStart() + k * frequencyGrid.getStep()
    plotSample[k, 0] = freq
    plotSample[k, 1] = abs(myEstimatedModel_PS(freq)[0, 0])
    plotSample[k, 2] = abs(model.computeSpectralDensity(freq)[0, 0])
# Graph section
# We build 2 curves
# each one is function of frequency values
ind = ot.Indices(2)
ind.fill()
# Some cosmetics : labels, legend position, log scale on the y axis
graph = ot.Graph("Estimated spectral function - Validation", "Frequency",
                 "Spectral density function", True, "topright", 1.0, ot.GraphImplementation.LOGY)
# The first curve is the estimated density as function of frequency
curve1 = ot.Curve(plotSample.getMarginal(ind))
curve1.setColor('blue')
curve1.setLegend('estimate model')
# The second curve is the theoretical density as function of frequency
ind[1] = 2
curve2 = ot.Curve(plotSample.getMarginal(ind))
curve2.setColor('red')
curve2.setLegend('Cauchy model')
graph.add(curve1)
graph.add(curve2)
fig = plt.figure(figsize=(10, 4))
plt.suptitle('Spectral model estimation')
graph_axis = fig.add_subplot(111)
view = View(graph, figure=fig, axes=[graph_axis], add_legend=False)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from troveclient.openstack.common.apiclient import exceptions as troveexc
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import trove
from heat.engine.resources.openstack.trove import trove_cluster
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
stack_template = '''
heat_template_version: 2013-05-23
resources:
cluster:
type: OS::Trove::Cluster
properties:
datastore_type: mongodb
datastore_version: 2.6.1
instances:
- flavor: m1.heat
volume_size: 1
- flavor: m1.heat
volume_size: 1
- flavor: m1.heat
volume_size: 1
'''
class FakeTroveCluster(object):
    """Stand-in for a troveclient cluster object used by the tests below."""
    def __init__(self, status='ACTIVE'):
        # Fixed identifiers matching the assertions in TroveClusterTest.
        self.name = 'cluster'
        self.id = '1189aa64-a471-4aa3-876a-9eb7d84089da'
        self.ip = ['10.0.0.1']
        instance_ids = ['416b0b16-ba55-4302-bbd3-ff566032e1c1',
                        '965ef811-7c1d-47fc-89f2-a89dfdd23ef2',
                        '3642f41c-e8ad-4164-a089-3891bf7f2d2b']
        self.instances = [{'id': iid, 'status': status}
                          for iid in instance_ids]
    def delete(self):
        """No-op mirroring the troveclient delete() signature."""
        pass
class FakeFlavor(object):
    """Minimal flavor stub exposing only the id/name pair the tests read."""
    def __init__(self, id, name):
        self.id, self.name = id, name
class FakeVersion(object):
    """Datastore-version stub; defaults to the version in stack_template."""
    def __init__(self, name="2.6.1"):
        self.name = name
class TroveClusterTest(common.HeatTestCase):
    """Unit tests for the OS::Trove::Cluster resource type.
    The trove client is fully mocked: resource-level calls go through
    ``self.client`` (patched onto TroveCluster.client) while validation
    lookups go through a separate mock patched onto
    TroveClientPlugin.client.
    """
    def setUp(self):
        super(TroveClusterTest, self).setUp()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()
        t = template_format.parse(stack_template)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        self.rsrc_defn = resource_defns['cluster']
        # Patch the client used by the resource itself; stopped in tearDown.
        self.patcher_client = mock.patch.object(trove_cluster.TroveCluster,
                                                'client')
        mock_client = self.patcher_client.start()
        self.client = mock_client.return_value
        # Separate mock for the client plugin used during validation
        # (flavor and datastore-version lookups).
        client = mock.Mock()
        client.flavors.list.return_value = [FakeFlavor(1, 'm1.heat')]
        client.datastore_versions.list.return_value = [FakeVersion()]
        self.patchobject(trove.TroveClientPlugin, 'client',
                         return_value=client)
    def tearDown(self):
        super(TroveClusterTest, self).tearDown()
        # Undo the TroveCluster.client patch started in setUp().
        self.patcher_client.stop()
    def _create_resource(self, name, snippet, stack):
        # Build a TroveCluster resource and drive it to CREATE COMPLETE
        # against the mocked client.
        tc = trove_cluster.TroveCluster(name, snippet, stack)
        self.client.clusters.create.return_value = FakeTroveCluster()
        self.client.clusters.get.return_value = FakeTroveCluster()
        scheduler.TaskRunner(tc.create)()
        return tc
    def test_create(self):
        # Creation succeeds and forwards the template's instances,
        # datastore type/version to the trove client.
        tc = self._create_resource('cluster', self.rsrc_defn, self.stack)
        expected_state = (tc.CREATE, tc.COMPLETE)
        self.assertEqual(expected_state, tc.state)
        args = self.client.clusters.create.call_args[1]
        self.assertEqual([{'flavorRef': 1, 'volume': {'size': 1}},
                          {'flavorRef': 1, 'volume': {'size': 1}},
                          {'flavorRef': 1, 'volume': {'size': 1}}],
                         args['instances'])
        self.assertEqual('mongodb', args['datastore'])
        self.assertEqual('2.6.1', args['datastore_version'])
        self.assertEqual('1189aa64-a471-4aa3-876a-9eb7d84089da',
                         tc.resource_id)
    def test_resource_mapping(self):
        # The plugin registers exactly one resource type.
        mapping = trove_cluster.resource_mapping()
        self.assertEqual(1, len(mapping))
        self.assertEqual(trove_cluster.TroveCluster,
                         mapping['OS::Trove::Cluster'])
    def test_attributes(self):
        # The 'ip' and 'instances' attributes mirror the fake cluster.
        tc = self._create_resource('cluster', self.rsrc_defn, self.stack)
        self.assertEqual(['10.0.0.1'], tc.FnGetAtt('ip'))
        self.assertEqual(['416b0b16-ba55-4302-bbd3-ff566032e1c1',
                          '965ef811-7c1d-47fc-89f2-a89dfdd23ef2',
                          '3642f41c-e8ad-4164-a089-3891bf7f2d2b'],
                         tc.FnGetAtt('instances'))
    def test_delete(self):
        tc = self._create_resource('cluster', self.rsrc_defn, self.stack)
        self.patchobject(tc, 'handle_delete', return_value=None)
        scheduler.TaskRunner(tc.delete)()
        self.assertEqual((tc.DELETE, tc.COMPLETE), tc.state)
    def test_delete_not_found(self):
        # A NotFound from the client during delete is treated as success.
        tc = self._create_resource('cluster', self.rsrc_defn, self.stack)
        self.client.clusters.get.side_effect = troveexc.NotFound()
        scheduler.TaskRunner(tc.delete)()
        self.assertEqual((tc.DELETE, tc.COMPLETE), tc.state)
        self.client.clusters.get.assert_called_with(tc.resource_id)
        self.assertEqual(2, self.client.clusters.get.call_count)
    def test_validate_ok(self):
        tc = trove_cluster.TroveCluster('cluster', self.rsrc_defn, self.stack)
        self.assertIsNone(tc.validate())
    def test_validate_invalid_dsversion(self):
        # A datastore version not reported by the client fails validation.
        self.rsrc_defn['Properties']['datastore_version'] = '2.6.2'
        tc = trove_cluster.TroveCluster('cluster', self.rsrc_defn, self.stack)
        ex = self.assertRaises(exception.StackValidationFailed, tc.validate)
        error_msg = ('Datastore version 2.6.2 for datastore type mongodb is '
                     'not valid. Allowed versions are 2.6.1.')
        self.assertEqual(error_msg, six.text_type(ex))
    def test_validate_invalid_flavor(self):
        # A flavor not reported by the client fails validation.
        self.rsrc_defn['Properties']['instances'][0]['flavor'] = 'm1.small'
        tc = trove_cluster.TroveCluster('cluster', self.rsrc_defn, self.stack)
        ex = self.assertRaises(exception.StackValidationFailed, tc.validate)
        error_msg = ("Property error: "
                     "resources.cluster.properties.instances[0].flavor: "
                     "Error validating value 'm1.small': "
                     "The Flavor ID (m1.small) could not be found.")
        self.assertEqual(error_msg, six.text_type(ex))
from config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppFloodlightLoadBalancer
# This starts a Floodlight process in a separate process.  The .properties
# file referenced here contains the parameters used by Floodlight.
start_cmd = ('''java -ea -Dlogback.configurationFile=./src/main/resources/logback-test-trace.xml -jar '''
             '''./target/floodlight.jar '''
             '''-cf ./src/main/resources/trace_loadbalancer.properties''')

# This specifies the controller that STS should use.
controllers = [ControllerConfig(start_cmd, cwd='../floodlight', address="127.0.0.1", port=6633)]

# Uncomment the two lines below (and comment out the two above) if you are
# running Floodlight separately, e.g. for debugging in Eclipse.  There must
# be a controller listening on port 6633.
# start_cmd = '''echo "no-op"'''
# controllers = [ControllerConfig(start_cmd, cwd='../floodlight', address="127.0.0.1", port=6633, controller_type='dummy')]

#################################
# Topologies used in PLDI paper #
#################################
# Uncomment exactly one of the topologies below to use it.

############
# "Single" #
############
# num = 2
# topology_class = StarTopology
# topology_params = "num_hosts=%d" % num

#############
# "Single4" #
#############
num = 4
topology_class = StarTopology
topology_params = "num_hosts=%d" % num

############
# "Linear" #
############
#num = 2
#topology_class = MeshTopology
#topology_params = "num_switches=%d" % num

#############
# "BinTree" #
#############
# num = 2
# topology_class = BinaryLeafTreeTopology
# topology_params = "num_levels=%d" % num

# Increase this value to get longer traces.
steps = 200

# This folder will be placed in the root STS directory.
# To see the exact parameters used for each trace, refer to the
# "orig_config.py" in each trace directory.  This file will be copied to
# the results_dir, along with the results themselves.
results_dir = "plditraces/trace_floodlight_loadbalancer-%s%d-steps%s" % (topology_class.__name__, num, steps)

apps = [AppFloodlightLoadBalancer('loadbalancer', cwd='./', controller='localhost:8080')]

simulation_config = SimulationConfig(controller_configs=controllers,
                                     topology_class=topology_class,
                                     topology_params=topology_params,
                                     patch_panel_class=BufferedPatchPanel,
                                     controller_patch_panel_class=UserSpaceControllerPatchPanel,
                                     dataplane_trace=None,
                                     snapshot_service=None,
                                     multiplex_sockets=False,
                                     violation_persistence_threshold=None,
                                     kill_controllers_on_exit=True,
                                     interpose_on_controllers=False,
                                     ignore_interposition=False,
                                     hb_logger_class=HappensBeforeLogger,
                                     hb_logger_params=results_dir,
                                     apps=apps)

# Manual, interactive mode (alternative to the Fuzzer below):
# control_flow = Interactive(simulation_config, input_logger=InputLogger())

control_flow = Fuzzer(simulation_config,
                      input_logger=InputLogger(),
                      initialization_rounds=20,
                      send_all_to_all=True,  # needs to be True, otherwise the loadbalancer will throw errors
                      check_interval=10,
                      delay=0.1,
                      halt_on_violation=True,
                      send_init_packets=False,
                      steps=steps,  # if no circuits are installed, increase this number
                      # invariant_check_name="check_everything",
                      invariant_check_name="InvariantChecker.check_liveness",
                      apps=apps)
"""Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
import collections
import collections.abc
from io import BytesIO
from pickle import Pickler, Unpickler

__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.abc.MutableMapping):
    """Base class for shelf implementations.

    This is initialized with a dictionary-like object mapping encoded
    (byte-string) keys to pickled values.  Keys are encoded/decoded with
    *keyencoding*; values are pickled with *protocol* (default 3) on
    write and unpickled on read.  With writeback=True, every entry read
    or written is also kept in an in-memory cache that sync() flushes.

    Note: derives from collections.abc.MutableMapping; the bare
    ``collections.MutableMapping`` alias was removed in Python 3.10.

    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict, protocol=None, writeback=False,
                 keyencoding="utf-8"):
        self.dict = dict
        if protocol is None:
            protocol = 3
        self._protocol = protocol
        self.writeback = writeback
        self.cache = {}
        self.keyencoding = keyencoding

    def __iter__(self):
        # The backing dict stores encoded keys; expose str keys.
        for k in self.dict.keys():
            yield k.decode(self.keyencoding)

    def __len__(self):
        return len(self.dict)

    def __contains__(self, key):
        return key.encode(self.keyencoding) in self.dict

    def get(self, key, default=None):
        if key.encode(self.keyencoding) in self.dict:
            return self[key]
        return default

    def __getitem__(self, key):
        try:
            # Served from the writeback cache when enabled.
            value = self.cache[key]
        except KeyError:
            f = BytesIO(self.dict[key.encode(self.keyencoding)])
            value = Unpickler(f).load()
            if self.writeback:
                self.cache[key] = value
        return value

    def __setitem__(self, key, value):
        if self.writeback:
            self.cache[key] = value
        f = BytesIO()
        p = Pickler(f, self._protocol)
        p.dump(value)
        self.dict[key.encode(self.keyencoding)] = f.getvalue()

    def __delitem__(self, key):
        del self.dict[key.encode(self.keyencoding)]
        try:
            del self.cache[key]
        except KeyError:
            pass

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        """Flush the cache, close the backing store, and poison self.dict."""
        self.sync()
        try:
            self.dict.close()
        except AttributeError:
            pass
        # Catch errors that may happen when close is called from __del__
        # because CPython is in interpreter shutdown.
        try:
            self.dict = _ClosedDict()
        except (NameError, TypeError):
            self.dict = None

    def __del__(self):
        if not hasattr(self, 'writeback'):
            # __init__ didn't succeed, so don't bother closing
            # see http://bugs.python.org/issue1339007 for details
            return
        self.close()

    def sync(self):
        """Write back all cached entries and flush the backing store."""
        if self.writeback and self.cache:
            # Temporarily disable writeback so __setitem__ doesn't
            # repopulate the cache while we drain it.
            self.writeback = False
            for key, entry in self.cache.items():
                self[key] = entry
            self.writeback = True
            self.cache = {}
        if hasattr(self.dict, 'sync'):
            self.dict.sync()
class BsdDbShelf(Shelf):
    """Shelf implementation using the "BSD" db interface.

    This adds methods first(), next(), previous(), last() and
    set_location() that have no counterpart in [g]dbm databases.

    The actual database must be opened using one of the "bsddb"
    modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
    bsddb.rnopen) and passed to the constructor.

    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict, protocol=None, writeback=False,
                 keyencoding="utf-8"):
        Shelf.__init__(self, dict, protocol, writeback, keyencoding)

    def _decode_pair(self, key, value):
        # Shared tail of every cursor method: decode the raw key and
        # unpickle the raw value into a (key, value) pair.
        return (key.decode(self.keyencoding), Unpickler(BytesIO(value)).load())

    def set_location(self, key):
        return self._decode_pair(*self.dict.set_location(key))

    def next(self):
        return self._decode_pair(*next(self.dict))

    def previous(self):
        return self._decode_pair(*self.dict.previous())

    def first(self):
        return self._decode_pair(*self.dict.first())

    def last(self):
        return self._decode_pair(*self.dict.last())
class DbfilenameShelf(Shelf):
    """Shelf implementation using the "dbm" generic dbm interface.

    This is initialized with the filename for the dbm database.
    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, filename, flag='c', protocol=None, writeback=False):
        # Local import: dbm is only needed once a file-backed shelf is
        # actually constructed.
        import dbm
        Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
    """Open a persistent dictionary for reading and writing.

    The filename parameter is the base filename for the underlying
    database.  As a side-effect, an extension may be added to the
    filename and more than one file may be created.  The optional flag
    parameter has the same interpretation as the flag parameter of
    dbm.open().  The optional protocol parameter specifies the pickle
    protocol version to use (defaults to 3 when None; see the pickle
    module for available protocols).

    See the module's __doc__ string for an overview of the interface.
    """
    return DbfilenameShelf(filename, flag, protocol, writeback)
#
# The Python Imaging Library.
# $Id$
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import os, string, sys
class _imagingft_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imagingft C module is not installed")
# FreeType support is optional: bind the C extension if present,
# otherwise install a stub whose attribute accesses raise ImportError.
try:
    import _imagingft
    core = _imagingft
    del _imagingft
except ImportError:
    core = _imagingft_not_installed()
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
##
# The <b>ImageFont</b> module defines a class with the same name.
# Instances of this class store bitmap fonts, and are used with the
# <b>text</b> method of the <b>ImageDraw</b> class.
# <p>
# PIL uses its own font file format to store bitmap fonts. You can
# use the <b>pilfont</b> utility to convert BDF and PCF font
# descriptors (X window font formats) to this format.
# <p>
# Starting with version 1.1.4, PIL can be configured to support
# TrueType and OpenType fonts. For earlier version, TrueType
# support is only available as part of the imToolkit package
#
# @see ImageDraw#ImageDraw.text
# @see pilfont
class ImageFont:
    "PIL font wrapper"

    def _load_pilfont(self, filename):
        # The .pil file holds the metrics; the glyph bitmap lives in a
        # sibling image file with the same stem and one of the known
        # extensions.
        file = open(filename, "rb")

        for ext in (".png", ".gif", ".pbm"):
            try:
                fullname = os.path.splitext(filename)[0] + ext
                image = Image.open(fullname)
            except:
                # Missing/unreadable candidate: try the next extension.
                pass
            else:
                if image and image.mode in ("1", "L"):
                    break
        else:
            raise IOError("cannot find glyph data file")

        self.file = fullname

        return self._load_pilfont_data(file, image)

    def _load_pilfont_data(self, file, image):

        # read PILfont header
        if file.readline() != "PILfont\n":
            raise SyntaxError("Not a PILfont file")
        d = string.split(file.readline(), ";")
        self.info = [] # FIXME: should be a dictionary
        while True:
            s = file.readline()
            if not s or s == "DATA\n":
                break
            self.info.append(s)

        # read PILfont metrics: 256 glyphs * 10 values * 2 bytes
        # (dx, dy, dstbox, srcbox -- see the format notes above)
        data = file.read(256*20)

        # glyph bitmap must be bilevel ("1") or grayscale ("L")
        if image.mode not in ("1", "L"):
            raise TypeError("invalid font image mode")

        image.load()

        self.font = Image.core.font(image.im, data)

        # delegate critical operations to internal type
        self.getsize = self.font.getsize
        self.getmask = self.font.getmask
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont:
    "FreeType font wrapper (requires _imagingft service)"

    def __init__(self, file, size, index=0, encoding=""):
        # FIXME: use service provider instead
        self.font = core.getfont(file, size, index, encoding)

    def getname(self):
        # Returns the (family, style) pair reported by the font.
        return self.font.family, self.font.style

    def getmetrics(self):
        # Returns the (ascent, descent) pair reported by the font.
        return self.font.ascent, self.font.descent

    def getsize(self, text):
        # core getsize returns (size, offset); only the size is exposed.
        return self.font.getsize(text)[0]

    def getmask(self, text, mode=""):
        return self.getmask2(text, mode)[0]

    def getmask2(self, text, mode="", fill=Image.core.fill):
        # Renders into a freshly filled "L" image; mode == "1" requests
        # bilevel rendering.  Returns (image, offset).
        size, offset = self.font.getsize(text)
        im = fill("L", size, 0)
        self.font.render(text, im.id, mode=="1")
        return im, offset
##
# Wrapper that creates a transposed font from any existing font
# object.
#
# @param font A font object.
# @param orientation An optional orientation. If given, this should
# be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
# Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
class TransposedFont:
    """Renders text through another font, applying a transpose
    (flip/rotate) to the result."""

    def __init__(self, font, orientation=None):
        self.font = font
        self.orientation = orientation  # any 'transpose' argument, or None

    def getsize(self, text):
        width, height = self.font.getsize(text)
        # Quarter-turn rotations swap the reported dimensions.
        swapped = self.orientation in (Image.ROTATE_90, Image.ROTATE_270)
        return (height, width) if swapped else (width, height)

    def getmask(self, text, mode=""):
        mask = self.font.getmask(text, mode)
        if self.orientation is None:
            return mask
        return mask.transpose(self.orientation)
##
# Load font file. This function loads a font object from the given
# bitmap font file, and returns the corresponding font object.
#
# @param filename Name of font file.
# @return A font object.
# @exception IOError If the file could not be read.
def load(filename):
    """Load a bitmap font file and return the corresponding font object.

    @param filename Name of font file.
    @return A font object.
    @exception IOError If the file could not be read.
    """
    font = ImageFont()
    font._load_pilfont(filename)
    return font
##
# Load a TrueType or OpenType font file, and create a font object.
# This function loads a font object from the given file, and creates
# a font object for a font of the given size.
# <p>
# This function requires the _imagingft service.
#
# @param filename A truetype font file. Under Windows, if the file
# is not found in this filename, the loader also looks in Windows
# <b>fonts</b> directory
# @param size The requested size, in points.
# @param index Which font face to load (default is first available face).
# @param encoding Which font encoding to use (default is Unicode). Common
# encodings are "unic" (Unicode), "symb" (Microsoft Symbol), "ADOB"
# (Adobe Standard), "ADBE" (Adobe Expert), and "armn" (Apple Roman).
# See the FreeType documentation for more information.
# @return A font object.
# @exception IOError If the file could not be read.
def truetype(filename, size, index=0, encoding=""):
    "Load a truetype font file."
    try:
        return FreeTypeFont(filename, size, index, encoding)
    except IOError:
        # Not found as given: on Windows, retry inside the system fonts
        # directory before giving up.
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                filename = os.path.join(windir, "fonts", filename)
                return FreeTypeFont(filename, size, index, encoding)
        raise
##
# Load font file. Same as load, but searches for a bitmap font along
# the Python path.
#
# @param filename Name of font file.
# @return A font object.
# @exception IOError If the file could not be read.
# @see #load
def load_path(filename):
    "Load a font file, searching along the Python path."
    # Tries each sys.path directory in turn; the first directory that
    # yields a loadable font wins.
    for dir in sys.path:
        if Image.isDirectory(dir):
            try:
                return load(os.path.join(dir, filename))
            except IOError:
                pass
    raise IOError("cannot find font file")
##
# Load a (probably rather ugly) default font.
#
# @return A font object.
def load_default():
    """Load a (probably rather ugly) default font (courB08).

    The PILfont metrics and the glyph bitmap are embedded below as
    base64 payloads (generated by this module's __main__ block), so no
    external font files are needed.

    @return A font object.
    """
    from StringIO import StringIO
    import base64
    f = ImageFont()
    f._load_pilfont_data(
         # courB08
         StringIO(base64.decodestring('''
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
''')), Image.open(StringIO(base64.decodestring('''
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
'''))))
    return f
if __name__ == "__main__":
    # create font data chunk for embedding
    # (emits Python source that inlines the .pil metrics and the .pbm
    # bitmap as base64, for pasting into load_default above)
    import base64, os, sys
    font = "../Images/courB08"
    print " f._load_pilfont_data("
    print " # %s" % os.path.basename(font)
    print " StringIO(base64.decodestring('''"
    base64.encode(open(font + ".pil", "rb"), sys.stdout)
    print "''')), Image.open(StringIO(base64.decodestring('''"
    base64.encode(open(font + ".pbm", "rb"), sys.stdout)
    print "'''))))"
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2020 BayLibre, SAS
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/amlogic,axg-ge2d.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic GE2D Acceleration Unit
maintainers:
- Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
enum:
- amlogic,axg-ge2d
interrupts:
minItems: 1
reg:
minItems: 1
resets:
maxItems: 1
clocks:
minItems: 1
required:
- compatible
- reg
- interrupts
- clocks
- resets
additionalProperties: false
examples:
- |
ge2d: ge2d@ff940000 {
compatible = "amlogic,axg-ge2d";
reg = <0xff940000 0x10000>;
interrupts = <150>;
clocks = <&clk_ge2d>;
resets = <&reset_ge2d>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml |
import pathlib
import tomllib
import pytest
import yaml
from scripts.validate_min_versions_in_sync import (
get_toml_map_from,
get_yaml_map_from,
pin_min_versions_to_yaml_file,
)
DATA_PATH = pathlib.Path(__file__).parents[2] / "scripts/tests/data/"
@pytest.mark.parametrize(
    "src_toml, src_yaml, expected_yaml",
    [
        (
            DATA_PATH / "deps_minimum.toml",
            DATA_PATH / ("deps_unmodified_" + case + ".yaml"),
            DATA_PATH / ("deps_expected_" + case + ".yaml"),
        )
        for case in (
            "random",
            "same_version",
            "duplicate_package",
            "no_version",
            "range",
        )
    ],
)
def test_pin_min_versions_to_yaml_file(src_toml, src_yaml, expected_yaml) -> None:
    """Pinning minimum versions into the YAML must match the expected file."""
    # Build the {package: minimum version} maps from both inputs.
    with open(src_toml, "rb") as toml_f:
        toml_map = get_toml_map_from(tomllib.load(toml_f))
    with open(src_yaml, encoding="utf-8") as yaml_f:
        yaml_text = yaml_f.read()
    yaml_map = get_yaml_map_from(yaml.safe_load(yaml_text)["dependencies"])

    result = pin_min_versions_to_yaml_file(yaml_map, toml_map, yaml_text)

    with open(expected_yaml, encoding="utf-8") as yaml_f:
        expected = yaml_f.read()
    assert result == expected
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import unittest
from unittest.mock import Mock
from mantidqt.widgets.observers.ads_observer import WorkspaceDisplayADSObserver
class MockWorkspaceDisplay:
    """Test double for a workspace display: exposes the three callbacks
    the ADS observer invokes, each recorded by a Mock."""

    def __init__(self):
        for callback in ("close", "force_close", "replace_workspace"):
            setattr(self, callback, Mock())
class WorkspaceDisplayADSObserverTest(unittest.TestCase):
    """Checks that ADS notifications are forwarded to the display widget."""

    @staticmethod
    def _make_observer():
        # Build a mock display plus an observer wired to it.
        display = MockWorkspaceDisplay()
        return display, WorkspaceDisplayADSObserver(display)

    def test_clearHandle(self):
        display, observer = self._make_observer()
        observer.clearHandle()
        display.force_close.assert_called_once_with()

    def test_deleteHandle(self):
        display, observer = self._make_observer()
        expected_name = "adad"
        observer.deleteHandle(expected_name, None)
        display.close.assert_called_once_with(expected_name)

    def test_replaceHandle(self):
        display, observer = self._make_observer()
        expected_name = "a"
        expected_parameter = 444555.158
        observer.replaceHandle(expected_name, expected_parameter)
        display.replace_workspace.assert_called_once_with(expected_name, expected_parameter)
{
"name": "eslint-plugin-mongodb",
"version": "1.0.0",
"private": true,
"main": "plugin.js",
"type": "module"
} | json | github | https://github.com/mongodb/mongo | buildscripts/eslint-plugin-mongodb/package.json |
// Copyright David Abrahams 2002.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_WORKAROUND_DWA2002126_HPP
#define BOOST_WORKAROUND_DWA2002126_HPP
#include <boost/config/workaround.hpp>
#endif // BOOST_WORKAROUND_DWA2002126_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/detail/workaround.hpp |
"""Adding in a suspicious crash table
Revision ID: 2c03d8ea0a50
Revises: 49bf379b5a8
Create Date: 2013-08-09 18:46:42.618063
"""
# revision identifiers, used by Alembic.
revision = '2c03d8ea0a50'
down_revision = '49bf379b5a8'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
    """SQLAlchemy mapping for the PostgreSQL CITEXT column type.

    Values pass through unchanged in both directions; only the column
    spec string distinguishes it from plain text.
    """

    name = 'citext'

    def get_col_spec(self):
        return 'CITEXT'

    def bind_processor(self, dialect):
        def passthrough(value):
            return value
        return passthrough

    def result_processor(self, dialect, coltype):
        def passthrough(value):
            return value
        return passthrough

    def __repr__(self):
        return "citext"
class JSON(types.UserDefinedType):
    """Pass-through column type emitting PostgreSQL's JSON in DDL."""

    name = 'json'

    def get_col_spec(self):
        # DDL fragment used for columns of this type.
        return 'JSON'

    def bind_processor(self, dialect):
        # Outgoing values are handed to the driver unchanged.
        def process(value):
            return value
        return process

    def result_processor(self, dialect, coltype):
        # Incoming values are returned from the driver unchanged.
        def process(value):
            return value
        return process

    def __repr__(self):
        return "json"
def upgrade():
    # Create the suspicious_crash_signatures table.
    # NOTE(review): no primary key, foreign keys, or NOT NULL constraints are
    # declared here — presumably intentional for this migration; verify.
    op.create_table(u'suspicious_crash_signatures',
        sa.Column(u'suspicious_crash_signature_id', sa.INTEGER()),
        sa.Column(u'signature_id', sa.INTEGER()),
        sa.Column(u'report_date', sa.TIMESTAMP(timezone=True))
    )
def downgrade():
    # Reverse of upgrade(): drop the table created above.
    op.drop_table(u'suspicious_crash_signatures')
package kotlinx.coroutines
import kotlinx.atomicfu.*
import kotlin.coroutines.*
import kotlin.test.*
class MultithreadedDispatcherStressTest {
    // Incremented once per dispatched task; used to detect dropped tasks.
    private val shared = atomic(0)

    /**
     * Tests that [newFixedThreadPoolContext] will not drop tasks when closed.
     */
    @Test
    fun testClosingNotDroppingTasks() {
        // Repeat with pool sizes 1..7 to vary scheduling interleavings.
        repeat(7) {
            shared.value = 0
            val nThreads = it + 1
            val dispatcher = newFixedThreadPoolContext(nThreads, "testMultiThreadedContext")
            // Enqueue 1000 increments, then close the dispatcher right away;
            // a correct dispatcher must still run every enqueued task.
            repeat(1_000) {
                dispatcher.dispatch(EmptyCoroutineContext, Runnable {
                    shared.incrementAndGet()
                })
            }
            dispatcher.close()
            while (shared.value < 1_000) {
                // spin.
                // the test will hang here if the dispatcher drops tasks.
            }
        }
    }
}
import os
import six
import logging
from collections import defaultdict
from scrapy.exceptions import NotConfigured
from scrapy.http import Response
from scrapy.http.cookies import CookieJar
from scrapy.utils.python import to_native_str
logger = logging.getLogger(__name__)
class CookiesMiddleware(object):
    """This middleware enables working with sites that need cookies"""

    def __init__(self, debug=False):
        # One CookieJar per "cookiejar" meta key; defaultdict creates jars
        # lazily on first access (key None is the shared default jar).
        self.jars = defaultdict(CookieJar)
        self.debug = debug

    @classmethod
    def from_crawler(cls, crawler):
        # Middleware is disabled entirely unless COOKIES_ENABLED is set.
        if not crawler.settings.getbool('COOKIES_ENABLED'):
            raise NotConfigured
        return cls(crawler.settings.getbool('COOKIES_DEBUG'))

    def process_request(self, request, spider):
        # Requests may opt out of cookie handling via this meta flag.
        if request.meta.get('dont_merge_cookies', False):
            return
        cookiejarkey = request.meta.get("cookiejar")
        jar = self.jars[cookiejarkey]
        # Merge request-supplied cookies into the jar before building headers.
        cookies = self._get_request_cookies(jar, request)
        for cookie in cookies:
            jar.set_cookie_if_ok(cookie, request)
        # set Cookie header: drop any preset header first so the jar is the
        # single source of truth for what gets sent.
        request.headers.pop('Cookie', None)
        jar.add_cookie_header(request)
        self._debug_cookie(request, spider)

    def process_response(self, request, response, spider):
        if request.meta.get('dont_merge_cookies', False):
            return response
        # extract cookies from Set-Cookie and drop invalid/expired cookies
        cookiejarkey = request.meta.get("cookiejar")
        jar = self.jars[cookiejarkey]
        jar.extract_cookies(response, request)
        self._debug_set_cookie(response, spider)
        return response

    def _debug_cookie(self, request, spider):
        # Log outgoing Cookie headers when COOKIES_DEBUG is enabled.
        if self.debug:
            cl = [to_native_str(c, errors='replace')
                  for c in request.headers.getlist('Cookie')]
            if cl:
                cookies = "\n".join("Cookie: {}\n".format(c) for c in cl)
                msg = "Sending cookies to: {}\n{}".format(request, cookies)
                logger.debug(msg, extra={'spider': spider})

    def _debug_set_cookie(self, response, spider):
        # Log incoming Set-Cookie headers when COOKIES_DEBUG is enabled.
        if self.debug:
            cl = [to_native_str(c, errors='replace')
                  for c in response.headers.getlist('Set-Cookie')]
            if cl:
                cookies = "\n".join("Set-Cookie: {}\n".format(c) for c in cl)
                msg = "Received cookies from: {}\n{}".format(response, cookies)
                logger.debug(msg, extra={'spider': spider})

    def _format_cookie(self, cookie):
        # build cookie string ("name=value[; Path=..][; Domain=..]")
        cookie_str = '%s=%s' % (cookie['name'], cookie['value'])
        if cookie.get('path', None):
            cookie_str += '; Path=%s' % cookie['path']
        if cookie.get('domain', None):
            cookie_str += '; Domain=%s' % cookie['domain']
        return cookie_str

    def _get_request_cookies(self, jar, request):
        # request.cookies may be a dict or a list of cookie dicts; normalize,
        # then round-trip through a fake Response so the jar parses them with
        # its usual Set-Cookie machinery.
        if isinstance(request.cookies, dict):
            cookie_list = [{'name': k, 'value': v} for k, v in \
                           six.iteritems(request.cookies)]
        else:
            cookie_list = request.cookies
        cookies = [self._format_cookie(x) for x in cookie_list]
        headers = {'Set-Cookie': cookies}
        response = Response(request.url, headers=headers)
        return jar.make_cookies(response, request)
from datetime import datetime
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.optimize import leastsq, fsolve
import time
import random
import labrad
from labrad.units import Unit
ns, us, GHz, MHz = [Unit(s) for s in ('ns', 'us', 'GHz', 'MHz')]
from scripts import GHz_DAC_bringup
from pyle.dataking import measurement
from pyle.dataking import multiqubit as mq
from pyle.dataking import util as util
from pyle.util import sweeptools as st
from pyle.dataking import noon
from pyle.dataking import hadamard as hadi
from pyle.plotting import dstools as ds
from pyle import tomo
# TODO: Big dream here. Create a "report" or a dictionary of all the calibrations for each qubit
# then print that to the screen/save it to registry/ save fits to dataset
# TODO add fits to e.g. T1 and T2
# TODO separate datataking from analysis (so can rerun analysis on datasets directly from data vault)
# TODO save all fitting params with datasets
# TODO retune measure pulse amplitude for maximum visibility
def smsNotify(cxn, msg, username):
    """Send an SMS (subject 'automate daily') to username via the telecomm server."""
    return cxn.telecomm_server.send_sms('automate daily', msg, username)
def getBoardGroup(cxn, sample):
    """ Get the board group used by the experiment associated to sample"""
    fpgas = cxn.ghz_fpgas
    boardGroups = fpgas.list_board_groups()
    def getAnyBoard():
        # Scan the sample's devices and return the first FPGA board name
        # found in a 'uwave' channel entry; returns None implicitly if none.
        for dev in sample.values():
            try:
                #Look in channels to see if we can find any FPGA board
                return dict(dev['channels'])['uwave'][1][0]
            except (KeyError,TypeError):
                #Don't do anything, just try the next one
                pass
    board = getAnyBoard()
    if board is None:
        return board
    # Return the first board group whose device list includes that board.
    for bg in boardGroups:
        if board in [name[1] for name in fpgas.list_devices(bg)]:
            return bg
    return None
def getReadoutType(sample, device):
    # Placeholder: not implemented; always returns None.
    pass
def daily_bringup(s, pause=False):
    """Top-level daily bringup sequence for the sample session s.

    Currently only runs board bringup and DC-bias setup; everything after the
    bare `return` below is intentionally disabled (work in progress).
    """
    cxn = s._cxn
    username = s._dir[1]
    sample, qubits = util.loadQubits(s)
    boardGroup = getBoardGroup(cxn, sample)
    #Bring up FPGA boards. If it fails, send SMS to the user and end daily bringup
    if not bringupBoards(cxn, boardGroup):
        smsNotify(cxn, 'board bringup failed', username)
        return False
    #Set up DC bias (SQUID steps, resonator readout, etc.)
    bringup_dcBias(s, pause=pause)
    # NOTE(review): this bare return makes everything below unreachable —
    # presumably a deliberate truncation while later stages are debugged.
    return
    bringup_stepedge(s, pause=pause) #run stepedge and find stepedge for each qubit
    bringup_scurve(s, pause=pause) #measure scurve over reasonable range. find_mpa_func
    bringup_sample(s, pause=pause) #spectroscopy, tune pi freq/amp, visibility
    #fluxFunc/zpaFunc on reasonable frequency ranges
    #todo the pulshape tune-up
    single_qubit_scans(s) #Coherence factors
    qubit_coupling_resonator_scans(s) #For each qubit: swapTuner,fockTuner. Then inter-qubit timing.
    #qubit_memory_resonator_scans(s)
    gate_bringup(s)
    create_bell_state_iswap(s,zSweep=False)
def bringupBoards(cxn, boardGroup):
    """Run GHz DAC bringup on boardGroup; return True only if every check passed.

    Any exception during bringup is treated as a failure (returns False).
    """
    ok = True
    # NOTE(review): resultWords is unused in this function.
    resultWords = {True:'ok',False:'failed'}
    fpgas = cxn.ghz_fpgas
    try:
        successDict = GHz_DAC_bringup.bringupBoardGroup(fpgas, boardGroup)
        # successDict maps board name -> {check name: bool}.
        for board, successes in successDict.items():
            for item,success in successes.items():
                if not success:
                    print 'board %s %s failed'%(board,item)
                    ok = False
    except Exception:
        # Broad catch: any error during bringup just marks the run as failed.
        ok = False
    return ok
def bringup_dcBias(s, pause=True):
    # Placeholder: DC bias setup (SQUID steps, resonator readout) not implemented yet.
    pass
def bringup_squidsteps(s, pause=True):
N = len(s['config'])
for i in range(N):
print 'measuring squidsteps for qubit %d...' % i,
mq.squidsteps(s, measure=i, noisy=False, update=pause)
print 'done.'
def bringup_stepedge(s, pause=True):
N = len(s['config'])
for i in range(N):
print 'measuring step edge, qubit %d...' % i,
mq.stepedge(s, measure=i, noisy=False, update=pause)
print 'done.'
for i in range(N):
print 'binary searching to find step edge %d...' % i
mq.find_step_edge(s, measure=i, noisy=False)
print 'done.'
def bringup_scurve(s, pause=True):
    """Measure s-curves for each qubit, then calibrate the measure-pulse amplitude.

    For each qubit the 5% and 95% tunneling amplitudes bound the scan range
    (extended by one range-width on each side); afterwards find_mpa /
    find_mpa_func refine and store the calibration.
    """
    N = len(s['config'])
    for i in range(N):
        print 'measuring scurve, qubit %d...' % i
        mpa05 = mq.find_mpa(s, measure=i, target=0.05, noisy=False, update=False)
        print '5% tunneling at mpa =', mpa05
        mpa95 = mq.find_mpa(s, measure=i, target=0.95, noisy=False, update=False)
        print '95% tunneling at mpa =', mpa95
        # Pad the scan window by the full 5%-95% width on both sides,
        # rounded to the 0.002 grid; step sign follows the scan direction.
        low = st.nearest(mpa05 - (mpa95 - mpa05) * 1.0, 0.002)
        high = st.nearest(mpa95 + (mpa95 - mpa05) * 1.0, 0.002)
        step = 0.002 * np.sign(high - low)
        mpa_range = st.r[low:high:step]
        mq.scurve(s, mpa_range, measure=i, stats=1200, noisy=False, update=pause)
        print 'done.'
    for i in range(N):
        print 'binary searching to find mpa %d...' % i
        mq.find_mpa(s, measure=i, noisy=False, update=True)
        mq.find_mpa_func(s, measure=i, noisy=False, update=True)
        print 'done.'
def bringup_spectroscopy(s, freq_range=(6.0*GHz, 6.8*GHz)):
    """Run a 5 MHz-step spectroscopy sweep over freq_range for every qubit."""
    f_start, f_stop = freq_range
    for qubit_idx, _name in enumerate(s['config']):
        mq.spectroscopy(s, st.r[f_start:f_stop:0.005*GHz], measure=qubit_idx, update=True)
def bringup_sample(s, pause=False, fine_tune=True):
    """Bring up pi pulses and (optionally) flux/zpa calibration functions.

    Ends by refreshing the calibrated DAC-amplitude-to-frequency ratios.
    """
    N = len(s['config'])
    bringup_pi_pulses(s, pause=pause)
    if fine_tune:
        for i in range(N):
            # choose frequency range to cover all qubits
            # NOTE(review): fmin/fmax are recomputed every iteration —
            # presumably so later iterations pick up f10 values updated by
            # earlier tuning; confirm before hoisting out of the loop.
            fmin = min(s[qubit]['f10'] for qubit in s['config']) - 0.1*GHz
            fmax = max(s[qubit]['f10'] for qubit in s['config']) + 0.1*GHz
            print 'measuring flux func, qubit %d...' % i,
            mq.find_flux_func(s, (fmin, fmax), measure=i, noisy=False)
            print 'done.'
            print 'measuring zpa func, qubit %d...' % i,
            mq.find_zpa_func(s, (fmin, fmax), measure=i, noisy=False)
            print 'done.'
    # update the calibrated ratio of DAC amplitudes to detuning and rabi freqs
    update_cal_ratios(s)
def update_cal_ratios(s):
    """Store calibrated conversion ratios on each qubit's registry entry.

    Writes calRabiOverUwa (microwave amplitude -> Rabi frequency) and
    calDfOverZpa (z-pulse amplitude -> detuning) for every qubit.
    """
    s, _qubits, Qubits = util.loadQubits(s, write_access=True)
    # single-qubit bringup
    for Q in Qubits:
        # convert microwave amplitude to rabi frequency
        fwhm = Q['piFWHM'][ns]
        A = float(Q['piAmp'])
        Q['calRabiOverUwa'] = 2*np.sqrt(np.log(2)/np.pi)/(A*fwhm)*GHz # total area is 1 cycle
        # convert z amplitude to detuning frequency
        a = float(Q['calZpaFunc'][0])
        f = Q['f10'][GHz]
        Q['calDfOverZpa'] = 1/(4*a*f**3)*GHz
def bringup_pi_pulses(s, pause=False):
    """Calibrate pi pulses and measurement fidelities for every qubit.

    Pass 1: spectroscopy (one- and two-state) per qubit.
    Pass 2: iterate pi-pulse amplitude/frequency tuning, scan visibility,
    then record measurement error/fidelity (E0/F0/E1/F1) at the calibrated
    measure-pulse amplitude.
    """
    N = len(s['config'])
    for i in range(N):
        print 'measuring spectroscopy, qubit %d...' % i,
        mq.spectroscopy(s, measure=i, noisy=False, update=pause) # zoom in on resonance peak
        mq.spectroscopy_two_state(s, measure=i, noisy=False, update=pause)
        print 'done.'
    for i in range(N):
        print 'calibrating pi pulse, qubit %d...' % i,
        mq.pitunerHD(s, measure=i, noisy=False)
        print 'done.'
        print 'fine-tuning frequency, qubit %d...' % i,
        mq.freqtuner(s, iterations=1, measure=i, save=True)
        print 'done.'
        # Re-run amplitude tuning after the frequency shift.
        print 'redoing pi pulse calibration, qubit %d...' % i,
        mq.pitunerHD(s, measure=i, noisy=False)
        print 'done.'
        print 'checking visibility, qubit %d...' % i
        mpa1_05 = mq.find_mpa(s, measure=i, pi_pulse=True, target=0.05, noisy=False, update=False)
        print '5% tunneling of 1 at mpa =', mpa1_05
        mpa0_95 = mq.find_mpa(s, measure=i, pi_pulse=False, target=0.95, noisy=False, update=False)
        print '95% tunneling of 0 at mpa =', mpa0_95
        # Scan window: half a width beyond each bound, clamped to [0, 2],
        # on the 0.002 amplitude grid.
        low = max(st.nearest(mpa1_05 - (mpa0_95 - mpa1_05) * 0.5, 0.002), 0)
        high = min(st.nearest(mpa0_95 + (mpa0_95 - mpa1_05) * 0.5, 0.002), 2)
        step = 0.002 * np.sign(high - low)
        mpa_range = st.r[low:high:step]
        mq.visibility(s, mpa_range, stats=1200, measure=i, noisy=False)
        print 'done.'
        # TODO adjust measurePulse_amplitude for maximum visibility
        # measure e0, e1 and visibility very carefully at the correct measure-pulse amplitude
        print 'measuring visibility at calibrated mpa %d...' % i,
        Q = s[s['config'][i]]
        # 100 repeats at the calibrated amplitude; columns 1/2 of the returned
        # data are averaged into e0 (|0> error) and f1 (|1> fidelity).
        data = mq.visibility(s, [Q['measureAmp']]*100, stats=600, measure=i, noisy=False, name='Measurement Fidelity', collect=True)
        e0, f1 = np.mean(data[:,1]), np.mean(data[:,2])
        print 'done.'
        print ' e0: %g, f0: %g' % (e0, 1-e0)
        print ' e1: %g, f1: %g' % (1-f1, f1)
        Q['measureE0'] = e0
        Q['measureF0'] = 1-e0
        Q['measureE1'] = 1-f1
        Q['measureF1'] = f1
def bringup_timing(s):
N = len(s['config'])
for i in range(N):
print 'measuring timing delay on qubit %d...' % i
mq.testdelay(s, measure=i, update=True, plot=True, noisy=False)
print 'done.'
for i in range(1,N):
print 'measuring timing delay between qubit 0 and %d...' % i
mq.testdelay_x(s, measure=0, z_pulse=i, update=True, plot=True, noisy=False)
print 'done.'
def bringup_xtalk(s):
    """Measure the z-pulse crosstalk between each pair of qubits.
    We then create the crosstalk matrix and store it in the registry.
    In addition, we invert the crosstalk matrix, since this is needed
    to correct the z-pulse signals if desired.
    Assumes you have already run spectroscopy2DZauto, so that the
    cal_zpa_func has already been set, as xtalk is relative to this.
    """
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    # A[i][j] is the crosstalk on qubit i from a z-pulse on qubit j;
    # diagonal entries are 1 by construction.
    A = np.eye(len(qubits))
    for i, qi in enumerate(qubits):
        for j, _qj in enumerate(qubits):
            if i == j:
                continue
            print 'measuring crosstalk on %s from z-pulse on %s' % (i, j)
            xtfunc = mq.spectroscopy2DZxtalk(s, measure=i, z_pulse=j, noisy=False)
            # Ratio of qubit i's own zpa coefficient to the cross-driven one.
            aii = float(qi['calZpaFunc'][0])
            aij = float(xtfunc[0])
            print 'crosstalk =', aii/aij
            A[i,j] = aii/aij
    Ainv = np.linalg.inv(A)
    print
    print 'xtalk matrix:\n', A
    print
    print 'inverse xtalk matrix:\n', Ainv
    # Persist one row of each matrix per qubit.
    for i, Qi in enumerate(Qubits):
        Qi['calZpaXtalk'] = A[i]
        Qi['calZpaXtalkInv'] = Ainv[i]
def test_xtalk(s):
    """Measure measurement crosstalk for several SQUID readout orderings.

    For each readout ordering, per-qubit squid readout delays are staggered
    according to the ordering and meas_xtalk is run driving each qubit in
    turn.  The whole scan is performed twice — first with simultaneous
    readout, then with sequential readout — matching the two explicit loops
    this function previously contained (now deduplicated).
    """
    s, qubits = util.loadQubits(s, write_access=False)
    readouts = [(0,1,2,3), (0,1,3,2), (3,2,1,0), (1,2,0,3), (2,0,1,3), (2,1,3,0)]
    # (simult flag, dataset-name format) — order preserved from the original.
    scans = [
        (True, 'meas-xtalk simult readout order %s'),
        (False, 'meas-xtalk readout order %s'),
    ]
    for simult, name_fmt in scans:
        for readout_order in readouts:
            # Stagger the squid readout delays according to this ordering.
            for q, order in zip(qubits, readout_order):
                q['squidReadoutDelay'] = (order+1) * 10*us
            for i in range(len(qubits)):
                mq.meas_xtalk(s, name=name_fmt % (readout_order,), drive=i,
                              simult=simult, stats=1200, noisy=False)
#def test_measurements(s):
# """Test the various ways of measuring qubits."""
# s, qubits = util.loadQubits(s)
# q0, _q1, q2 = qubits
#
# zpa0 = q0['wZpulseAmp']
# zpa2 = q2['wZpulseAmp']
#
# # couple all three qubits together
# kw = dict(pi_pulse_on=1, t_couple=32*ns, name='w-state meas_test', delay=st.r[30:36:1,ns], zpas=[zpa0, 0, zpa2], stats=3000)
#
# werner.w_state(s, measure=0, **kw)
# werner.w_state(s, measure=[1], **kw)
# werner.w_state(s, measure=[0,1,2], **kw)
# werner.w_state(s, measure=measurement.Null(3, [0,1]), **kw)
# werner.w_state(s, measure=measurement.Null(3, [0,1,2]), **kw)
# werner.w_state(s, measure=measurement.Tomo(3, [0]), **kw)
# werner.w_state(s, measure=measurement.Tomo(3, [0,1]), **kw)
# werner.w_state(s, measure=measurement.Tomo(3, [0,1,2]), **kw)
# werner.w_state(s, measure=measurement.TomoNull(3, [0]), pipesize=2, **kw)
# werner.w_state(s, measure=measurement.TomoNull(3, [0,1]), pipesize=2, **kw)
# werner.w_state(s, measure=measurement.TomoNull(3, [0,1,2]), pipesize=2, **kw)
# werner.w_state(s, measure=measurement.Octomo(3, [0]), pipesize=2, **kw)
# werner.w_state(s, measure=measurement.Octomo(3, [0,1]), pipesize=2, **kw)
# werner.w_state(s, measure=measurement.Octomo(3, [0,1,2]), pipesize=2, **kw)
# werner.w_state(s, measure=measurement.OctomoNull(3, [0,1,2]), pipesize=2, **kw)
def single_qubit_scans(s):
N = len(s['config'])
for i in range(N):
print 'measuring T1, qubit %d' % i,
mq.t1(s, stats=1800, measure=i, noisy=False)
#TODO add T1 fits
print 'done.'
print 'measuring ramsey fringe, qubit %d' % i,
#TODO bring T1 fit from above and turn on T2 fit
mq.ramsey(s, stats=1800, measure=i, noisy=False)
print 'done.'
print 'measuring spin_echo, qubit %d' % i,
mq.spinEcho(s, stats=1800, measure=i, noisy=False)
print 'done.'
def qubit_coupling_resonator_scans(s):
    """Tune each qubit against the coupling resonator, then calibrate timing.

    Per qubit: SWAP10/SWAP21 spectroscopy, 2D swap spectroscopy (coarse far
    from the resonance, fine near it), Fock tuning, and |1>->|2> pi-pulse
    tuning.  Then qubit-qubit delays through the resonator and the resonator
    T1 are measured.  Prints elapsed wall-clock time at the end.
    """
    start = datetime.now()
    N = len(s['config'])
    for i in range(N):
        print 'measuring SWAP10 Spectroscopy, qubit %d' % i,
        swap10Len, swap10Amp = mq.swap10tuner(s, measure=i, stats=1800, noisy=False, whichRes='Coupler')
        print 'measuring 2D-SWAP Spec around Coupling resonator, for qubit %d' % i,
        # Coarse 5x steps below the resonance window, fine steps inside it.
        swapAmpBND = 0.2
        swapAmpSteps = 0.001
        coarseSet = np.arange(0,swap10Amp*(1-swapAmpBND),swapAmpSteps*5)
        fineSet = np.arange(swap10Amp*(1-swapAmpBND),swap10Amp*(1+swapAmpBND), swapAmpSteps)
        swap10Amp = np.hstack((coarseSet,fineSet))
        mq.swapSpectroscopy(s, state=1, swapLen=st.arangePQ(0,75,2,ns), swapAmp=swap10Amp, measure=i, save=True, noisy=False)
        #run focktuner level =1
        print 'fock tuner for fine calibratin of cZControlLen'
        mq.fockTuner(s, n=1, iteration=3, tuneOS=False, stats=1800, measure=i, save=True, noisy=False)
        print 'done. Calibrated Control qubits'
        print 'Tuning up pi-pulse for |2> of qubit %d' % i,
        noon.pituner21(s, stats = 1800, measure=i, noisy=False, findMPA=True)
        print 'done'
        print 'measuring SWAP21 Spectroscopy'
        swap21Len, swap21Amp = mq.swap21tuner(s, measure=i, stats=1800, noisy=False)
        print 'measuring 2D-SWAP Spec around resonator, for qubit %d' % i,
        mq.swapSpectroscopy(s, state=2, swapLen=st.arangePQ(0,60,2,ns), swapAmp=st.r[swap21Amp*(1-0.2):swap21Amp*(1+0.2):0.001], measure=i, save=True, noisy=False)
        mq.fockTuners21(s, n=1, iteration=3, tuneOS=False, stats=1800, measure=i, save=True, noisy=False)
        print 'done. Calibrated Target qubits'
    print 'now starting qubit-qubit timing calibrations...'
    print 'measuring qubit-qubit delay via the resonator'
    # Every ordered control/target pair among the four qubits.
    for j,k in [(0,1),(1,0), (0,2),(2,0), (1,2),(2,1), (0,3),(3,0), (1,3),(3,1), (2,3),(3,2)]: #( add back in when all 4 qubits work!
        mq.testQubResDelayCmp(s,measureC=j, measureT=k)
    print 'now measuring resonator T1 using q0 for photon exchange'
    noon.resonatorT1(s, stats=1800, measure=0, whichRes='Coupler')
    end = datetime.now()
    print 'start:', start
    print 'end:', end
    print 'elapsed time for qubit-resonator scans:', (end-start)
def qubit_memory_resonator_scans(s, stats=1800):
    """Tune each qubit against its memory resonator and measure resonator T1.

    Mirrors qubit_coupling_resonator_scans but with whichRes='Memory'.
    """
    start = datetime.now()
    N = len(s['config'])
    for i in range(N):
        print 'measuring SWAP10 Spectroscopy, qubit %d' % i,
        swap10Len, swap10Amp = mq.swap10tuner(s, measure=i, stats=stats, noisy=False, whichRes='Memory')
        print 'measuring 2D-SWAP Spec around Memory resonator, for qubit %d' % i,
        # Coarse 5x steps below the resonance window, fine steps inside it.
        swapAmpBND = 0.2
        swapAmpSteps = 0.001
        coarseSet = np.arange(0,swap10Amp*(1-swapAmpBND),swapAmpSteps*5)
        fineSet = np.arange(swap10Amp*(1-swapAmpBND),swap10Amp*(1+swapAmpBND), swapAmpSteps)
        swap10Amp = np.hstack((coarseSet,fineSet))
        mq.swapSpectroscopy(s, swapLen=st.arangePQ(0,300,5,ns), swapAmp=swap10Amp, measure=i,
                            save=True, noisy=False, stats=stats, whichRes='Memory')
        #run focktuner level =1
        print 'fock tuner for fine calibratin of memoryReadWriteLen'
        mq.fockTuner(s, n=1, iteration=3, tuneOS=False, stats=stats, measure=i, save=True, noisy=False, whichRes='Memory')
        print 'done. Memory resonator tuned up'
        print 'now measuring memory resonator T1 for resonator %d' %i,
        noon.resonatorT1(s, stats=stats, measure=i, whichRes='Memory')
    end = datetime.now()
    print 'start:', start
    print 'end:', end
    print 'elapsed time for qubit-mem-resonator scans:', (end-start)
def gate_bringup(s):
    """Calibrate single-qubit Hadamard gates and plot their trajectories.

    For each qubit: tune the z pi-pulse amplitude, take a Hadamard
    trajectory with tomography, then render Bloch-sphere and density-matrix
    plots from the most recent dataset in the session.
    """
    start = datetime.now()
    N = len(s['config'])
    for i in range(N):
        print 'Begin Calibrating Single Qubit Hadamard Gates'
        print 'Z-pi pulse tuner'
        mq.pitunerZ(s, measure=i, save=True, stats = 1800, update=True, noisy=False)
        print 'done tuning Z-pi amplitude for qubit %d' %i,
        hadi.hadamardTrajectory(s, measure=i, stats=1500, useHD=True, useTomo=True, tBuf=5*ns, save=True, noisy=False)
        print 'plotting hadamard trajectory on Bloch Sphere'
        print 'correcting for visibilities...generating pretty plots'
        hadi.plotTrajectory(path=s._dir, dataset=None, state=None) #grabs the most recent dataset in the current session
        hadi.plotDensityArrowPlot(path=s._dir, dataset = None) #grabs most recent dataset in the current session
    end = datetime.now()
    print 'start:', start
    print 'end:', end
    print 'elapsed time for single qubit gate bringups:', (end-start)
def create_bell_state_iswap(s,zSweep=False):
    """Prepare Bell states via sqrt(iSWAP) for each qubit pair.

    With zSweep=True, the Bell-state preparation is repeated over a sweep of
    z-correction phases derived from the control qubit's piAmpZ.
    NOTE(review): `shor` is not among this file's visible imports — confirm
    it is imported elsewhere before running.
    """
    start = datetime.now()
    for j,k in [(0,1),(0,2),(1,2)]: #(0,3),(1,3),(2,3) add back in when all 4 qubits work!
        Qj = s[s['config'][j]]
        print 'measuring SWAPs between q%d and q%d via Rc' %(j,k)
        shor.iSwap(s, measure=[j,k], stats=1500, noisy=False)
        if zSweep:
            # Sweep the correction phase over [-1, 1) x piAmpZ in 0.1 steps.
            bellPhase = Qj['piAmpZ']
            bellPhases = np.arange(-1.0,1.0,0.1)*bellPhase
            for phase in bellPhases:
                print 'Preparing Bell-States via SQRT(iSWAP) between q%d and q%d via Rc' %(j,k)
                shor.bellStateiSwap(s, reps=5, measure=[j,k], stats=1800, corrAmp=phase)
        else:
            print 'Preparing Bell-States via SQRT(iSWAP) between q%d and q%d via Rc' %(j,k)
            shor.bellStateiSwap(s, reps=5, measure=[j,k], stats=1800, corrAmp=0.0)
    end = datetime.now()
    print 'start:', start
    print 'end:', end
    print 'elapsed time for single qubit gate bringups:', (end-start)
def cPhase_bringup(s):
    """Placeholder for the controlled-phase gate bringup (only prints/timing for now)."""
    start = datetime.now()
    N = len(s['config'])
    for i in range(N):
        print 'done with COWS'
    end = datetime.now()
    print 'start:', start
    print 'end:', end
    print 'elapsed time for c-phase bringups:', (end-start)
def full_run(s):
    """Full multi-qubit bringup followed by W-state measurement with tomography."""
    bringup_multiqubits(s)
    measure_w(s) # do tomography
def bringup_multiqubits(s):
    """Sequence of multi-qubit calibrations: coupling, swaps, and phases.

    Prints total elapsed wall-clock time at the end.
    """
    start = datetime.now()
    test_coupling(s, guess_zpa=True, use_overshoots=False)
    tune_swaps(s)
    #test_coupling(s, guess_zpa=False, use_overshoots=True) # try the swap again with correct overshoot
    tune_phases(s) # tune microwave phases between channels
    check_phase_vs_time(s) # tune microwave phase between channels as a function of time
    tune_swap_dphases(s) # tune phase change due to a swap z-pulse
    tune_dphases(s) # tune phase change due to z-pulses of any length
    end = datetime.now()
    print 'start:', start
    print 'end:', end
    print 'elapsed:', (end-start)
def test_coupling(s, guess_zpa=True, use_overshoots=False):
    """Determine the z-pulse amplitude needed to bring qubits into resonance.
    Also, measure coupling strength between qubits.
    sets: w_zpulse_amp, w_swap_amp

    NOTE(review): this function uses `werner`, which is not among this
    file's visible imports — confirm it is imported elsewhere.
    """
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    q0, q1, q2 = qubits
    Q0, _Q1, Q2 = Qubits
    zpafunc0 = mq.get_zpa_func(q0)
    zpafunc1 = mq.get_zpa_func(q1)
    zpafunc2 = mq.get_zpa_func(q2)
    S = 0.015 * GHz # expected coupling strength
    if guess_zpa:
        # guess the required zpa
        zpa0 = q0['wZpulseAmp'] = zpafunc0(q1['f10'])
        zpa2 = q2['wZpulseAmp'] = zpafunc2(q1['f10'])
        # calculate zpa limits to give a reasonable range based on the expected coupling strength
        zpalims0 = sorted([zpafunc0(q1['f10'] - S*2), zpafunc0(q1['f10'] + S*2)])
        zpalims2 = sorted([zpafunc2(q1['f10'] - S*2), zpafunc2(q1['f10'] + S*2)])
    else:
        # use calibrated zpa
        zpa0 = q0['wZpulseAmp']
        zpa2 = q2['wZpulseAmp']
        # calculate zpa limits based on calibrated coupling change with zpa
        dzpa0 = abs(S[GHz]*2 / q0['coupling1DsByDzpa'][GHz])
        zpalims0 = [zpa0 - dzpa0, zpa0 + dzpa0]
        dzpa2 = abs(S[GHz]*2 / q2['coupling1DsByDzpa'][GHz])
        zpalims2 = [zpa2 - dzpa2, zpa2 + dzpa2]
    if not use_overshoots:
        q0['wZpulseOvershoot'] = 0.0
        q2['wZpulseOvershoot'] = 0.0
    from pyle.fitting import fourierplot
    opts = {
        'collect': True,
        'noisy': False,
    }
    null012 = measurement.Null(3, [0,1,2])
    if 1:
        # couple q0 with q1
        rng0 = st.r[zpalims0[0]:zpalims0[1]:(zpalims0[1] - zpalims0[0]) / 25]
        data0 = werner.w_state(s, name='coupling 0 and 1 2D', pi_pulse_on=1, measure=[0],
                               t_couple=1000*ns, delay=st.r[0:200:4,ns], zpas=[rng0, 0, 0], **opts)
        S0, zpa0, ds_by_dzpa0 = fourierplot.fitswap(data0, return_fit=True) # find swap frequency and optimal z-pulse
        print S0, zpa0, ds_by_dzpa0
        Q0['swapAmp'] = Q0['wZpulseAmp'] = zpa0
        Q0['coupling1'] = S0*MHz
        Q0['coupling1DsByDzpa'] = ds_by_dzpa0*MHz
        # do a 1D scan with the optimal pulse amplitude
        data0 = werner.w_state(s, name='coupling 0 and 1', pi_pulse_on=1, measure=null012,
                               t_couple=1000*ns, delay=st.r[0:100:2,ns], zpas=[zpa0, 0, 0], stats=3000, **opts)
    if 1:
        # couple q2 with q1
        rng2 = st.r[zpalims2[0]:zpalims2[1]:(zpalims2[1] - zpalims2[0]) / 25]
        data2 = werner.w_state(s, name='coupling 1 and 2 2D', pi_pulse_on=1, measure=[2],
                               t_couple=1000*ns, delay=st.r[0:200:4,ns], zpas=[0, 0, rng2], **opts)
        S2, zpa2, ds_by_dzpa2 = fourierplot.fitswap(data2, return_fit=True) # find swap frequency and optimal z-pulse
        print S2, zpa2, ds_by_dzpa2
        Q2['swapAmp'] = Q2['wZpulseAmp'] = zpa2
        Q2['coupling1'] = S2*MHz
        Q2['coupling1DsByDzpa'] = ds_by_dzpa2*MHz
        # do a 1D scan with the optimal pulse amplitude
        data2 = werner.w_state(s, name='coupling 1 and 2', pi_pulse_on=1, measure=null012,
                               t_couple=1000*ns, delay=st.r[0:100:2,ns], zpas=[0, 0, zpa2], stats=3000, **opts)
    if 1:
        # couple q0 with q2, moving q1 to negative detuning
        zpa1 = zpafunc1(q2['f10']) # move q1 out of the way
        rng2 = st.r[zpalims2[0]:zpalims2[1]:(zpalims2[1] - zpalims2[0]) / 25]
        data2 = werner.w_state(s, name='coupling 0 and 2 2D', pi_pulse_on=0, measure=[2],
                               t_couple=1000*ns, delay=st.r[0:200:4,ns], zpas=[zpa0, zpa1, rng2], **opts)
        S2, zpa2, ds_by_dzpa2 = fourierplot.fitswap(data2, return_fit=True) # find swap frequency and optimal z-pulse
        print S2, zpa2, ds_by_dzpa2
        #Q2['swapAmp'] = Q2['wZpulseAmp'] = zpa2
        Q2['coupling0'] = S2*MHz # save this coupling value, but not in the standard place
        Q2['coupling0DsByDzpa'] = ds_by_dzpa2*MHz # save fit, but not in the standard place
        # do a 1D scan with the optimal pulse amplitude
        data2 = werner.w_state(s, name='coupling 0 and 2', pi_pulse_on=0, measure=null012,
                               t_couple=1000*ns, delay=st.r[0:100:2,ns], zpas=[zpa0, zpa1, zpa2], stats=3000, **opts)
def tune_swaps(s):
    """Adjust overshoot and pulse length to get the best swap."""
    # overshoots don't seem to help
    s.q0['swapOvershoot'] = 0.0
    s.q2['swapOvershoot'] = 0.0
    werner.swaptuner(s, measure=0, pi_pulse_on=1, noisy=False, update=True, save=False, stats=3000, tune_overshoot=False)
    werner.swaptuner(s, measure=2, pi_pulse_on=1, noisy=False, update=True, save=False, stats=3000, tune_overshoot=False)
    # set overshoots for w-state to be equal to calibrated swap overshoots
    s.q0['wZpulseOvershoot'] = s.q0['swapOvershoot']
    s.q2['wZpulseOvershoot'] = s.q2['swapOvershoot']
def tune_phases(s, t0=None, calibrated_amp=True, stats=3000L, res=50, plot=True):
    """Tune the microwave phase of qubits 0 and 2 relative to qubit 1.

    Sweeps the adjustable phase over [-pi, pi), fits a sinusoid to the
    swap populations, stores the fitted phase in the registry, and returns
    (phase0, phase2).
    """
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    q0, q1, q2 = qubits
    if calibrated_amp:
        zpa0 = q0['swapAmp']
        zpa2 = q2['swapAmp']
    else:
        zpafunc0 = mq.get_zpa_func(q0)
        zpafunc2 = mq.get_zpa_func(q2)
        zpa0 = zpafunc0(q1['f10'])
        zpa2 = zpafunc2(q1['f10'])
    # Quarter-period coupling time for the assumed 15 MHz coupling.
    f_couple = 0.015*GHz
    t_couple = (1/f_couple/4)[ns]*ns
    phase = st.r[-np.pi:np.pi:np.pi/res]
    data0 = werner.uwave_phase_adjust(s, phase=phase, t0=t0, t_couple=t_couple, adjust=0, ref=1, zpas=[zpa0, 0.0, 0.0], collect=True, noisy=False, stats=stats)
    data2 = werner.uwave_phase_adjust(s, phase=phase, t0=t0, t_couple=t_couple, adjust=2, ref=1, zpas=[0.0, 0.0, zpa2], collect=True, noisy=False, stats=stats)
    def fitfunc(x, c):
        # c = [phase offset, amplitude, vertical offset]
        return -np.sin(x - c[0]) * c[1] + c[2]
    # --- qubit 0 fit (against P|10>) ---
    ph, _p00, p01, p10, _p11 = data0.T
    fit0, _ = leastsq(lambda c: fitfunc(ph, c) - p10, [q0['uwavePhase'], (max(p10)-min(p10))/2.0, (max(p10)+min(p10))/2.0])
    if fit0[1] < 0:
        # Negative amplitude: shift phase by pi and flip the sign instead.
        fit0[0] = (fit0[0] + 2*np.pi) % (2*np.pi) - np.pi
        fit0[1] *= -1
    # Wrap the fitted phase into [-pi, pi).
    fit0[0] = (fit0[0] + np.pi) % (2*np.pi) - np.pi
    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(ph, p10, 'b.', label='|10>')
        ax.plot(ph, p01, 'g.', label='|01>')
        ax.plot(ph, fitfunc(ph, fit0), 'r-')
        ax.axvline(fit0[0], linestyle='--', color='gray')
        ax.set_title('microwave phase adjustment, qubit 0, ref 1: phase = %0.5g' % fit0[0])
        ax.legend()
    print 'old phase:', q0['uwavePhase']
    print 'new phase:', fit0[0]
    Qubits[0]['uwavePhase'] = fit0[0]
    # --- qubit 2 fit (against P|01>) ---
    ph, _p00, p01, p10, _p11 = data2.T
    fit2, _ = leastsq(lambda c: fitfunc(ph, c) - p01, [q2['uwavePhase'], (max(p01)-min(p01))/2.0, (max(p01)+min(p01))/2.0])
    if fit2[1] < 0:
        fit2[0] = (fit2[0] + 2*np.pi) % (2*np.pi) - np.pi
        fit2[1] *= -1
    fit2[0] = (fit2[0] + np.pi) % (2*np.pi) - np.pi
    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(ph, p10, 'b.', label='|10>')
        ax.plot(ph, p01, 'g.', label='|01>')
        ax.plot(ph, fitfunc(ph, fit2), 'r-')
        ax.axvline(fit2[0], linestyle='--', color='gray')
        ax.set_title('microwave phase adjustment, qubit 2, ref 1: phase = %0.5g' % fit2[0])
        ax.legend()
    print 'old phase:', q2['uwavePhase']
    print 'new phase:', fit2[0]
    Qubits[2]['uwavePhase'] = fit2[0]
    return fit0[0], fit2[0]
def check_phase_vs_time(s, plot=True):
    """Measure microwave phase versus start time t0 and fit its linear slope.

    Runs tune_phases at t0 = 0..12 ns, unwraps the phases, fits a line, and
    stores slope/offset/fit for qubits 0 and 2; the slope is compared against
    the qubit detuning in the plots.
    """
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    phases0 = []
    phases2 = []
    t0s = st.r[0:12:1,ns]
    for t0 in t0s:
        ph0, ph2 = tune_phases(s, t0, stats=1200, res=20, plot=False)
        phases0.append(ph0)
        phases2.append(ph2)
    # Unwrap so the linear fit is not broken by 2*pi jumps.
    phases0 = np.unwrap(phases0)
    phases2 = np.unwrap(phases2)
    fit0 = np.polyfit(t0s, phases0, 1)
    fit2 = np.polyfit(t0s, phases2, 1)
    # Detunings of q0 and q2 from q1, in GHz.
    df0 = (s.q1['f10'] - s.q0['f10'])[GHz]
    df2 = (s.q1['f10'] - s.q2['f10'])[GHz]
    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(t0s, phases0, 'b.', label='measured phase')
        ax.plot(t0s, np.polyval(fit0, t0s), 'r-', label='phase fit')
        ax.plot(t0s, np.polyval([-2*np.pi*df0, 0], t0s), 'c-', label='detuning')
        ax.legend()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(t0s, phases2, 'b.', label='measured phase')
        ax.plot(t0s, np.polyval(fit2, t0s), 'r-', label='phase fit')
        ax.plot(t0s, np.polyval([-2*np.pi*df2, 0], t0s), 'c-', label='detuning')
        ax.legend()
    print 'qubit 0:'
    print ' detuning:', df0
    print ' phase fit:', fit0[0]/(2*np.pi)
    print ' phase offset:', fit0[1]/(2*np.pi)
    print
    Qubits[0]['uwavePhaseSlope'] = fit0[0]/(2*np.pi) * GHz
    Qubits[0]['uwavePhaseOfs'] = fit0[1]
    Qubits[0]['uwavePhaseFit'] = fit0
    print 'qubit 2:'
    print ' detuning q2:', df2
    print ' phase fit:', fit2[0]/(2*np.pi)
    print ' phase offset:', fit2[1]/(2*np.pi)
    print
    Qubits[2]['uwavePhaseSlope'] = fit2[0]/(2*np.pi) * GHz
    Qubits[2]['uwavePhaseOfs'] = fit2[1]
    Qubits[2]['uwavePhaseFit'] = fit2
def tune_swap_dphases(s, calibrated_amp=True):
    """Measure and store the phase shift caused by a swap z-pulse on q0 and q2."""
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    q0, q1, q2 = qubits
    Q0, _Q1, Q2 = Qubits
    if not calibrated_amp:
        zpafunc0 = mq.get_zpa_func(q0)
        zpafunc2 = mq.get_zpa_func(q2)
        q0['swapAmp'] = zpafunc0(q1['f10'])
        q2['swapAmp'] = zpafunc2(q1['f10'])
    def fitfunc(x, c):
        # c = [phase offset, amplitude, vertical offset]
        return np.cos(x - c[0]) * c[1] + c[2]
    def fit_dphase(i, q):
        # Sweep the adjustable phase and fit a cosine; the fitted offset is
        # the z-pulse-induced phase.  NOTE(review): parameter q is unused.
        print 'measuring qubit', i
        phase = st.r[-np.pi:np.pi:np.pi/20]
        data = werner.swap_dphase_adjust(s, phase, adjust=i, ref=1, stats=600, noisy=False, collect=True, save=False)
        ph, p1 = data.T
        fit, _ = leastsq(lambda c: fitfunc(ph, c) - p1, [ph[np.argmax(p1)], (max(p1)-min(p1))/2.0, (max(p1)+min(p1))/2.0])
        if fit[1] < 0:
            # Negative amplitude: shift phase by pi (wrapped) and flip sign.
            fit[0] = (fit[0] + 2*np.pi) % (2*np.pi) - np.pi
            fit[1] *= -1
        print ' dphase =', fit[0]
        dphase = fit[0]
        return dphase
    dphase0 = fit_dphase(0, q0)
    dphase2 = fit_dphase(2, q2)
    print 'qubit 0:'
    print ' swapDphase:', dphase0
    print
    Q0['swapDphase'] = dphase0
    print 'qubit 2:'
    print ' swapDphase:', dphase2
    print
    Q2['swapDphase'] = dphase2
def tune_dphases(s, calibrated_amp=True):
    """Measure the z-pulse-induced phase versus pulse length for q0 and q2.

    For each z-pulse length 0..24 ns, fits a cosine to the phase sweep, then
    fits a line through the unwrapped phases and stores slope and fit in the
    registry; plots compare the slope against the qubit detuning.
    """
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    q0, q1, q2 = qubits
    Q0, _Q1, Q2 = Qubits
    if not calibrated_amp:
        zpafunc0 = mq.get_zpa_func(q0)
        zpafunc2 = mq.get_zpa_func(q2)
        q0['wZpulseAmp'] = zpafunc0(q1['f10'])
        q2['wZpulseAmp'] = zpafunc2(q1['f10'])
    def fitfunc(x, c):
        # c = [phase offset, amplitude, vertical offset]
        return np.cos(x - c[0]) * c[1] + c[2]
    zp_rng = st.r[0:25:1,ns]
    ts = np.array([zp_len[ns] for zp_len in zp_rng])
    def fit_phases(i, q):
        # For each z-pulse length, fit the induced phase; return unwrapped array.
        print 'measuring qubit', i
        dphases = []
        for zp_len in zp_rng:
            q['wZpulseLen'] = zp_len
            phase = st.r[-np.pi:np.pi:np.pi/20]
            data = werner.w_dphase_adjust(s, phase, adjust=i, ref=1, stats=600,
                                          noisy=False, collect=True, save=True)
            ph, p1 = data.T
            fit, _ = leastsq(lambda c: fitfunc(ph, c) - p1, [ph[np.argmax(p1)], (max(p1)-min(p1))/2.0, (max(p1)+min(p1))/2.0])
            if fit[1] < 0:
                # Negative amplitude: shift phase by pi (wrapped) and flip sign.
                fit[0] = (fit[0] + 2*np.pi) % (2*np.pi) - np.pi
                fit[1] *= -1
            print ' t =', zp_len[ns], ' dphase =', fit[0]
            dphases.append(fit[0])
        print
        return np.unwrap(dphases)
    dphases0 = fit_phases(0, q0)
    dphases2 = fit_phases(2, q2)
    fit0 = np.polyfit(ts, dphases0, 1)
    fit2 = np.polyfit(ts, dphases2, 1)
    # Detunings of q0 and q2 from q1, in GHz, for comparison in the plots.
    df0 = (q1['f10'] - q0['f10'])[GHz]
    df2 = (q1['f10'] - q2['f10'])[GHz]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(ts, dphases0, 'b.', label='measured phase')
    ax.plot(ts, np.polyval(fit0, ts), 'r-', label='phase fit')
    ax.plot(ts, np.polyval([-2*np.pi*df0, 0], ts), 'c-', label='detuning')
    ax.legend()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(ts, dphases2, 'b.')
    ax.plot(ts, np.polyval(fit2, ts), 'r-')
    ax.plot(ts, np.polyval([-2*np.pi*df2, 0], ts), 'c-', label='detuning')
    ax.legend()
    print 'qubit 0:'
    print ' detuning:', df0
    print ' phase fit:', fit0[0]/(2*np.pi)
    print ' phase offset:', fit0[1]/(2*np.pi)
    print
    Q0['wDphaseSlope'] = fit0[0]/(2*np.pi) * GHz
    Q0['wDphaseFit'] = fit0
    print 'qubit 2:'
    print ' detuning q2:', df2
    print ' phase fit:', fit2[0]/(2*np.pi)
    print ' phase offset:', fit2[1]/(2*np.pi)
    print
    Q2['wDphaseSlope'] = fit2[0]/(2*np.pi) * GHz
    Q2['wDphaseFit'] = fit2
def measure_w(s, with_tomo=True):
    """Repeatedly run the W-state sequence, optionally with tomography.

    Runs forever (each pass performs the same scans); interrupt to stop.
    The coupling time is 4/9 of the mean single-photon swap time.
    """
    s, qubits = util.loadQubits(s)
    q0, _q1, q2 = qubits
    t_swap = (q0['swapLen'] + q2['swapLen']) / 2
    t_couple = t_swap * 4.0/9.0
    while True:
        # Couple all three qubits together; the long (1000 ns) coupling
        # serves as a reference against the tuned coupling time.
        probe = measurement.Null(3, [0, 1, 2])
        for coupling in (1000*ns, t_couple):
            werner.w_state(s, pi_pulse_on=1, t_couple=coupling,
                           delay=st.r[0:50:1, ns], measure=probe, stats=1200)
        if with_tomo:
            # Tomography scans: coarse delay sweep, then a fine sweep.
            tomo = measurement.TomoNull(3, [0, 1, 2])
            common = {
                'pi_pulse_on': 1,
                'measure': tomo,
                'stats': 600,
                'pipesize': 1,
            }
            for delays in (st.r[0:30:1, ns], st.r[15:20:0.25, ns]):
                werner.w_state(s, t_couple=1000*ns, delay=delays, **common)
                werner.w_state(s, t_couple=t_couple, delay=delays, **common)
def tweak_detunings(s):
    # Balance the q0<->q1 and q2<->q1 splittings by adjusting the swap
    # amplitude of whichever outer qubit has the smaller minimum splitting,
    # then re-run the W-state measurement at the candidate amplitudes.
    s, qubits = util.loadQubits(s)
    q0, _q1, q2 = qubits
    zpa0 = q0['swapAmp']
    zpa2 = q2['swapAmp']
    def sfunc(q):
        # Splitting vs. z-pulse amplitude around the qubit's swap point.
        p0 = q['coupling1'][MHz]
        p1 = q['coupling1DsByDzpa'][MHz]
        # NOTE(review): key 'swap_amp' differs from 'swapAmp' used elsewhere
        # in this function — confirm the registry actually has both keys.
        zpa0 = q['swap_amp']
        # NOTE(review): (zpa - zpa0) is not squared here; an avoided-crossing
        # splitting is usually sqrt(p0**2 + p1**2*(zpa - zpa0)**2) — verify.
        return lambda zpa: np.sqrt(p0**2 + p1**2*(zpa - zpa0))
    sfunc0 = sfunc(q0)
    sfunc2 = sfunc(q2)
    # Minimum splittings at the current swap amplitudes.
    smin0 = sfunc0(q0['swapAmp'])
    smin2 = sfunc2(q2['swapAmp'])
    print 'minimum splittings:'
    print ' q0 <-> q1: %g MHz' % smin0
    print ' q2 <-> q1: %g MHz' % smin2
    print
    if smin0 < smin2:
        # adjust zpa 0
        # Solve for the q0 amplitude whose splitting matches q2's minimum.
        zpa0opt = fsolve(lambda zpa: sfunc0(zpa) - smin2, zpa0)
        det0 = q0['coupling1DsByDzpa'] * (zpa0opt - q0['swapAmp'])
        print 'qubit0 optimal zpa=%g, s=%g, det=%g' % (zpa0opt, sfunc0(zpa0opt), det0)
        # Try the optimum and its mirror image about the current amplitude.
        zpas = sorted([zpa0opt, 2*zpa0 - zpa0opt])
        print 'trying', zpas
        for zpa0 in zpas:
            q0['swapAmp'] = zpa0
            measure_w(s, with_tomo=False)
    else:
        # adjust zpa 0
        # Same procedure, but adjusting q2's amplitude instead.
        zpa2opt = fsolve(lambda zpa: sfunc2(zpa) - smin0, zpa2)
        det2 = q2['coupling1DsByDzpa'] * (zpa2opt - q2['swapAmp'])
        print 'qubit2 optimal zpa=%g, s=%g, det=%g' % (zpa2opt, sfunc2(zpa2opt), det2)
        zpas = sorted([zpa2opt, 2*zpa2 - zpa2opt])
        print 'trying', zpas
        for zpa2 in zpas:
            q2['swapAmp'] = zpa2
            measure_w(s, with_tomo=False)
def measure_ghz(s, with_tomo=True, with_ghz=True):
    # Run the GHZ-state sequences (simultaneous-coupling and iswap variants),
    # optionally with GHZ measurement operators and full tomography.
    # The commented-out lines are earlier W-state experiments kept for record.
    s, qubits = util.loadQubits(s)
    q0, _q1, q2 = qubits
    for _i in [0]: #itertools.count():
        # couple all three qubits together
        null012 = measurement.Null(3, [0,1,2])
        #mq.w_state(s, pi_pulse_on=1, t_couple=1000*ns, delay=st.r[0:50:1,ns], measure=null012, stats=1200)
        #mq.w_state(s, pi_pulse_on=1, t_couple=17.5*ns, delay=st.r[0:50:1,ns], measure=null012, stats=1200)
        # Probability scans over the pulse-sequence stage parameter.
        ghz.ghz_simult(s, stage=st.r[0:3:0.05], measure=measurement.Null(3), stats=1800)
        ghz.ghz_iswap(s, stage=st.r[0:4:0.05], measure=measurement.Null(3), stats=1800)
        if with_ghz:
            # Coarser stage sweeps using the GHZ measurement operator.
            ghz.ghz_simult(s, stage=st.r[0:3:0.1], measure=ghz.GHZ(), stats=1200)
            ghz.ghz_iswap(s, stage=st.r[0:4:0.1], measure=ghz.GHZ(), stats=1200)
        if with_tomo:
            # do tomography
            tomo012 = measurement.TomoNull(3, [0,1,2])
            opts = {
                'pi_pulse_on': 1,
                'measure': tomo012,
                'stats': 600,
                'pipesize': 1,
            }
            #mq.w_state(s, t_couple=1000*ns, delay=st.r[0:30:1,ns], **opts)
            #mq.w_state(s, t_couple=19*ns, delay=st.r[0:30:1,ns], **opts)
            #mq.w_state(s, t_couple=1000*ns, delay=st.r[15:25:0.25,ns], **opts)
            #mq.w_state(s, t_couple=19*ns, delay=st.r[15:25:0.25,ns], **opts)
            # NOTE: 'opts' above is currently unused by these tomography runs.
            ghz.ghz_simult(s, stage=st.r[0:3:0.1], measure=measurement.TomoNull(3), pipesize=1, stats=1200)
            ghz.ghz_iswap(s, stage=st.r[0:4:0.1], measure=measurement.TomoNull(3), pipesize=1, stats=1200)
def measure_ghz_iswap(s, with_tomo=True, with_ghz=True):
    # Loop forever over swap-order / echo configurations, running the
    # tight-iswap GHZ tomography for each.  Interrupt to stop.
    # with_ghz is currently unused (the GHZ-operator runs are commented out).
    s, qubits = util.loadQubits(s)
    q0, _q1, q2 = qubits
    while True:
        #for sf, ef, es in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
        #                   (2, 0, 0), (2, 0, 1), (2, 1, 0), (2, 1, 1)]:
        # sf = which outer qubit swaps first; ef/es = echo on first/second.
        for sf, ef, es in [(0, 1, 0), (0, 1, 1),
                           (2, 0, 0), (2, 0, 1), (2, 1, 0), (2, 1, 1)]:
            # couple all three qubits together
            null012 = measurement.Null(3, [0,1,2])
            #mq.w_state(s, pi_pulse_on=1, t_couple=1000*ns, delay=st.r[0:50:1,ns], measure=null012, stats=1200)
            #mq.w_state(s, pi_pulse_on=1, t_couple=17.5*ns, delay=st.r[0:50:1,ns], measure=null012, stats=1200)
            opts = {
                'swap_first': sf,
                'swap_second': 2-sf,
                'echo_first': ef,
                'echo_second': es,
            }
            # opts2 omits the echo flags for ghz_iswap_tight, which does not
            # accept them.
            opts2 = {
                'swap_first': sf,
                'swap_second': 2-sf,
            }
            #ghz.ghz_simult(s, stage=st.r[0:3:0.05], measure=measurement.Null(3), stats=1800)
            #ghz.ghz_iswap(s, stage=st.r[0:4:0.05], measure=null012, stats=1200, **opts)
            #if with_ghz:
            #    ghz.ghz_simult(s, stage=st.r[0:3:0.1], measure=ghz.GHZ(), stats=1200)
            #    ghz.ghz_iswap(s, stage=st.r[0:4:0.1], measure=ghz.GHZ(), stats=1200)
            if with_tomo:
                # do tomography
                tomo012 = measurement.TomoNull(3, [0,1,2])
                #opts = {
                #    'pi_pulse_on': 1,
                #    'measure': tomo012,
                #    'stats': 600,
                #    'pipesize': 1,
                #}
                #mq.w_state(s, t_couple=1000*ns, delay=st.r[0:30:1,ns], **opts)
                #mq.w_state(s, t_couple=19*ns, delay=st.r[0:30:1,ns], **opts)
                #mq.w_state(s, t_couple=1000*ns, delay=st.r[15:25:0.25,ns], **opts)
                #mq.w_state(s, t_couple=19*ns, delay=st.r[15:25:0.25,ns], **opts)
                #ghz.ghz_simult(s, stage=st.r[0:3:0.1], measure=measurement.TomoNull(3), pipesize=1, stats=600)
                #ghz.ghz_iswap(s, stage=[4], measure=tomo012, pipesize=1, stats=6000, **opts)
                # Tomography at the final stage (4) only, with high stats.
                ghz.ghz_iswap_tight(s, stage=[4], measure=tomo012, pipesize=1, stats=6000, **opts2)
                #ghz.ghz_iswap(s, stage=st.r[0:4:0.2], measure=tomo012, pipesize=1, stats=600, **opts)
def measure_ghz_iswap_tight(s, with_tomo=True, with_ghz=True):
    """Run tight-iswap GHZ tomography for both swap orderings (q0 or q2
    swapping first).

    with_ghz is accepted for interface compatibility but is not used here.
    """
    s, qubits = util.loadQubits(s)
    q0, _q1, q2 = qubits
    for swap_first in (0, 2):
        if with_tomo:
            # Full tomography across all five stages of the sequence.
            tomo = measurement.TomoNull(3, [0, 1, 2])
            ghz.ghz_iswap_tight(s, stage=[0, 1, 2, 3, 4], measure=tomo,
                                pipesize=1, stats=6000,
                                swap_first=swap_first,
                                swap_second=2 - swap_first)
def measure_ghz_simult(s, with_tomo=True, with_ghz=True):
    # Run GHZ tomography for both swap orderings.
    # NOTE(review): despite the "_simult" name, this calls ghz_iswap_tight —
    # identical to measure_ghz_iswap_tight above.  Possibly it was meant to
    # call ghz.ghz_simult; confirm before relying on it.
    # with_ghz is currently unused (the GHZ-operator runs are commented out).
    s, qubits = util.loadQubits(s)
    q0, _q1, q2 = qubits
    for _i in range(1):
        for sf in [0, 2]:
            opts = {
                'swap_first': sf,
                'swap_second': 2-sf,
            }
            #null012 = measurement.Null(3, [0,1,2])
            #ghz.ghz_simult(s, stage=st.r[0:3:0.05], measure=null012, stats=1800)
            #ghz.ghz_iswap(s, stage=st.r[0:4:0.05], measure=null012, stats=1200, **opts)
            #if with_ghz:
            #    ghz.ghz_simult(s, stage=st.r[0:3:0.1], measure=ghz.GHZ(), stats=1200)
            #    ghz.ghz_iswap(s, stage=st.r[0:4:0.1], measure=ghz.GHZ(), stats=1200)
            if with_tomo:
                tomo012 = measurement.TomoNull(3, [0,1,2])
                ghz.ghz_iswap_tight(s, stage=[0,1,2,3,4], measure=tomo012, pipesize=1, stats=6000, **opts)
# Copyright (C) 2011-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import os
import pytest
from hamcrest import ( assert_that,
calling,
contains_exactly,
empty,
equal_to,
raises )
from unittest.mock import patch, MagicMock
from types import ModuleType
from ycmd.completers.cpp import flags
from ycmd.completers.cpp.flags import ShouldAllowWinStyleFlags, INCLUDE_FLAGS
from ycmd.tests.test_utils import ( MacOnly, TemporaryTestDir, WindowsOnly,
TemporaryClangProject )
from ycmd.utils import CLANG_RESOURCE_DIR
from ycmd.responses import NoExtraConfDetected
@contextlib.contextmanager
def MockExtraConfModule( settings_function ):
  """Patch the extra-conf store to serve a fake module that exposes
  settings_function under the function's own name."""
  fake_module = MagicMock( spec = ModuleType )
  fake_module.is_global_ycm_extra_conf = False
  setattr( fake_module, settings_function.__name__, settings_function )
  patcher = patch( 'ycmd.extra_conf_store.ModuleForSourceFile',
                   return_value = fake_module )
  with patcher:
    yield
def FlagsForFile_NothingReturned_test():
  """A Settings hook returning nothing yields empty flags and an
  unchanged filename."""
  def Settings( **kwargs ):
    return None

  flag_store = flags.Flags()
  with MockExtraConfModule( Settings ):
    returned_flags, returned_filename = flag_store.FlagsForFile( '/foo' )

  assert_that( returned_flags, empty() )
  assert_that( returned_filename, equal_to( '/foo' ) )
def FlagsForFile_FlagsNotReady_test():
  """When flags_ready is False, the result is an empty flag list."""
  def Settings( **kwargs ):
    return {
      'flags': [],
      'flags_ready': False
    }

  flag_store = flags.Flags()
  with MockExtraConfModule( Settings ):
    returned_flags, returned_filename = flag_store.FlagsForFile( '/foo', False )

  assert_that( list( returned_flags ), equal_to( [] ) )
  assert_that( returned_filename, equal_to( '/foo' ) )
def FlagsForFile_BadNonUnicodeFlagsAreAlsoRemoved_test():
  """Flags supplied as bytes objects are dropped, and unused flags like -c
  are removed; ordinary str flags survive.

  (The stale *args parameter — a leftover from removed @patch decorators —
  has been dropped; nothing passes arguments to this test.)
  """
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ bytes( b'-c' ), '-c', bytes( b'-foo' ), '-bar' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo', False )
    assert_that( list( flags_list ), equal_to( [ '-foo', '-bar' ] ) )
def FlagsForFile_FlagsCachedByDefault_test():
  """The first result for a file is cached; later Settings changes are
  ignored for the same file."""
  flag_store = flags.Flags()

  def Settings( **kwargs ):
    return { 'flags': [ '-x', 'c' ] }

  with MockExtraConfModule( Settings ):
    first_result, _ = flag_store.FlagsForFile( '/foo', False )
  assert_that( first_result, contains_exactly( '-x', 'c' ) )

  def Settings( **kwargs ):
    return { 'flags': [ '-x', 'c++' ] }

  # Cached result wins: still the original C flags.
  with MockExtraConfModule( Settings ):
    second_result, _ = flag_store.FlagsForFile( '/foo', False )
  assert_that( second_result, contains_exactly( '-x', 'c' ) )
def FlagsForFile_FlagsNotCachedWhenDoCacheIsFalse_test():
  """With do_cache False, a later call re-queries Settings."""
  flag_store = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-x', 'c' ],
      'do_cache': False
    }

  with MockExtraConfModule( Settings ):
    first_result, _ = flag_store.FlagsForFile( '/foo', False )
  assert_that( first_result, contains_exactly( '-x', 'c' ) )

  def Settings( **kwargs ):
    return { 'flags': [ '-x', 'c++' ] }

  # Because the first result was not cached, the new flags are returned.
  with MockExtraConfModule( Settings ):
    second_result, _ = flag_store.FlagsForFile( '/foo', False )
  assert_that( second_result, contains_exactly( '-x', 'c++' ) )
def FlagsForFile_FlagsCachedWhenDoCacheIsTrue_test():
  """With do_cache True, the first result is cached and reused."""
  flag_store = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-x', 'c' ],
      'do_cache': True
    }

  with MockExtraConfModule( Settings ):
    first_result, _ = flag_store.FlagsForFile( '/foo', False )
  assert_that( first_result, contains_exactly( '-x', 'c' ) )

  def Settings( **kwargs ):
    return { 'flags': [ '-x', 'c++' ] }

  # The cached C flags are returned in spite of the new Settings.
  with MockExtraConfModule( Settings ):
    second_result, _ = flag_store.FlagsForFile( '/foo', False )
  assert_that( second_result, contains_exactly( '-x', 'c' ) )
def FlagsForFile_DoNotMakeRelativePathsAbsoluteByDefault_test():
  """Relative -I paths are left as-is when no working directory is set."""
  def Settings( **kwargs ):
    return { 'flags': [ '-x', 'c', '-I', 'header' ] }

  flag_store = flags.Flags()
  with MockExtraConfModule( Settings ):
    result, _ = flag_store.FlagsForFile( '/foo', False )

  assert_that( result,
               contains_exactly( '-x', 'c',
                                 '-I', 'header' ) )
def FlagsForFile_MakeRelativePathsAbsoluteIfOptionSpecified_test():
  """include_paths_relative_to_dir rewrites relative -I paths to be
  absolute, rooted at the given directory."""
  def Settings( **kwargs ):
    return {
      'flags': [ '-x', 'c', '-I', 'header' ],
      'include_paths_relative_to_dir': '/working_dir/'
    }

  flag_store = flags.Flags()
  with MockExtraConfModule( Settings ):
    result, _ = flag_store.FlagsForFile( '/foo', False )

  assert_that( result,
               contains_exactly( '-x', 'c',
                                 '-I', os.path.normpath( '/working_dir/header' ) ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/usr/include/c++/v1',
    '/System/Library/Frameworks/Foundation.framework/Headers'
  ] )
def FlagsForFile_AddMacIncludePaths_SysRoot_Default_test():
  """Default sysroot: system libc++, local, resource-dir, system includes
  and the two standard framework paths are appended, in this exact order."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform'
    '/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks'
    '/Foundation.framework/Headers'
  ] )
def FlagsForFile_AddMacIncludePaths_SysRoot_Xcode_NoStdlib_test():
  """Xcode SDK present but without its own libc++: SDK include paths are
  used, and no C++ standard library path is added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Platforms'
                  '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                  '/usr/local/include',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Platforms'
                  '/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include',
      '-iframework', '/Applications/Xcode.app/Contents/Developer/Platforms'
                     '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                     '/System/Library/Frameworks',
      '-iframework', '/Applications/Xcode.app/Contents/Developer/Platforms'
                     '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                     '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform'
    '/Developer/SDKs/MacOSX.sdk/usr/include/c++/v1',
    '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform'
    '/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks'
    '/Foundation.framework/Headers'
  ] )
def FlagsForFile_AddMacIncludePaths_SysRoot_Xcode_WithStdlin_test():
  """Xcode SDK present WITH its own libc++: the SDK's c++/v1 dir is added
  first.  (Name typo: "Stdlin" should read "Stdlib".)"""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Platforms'
                  '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                  '/usr/include/c++/v1',
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Platforms'
                  '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                  '/usr/local/include',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Platforms'
                  '/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include',
      '-iframework', '/Applications/Xcode.app/Contents/Developer/Platforms'
                     '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                     '/System/Library/Frameworks',
      '-iframework', '/Applications/Xcode.app/Contents/Developer/Platforms'
                     '/MacOSX.platform/Developer/SDKs/MacOSX.sdk'
                     '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
    '/usr/include/c++/v1',
    '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
    '/System/Library/Frameworks/Foundation.framework/Headers'
  ] )
def FlagsForFile_AddMacIncludePaths_SysRoot_CommandLine_WithStdlib_test():
  """CommandLineTools SDK with its own libc++: SDK c++/v1 and include
  paths are used for the implicit sysroot."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                  '/usr/include/c++/v1',
      '-isystem', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                  '/usr/local/include',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                  '/usr/include',
      '-iframework', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                     '/System/Library/Frameworks',
      '-iframework', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                     '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
    '/System/Library/Frameworks/Foundation.framework/Headers'
  ] )
def FlagsForFile_AddMacIncludePaths_SysRoot_CommandLine_NoStdlib_test():
  """CommandLineTools SDK without libc++: SDK include paths are used and
  no C++ standard library path is added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                  '/usr/local/include',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                  '/usr/include',
      '-iframework', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                     '/System/Library/Frameworks',
      '-iframework', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'
                     '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/path/to/second/sys/root/usr/include/c++/v1'
  ] )
def FlagsForFile_AddMacIncludePaths_Sysroot_Custom_WithStdlib_test():
  """With several explicit sysroot flags, the LAST -isysroot/--sysroot
  seen wins... here the second one (which has libc++) is used for the
  derived include paths; user flags are preserved in order."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall',
                 '-isysroot/path/to/first/sys/root',
                 '-isysroot', '/path/to/second/sys/root/',
                 '--sysroot=/path/to/third/sys/root',
                 '--sysroot', '/path/to/fourth/sys/root' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-isysroot/path/to/first/sys/root',
      '-isysroot', '/path/to/second/sys/root/',
      '--sysroot=/path/to/third/sys/root',
      '--sysroot', '/path/to/fourth/sys/root',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/path/to/second/sys/root/usr/include/c++/v1',
      '-isystem', '/path/to/second/sys/root/usr/local/include',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/path/to/second/sys/root/usr/include',
      '-iframework', '/path/to/second/sys/root/System/Library/Frameworks',
      '-iframework', '/path/to/second/sys/root/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_Sysroot_Custom_NoStdlib_test():
  """Custom sysroot whose c++/v1 does not exist: same as above but with no
  C++ standard library include path added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall',
                 '-isysroot/path/to/first/sys/root',
                 '-isysroot', '/path/to/second/sys/root/',
                 '--sysroot=/path/to/third/sys/root',
                 '--sysroot', '/path/to/fourth/sys/root' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-isysroot/path/to/first/sys/root',
      '-isysroot', '/path/to/second/sys/root/',
      '--sysroot=/path/to/third/sys/root',
      '--sysroot', '/path/to/fourth/sys/root',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/path/to/second/sys/root/usr/local/include',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/path/to/second/sys/root/usr/include',
      '-iframework', '/path/to/second/sys/root/System/Library/Frameworks',
      '-iframework', '/path/to/second/sys/root/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path == '/Applications/Xcode.app/Contents/Developer/Toolchains/'
          'XcodeDefault.xctoolchain' )
def FlagsForFile_AddMacIncludePaths_Toolchain_Xcode_test():
  """Only the Xcode toolchain dir exists: its c++/v1 and usr/include are
  used, with the default / paths for everything else."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains'
                  '/XcodeDefault.xctoolchain/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains'
                  '/XcodeDefault.xctoolchain/usr/include',
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [
    '/usr/include/c++/v1',
    '/Applications/Xcode.app/Contents/Developer/Toolchains/'
    'XcodeDefault.xctoolchain'
  ] )
def FlagsForFile_AddMacIncludePaths_Toolchain_Xcode_WithSysrootStdlib_test():
  """Both the sysroot libc++ and the Xcode toolchain exist: the sysroot's
  /usr/include/c++/v1 takes precedence over the toolchain's."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains'
                  '/XcodeDefault.xctoolchain/usr/include',
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path == '/Library/Developer/CommandLineTools' )
def FlagsForFile_AddMacIncludePaths_Toolchain_CommandLine_test():
  """Only the CommandLineTools toolchain exists: its c++/v1 and
  usr/include are added alongside the default system paths."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/Library/Developer/CommandLineTools/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Library/Developer/CommandLineTools/usr/include',
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path:
  path in [ '/usr/include/c++/v1', '/Library/Developer/CommandLineTools' ] )
def FlagsForFile_AddMacIncludePaths_Toolchain_CommandLine_SysrootStdlib_test():
  """Sysroot libc++ plus CommandLineTools toolchain: the sysroot's c++/v1
  wins; the toolchain still contributes its usr/include."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/Library/Developer/CommandLineTools/usr/include',
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: path == '/usr/include/c++/v1' )
def FlagsForFile_AddMacIncludePaths_ObjCppLanguage_test():
  """The last -x flag wins: objective-c++ is a C++ language, so the
  libc++ include path is added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-x', 'c', '-xobjective-c++' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-x', 'c',
      '-xobjective-c++',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_ObjCppLanguage_NoSysrootStdbib_test():
  """Objective-C++ but no libc++ on disk: no c++/v1 path is added.
  (Name typo: "Stdbib" should read "Stdlib".)"""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-x', 'c', '-xobjective-c++' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-x', 'c',
      '-xobjective-c++',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: path == '/usr/include/c++/v1' )
def FlagsForFile_AddMacIncludePaths_CppLanguage_test():
  """Last -x flag is c++, so the libc++ include path is added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-x', 'c', '-xc++' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-x', 'c',
      '-xc++',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_CppLanguage_NoStdlib_test():
  """C++ language but no libc++ on disk: no c++/v1 path is added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-x', 'c', '-xc++' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-x', 'c',
      '-xc++',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_CLanguage_test():
  """Last -x flag is c: no C++ standard library path is added even though
  an earlier -xc++ appears."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-xc++', '-xc' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-xc++',
      '-xc',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_NoLibCpp_test():
  """-stdlib=libstdc++ (last stdlib flag) means no libc++ path is added."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-stdlib=libc++', '-stdlib=libstdc++' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-stdlib=libc++',
      '-stdlib=libstdc++',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_NoStandardCppIncludes_test():
  """-nostdinc++ suppresses the C++ standard library include path but
  keeps the C system paths and frameworks."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-nostdinc++' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-nostdinc++',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/local/include',
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_NoStandardSystemIncludes_test():
  """-nostdinc drops all system include paths and frameworks; only the
  clang resource dir include remains."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-nostdinc' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-nostdinc',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', os.path.join( CLANG_RESOURCE_DIR, 'include' ),
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: False )
def FlagsForFile_AddMacIncludePaths_NoBuiltinIncludes_test():
  """-nobuiltininc drops only the clang resource-dir include path; system
  paths and frameworks remain."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-nobuiltininc' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-nobuiltininc',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/local/include',
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
@MacOnly
@patch( 'os.path.exists', lambda path: path == '/usr/include/c++/v1' )
def FlagsForFile_AddMacIncludePaths_NoBuiltinIncludes_SysrootStdlib_test():
  """-nobuiltininc with libc++ present: c++/v1 is kept, only the
  resource-dir include is dropped."""
  flags_object = flags.Flags()

  def Settings( **kwargs ):
    return {
      'flags': [ '-Wall', '-nobuiltininc' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, _ = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly(
      '-Wall',
      '-nobuiltininc',
      '-resource-dir=' + CLANG_RESOURCE_DIR,
      '-isystem', '/usr/include/c++/v1',
      '-isystem', '/usr/local/include',
      '-isystem', '/usr/include',
      '-iframework', '/System/Library/Frameworks',
      '-iframework', '/Library/Frameworks',
      '-fspell-checking' ) )
def FlagsForFile_OverrideTranslationUnit_test():
  """override_filename replaces the translation unit; falsy values
  (None, '', or a missing key) leave the original filename untouched,
  while any truthy string — even '0' — overrides it."""
  flags_object = flags.Flags()

  # Case 1: a distinct override is returned as the filename.
  def Settings( **kwargs ):
    return {
      'flags': [],
      'override_filename': 'changed:' + kwargs[ 'filename' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, filename = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly() )
    assert_that( filename, equal_to( 'changed:/foo' ) )

  # Case 2: overriding with the same filename is a no-op.
  def Settings( **kwargs ):
    return {
      'flags': [],
      'override_filename': kwargs[ 'filename' ]
    }

  with MockExtraConfModule( Settings ):
    flags_list, filename = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly() )
    assert_that( filename, equal_to( '/foo' ) )

  # Case 3: explicit None keeps the original filename.
  def Settings( **kwargs ):
    return {
      'flags': [],
      'override_filename': None
    }

  with MockExtraConfModule( Settings ):
    flags_list, filename = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly() )
    assert_that( filename, equal_to( '/foo' ) )

  # Case 4: missing key keeps the original filename.
  def Settings( **kwargs ):
    return {
      'flags': [],
    }

  with MockExtraConfModule( Settings ):
    flags_list, filename = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly() )
    assert_that( filename, equal_to( '/foo' ) )

  # Case 5: empty string is falsy and keeps the original filename.
  def Settings( **kwargs ):
    return {
      'flags': [],
      'override_filename': ''
    }

  with MockExtraConfModule( Settings ):
    flags_list, filename = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly() )
    assert_that( filename, equal_to( '/foo' ) )

  # Case 6: '0' is a truthy string and therefore DOES override.
  def Settings( **kwargs ):
    return {
      'flags': [],
      'override_filename': '0'
    }

  with MockExtraConfModule( Settings ):
    flags_list, filename = flags_object.FlagsForFile( '/foo' )
    assert_that( flags_list, contains_exactly() )
    assert_that( filename, equal_to( '0' ) )
def FlagsForFile_Compatibility_KeywordArguments_test():
  """Legacy FlagsForFile hooks accepting **kwargs are still honored."""
  def FlagsForFile( filename, **kwargs ):
    return { 'flags': [ '-x', 'c' ] }

  flag_store = flags.Flags()
  with MockExtraConfModule( FlagsForFile ):
    result, _ = flag_store.FlagsForFile( '/foo', False )

  assert_that( result, contains_exactly( '-x', 'c' ) )
def FlagsForFile_Compatibility_NoKeywordArguments_test():
  """Legacy FlagsForFile hooks taking only a filename still work."""
  def FlagsForFile( filename ):
    return { 'flags': [ '-x', 'c' ] }

  flag_store = flags.Flags()
  with MockExtraConfModule( FlagsForFile ):
    result, _ = flag_store.FlagsForFile( '/foo', False )

  assert_that( result, contains_exactly( '-x', 'c' ) )
def RemoveUnusedFlags_Passthrough_test():
  """Flags containing nothing removable pass through unchanged."""
  compiler_flags = [ '-foo', '-bar' ]
  result = flags.RemoveUnusedFlags(
    compiler_flags,
    'file',
    ShouldAllowWinStyleFlags( compiler_flags ) )
  assert_that( result, contains_exactly( '-foo', '-bar' ) )
def RemoveUnusedFlags_RemoveDashC_test():
  """-c is stripped regardless of its position among the flags."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '-c' ]
  filename = 'file'

  # End, beginning, and middle placements of the flag to be removed.
  for candidate in ( expected + to_remove,
                     to_remove + expected,
                     expected[ :1 ] + to_remove + expected[ -1: ] ):
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   candidate,
                   filename,
                   ShouldAllowWinStyleFlags( candidate ) ) ) )
def RemoveUnusedFlags_RemoveColor_test():
  """The colour-diagnostics flag is stripped wherever it appears."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '--fcolor-diagnostics' ]
  filename = 'file'
  for combined in [ expected + to_remove,
                    to_remove + expected,
                    expected[ :1 ] + to_remove + expected[ -1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )
def RemoveUnusedFlags_RemoveDashO_test():
  """-o and its output-name argument are stripped wherever they appear."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '-o', 'output_name' ]
  filename = 'file'
  for combined in [ expected + to_remove,
                    to_remove + expected,
                    expected[ :1 ] + to_remove + expected[ -1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )
def RemoveUnusedFlags_RemoveMP_test():
  """-MP (phony-target generation) is stripped wherever it appears."""
  expected = [ '-foo', '-bar' ]
  to_remove = [ '-MP' ]
  filename = 'file'
  for combined in [ expected + to_remove,
                    to_remove + expected,
                    expected[ :1 ] + to_remove + expected[ -1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )
def RemoveUnusedFlags_RemoveFilename_test():
  """The compiled file's own name is stripped from the flag list."""
  expected = [ 'foo', '-bar' ]
  to_remove = [ 'file' ]
  filename = 'file'
  # The filename is never the first token (that is the compiler), so the
  # original only tests the appended and in-between positions.
  for combined in [ expected + to_remove,
                    expected[ :1 ] + to_remove + expected[ 1: ],
                    expected[ :1 ] + to_remove + expected[ -1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )
def RemoveUnusedFlags_RemoveFlagWithoutPrecedingDashFlag_test():
  """A stray filename is removed even when no dash flag precedes it."""
  expected = [ 'g++', '-foo', '-x', 'c++', '-bar', 'include_dir' ]
  to_remove = [ 'unrelated_file' ]
  filename = 'file'
  for combined in [ expected + to_remove,
                    expected[ :1 ] + to_remove + expected[ 1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )
@WindowsOnly
def RemoveUnusedFlags_RemoveStrayFilenames_CLDriver_test():
  """Stray filenames are stripped when the cl driver mode is in effect,
  however that mode is selected (--driver-mode=cl, clang-cl, cl.exe...)."""

  def Check( expected, to_remove ):
    # Each scenario checks the stray token both appended and inserted
    # right after the compiler.
    filename = 'file'
    for combined in [ expected + to_remove,
                      expected[ :1 ] + to_remove + expected[ 1: ] ]:
      assert_that( expected,
                   equal_to( flags.RemoveUnusedFlags(
                     combined,
                     filename,
                     ShouldAllowWinStyleFlags( combined ) ) ) )

  # Only --driver-mode=cl specified.
  Check( [ 'g++', '-foo', '--driver-mode=cl', '-xc++', '-bar',
           'include_dir', '/I', 'include_dir_other' ],
         [ '..' ] )
  # clang-cl and --driver-mode=cl
  Check( [ 'clang-cl.exe', '-foo', '--driver-mode=cl', '-xc++', '-bar',
           'include_dir', '/I', 'include_dir_other' ],
         [ 'unrelated_file' ] )
  # clang-cl only
  Check( [ 'clang-cl.exe', '-foo', '-xc++', '-bar',
           'include_dir', '/I', 'include_dir_other' ],
         [ 'unrelated_file' ] )
  # clang-cl and --driver-mode=gcc
  Check( [ 'clang-cl', '-foo', '-xc++', '--driver-mode=gcc',
           '-bar', 'include_dir' ],
         [ 'unrelated_file', '/I', 'include_dir_other' ] )
  # cl only with extension
  Check( [ 'cl.EXE', '-foo', '-xc++', '-bar', 'include_dir' ],
         [ '-c', 'path\\to\\unrelated_file' ] )
  # cl path with Windows separators
  Check( [ 'path\\to\\cl', '-foo', '-xc++', '/I', 'path\\to\\include\\dir' ],
         [ '-c', 'path\\to\\unrelated_file' ] )
@WindowsOnly
def RemoveUnusedFlags_MultipleDriverModeFlagsWindows_test():
  """With several --driver-mode flags, the last one decides whether
  Windows-style flags (and stray files) are recognised and stripped."""
  expected = [ 'g++',
               '--driver-mode=cl',
               '/Zi',
               '-foo',
               '--driver-mode=gcc',
               '--driver-mode=cl',
               'include_dir' ]
  to_remove = [ 'unrelated_file', '/c' ]
  filename = 'file'
  for combined in [ expected + to_remove,
                    expected[ :1 ] + to_remove + expected[ 1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )

  # When gcc mode ends up active, /Zi and /foo are treated as stray paths.
  flags_expected = [ '/usr/bin/g++', '--driver-mode=cl', '--driver-mode=gcc' ]
  flags_all = [ '/usr/bin/g++',
                '/Zi',
                '--driver-mode=cl',
                '/foo',
                '--driver-mode=gcc' ]
  assert_that( flags_expected,
               equal_to( flags.RemoveUnusedFlags(
                 flags_all,
                 filename,
                 ShouldAllowWinStyleFlags( flags_all ) ) ) )
def RemoveUnusedFlags_Depfiles_test():
  """Dependency-file generation flags are stripped from the command."""
  full_flags = [ '/bin/clang',
                 '-x', 'objective-c',
                 '-arch', 'armv7',
                 '-MMD',
                 '-MT', 'dependencies',
                 '-MF', 'file',
                 '--serialize-diagnostics', 'diagnostics' ]
  result = flags.RemoveUnusedFlags( full_flags,
                                    'test.m',
                                    ShouldAllowWinStyleFlags( full_flags ) )
  assert_that( result, contains_exactly( '/bin/clang',
                                         '-x', 'objective-c',
                                         '-arch', 'armv7' ) )
def EnableTypoCorrection_Empty_test():
  """An empty flag list gains -fspell-checking."""
  result = flags._EnableTypoCorrection( [] )
  assert_that( result, equal_to( [ '-fspell-checking' ] ) )
def EnableTypoCorrection_Trivial_test():
  """Existing flags are kept and -fspell-checking is appended."""
  result = flags._EnableTypoCorrection( [ '-x', 'c++' ] )
  assert_that( result, equal_to( [ '-x', 'c++', '-fspell-checking' ] ) )
def EnableTypoCorrection_Reciprocal_test():
  """An explicit -fno-spell-checking suppresses the automatic opt-in."""
  result = flags._EnableTypoCorrection( [ '-fno-spell-checking' ] )
  assert_that( result, equal_to( [ '-fno-spell-checking' ] ) )
def EnableTypoCorrection_ReciprocalOthers_test():
  """-fno-spell-checking wins even when mixed with other flags."""
  compile_flags = [ '-x', 'c++', '-fno-spell-checking' ]
  result = flags._EnableTypoCorrection( compile_flags )
  assert_that( result, equal_to( compile_flags ) )
@pytest.mark.parametrize( 'flag', INCLUDE_FLAGS )
def RemoveUnusedFlags_RemoveFilenameWithoutPrecedingInclude_test( flag ):
  """A stray path is removed, but a path following an include flag is kept."""
  to_remove = [ '/moo/boo' ]
  filename = 'file'
  expected = [ 'clang', flag, '/foo/bar', '-isystem/zoo/goo' ]

  for combined in [ expected + to_remove,
                    expected[ :1 ] + to_remove + expected[ 1: ] ]:
    assert_that( expected,
                 equal_to( flags.RemoveUnusedFlags(
                   combined,
                   filename,
                   ShouldAllowWinStyleFlags( combined ) ) ) )

  # A duplicated tail survives; only the stray path in the middle goes.
  duplicated = expected + to_remove + expected[ 1: ]
  assert_that( expected + expected[ 1: ],
               equal_to( flags.RemoveUnusedFlags(
                 duplicated,
                 filename,
                 ShouldAllowWinStyleFlags( duplicated ) ) ) )
def RemoveXclangFlags_test():
  """-Xclang argument pairs are removed wherever they appear."""
  expected = [ '-I', '/foo/bar', '-DMACRO=Value' ]
  to_remove = [ '-Xclang', 'load', '-Xclang', 'libplugin.so',
                '-Xclang', '-add-plugin', '-Xclang', 'plugin-name' ]
  for given, wanted in [ ( expected + to_remove, expected ),
                         ( to_remove + expected, expected ),
                         ( expected + to_remove + expected,
                           expected + expected ) ]:
    assert_that( wanted, equal_to( flags._RemoveXclangFlags( given ) ) )
def AddLanguageFlagWhenAppropriate_Passthrough_test():
  """Flags without a recognised compiler are left untouched."""
  compiler_flags = [ '-foo', '-bar' ]
  result = flags._AddLanguageFlagWhenAppropriate(
    compiler_flags,
    ShouldAllowWinStyleFlags( compiler_flags ) )
  assert_that( result, contains_exactly( '-foo', '-bar' ) )
@WindowsOnly
def AddLanguageFlagWhenAppropriate_CLDriver_Passthrough_test():
  """In cl driver mode no language flag is added."""
  compiler_flags = [ '-foo', '-bar', '--driver-mode=cl' ]
  result = flags._AddLanguageFlagWhenAppropriate(
    compiler_flags,
    ShouldAllowWinStyleFlags( compiler_flags ) )
  assert_that( result, contains_exactly( '-foo', '-bar', '--driver-mode=cl' ) )
def _AddLanguageFlagWhenAppropriateTester( compiler, language_flag = None ):
  """Checks that language_flag is inserted right after the given compiler.

  compiler: compiler executable expected to be recognised (e.g. 'g++').
  language_flag: list of flags (e.g. [ '-x', 'c++' ]) expected to be inserted
  after the compiler; defaults to no inserted flags.
  """
  # Use a None sentinel instead of a mutable [] default: a default list is
  # shared across calls, a classic Python pitfall (harmless here only because
  # the helper never mutates it, but fragile under future edits).
  if language_flag is None:
    language_flag = []
  to_removes = [
    [],
    [ '/usr/bin/ccache' ],
    [ 'some_command', 'another_command' ]
  ]
  expected = [ '-foo', '-bar' ]
  # Leading wrapper commands (ccache etc.) must be stripped before the
  # compiler is identified and the language flag inserted.
  for to_remove in to_removes:
    assert_that( [ compiler ] + language_flag + expected,
                 equal_to( flags._AddLanguageFlagWhenAppropriate(
                   to_remove + [ compiler ] + expected,
                   ShouldAllowWinStyleFlags( to_remove +
                                             [ compiler ] +
                                             expected ) ) ) )
@pytest.mark.parametrize( 'compiler', [ 'cc', 'gcc', 'clang', '/usr/bin/cc',
                                        '/some/other/path', 'some_command' ] )
def AddLanguageFlagWhenAppropriate_CCompiler_test( compiler ):
  """C compilers must not have an '-x' language flag added."""
  _AddLanguageFlagWhenAppropriateTester( compiler )
@pytest.mark.parametrize( 'compiler', [ 'c++', 'g++', 'clang++', '/usr/bin/c++',
                                        '/some/other/path++', 'some_command++',
                                        'c++-5', 'g++-5.1', 'clang++-3.7.3', '/usr/bin/c++-5',
                                        'c++-5.11', 'g++-50.1.49', 'clang++-3.12.3', '/usr/bin/c++-10',
                                        '/some/other/path++-4.9.3', 'some_command++-5.1',
                                        '/some/other/path++-4.9.31', 'some_command++-5.10' ] )
def AddLanguageFlagWhenAppropriate_CppCompiler_test( compiler ):
  """C++ compilers (including versioned names) get '-x c++' inserted."""
  _AddLanguageFlagWhenAppropriateTester( compiler, [ '-x', 'c++' ] )
def CompilationDatabase_NoDatabase_test():
  """With no compilation database, flag lookup raises NoExtraConfDetected."""
  with TemporaryTestDir() as tmp_dir:
    source_file = os.path.join( tmp_dir, 'test.cc' )
    assert_that( calling( flags.Flags().FlagsForFile ).with_args( source_file ),
                 raises( NoExtraConfDetected ) )
def CompilationDatabase_FileNotInDatabase_test():
  """An empty database yields no flags but echoes the filename back."""
  with TemporaryTestDir() as tmp_dir:
    with TemporaryClangProject( tmp_dir, [] ):
      source_file = os.path.join( tmp_dir, 'test.cc' )
      assert_that( flags.Flags().FlagsForFile( source_file ),
                   equal_to( ( [], source_file ) ) )
def CompilationDatabase_InvalidDatabase_test():
  """Unparseable database content raises NoExtraConfDetected."""
  with TemporaryTestDir() as tmp_dir:
    with TemporaryClangProject( tmp_dir, 'this is junk' ):
      source_file = os.path.join( tmp_dir, 'test.cc' )
      assert_that( calling( flags.Flags().FlagsForFile ).with_args( source_file ),
                   raises( NoExtraConfDetected ) )
def CompilationDatabase_UseFlagsFromDatabase_test():
  """Flags for a file present in the database come from its 'command' entry,
  with relative include paths made absolute against 'directory'."""
  with TemporaryTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -I. -I/absolute/path -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]
    with TemporaryClangProject( tmp_dir, compile_commands ):
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'test.cc' ),
          add_extra_clang_flags = False )[ 0 ],
        contains_exactly( 'clang++',
                          '-x',
                          'c++',
                          # --driver-mode=g++ is injected by the flag
                          # preparation; '-I.' resolves to the db directory.
                          '--driver-mode=g++',
                          '-x',
                          'c++',
                          '-I' + os.path.normpath( tmp_dir ),
                          '-I' + os.path.normpath( '/absolute/path' ),
                          '-Wall' ) )
def CompilationDatabase_UseFlagsFromSameDir_test():
  """Files absent from the database borrow flags from a database entry in
  the same directory, or in an ancestor directory."""
  with TemporaryTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]
    with TemporaryClangProject( tmp_dir, compile_commands ):
      f = flags.Flags()
      # If we ask for a file that is not in the DB but is in the same directory
      # of another file present in the DB, we get its flags.
      assert_that(
        f.FlagsForFile(
          os.path.join( tmp_dir, 'test1.cc' ),
          add_extra_clang_flags = False ),
        contains_exactly(
          contains_exactly( 'clang++',
                            '-x',
                            'c++',
                            '--driver-mode=g++',
                            '-Wall' ),
          os.path.join( tmp_dir, 'test1.cc' )
        )
      )
      # If we ask for a file that is not in the DB but in a subdirectory
      # of another file present in the DB, we get its flags.
      assert_that(
        f.FlagsForFile(
          os.path.join( tmp_dir, 'some_dir', 'test1.cc' ),
          add_extra_clang_flags = False ),
        contains_exactly(
          contains_exactly( 'clang++',
                            '-x',
                            'c++',
                            '--driver-mode=g++',
                            '-Wall' ),
          os.path.join( tmp_dir, 'some_dir', 'test1.cc' )
        )
      )
def CompilationDatabase_HeaderFile_SameNameAsSourceFile_test():
  """A header named like a database source file inherits that file's flags."""
  with TemporaryTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]
    with TemporaryClangProject( tmp_dir, compile_commands ):
      # If we ask for a header file with the same name as a source file, it
      # returns the flags of that cc file (and a special language flag for C++
      # headers).
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'test.h' ),
          add_extra_clang_flags = False )[ 0 ],
        contains_exactly( 'clang++',
                          '-x',
                          'c++',
                          '--driver-mode=g++',
                          '-Wall',
                          '-x',
                          'c++-header' ) )
def CompilationDatabase_HeaderFile_DifferentNameFromSourceFile_test():
  """A header with no matching database entry still borrows source flags."""
  with TemporaryTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
    ]
    with TemporaryClangProject( tmp_dir, compile_commands ):
      # Even if we ask for a header file with a different name than the source
      # file, it still returns the flags from the cc file (and a special
      # language flag for C++ headers).
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'not_in_the_db.h' ),
          add_extra_clang_flags = False )[ 0 ],
        contains_exactly( 'clang++',
                          '-x',
                          'c++',
                          '--driver-mode=g++',
                          '-Wall',
                          '-x',
                          'c++-header' ) )
def CompilationDatabase_ExplicitHeaderFileEntry_test():
  """A header's own database entry takes priority over the source file's."""
  with TemporaryTestDir() as tmp_dir:
    # Have an explicit header file entry which should take priority over the
    # corresponding source file
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -x c++ -I. -I/absolute/path -Wall',
        'file': os.path.join( tmp_dir, 'test.cc' ),
      },
      {
        'directory': tmp_dir,
        'command': 'clang++ -I/absolute/path -Wall',
        'file': os.path.join( tmp_dir, 'test.h' ),
      },
    ]
    with TemporaryClangProject( tmp_dir, compile_commands ):
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'test.h' ),
          add_extra_clang_flags = False )[ 0 ],
        contains_exactly( 'clang++',
                          '-x',
                          'c++',
                          '--driver-mode=g++',
                          '-I' + os.path.normpath( '/absolute/path' ),
                          '-Wall' ) )
def CompilationDatabase_CUDALanguageFlags_test():
  """A .cuh header borrows the matching .cu entry's flags plus '-x cuda'."""
  with TemporaryTestDir() as tmp_dir:
    compile_commands = [
      {
        'directory': tmp_dir,
        'command': 'clang++ -Wall ./test.cu',
        'file': os.path.join( tmp_dir, 'test.cu' ),
      },
    ]
    with TemporaryClangProject( tmp_dir, compile_commands ):
      # If we ask for a header file, it returns the equivalent cu file
      assert_that(
        flags.Flags().FlagsForFile(
          os.path.join( tmp_dir, 'test.cuh' ),
          add_extra_clang_flags = False )[ 0 ],
        contains_exactly( 'clang++',
                          '--driver-mode=g++',
                          '-Wall',
                          '-x',
                          'cuda' ) )
def _MakeRelativePathsInFlagsAbsoluteTest( test ):
  """Runs one table-driven case for flags._MakeRelativePathsInFlagsAbsolute.

  test: dict with 'flags' (input list), 'expect' (expected output list) and
  an optional 'wd' (working directory, defaulting to '/not_test').
  """
  # dict.get expresses the "key with default" lookup directly, replacing the
  # equivalent `test[ 'wd' ] if 'wd' in test else '/not_test'` conditional.
  wd = test.get( 'wd', '/not_test' )
  assert_that(
    flags._MakeRelativePathsInFlagsAbsolute( test[ 'flags' ], wd ),
    contains_exactly( *test[ 'expect' ] ) )
@pytest.mark.parametrize( 'test', [
  # Already absolute, positional arguments
  {
    'flags': [ '-isystem', '/test' ],
    'expect': [ '-isystem', os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-I', '/test' ],
    'expect': [ '-I', os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-iquote', '/test' ],
    'expect': [ '-iquote', os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-isysroot', '/test' ],
    'expect': [ '-isysroot', os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-include-pch', '/test' ],
    'expect': [ '-include-pch', os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-idirafter', '/test' ],
    'expect': [ '-idirafter', os.path.normpath( '/test' ) ],
  },
  # Already absolute, single arguments
  {
    'flags': [ '-isystem/test' ],
    'expect': [ '-isystem' + os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-I/test' ],
    'expect': [ '-I' + os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-iquote/test' ],
    'expect': [ '-iquote' + os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-isysroot/test' ],
    'expect': [ '-isysroot' + os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-include-pch/test' ],
    'expect': [ '-include-pch' + os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '-idirafter/test' ],
    'expect': [ '-idirafter' + os.path.normpath( '/test' ) ],
  },
  # Already absolute, double-dash arguments
  # (of the double-dash spellings only --sysroot= is recognised and
  # normalised; the rest pass through untouched)
  {
    'flags': [ '--isystem=/test' ],
    'expect': [ '--isystem=/test' ],
  },
  {
    'flags': [ '--I=/test' ],
    'expect': [ '--I=/test' ],
  },
  {
    'flags': [ '--iquote=/test' ],
    'expect': [ '--iquote=/test' ],
  },
  {
    'flags': [ '--sysroot=/test' ],
    'expect': [ '--sysroot=' + os.path.normpath( '/test' ) ],
  },
  {
    'flags': [ '--include-pch=/test' ],
    'expect': [ '--include-pch=/test' ],
  },
  {
    'flags': [ '--idirafter=/test' ],
    'expect': [ '--idirafter=/test' ],
  },
  # Relative, positional arguments
  {
    'flags': [ '-isystem', 'test' ],
    'expect': [ '-isystem', os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-I', 'test' ],
    'expect': [ '-I', os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-iquote', 'test' ],
    'expect': [ '-iquote', os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-isysroot', 'test' ],
    'expect': [ '-isysroot', os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-include-pch', 'test' ],
    'expect': [ '-include-pch', os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-idirafter', 'test' ],
    'expect': [ '-idirafter', os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  # Relative, single arguments
  {
    'flags': [ '-isystemtest' ],
    'expect': [ '-isystem' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-Itest' ],
    'expect': [ '-I' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-iquotetest' ],
    'expect': [ '-iquote' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-isysroottest' ],
    'expect': [ '-isysroot' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-include-pchtest' ],
    'expect': [ '-include-pch' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '-idiraftertest' ],
    'expect': [ '-idirafter' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  # Relative, double-dash arguments (the original comment said "Already
  # absolute" here, but these paths are relative; only --sysroot= is
  # resolved against wd, the rest pass through untouched)
  {
    'flags': [ '--isystem=test' ],
    'expect': [ '--isystem=test' ],
    'wd': '/test',
  },
  {
    'flags': [ '--I=test' ],
    'expect': [ '--I=test' ],
    'wd': '/test',
  },
  {
    'flags': [ '--iquote=test' ],
    'expect': [ '--iquote=test' ],
    'wd': '/test',
  },
  {
    'flags': [ '--sysroot=test' ],
    'expect': [ '--sysroot=' + os.path.normpath( '/test/test' ) ],
    'wd': '/test',
  },
  {
    'flags': [ '--include-pch=test' ],
    'expect': [ '--include-pch=test' ],
    'wd': '/test',
  },
  {
    'flags': [ '--idirafter=test' ],
    'expect': [ '--idirafter=test' ],
    'wd': '/test',
  },
] )
def MakeRelativePathsInFlagsAbsolute_test( test ):
  """Every spelling of an include-path flag is made absolute correctly."""
  _MakeRelativePathsInFlagsAbsoluteTest( test )
@pytest.mark.parametrize( 'test', [
  {
    'flags': [
      'ignored',
      '-isystem',
      '/test',
      '-ignored',
      '-I',
      '/test',
      '--ignored=ignored'
    ],
    'expect': [
      'ignored',
      '-isystem', os.path.normpath( '/test' ),
      '-ignored',
      '-I', os.path.normpath( '/test' ),
      '--ignored=ignored'
    ]
  },
  {
    'flags': [
      'ignored',
      '-isystem/test',
      '-ignored',
      '-I/test',
      '--ignored=ignored'
    ],
    'expect': [
      'ignored',
      '-isystem' + os.path.normpath( '/test' ),
      '-ignored',
      # normpath drops the trailing slash, so '/test/' equals '/test' here.
      '-I' + os.path.normpath( '/test/' ),
      '--ignored=ignored'
    ]
  },
  {
    'flags': [
      'ignored',
      '--isystem=/test',
      '-ignored',
      '--I=/test',
      '--ignored=ignored'
    ],
    'expect': [
      'ignored',
      '--isystem=/test',
      '-ignored',
      '--I=/test',
      '--ignored=ignored'
    ]
  },
  {
    'flags': [
      'ignored',
      '-isystem', 'test',
      '-ignored',
      '-I', 'test',
      '--ignored=ignored'
    ],
    'expect': [
      'ignored',
      '-isystem', os.path.normpath( '/test/test' ),
      '-ignored',
      '-I', os.path.normpath( '/test/test' ),
      '--ignored=ignored'
    ],
    'wd': '/test',
  },
  {
    'flags': [
      'ignored',
      '-isystemtest',
      '-ignored',
      '-Itest',
      '--ignored=ignored'
    ],
    'expect': [
      'ignored',
      '-isystem' + os.path.normpath( '/test/test' ),
      '-ignored',
      '-I' + os.path.normpath( '/test/test' ),
      '--ignored=ignored'
    ],
    'wd': '/test',
  },
  {
    'flags': [
      'ignored',
      '--isystem=test',
      '-ignored',
      '--I=test',
      '--ignored=ignored',
      '--sysroot=test'
    ],
    'expect': [
      'ignored',
      '--isystem=test',
      '-ignored',
      '--I=test',
      '--ignored=ignored',
      '--sysroot=' + os.path.normpath( '/test/test' ),
    ],
    'wd': '/test',
  },
] )
def MakeRelativePathsInFlagsAbsolute_IgnoreUnknown_test( test ):
  """Unknown flags pass through untouched while path flags are made absolute."""
  _MakeRelativePathsInFlagsAbsoluteTest( test )
def MakeRelativePathsInFlagsAbsolute_NoWorkingDir_test():
  """Without a working directory ('' here), flags are returned untouched."""
  _MakeRelativePathsInFlagsAbsoluteTest( {
    'flags': [ 'list', 'of', 'flags', 'not', 'changed', '-Itest' ],
    'expect': [ 'list', 'of', 'flags', 'not', 'changed', '-Itest' ],
    'wd': ''
  } )
def Dummy_test():
  """Always-passing placeholder test.

  Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
  """
  assert True
<?php
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Autoload;
/**
* ClassLoader implements a PSR-0, PSR-4 and classmap class loader.
*
* $loader = new \Composer\Autoload\ClassLoader();
*
* // register classes with namespaces
* $loader->add('Symfony\Component', __DIR__.'/component');
* $loader->add('Symfony', __DIR__.'/framework');
*
* // activate the autoloader
* $loader->register();
*
* // to enable searching the include path (eg. for PEAR packages)
* $loader->setUseIncludePath(true);
*
* In this example, if you try to use a class in the Symfony\Component
* namespace or one of its children (Symfony\Component\Console for instance),
* the autoloader will first look for the class under the component/
* directory, and it will then fallback to the framework/ directory if not
* found before giving up.
*
* This class is loosely based on the Symfony UniversalClassLoader.
*
* @author Fabien Potencier <fabien@symfony.com>
* @author Jordi Boggiano <j.boggiano@seld.be>
* @see https://www.php-fig.org/psr/psr-0/
* @see https://www.php-fig.org/psr/psr-4/
*/
class ClassLoader
{
    /** @var string|null Vendor directory this loader serves; keys self::$registeredLoaders. */
    private $vendorDir;
    // PSR-4
    /** @var array First character => prefix => prefix length, for fast PSR-4 candidate filtering. */
    private $prefixLengthsPsr4 = array();
    /** @var array PSR-4 prefix => list of base directories. */
    private $prefixDirsPsr4 = array();
    /** @var array Directories searched for classes matching no PSR-4 prefix. */
    private $fallbackDirsPsr4 = array();
    // PSR-0
    /** @var array First character => prefix => list of directories. */
    private $prefixesPsr0 = array();
    /** @var array Directories searched for classes matching no PSR-0 prefix. */
    private $fallbackDirsPsr0 = array();
    /** @var bool Whether to also search the PHP include path. */
    private $useIncludePath = false;
    /** @var array Class name => file path. */
    private $classMap = array();
    /** @var bool When true, only the class map is consulted; nothing else is searched. */
    private $classMapAuthoritative = false;
    /** @var array Negative cache: class name => true for classes known not to exist. */
    private $missingClasses = array();
    /** @var string|null APCu cache key prefix, or null when APCu caching is disabled. */
    private $apcuPrefix;
    /** @var array Registered loader instances indexed by their vendor directory. */
    private static $registeredLoaders = array();
    /**
     * @param string|null $vendorDir Vendor directory this loader loads classes for (optional).
     */
    public function __construct($vendorDir = null)
    {
        $this->vendorDir = $vendorDir;
    }
    /**
     * Returns all registered PSR-0 prefixes, merged across first-character buckets.
     *
     * @return array Prefix => list of directories.
     */
    public function getPrefixes()
    {
        if (!empty($this->prefixesPsr0)) {
            return call_user_func_array('array_merge', array_values($this->prefixesPsr0));
        }
        return array();
    }
    /**
     * @return array PSR-4 prefix => list of base directories.
     */
    public function getPrefixesPsr4()
    {
        return $this->prefixDirsPsr4;
    }
    /**
     * @return array PSR-0 fallback directories.
     */
    public function getFallbackDirs()
    {
        return $this->fallbackDirsPsr0;
    }
    /**
     * @return array PSR-4 fallback directories.
     */
    public function getFallbackDirsPsr4()
    {
        return $this->fallbackDirsPsr4;
    }
    /**
     * @return array Class name => file path.
     */
    public function getClassMap()
    {
        return $this->classMap;
    }
    /**
     * @param array $classMap Class to filename map
     */
    public function addClassMap(array $classMap)
    {
        if ($this->classMap) {
            $this->classMap = array_merge($this->classMap, $classMap);
        } else {
            $this->classMap = $classMap;
        }
    }
    /**
     * Registers a set of PSR-0 directories for a given prefix, either
     * appending or prepending to the ones previously set for this prefix.
     *
     * @param string       $prefix  The prefix
     * @param array|string $paths   The PSR-0 root directories
     * @param bool         $prepend Whether to prepend the directories
     */
    public function add($prefix, $paths, $prepend = false)
    {
        if (!$prefix) {
            if ($prepend) {
                $this->fallbackDirsPsr0 = array_merge(
                    (array) $paths,
                    $this->fallbackDirsPsr0
                );
            } else {
                $this->fallbackDirsPsr0 = array_merge(
                    $this->fallbackDirsPsr0,
                    (array) $paths
                );
            }
            return;
        }
        $first = $prefix[0];
        if (!isset($this->prefixesPsr0[$first][$prefix])) {
            $this->prefixesPsr0[$first][$prefix] = (array) $paths;
            return;
        }
        if ($prepend) {
            $this->prefixesPsr0[$first][$prefix] = array_merge(
                (array) $paths,
                $this->prefixesPsr0[$first][$prefix]
            );
        } else {
            $this->prefixesPsr0[$first][$prefix] = array_merge(
                $this->prefixesPsr0[$first][$prefix],
                (array) $paths
            );
        }
    }
    /**
     * Registers a set of PSR-4 directories for a given namespace, either
     * appending or prepending to the ones previously set for this namespace.
     *
     * @param string       $prefix  The prefix/namespace, with trailing '\\'
     * @param array|string $paths   The PSR-4 base directories
     * @param bool         $prepend Whether to prepend the directories
     *
     * @throws \InvalidArgumentException
     */
    public function addPsr4($prefix, $paths, $prepend = false)
    {
        if (!$prefix) {
            // Register directories for the root namespace.
            if ($prepend) {
                $this->fallbackDirsPsr4 = array_merge(
                    (array) $paths,
                    $this->fallbackDirsPsr4
                );
            } else {
                $this->fallbackDirsPsr4 = array_merge(
                    $this->fallbackDirsPsr4,
                    (array) $paths
                );
            }
        } elseif (!isset($this->prefixDirsPsr4[$prefix])) {
            // Register directories for a new namespace.
            $length = strlen($prefix);
            if ('\\' !== $prefix[$length - 1]) {
                throw new \InvalidArgumentException("A non-empty PSR-4 prefix must end with a namespace separator.");
            }
            $this->prefixLengthsPsr4[$prefix[0]][$prefix] = $length;
            $this->prefixDirsPsr4[$prefix] = (array) $paths;
        } elseif ($prepend) {
            // Prepend directories for an already registered namespace.
            $this->prefixDirsPsr4[$prefix] = array_merge(
                (array) $paths,
                $this->prefixDirsPsr4[$prefix]
            );
        } else {
            // Append directories for an already registered namespace.
            $this->prefixDirsPsr4[$prefix] = array_merge(
                $this->prefixDirsPsr4[$prefix],
                (array) $paths
            );
        }
    }
    /**
     * Registers a set of PSR-0 directories for a given prefix,
     * replacing any others previously set for this prefix.
     *
     * @param string       $prefix The prefix
     * @param array|string $paths  The PSR-0 base directories
     */
    public function set($prefix, $paths)
    {
        if (!$prefix) {
            $this->fallbackDirsPsr0 = (array) $paths;
        } else {
            $this->prefixesPsr0[$prefix[0]][$prefix] = (array) $paths;
        }
    }
    /**
     * Registers a set of PSR-4 directories for a given namespace,
     * replacing any others previously set for this namespace.
     *
     * @param string       $prefix The prefix/namespace, with trailing '\\'
     * @param array|string $paths  The PSR-4 base directories
     *
     * @throws \InvalidArgumentException
     */
    public function setPsr4($prefix, $paths)
    {
        if (!$prefix) {
            $this->fallbackDirsPsr4 = (array) $paths;
        } else {
            $length = strlen($prefix);
            if ('\\' !== $prefix[$length - 1]) {
                throw new \InvalidArgumentException("A non-empty PSR-4 prefix must end with a namespace separator.");
            }
            $this->prefixLengthsPsr4[$prefix[0]][$prefix] = $length;
            $this->prefixDirsPsr4[$prefix] = (array) $paths;
        }
    }
    /**
     * Turns on searching the include path for class files.
     *
     * @param bool $useIncludePath
     */
    public function setUseIncludePath($useIncludePath)
    {
        $this->useIncludePath = $useIncludePath;
    }
    /**
     * Can be used to check if the autoloader uses the include path to check
     * for classes.
     *
     * @return bool
     */
    public function getUseIncludePath()
    {
        return $this->useIncludePath;
    }
    /**
     * Turns off searching the prefix and fallback directories for classes
     * that have not been registered with the class map.
     *
     * @param bool $classMapAuthoritative
     */
    public function setClassMapAuthoritative($classMapAuthoritative)
    {
        $this->classMapAuthoritative = $classMapAuthoritative;
    }
    /**
     * Should class lookup fail if not found in the current class map?
     *
     * @return bool
     */
    public function isClassMapAuthoritative()
    {
        return $this->classMapAuthoritative;
    }
    /**
     * APCu prefix to use to cache found/not-found classes, if the extension is enabled.
     *
     * @param string|null $apcuPrefix
     */
    public function setApcuPrefix($apcuPrefix)
    {
        // Only enable when apcu is both loaded and turned on in the INI.
        $this->apcuPrefix = function_exists('apcu_fetch') && filter_var(ini_get('apc.enabled'), FILTER_VALIDATE_BOOLEAN) ? $apcuPrefix : null;
    }
    /**
     * The APCu prefix in use, or null if APCu caching is not enabled.
     *
     * @return string|null
     */
    public function getApcuPrefix()
    {
        return $this->apcuPrefix;
    }
    /**
     * Registers this instance as an autoloader.
     *
     * @param bool $prepend Whether to prepend the autoloader or not
     */
    public function register($prepend = false)
    {
        spl_autoload_register(array($this, 'loadClass'), true, $prepend);
        if (null === $this->vendorDir) {
            return;
        }
        if ($prepend) {
            self::$registeredLoaders = array($this->vendorDir => $this) + self::$registeredLoaders;
        } else {
            // Re-insert so this loader moves to the end of the map.
            unset(self::$registeredLoaders[$this->vendorDir]);
            self::$registeredLoaders[$this->vendorDir] = $this;
        }
    }
    /**
     * Unregisters this instance as an autoloader.
     */
    public function unregister()
    {
        spl_autoload_unregister(array($this, 'loadClass'));
        if (null !== $this->vendorDir) {
            unset(self::$registeredLoaders[$this->vendorDir]);
        }
    }
    /**
     * Loads the given class or interface.
     *
     * @param  string    $class The name of the class
     * @return bool|null True if loaded, null otherwise
     */
    public function loadClass($class)
    {
        if ($file = $this->findFile($class)) {
            includeFile($file);
            return true;
        }
    }
    /**
     * Finds the path to the file where the class is defined.
     *
     * @param string $class The name of the class
     *
     * @return string|false The path if found, false otherwise
     */
    public function findFile($class)
    {
        // class map lookup
        if (isset($this->classMap[$class])) {
            return $this->classMap[$class];
        }
        if ($this->classMapAuthoritative || isset($this->missingClasses[$class])) {
            return false;
        }
        if (null !== $this->apcuPrefix) {
            $file = apcu_fetch($this->apcuPrefix.$class, $hit);
            if ($hit) {
                return $file;
            }
        }
        $file = $this->findFileWithExtension($class, '.php');
        // Search for Hack files if we are running on HHVM
        if (false === $file && defined('HHVM_VERSION')) {
            $file = $this->findFileWithExtension($class, '.hh');
        }
        if (null !== $this->apcuPrefix) {
            apcu_add($this->apcuPrefix.$class, $file);
        }
        if (false === $file) {
            // Remember that this class does not exist.
            $this->missingClasses[$class] = true;
        }
        return $file;
    }
    /**
     * Returns the currently registered loaders indexed by their corresponding vendor directories.
     *
     * @return self[]
     */
    public static function getRegisteredLoaders()
    {
        return self::$registeredLoaders;
    }
    /**
     * Searches PSR-4 then PSR-0 locations (and optionally the include path)
     * for the file defining $class.
     *
     * @param string $class The fully-qualified class name
     * @param string $ext   The file extension to probe ('.php' or '.hh')
     *
     * @return string|false The path if found, false otherwise
     */
    private function findFileWithExtension($class, $ext)
    {
        // PSR-4 lookup
        $logicalPathPsr4 = strtr($class, '\\', DIRECTORY_SEPARATOR) . $ext;
        $first = $class[0];
        if (isset($this->prefixLengthsPsr4[$first])) {
            $subPath = $class;
            // Try progressively shorter namespace prefixes, longest first.
            while (false !== $lastPos = strrpos($subPath, '\\')) {
                $subPath = substr($subPath, 0, $lastPos);
                $search = $subPath . '\\';
                if (isset($this->prefixDirsPsr4[$search])) {
                    $pathEnd = DIRECTORY_SEPARATOR . substr($logicalPathPsr4, $lastPos + 1);
                    foreach ($this->prefixDirsPsr4[$search] as $dir) {
                        if (file_exists($file = $dir . $pathEnd)) {
                            return $file;
                        }
                    }
                }
            }
        }
        // PSR-4 fallback dirs
        foreach ($this->fallbackDirsPsr4 as $dir) {
            if (file_exists($file = $dir . DIRECTORY_SEPARATOR . $logicalPathPsr4)) {
                return $file;
            }
        }
        // PSR-0 lookup
        if (false !== $pos = strrpos($class, '\\')) {
            // namespaced class name
            $logicalPathPsr0 = substr($logicalPathPsr4, 0, $pos + 1)
                . strtr(substr($logicalPathPsr4, $pos + 1), '_', DIRECTORY_SEPARATOR);
        } else {
            // PEAR-like class name
            $logicalPathPsr0 = strtr($class, '_', DIRECTORY_SEPARATOR) . $ext;
        }
        if (isset($this->prefixesPsr0[$first])) {
            foreach ($this->prefixesPsr0[$first] as $prefix => $dirs) {
                if (0 === strpos($class, $prefix)) {
                    foreach ($dirs as $dir) {
                        if (file_exists($file = $dir . DIRECTORY_SEPARATOR . $logicalPathPsr0)) {
                            return $file;
                        }
                    }
                }
            }
        }
        // PSR-0 fallback dirs
        foreach ($this->fallbackDirsPsr0 as $dir) {
            if (file_exists($file = $dir . DIRECTORY_SEPARATOR . $logicalPathPsr0)) {
                return $file;
            }
        }
        // PSR-0 include paths.
        if ($this->useIncludePath && $file = stream_resolve_include_path($logicalPathPsr0)) {
            return $file;
        }
        return false;
    }
}
/**
 * Scope isolated include.
 *
 * Prevents access to $this/self from included files: because this is a
 * plain function (not a method), the included file sees no object scope.
 *
 * @param string $file path of the file to include
 */
function includeFile($file)
{
    include $file;
}
#!/usr/bin/python
#
# Problem: Spinning Blade
# Language: Python
# Author: KirarinSnow
# Usage: pypy thisfile.py <input.in >output.out
# Comments: Runs in 7 minutes using pypy for the large input.
# Pre-allocated 500x500 scratch grids, reused across test cases by compute():
#   mb[i][j] -- per-cell triple (mass, row-moment, col-moment)
#   s[i][j]  -- 2D inclusive prefix sums of mb
# Rows are built with a comprehension so each row is a distinct list.
mb = [[None] * 500 for _ in range(500)]
s = [[None] * 500 for _ in range(500)]
def compute():
    """Solve one test case read from stdin (Python 2 only).

    Input: "r c d" on one line, then r rows of c digits (per-cell masses);
    the effective mass of a cell is digit + d.  For every square sub-grid
    of side >= 3 with its four corner cells removed, checks whether the
    centre of mass coincides with the square's geometric centre, using the
    module-level prefix-sum grids mb and s.  Returns the largest valid side
    length, or "IMPOSSIBLE" if none exists.
    """
    r, c, d = map(int, raw_input().split())
    # g[i][j]: digit mass of cell (i, j); py2 map() yields a list here.
    g = [map(int, raw_input()) for i in range(r)]
    # mb[i][j] = (m, i*m, j*m) with m = g[i][j] + d.
    for i in range(r):
        for j in range(c):
            mi = g[i][j]+d
            mb[i][j] = (mi, i*mi, j*mi)
    # s[i][j][k] = inclusive 2D prefix sum of mb[..][..][k].
    for i in range(r):
        for j in range(c):
            if i == 0 and j == 0:
                s[i][j] = mb[i][j]
            elif i == 0:
                s[i][j] = map(lambda k: s[i][j-1][k] + mb[i][j][k], range(3))
            elif j == 0:
                s[i][j] = map(lambda k: s[i-1][j][k] + mb[i][j][k], range(3))
            else:
                s[i][j] = map(lambda k: s[i-1][j][k] + s[i][j-1][k] -
                              s[i-1][j-1][k] + mb[i][j][k], range(3))
    tm = -1  # best (largest) valid side length found so far
    for r1 in range(r):
        for c1 in range(c):
            # r2 >= r1+2 forces side length >= 3; c2 makes the region square.
            for r2 in range(r1+2, r):
                if c1 + (r2-r1) < c:
                    c2 = c1 + (r2-r1)
                    m = [0]*3
                    for k in range(3):
                        # Rectangle sum via inclusion-exclusion on s.
                        m[k] = s[r2][c2][k]
                        if r1-1 >= 0:
                            m[k] -= s[r1-1][c2][k]
                            if c1-1 >= 0:
                                m[k] += s[r1-1][c1-1][k]
                        if c1-1 >= 0:
                            m[k] -= s[r2][c1-1][k]
                        # The four corner cells are cut away from the blade.
                        m[k] -= mb[r1][c1][k] + mb[r1][c2][k] + \
                                mb[r2][c1][k] + mb[r2][c2][k]
                    # Centroid == centre: (r1+r2)/2 == m[1]/m[0] and
                    # (c1+c2)/2 == m[2]/m[0], cross-multiplied to stay integral.
                    if m[0] >= 0 and (r1+r2)*m[0] == 2*m[1] and \
                       (c1+c2)*m[0] == 2*m[2]:
                        tm = max(tm, r2-r1+1)
    return "IMPOSSIBLE" if tm < 0 else tm
# One output line per test case; the case count T comes from the first
# input line (Python 2 input()/print statement).
for i in range(input()):
    print "Case #%d: %s" % (i+1, compute())
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TMultiplexedProtocol
import thrift.Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
import importlib
import re
import socket
import struct
import sys
class ThriftClient(object):
MATCH_SPEC_T = "_match_spec_t"
ACTION_SPEC_T = "_action_spec_t"
TABLE_ADD_WITH = "_table_add_with_"
TABLE_MODIFY_WITH = "_table_modify_with_"
TABLE_DELETE = "_table_delete"
ADD_MEMBER_WITH = "_add_member_with_"
MODIFY_MEMBER_WITH = "_modify_member_with_"
DEL_MEMBER = "_del_member"
CREATE_GROUP = "_create_group"
DEL_GROUP = "_del_group"
GET_FIRST_ENTRY_HANDLE = "_get_first_entry_handle"
GET_NEXT_ENTRY_HANDLES = "_get_next_entry_handles"
GET_ENTRY = "_get_entry"
THRIFT_SPEC = "thrift_spec"
SET_DEFAULT_ACTION = "_set_default_action_"
def __init__(self, module, hostname, port, p4_name):
self.p4_client_module = importlib.import_module(".".join(["p4_pd_rpc", p4_name]))
self.mc_client_module = importlib.import_module(".".join(["mc_pd_rpc", "mc"]))
self.conn_mgr_client_module = importlib.import_module(".".join(["conn_mgr_pd_rpc",
"conn_mgr"]))
self._p4_name = p4_name
self._utils = importlib.import_module("utils")
self.setup(hostname, port)
self._session_handle = self._conn_mgr.client_init()
from res_pd_rpc.ttypes import DevTarget_t
self._dev_target = DevTarget_t(0, self._utils.hex_to_i16(0xFFFF))
def get_spec_prefix(self):
return self._p4_name + '_'
def setup(self, hostname, port):
# Set up thrift client and contact server
self._transport = TSocket.TSocket(hostname, port)
self._transport = TTransport.TBufferedTransport(self._transport)
bprotocol = TBinaryProtocol.TBinaryProtocol(self._transport)
self._mc_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, "mc")
self._conn_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, "conn_mgr")
self._p4_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, self._p4_name)
self._client = self.p4_client_module.Client(self._p4_protocol)
self._mc = self.mc_client_module.Client(self._mc_protocol)
self._conn_mgr = self.conn_mgr_client_module.Client(self._conn_mgr_protocol)
self._transport.open()
def get_match_field_names(self, table_name):
return self.get_parameter_names(table_name, ThriftClient.MATCH_SPEC_T)
def get_action_parameter_names(self, action_name):
return self.get_parameter_names(action_name, ThriftClient.ACTION_SPEC_T)
def get_spec_class(self, name, spec_suffix):
spec_name = self.get_spec_prefix() + name + spec_suffix
return getattr(self.p4_client_module, spec_name)
def get_parameter_names(self, name, spec_suffix):
try:
spec_class = self.get_spec_class(name, spec_suffix)
parameter_names = [x[2] for x in spec_class.thrift_spec[1:]]
except AttributeError:
raise AttributeError("Spec not found for %s" % name)
return parameter_names
def set_default_action(self, table_name, action_name, action_spec_tuple):
add_entry_parameters = [self._session_handle, self._dev_target]
if action_spec_tuple != ():
add_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_set_default_action_function(table_name, action_name)(*add_entry_parameters)
def add_entry(self, table_name, match_spec_tuple, action_name, action_spec_tuple, priority):
match_spec = self.get_match_spec(table_name, match_spec_tuple)
add_entry_parameters = [self._session_handle, self._dev_target, match_spec]
if priority != None:
add_entry_parameters.append(priority)
if action_spec_tuple != ():
add_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_add_entry_function(table_name, action_name)(*add_entry_parameters)
def add_entry_with_selector(self, table_name, match_spec_tuple, group_handle):
match_spec = self.get_match_spec(table_name, match_spec_tuple)
add_entry_with_selector_parameters = [self._session_handle,
self._dev_target, match_spec, int(group_handle)]
return self.get_add_entry_with_selector(table_name)(*add_entry_with_selector_parameters)
def add_entry_with_member(self, table_name, match_spec_tuple, member_handle):
match_spec = self.get_match_spec(table_name, match_spec_tuple)
add_entry_with_member_parameters = [self._session_handle,
self._dev_target, match_spec, int(member_handle)]
return self.get_add_entry_with_member(table_name)(*add_entry_with_member_parameters)
def modify_entry(self, table_name, entry_handle, action_name, action_spec_tuple):
modify_entry_parameters = [ self._session_handle, self._dev_target.dev_id, int(entry_handle) ]
if action_spec_tuple is not ():
modify_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_modify_entry_function(table_name, action_name)(*modify_entry_parameters)
def delete_entry(self, table_name, entry_handle):
delete_entry_function_name = "%s%s" % (table_name, ThriftClient.TABLE_DELETE)
return getattr(self._client, delete_entry_function_name)(self._session_handle, self._dev_target.dev_id, int(entry_handle))
def add_member(self, action_profile_name, action_name, action_spec_tuple):
action_spec = self.get_action_spec(action_name, action_spec_tuple)
add_entry_parameters = [self._session_handle, self._dev_target]
if action_spec_tuple != ():
add_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_add_member_function(action_profile_name, action_name)(*add_entry_parameters)
def delete_member(self, action_profile_name, member_handle):
return self.get_delete_member_function(action_profile_name)(self._session_handle, self._dev_target.dev_id, int(member_handle))
def create_group(self, action_profile_name, max_group_size):
return self.get_create_group_function(action_profile_name)(self._session_handle, self._dev_target, int(max_group_size))
def delete_group(self, action_profile_name, group_handle):
return self.get_delete_group_function(action_profile_name)(self._session_handle, self._dev_target.dev_id, group_handle)
def get_first_entry_handle(self, table_name):
first_entry_handle = int(self.get_get_first_entry_handle_function(table_name)(self._session_handle, self._dev_target))
if first_entry_handle < 0:
return "No entry handle found"
else:
return first_entry_handle
def get_next_entry_handles(self, table_name, entry_handle, n):
return self.get_get_next_entry_handles_function(table_name)(self._session_handle, self._dev_target.dev_id, entry_handle, n)
def show_entry(self, table_name, entry_handle):
# 4096 is the max_length for returned string
return self.get_show_entry_function(table_name)(self._session_handle, self._dev_target.dev_id, entry_handle, 4096)
def get_match_spec(self, table_name, match_spec_tuple):
match_spec_class = self.get_spec_class(table_name, ThriftClient.MATCH_SPEC_T)
return self.get_spec_from_spec_tuple(match_spec_class, match_spec_tuple)
def get_action_spec(self, action_name, action_spec_tuple):
action_spec_class = self.get_spec_class(action_name, ThriftClient.ACTION_SPEC_T)
return self.get_spec_from_spec_tuple(action_spec_class, action_spec_tuple)
def get_spec_from_spec_tuple(self, spec_class, spec_string):
thrift_spec = getattr(spec_class, ThriftClient.THRIFT_SPEC)
spec_parameters = []
for i in range(1, len(thrift_spec)):
parameter_type = thrift_spec[i][1]
if parameter_type == thrift.Thrift.TType.STRING:
is_success = False
try:
parameter = self._utils.macAddr_to_string(spec_string[i - 1])
if len(parameter) == 6:
spec_parameters.append(parameter)
is_success = True
except:
pass
if not is_success:
try:
parameter = socket.inet_pton(socket.AF_INET6, spec_string[i - 1])
if len(parameter) == 16:
spec_parameters.append(parameter)
is_success = True
except:
pass
if not is_success:
parameter = spec_string[i - 1]
try:
width, v = parameter.split('w')
width = int(width)
assert(width > 0)
v = int(v, 0)
except:
print "Make sure you prepend the length (in bytes) of the field"
print "A valid input is 8w0x55 for a 64-bit field set to 0x55"
raise ValueError("Cannot parse %s to TType.STRING" % parameter)
array = []
while v > 0:
array.append(v % 256)
v /= 256
width -= 1
if width < 0:
print "Value overflow"
raise ValueError("Cannot parse %s to TType.STRING" % parameter)
while width > 0:
array.append(0)
width -= 1
array.reverse()
parameter = self._utils.bytes_to_string(array)
spec_parameters.append(parameter)
if parameter_type == thrift.Thrift.TType.BYTE:
spec_parameters.append(self._utils.hex_to_byte(spec_string[i - 1]))
if parameter_type == thrift.Thrift.TType.I16:
parameter = int(spec_string[i - 1], 0)
spec_parameters.append(self._utils.hex_to_i16(parameter))
if parameter_type == thrift.Thrift.TType.I32:
is_success = False
try:
spec_parameters.append(self._utils.ipv4Addr_to_i32(spec_string[i - 1]))
is_success = True
except:
pass
if not is_success:
parameter = int(spec_string[i - 1], 0)
try:
spec_parameters.append(self._utils.hex_to_i32(parameter))
except socket.error:
raise ValueError("Cannot parse %s to TType.I32" % spec_string[i - 1])
return spec_class(*spec_parameters)
def get_table_names(self):
table_names = []
for function in dir(self.p4_client_module):
regex = '^(?P<table_name>\S+)%s' % (ThriftClient.SET_DEFAULT_ACTION)
m = re.search(regex, function)
if m is not None and m.group("table_name") not in table_names:
table_names.append(m.group("table_name"))
return table_names
def get_action_names(self, parent_object_name):
action_names = []
for function in dir(self._client):
regex = '^%s%s(?P<action_name>\S+)' % (parent_object_name, ThriftClient.TABLE_ADD_WITH)
m = re.search(regex, function)
if m is not None:
action_names.append(m.group("action_name"))
else:
regex = '^%s%s(?P<action_name>\S+)' % (parent_object_name, ThriftClient.ADD_MEMBER_WITH)
m = re.search(regex, function)
if m is not None:
action_names.append(m.group("action_name"))
return action_names
def get_match_data_names(self, table_name):
match_spec_class = self.get_spec_class(table_name, ThriftClient.MATCH_SPEC_T)
return [ x[2] for x in match_spec_class.thrift_spec[1:] ]
def get_action_data_names(self, action_name):
action_spec_class = self.get_spec_class(action_name, ThriftClient.ACTION_SPEC_T)
return [ x[2] for x in action_spec_class.thrift_spec[1:] ]
def get_add_entry_function(self, table_name, action_name):
add_entry_function_name = "%s%s%s" % (table_name, ThriftClient.TABLE_ADD_WITH, action_name)
return getattr(self._client, add_entry_function_name)
def get_set_default_action_function(self, table_name, action_name):
add_entry_function_name = "%s%s%s" % (table_name, ThriftClient.SET_DEFAULT_ACTION, action_name)
return getattr(self._client, add_entry_function_name)
def get_modify_entry_function(self, table_name, action_name):
modify_entry_function_name = "%s%s%s" % (table_name, ThriftClient.TABLE_MODIFY_WITH, action_name)
return getattr(self._client, modify_entry_function_name)
def get_get_first_entry_handle_function(self, table_name):
get_first_entry_handle_function_name = "%s%s" % (table_name, ThriftClient.GET_FIRST_ENTRY_HANDLE)
return getattr(self._client, get_first_entry_handle_function_name)
def get_get_next_entry_handles_function(self, table_name):
get_next_entry_handles_function_name = "%s%s" % (table_name, ThriftClient.GET_NEXT_ENTRY_HANDLES)
return getattr(self._client, get_next_entry_handles_function_name)
def get_show_entry_function(self, table_name):
show_entry_function_name = "%s%s" % (table_name, ThriftClient.GET_ENTRY)
return getattr(self._client, show_entry_function_name)
def get_add_member_function(self, action_profile_name, action_name):
add_member_function_name = "%s%s%s" % (action_profile_name, ThriftClient.ADD_MEMBER_WITH, action_name)
return getattr(self._client, add_member_function_name)
def get_modify_member_function(self, action_profile_name, action_name):
modify_member_function_name = "%s%s%s" % (action_profile_name, ThriftClient.MODIFY_MEMBER_WITH, action_name)
return getattr(self._client, modify_member_function_name)
def get_delete_member_function(self, action_profile_name):
delete_member_function_name = "%s%s" % (action_profile_name, ThriftClient.DEL_MEMBER)
return getattr(self._client, delete_member_function_name)
def get_create_group_function(self, action_profile_name):
create_group_function_name = "%s%s" % (action_profile_name, ThriftClient.CREATE_GROUP)
return getattr(self._client, create_group_function_name)
def get_delete_group_function(self, action_profile_name):
delete_group_function_name = "%s%s" % (action_profile_name, ThriftClient.DEL_GROUP)
return getattr(self._client, delete_group_function_name)
# Multicast api
def mc_mgrp_create(self, mgid):
return self._mc.mc_mgrp_create(self._session_handle, self._dev_target.dev_id, mgid)
def mc_node_create(self, rid, port_map, lag_map):
return self._mc.mc_node_create(self._session_handle, self._dev_target.dev_id, rid, port_map, lag_map)
def mc_node_update(self, l1_hdl, port_map, lag_map):
return self._mc.mc_node_update(self._session_handle, self._dev_target.dev_id, port_map, lag_map)
def mc_mgrp_destroy(self, mgrp_hdl):
return self._mc.mc_mgrp_destroy(self._session_handle, self._dev_target.dev_id, mgrp_hdl)
def mc_node_destroy(self, l1_hdl):
return self._mc.mc_node_destroy(self._session_handle, self._dev_target.dev_id, l1_hdl)
def mc_associate_node(self, grp_hdl, l1_hdl):
return self._mc.mc_associate_node(self._session_handle, self._dev_target.dev_id, grp_hdl, l1_hdl)
def mc_dissociate_node(self, grp_hdl, l1_hdl):
return self._mc.mc_dissociate_node(self._session_handle, self._dev_target.dev_id, grp_hdl, l1_hdl) | unknown | codeparrot/codeparrot-clean | ||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import sys
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import db
from nova import exception
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import timeutils
from nova import servicegroup
LOG = logging.getLogger(__name__)
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='nova.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an instance'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
def handle_schedule_error(context, ex, instance_uuid, request_spec):
    """Record a scheduling failure against an instance.

    Logs the exception (NoValidHost is an expected outcome and is not
    logged as an exception), records an instance fault, moves the
    instance to vm_state ERROR, and emits both an instance-update
    notification and a 'scheduler.run_instance' error notification.
    """
    if not isinstance(ex, exception.NoValidHost):
        LOG.exception(_("Exception during scheduler.run_instance"))
    compute_utils.add_instance_fault_from_exc(context,
            instance_uuid, ex, sys.exc_info())
    state = vm_states.ERROR.upper()
    LOG.warning(_('Setting instance to %(state)s state.'),
                locals(), instance_uuid=instance_uuid)
    # update instance state and notify on the transition
    (old_ref, new_ref) = db.instance_update_and_get_original(context,
            instance_uuid, {'vm_state': vm_states.ERROR,
                            'task_state': None})
    notifications.send_update(context, old_ref, new_ref,
            service="scheduler")
    # Also emit a scheduler-specific error notification carrying the
    # original request spec and the failure reason.
    properties = request_spec.get('instance_properties', {})
    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_states.ERROR,
                   method='run_instance',
                   reason=ex)
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance', notifier.ERROR, payload)
def instance_update_db(context, instance_uuid):
    """Clear the host and node and stamp scheduled_at on an Instance.

    :returns: the updated Instance record.
    """
    return db.instance_update(context, instance_uuid, {
        'host': None,
        'node': None,
        'scheduled_at': timeutils.utcnow(),
    })
def encode_instance(instance, local=True):
    """Encode locally created instance for return via RPC."""
    # TODO(comstud): Returning the full instance info requires
    # datetime-aware json encoding/decoding in the RPC layer. Until a
    # default json handler exists for that, a local instance is reduced
    # to its ID and the caller performs a DB lookup.
    if not local:
        encoded = dict(instance)
        encoded['_is_precooked'] = True
        return encoded
    return {'id': instance['id'], '_is_precooked': False}
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
    def __init__(self):
        """Build the scheduler's collaborators.

        The host manager implementation is configurable via
        CONF.scheduler_host_manager; the compute API/RPC and service
        group clients are fixed.
        """
        self.host_manager = importutils.import_object(
            CONF.scheduler_host_manager)
        self.compute_api = compute_api.API()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.servicegroup_api = servicegroup.API()
    def update_service_capabilities(self, service_name, host, capabilities):
        """Process a capability update from a service node.

        Pure delegation: the host manager owns all capability state.
        """
        self.host_manager.update_service_capabilities(service_name,
            host, capabilities)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
services = db.service_get_all_by_topic(context, topic)
return [service['host']
for service in services
if self.servicegroup_api.service_is_up(service)]
    def schedule_prep_resize(self, context, image, request_spec,
                             filter_properties, instance, instance_type,
                             reservations):
        """Must override schedule_prep_resize method for scheduler to work.

        Abstract hook: concrete scheduler drivers must implement it.
        """
        msg = _("Driver must implement schedule_prep_resize")
        raise NotImplementedError(msg)
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        """Must override schedule_run_instance method for scheduler to work.

        Abstract hook: concrete scheduler drivers must implement it.
        """
        msg = _("Driver must implement schedule_run_instance")
        raise NotImplementedError(msg)
    def schedule_live_migration(self, context, instance, dest,
                                block_migration, disk_over_commit):
        """Live migration scheduling method.

        Runs the source, destination and common pre-checks in order,
        asks the destination compute node to confirm it can accept the
        migration, then kicks off the migration on the source host.

        :param context: security context
        :param instance: instance dict
        :param dest: destination host
        :param block_migration: if true, block_migration.
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.
        """
        # Check we can do live migration: each helper raises on failure,
        # so reaching the RPC calls below implies all checks passed.
        self._live_migration_src_check(context, instance)
        self._live_migration_dest_check(context, instance, dest)
        self._live_migration_common_check(context, instance, dest)
        migrate_data = self.compute_rpcapi.check_can_live_migrate_destination(
            context, instance, dest, block_migration, disk_over_commit)
        # Perform migration: the request is sent to the *source* host.
        src = instance['host']
        self.compute_rpcapi.live_migration(context, host=src,
            instance=instance, dest=dest,
            block_migration=block_migration,
            migrate_data=migrate_data)
    def _live_migration_src_check(self, context, instance_ref):
        """Live migration check routine (for src host).

        Verifies the instance is RUNNING and that its source host has a
        live compute service; raises on any failure.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        """
        # TODO(johngar) why is this not in the API layer?
        # Checking instance is running.
        if instance_ref['power_state'] != power_state.RUNNING:
            raise exception.InstanceNotRunning(
                instance_id=instance_ref['uuid'])
        # Checking src host exists and compute node
        src = instance_ref['host']
        try:
            services = db.service_get_all_compute_by_host(context, src)
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=src)
        # Checking src host is alive (first compute service record).
        if not self.servicegroup_api.service_is_up(services[0]):
            raise exception.ComputeServiceUnavailable(host=src)
    def _live_migration_dest_check(self, context, instance_ref, dest):
        """Live migration check routine (for destination host).

        Verifies the destination has a live compute service, differs
        from the source host, and has enough free memory; raises on any
        failure.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        """
        # Checking dest exists and compute node.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]
        # Checking dest host is alive.
        if not self.servicegroup_api.service_is_up(dservice_ref):
            raise exception.ComputeServiceUnavailable(host=dest)
        # Checking whether the host where the instance is running
        # and dest are not the same.
        src = instance_ref['host']
        if dest == src:
            raise exception.UnableToMigrateToSelf(
                instance_id=instance_ref['uuid'], host=dest)
        # Check memory requirements
        self._assert_compute_node_has_enough_memory(context,
            instance_ref, dest)
    def _live_migration_common_check(self, context, instance_ref, dest):
        """Live migration common check routine.

        The following checks are based on
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        Verifies that source and destination run the same hypervisor
        type and that the destination's hypervisor version is not older
        than the source's; raises on mismatch.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        """
        dservice_ref = self._get_compute_info(context, dest)
        src = instance_ref['host']
        oservice_ref = self._get_compute_info(context, src)
        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()
        # Checking hypervisor version: migrating to an older hypervisor
        # is rejected (newer or equal is allowed).
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()
    def _assert_compute_node_has_enough_memory(self, context,
                                               instance_ref, dest):
        """Checks if destination host has enough memory for live migration.

        Raises MigrationError when the destination's free memory (total
        minus the sum of all resident instances' memory_mb) is not
        strictly greater than the instance's memory_mb.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        """
        # Getting total available memory of host
        avail = self._get_compute_info(context, dest)['memory_mb']
        # Getting total used memory and disk of host
        # It should be sum of memories that are assigned as max value,
        # because overcommitting is risky.
        instance_refs = db.instance_get_all_by_host(context, dest)
        used = sum([i['memory_mb'] for i in instance_refs])
        mem_inst = instance_ref['memory_mb']
        avail = avail - used
        # NOTE: '<=' means an exact fit is also rejected; a falsy
        # (0/None) mem_inst is treated as an error as well.
        if not mem_inst or avail <= mem_inst:
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of memory(host:%(avail)s <= "
                       "instance:%(mem_inst)s)")
            raise exception.MigrationError(reason=reason % locals())
    def _get_compute_info(self, context, host):
        """Return the compute_node record for the given host.

        (The previous docstring documented a nonexistent 'key'
        parameter; this method returns the whole record.)

        :param context: security context
        :param host: hostname (must be a compute node)
        :return: the first compute_node entry of the host's first
                 compute service record
        """
        compute_node_ref = db.service_get_all_compute_by_host(context, host)
        return compute_node_ref[0]['compute_node'][0]
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.support;
import java.util.Locale;
import org.jspecify.annotations.Nullable;
import org.springframework.context.MessageSource;
import org.springframework.context.MessageSourceResolvable;
import org.springframework.context.NoSuchMessageException;
import org.springframework.context.i18n.LocaleContextHolder;
/**
* Helper class for easy access to messages from a MessageSource,
* providing various overloaded getMessage methods.
*
* <p>Available from ApplicationObjectSupport, but also reusable
* as a standalone helper to delegate to in application objects.
*
* @author Juergen Hoeller
* @since 23.10.2003
* @see ApplicationObjectSupport#getMessageSourceAccessor
*/
public class MessageSourceAccessor {
private final MessageSource messageSource;
private final @Nullable Locale defaultLocale;
/**
* Create a new MessageSourceAccessor, using LocaleContextHolder's locale
* as default locale.
* @param messageSource the MessageSource to wrap
* @see org.springframework.context.i18n.LocaleContextHolder#getLocale()
*/
public MessageSourceAccessor(MessageSource messageSource) {
this.messageSource = messageSource;
this.defaultLocale = null;
}
/**
* Create a new MessageSourceAccessor, using the given default locale.
* @param messageSource the MessageSource to wrap
* @param defaultLocale the default locale to use for message access
*/
public MessageSourceAccessor(MessageSource messageSource, Locale defaultLocale) {
this.messageSource = messageSource;
this.defaultLocale = defaultLocale;
}
/**
* Return the default locale to use if no explicit locale has been given.
* <p>The default implementation returns the default locale passed into the
* corresponding constructor, or LocaleContextHolder's locale as fallback.
* Can be overridden in subclasses.
* @see #MessageSourceAccessor(org.springframework.context.MessageSource, java.util.Locale)
* @see org.springframework.context.i18n.LocaleContextHolder#getLocale()
*/
protected Locale getDefaultLocale() {
return (this.defaultLocale != null ? this.defaultLocale : LocaleContextHolder.getLocale());
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @param defaultMessage the String to return if the lookup fails
* @return the message
*/
public String getMessage(String code, String defaultMessage) {
String msg = this.messageSource.getMessage(code, null, defaultMessage, getDefaultLocale());
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param defaultMessage the String to return if the lookup fails
* @param locale the Locale in which to do lookup
* @return the message
*/
public String getMessage(String code, String defaultMessage, Locale locale) {
String msg = this.messageSource.getMessage(code, null, defaultMessage, locale);
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @param defaultMessage the String to return if the lookup fails
* @return the message
*/
public String getMessage(String code, Object @Nullable [] args, String defaultMessage) {
String msg = this.messageSource.getMessage(code, args, defaultMessage, getDefaultLocale());
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @param defaultMessage the String to return if the lookup fails
* @param locale the Locale in which to do lookup
* @return the message
*/
public String getMessage(String code, Object @Nullable [] args, String defaultMessage, Locale locale) {
String msg = this.messageSource.getMessage(code, args, defaultMessage, locale);
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code) throws NoSuchMessageException {
return this.messageSource.getMessage(code, null, getDefaultLocale());
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param locale the Locale in which to do lookup
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code, Locale locale) throws NoSuchMessageException {
return this.messageSource.getMessage(code, null, locale);
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code, Object @Nullable [] args) throws NoSuchMessageException {
	// Resolve in the default locale; NoSuchMessageException propagates to the caller.
	Locale locale = getDefaultLocale();
	return this.messageSource.getMessage(code, args, locale);
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @param locale the Locale in which to do lookup
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code, Object @Nullable [] args, Locale locale) throws NoSuchMessageException {
	// Direct delegation to the wrapped MessageSource.
	String message = this.messageSource.getMessage(code, args, locale);
	return message;
}
/**
* Retrieve the given MessageSourceResolvable (for example, an ObjectError instance)
* in the default Locale.
* @param resolvable the MessageSourceResolvable
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(MessageSourceResolvable resolvable) throws NoSuchMessageException {
	// Resolve the resolvable in the default locale.
	Locale locale = getDefaultLocale();
	return this.messageSource.getMessage(resolvable, locale);
}
/**
* Retrieve the given MessageSourceResolvable (for example, an ObjectError instance)
* in the given Locale.
* @param resolvable the MessageSourceResolvable
* @param locale the Locale in which to do lookup
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(MessageSourceResolvable resolvable, Locale locale) throws NoSuchMessageException {
	// Direct delegation to the wrapped MessageSource.
	String message = this.messageSource.getMessage(resolvable, locale);
	return message;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/context/support/MessageSourceAccessor.java |
from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
def replace(text):
    """Open the per-root singleton replace dialog for the given text widget."""
    root = text._root()
    engine = SearchEngine.get(root)
    # Lazily create one dialog per search engine (i.e. per Tk root).
    if not hasattr(engine, "_replacedialog"):
        engine._replacedialog = ReplaceDialog(root, engine)
    engine._replacedialog.open(text)
class ReplaceDialog(SearchDialogBase):
    """Search-and-replace dialog built on SearchDialogBase.

    Adds a "Replace with:" entry plus Find / Replace / Replace+Find /
    Replace All buttons on top of the base search dialog.
    """

    title = "Replace Dialog"
    icon = "Replace"

    def __init__(self, root, engine):
        SearchDialogBase.__init__(self, root, engine)
        # Replacement template; may use \1-style group references.
        self.replvar = StringVar(root)

    def open(self, text):
        """Show the dialog for `text`, seeding the hit from the selection."""
        SearchDialogBase.open(self, text)
        try:
            first = text.index("sel.first")
        except TclError:
            first = None
        try:
            last = text.index("sel.last")
        except TclError:
            last = None
        # Fall back to the insert cursor when there is no selection.
        first = first or text.index("insert")
        last = last or first
        self.show_hit(first, last)
        self.ok = 1

    def create_entries(self):
        SearchDialogBase.create_entries(self)
        self.replent = self.make_entry("Replace with:", self.replvar)

    def create_command_buttons(self):
        SearchDialogBase.create_command_buttons(self)
        self.make_button("Find", self.find_it)
        self.make_button("Replace", self.replace_it)
        self.make_button("Replace+Find", self.default_command, 1)
        self.make_button("Replace All", self.replace_all)

    def find_it(self, event=None):
        self.do_find(0)

    def replace_it(self, event=None):
        """Replace the current hit, if the pattern still matches it."""
        if self.do_find(self.ok):
            self.do_replace()

    def default_command(self, event=None):
        """Replace the current hit, then advance to the next match."""
        if self.do_find(self.ok):
            self.do_replace()
            self.do_find(0)

    def replace_all(self, event=None):
        """Replace every occurrence in the text, top to bottom."""
        prog = self.engine.getprog()
        if not prog:
            return
        repl = self.replvar.get()
        text = self.text
        res = self.engine.search_text(text, prog)
        if not res:
            text.bell()
            return
        text.tag_remove("sel", "1.0", "end")
        text.tag_remove("hit", "1.0", "end")
        line = res[0]
        col = res[1].start()
        if self.engine.iswrap():
            # Wrapped search: restart from the top of the buffer.
            line = 1
            col = 0
        ok = 1
        first = last = None
        # XXX ought to replace circular instead of top-to-bottom when wrapping
        text.undo_block_start()
        while 1:
            res = self.engine.search_forward(text, prog, line, col, 0, ok)
            if not res:
                break
            line, m = res
            orig = m.group()
            new = m.expand(repl)
            i, j = m.span()
            first = "%d.%d" % (line, i)
            last = "%d.%d" % (line, j)
            if new == orig:
                # Nothing would change; just move past the match.
                text.mark_set("insert", last)
            else:
                text.mark_set("insert", first)
                if first != last:
                    text.delete(first, last)
                if new:
                    text.insert(first, new)
            # Resume after the replacement; ok=0 forbids a zero-length match
            # at the same position, preventing an infinite loop.
            col = i + len(new)
            ok = 0
        text.undo_block_stop()
        if first and last:
            self.show_hit(first, last)
        self.close()

    def do_find(self, ok=0):
        """Search for the next match and highlight it; return success."""
        if not self.engine.getprog():
            return False
        text = self.text
        res = self.engine.search_text(text, None, ok)
        if not res:
            text.bell()
            return False
        line, m = res
        i, j = m.span()
        first = "%d.%d" % (line, i)
        last = "%d.%d" % (line, j)
        self.show_hit(first, last)
        self.ok = 1
        return True

    def do_replace(self):
        """Replace the selected (or current) match; return success."""
        prog = self.engine.getprog()
        if not prog:
            return False
        text = self.text
        try:
            first = pos = text.index("sel.first")
            last = text.index("sel.last")
        except TclError:
            pos = None
        if not pos:
            first = last = pos = text.index("insert")
        line, col = SearchEngine.get_line_col(pos)
        chars = text.get("%d.0" % line, "%d.0" % (line+1))
        m = prog.match(chars, col)
        # Bug fix: this previously re-tested `prog` (always truthy here), so a
        # failed match fell through and crashed on `m.expand`.  Test the match
        # object instead.
        if not m:
            return False
        new = m.expand(self.replvar.get())
        text.mark_set("insert", first)
        text.undo_block_start()
        if m.group():
            text.delete(first, last)
        if new:
            text.insert(first, new)
        text.undo_block_stop()
        self.show_hit(first, text.index("insert"))
        self.ok = 0
        return True

    def show_hit(self, first, last):
        """Select and tag the span [first, last) as the current hit."""
        text = self.text
        text.mark_set("insert", first)
        text.tag_remove("sel", "1.0", "end")
        text.tag_add("sel", first, last)
        text.tag_remove("hit", "1.0", "end")
        if first == last:
            # Zero-width match: tag the single position so it stays visible.
            text.tag_add("hit", first)
        else:
            text.tag_add("hit", first, last)
        text.see("insert")
        text.update_idletasks()

    def close(self, event=None):
        SearchDialogBase.close(self, event)
        self.text.tag_remove("hit", "1.0", "end")
import {
__String,
arrayFrom,
arrayToMultiMap,
Block,
BreakOrContinueStatement,
CancellationToken,
canHaveSymbol,
CaseClause,
cast,
concatenate,
ConstructorDeclaration,
contains,
createGetCanonicalFileName,
createTextSpanFromBounds,
createTextSpanFromNode,
Debug,
DefaultClause,
find,
FindAllReferences,
findAncestor,
findChildOfKind,
findModifier,
forEach,
forEachChild,
forEachReturnStatement,
FunctionDeclaration,
FunctionLikeDeclaration,
getContainingFunction,
getTouchingPropertyName,
HighlightSpan,
HighlightSpanKind,
IfStatement,
isAccessor,
isAwaitExpression,
isBlock,
isBreakOrContinueStatement,
isCaseClause,
isClassDeclaration,
isClassLike,
isConstructorDeclaration,
isDeclaration,
isDefaultClause,
isFunctionBlock,
isFunctionLike,
isIfStatement,
isInterfaceDeclaration,
isIterationStatement,
isJsxClosingElement,
isJsxOpeningElement,
isLabeledStatement,
isModifierKind,
isModuleDeclaration,
isReturnStatement,
isSwitchStatement,
isThrowStatement,
isTryStatement,
isTypeAliasDeclaration,
isTypeNode,
isVariableStatement,
isWhiteSpaceSingleLine,
isYieldExpression,
IterationStatement,
mapDefined,
mapDefinedIterator,
MethodDeclaration,
Modifier,
ModifierFlags,
modifierToFlag,
ModuleBlock,
Node,
ObjectTypeDeclaration,
Program,
ReturnStatement,
SourceFile,
SwitchStatement,
SyntaxKind,
ThrowStatement,
toArray,
toPath,
tryCast,
TryStatement,
} from "./_namespaces/ts.js";
/** A file name together with the highlight spans computed for that file. */
export interface DocumentHighlights {
    fileName: string;
    highlightSpans: HighlightSpan[];
}
/** @internal */
export namespace DocumentHighlights {
/**
 * Entry point: computes highlight spans for the token at `position`.
 * JSX tags get special pairwise handling; otherwise semantic (reference-based)
 * highlighting is tried first, falling back to syntactic keyword highlighting.
 */
export function getDocumentHighlights(program: Program, cancellationToken: CancellationToken, sourceFile: SourceFile, position: number, sourceFilesToSearch: readonly SourceFile[]): DocumentHighlights[] | undefined {
    const node = getTouchingPropertyName(sourceFile, position);
    if (node.parent && (isJsxOpeningElement(node.parent) && node.parent.tagName === node || isJsxClosingElement(node.parent))) {
        // For a JSX element, just highlight the matching tag, not all references.
        const { openingElement, closingElement } = node.parent.parent;
        const highlightSpans = [openingElement, closingElement].map(({ tagName }) => getHighlightSpanForNode(tagName, sourceFile));
        return [{ fileName: sourceFile.fileName, highlightSpans }];
    }
    return getSemanticDocumentHighlights(position, node, program, cancellationToken, sourceFilesToSearch) || getSyntacticDocumentHighlights(node, sourceFile);
}
/** Wraps `node`'s text span in a HighlightSpan of kind `none` for `sourceFile`. */
function getHighlightSpanForNode(node: Node, sourceFile: SourceFile): HighlightSpan {
    return {
        fileName: sourceFile.fileName,
        textSpan: createTextSpanFromNode(node, sourceFile),
        kind: HighlightSpanKind.none,
    };
}
/**
 * Computes highlights by running find-all-references on the node and grouping
 * the resulting spans per file, restricted to `sourceFilesToSearch`.
 */
function getSemanticDocumentHighlights(position: number, node: Node, program: Program, cancellationToken: CancellationToken, sourceFilesToSearch: readonly SourceFile[]): DocumentHighlights[] | undefined {
    const sourceFilesSet = new Set(sourceFilesToSearch.map(f => f.fileName));
    const referenceEntries = FindAllReferences.getReferenceEntriesForNode(position, node, program, sourceFilesToSearch, cancellationToken, /*options*/ undefined, sourceFilesSet);
    if (!referenceEntries) return undefined;
    // Group the individual reference spans by the file they occur in.
    const map = arrayToMultiMap(referenceEntries.map(FindAllReferences.toHighlightSpan), e => e.fileName, e => e.span);
    const getCanonicalFileName = createGetCanonicalFileName(program.useCaseSensitiveFileNames());
    return arrayFrom(mapDefinedIterator(map.entries(), ([fileName, highlightSpans]) => {
        if (!sourceFilesSet.has(fileName)) {
            // A reference may land in a redirect file (a duplicate package copy);
            // map it back onto the file that was actually requested.
            if (!program.redirectTargetsMap.has(toPath(fileName, program.getCurrentDirectory(), getCanonicalFileName))) {
                return undefined;
            }
            const redirectTarget = program.getSourceFile(fileName);
            const redirect = find(sourceFilesToSearch, f => !!f.redirectInfo && f.redirectInfo.redirectTarget === redirectTarget)!;
            fileName = redirect.fileName;
            Debug.assert(sourceFilesSet.has(fileName));
        }
        return { fileName, highlightSpans };
    }));
}
function getSyntacticDocumentHighlights(node: Node, sourceFile: SourceFile): DocumentHighlights[] | undefined {
const highlightSpans = getHighlightSpans(node, sourceFile);
return highlightSpans && [{ fileName: sourceFile.fileName, highlightSpans }];
}
/**
 * Dispatches on the keyword kind under the cursor and collects the related
 * keyword occurrences (if/else, return/throw, try/catch/finally, switch/case,
 * break/continue, loops, accessors, async/await, yield, modifiers).
 */
function getHighlightSpans(node: Node, sourceFile: SourceFile): HighlightSpan[] | undefined {
    switch (node.kind) {
        case SyntaxKind.IfKeyword:
        case SyntaxKind.ElseKeyword:
            return isIfStatement(node.parent) ? getIfElseOccurrences(node.parent, sourceFile) : undefined;
        case SyntaxKind.ReturnKeyword:
            return useParent(node.parent, isReturnStatement, getReturnOccurrences);
        case SyntaxKind.ThrowKeyword:
            return useParent(node.parent, isThrowStatement, getThrowOccurrences);
        case SyntaxKind.TryKeyword:
        case SyntaxKind.CatchKeyword:
        case SyntaxKind.FinallyKeyword:
            // The catch keyword hangs off the CatchClause, one level below the TryStatement.
            const tryStatement = node.kind === SyntaxKind.CatchKeyword ? node.parent.parent : node.parent;
            return useParent(tryStatement, isTryStatement, getTryCatchFinallyOccurrences);
        case SyntaxKind.SwitchKeyword:
            return useParent(node.parent, isSwitchStatement, getSwitchCaseDefaultOccurrences);
        case SyntaxKind.CaseKeyword:
        case SyntaxKind.DefaultKeyword: {
            if (isDefaultClause(node.parent) || isCaseClause(node.parent)) {
                // clause -> case block -> switch statement
                return useParent(node.parent.parent.parent, isSwitchStatement, getSwitchCaseDefaultOccurrences);
            }
            return undefined;
        }
        case SyntaxKind.BreakKeyword:
        case SyntaxKind.ContinueKeyword:
            return useParent(node.parent, isBreakOrContinueStatement, getBreakOrContinueStatementOccurrences);
        case SyntaxKind.ForKeyword:
        case SyntaxKind.WhileKeyword:
        case SyntaxKind.DoKeyword:
            return useParent(node.parent, (n): n is IterationStatement => isIterationStatement(n, /*lookInLabeledStatements*/ true), getLoopBreakContinueOccurrences);
        case SyntaxKind.ConstructorKeyword:
            return getFromAllDeclarations(isConstructorDeclaration, [SyntaxKind.ConstructorKeyword]);
        case SyntaxKind.GetKeyword:
        case SyntaxKind.SetKeyword:
            return getFromAllDeclarations(isAccessor, [SyntaxKind.GetKeyword, SyntaxKind.SetKeyword]);
        case SyntaxKind.AwaitKeyword:
            return useParent(node.parent, isAwaitExpression, getAsyncAndAwaitOccurrences);
        case SyntaxKind.AsyncKeyword:
            return highlightSpans(getAsyncAndAwaitOccurrences(node));
        case SyntaxKind.YieldKeyword:
            return highlightSpans(getYieldOccurrences(node));
        case SyntaxKind.InKeyword:
        case SyntaxKind.OutKeyword:
            // Variance annotations are not highlighted.
            return undefined;
        default:
            return isModifierKind(node.kind) && (isDeclaration(node.parent) || isVariableStatement(node.parent))
                ? highlightSpans(getModifierOccurrences(node.kind, node.parent))
                : undefined;
    }
    // Collects matching keywords across every declaration of the parent's symbol
    // (e.g. all get/set accessors of one property).
    function getFromAllDeclarations<T extends Node>(nodeTest: (node: Node) => node is T, keywords: readonly SyntaxKind[]): HighlightSpan[] | undefined {
        return useParent(node.parent, nodeTest, decl => mapDefined(tryCast(decl, canHaveSymbol)?.symbol.declarations, d => nodeTest(d) ? find(d.getChildren(sourceFile), c => contains(keywords, c.kind)) : undefined));
    }
    // Runs `getNodes` only when `node` passes `nodeTest`, then maps to spans.
    function useParent<T extends Node>(node: Node, nodeTest: (node: Node) => node is T, getNodes: (node: T, sourceFile: SourceFile) => readonly Node[] | undefined): HighlightSpan[] | undefined {
        return nodeTest(node) ? highlightSpans(getNodes(node, sourceFile)) : undefined;
    }
    function highlightSpans(nodes: readonly Node[] | undefined): HighlightSpan[] | undefined {
        return nodes && nodes.map(node => getHighlightSpanForNode(node, sourceFile));
    }
}
/**
* Aggregates all throw-statements within this node *without* crossing
* into function boundaries and try-blocks with catch-clauses.
*/
function aggregateOwnedThrowStatements(node: Node): readonly ThrowStatement[] | undefined {
    if (isThrowStatement(node)) {
        return [node];
    }
    else if (isTryStatement(node)) {
        // Exceptions thrown within a try block lacking a catch clause are "owned" in the current context.
        // With a catch clause, the try block's throws are owned by the catch, so only the
        // catch clause and finally block are traversed.
        return concatenate(
            node.catchClause ? aggregateOwnedThrowStatements(node.catchClause) : node.tryBlock && aggregateOwnedThrowStatements(node.tryBlock),
            node.finallyBlock && aggregateOwnedThrowStatements(node.finallyBlock),
        );
    }
    // Do not cross function boundaries.
    return isFunctionLike(node) ? undefined : flatMapChildren(node, aggregateOwnedThrowStatements);
}
/**
* For lack of a better name, this function takes a throw statement and returns the
* nearest ancestor that is a try-block (whose try statement has a catch clause),
* function-block, or source file.
*/
function getThrowStatementOwner(throwStatement: ThrowStatement): Node | undefined {
    let child: Node = throwStatement;
    while (child.parent) {
        const parent = child.parent;
        if (isFunctionBlock(parent) || parent.kind === SyntaxKind.SourceFile) {
            return parent;
        }
        // A throw-statement is only owned by a try-statement if the try-statement has
        // a catch clause, and if the throw-statement occurs within the try block.
        if (isTryStatement(parent) && parent.tryBlock === child && parent.catchClause) {
            // Note: the owner is the try *block* (child), not the try statement.
            return child;
        }
        child = parent;
    }
    return undefined;
}
/** Collects every break/continue statement under `node`, without crossing function boundaries. */
function aggregateAllBreakAndContinueStatements(node: Node): readonly BreakOrContinueStatement[] | undefined {
    if (isBreakOrContinueStatement(node)) {
        return [node];
    }
    if (isFunctionLike(node)) {
        return undefined;
    }
    return flatMapChildren(node, aggregateAllBreakAndContinueStatements);
}
/** Maps `cb` over the node's children and flattens all defined results into one array. */
function flatMapChildren<T>(node: Node, cb: (child: Node) => readonly T[] | T | undefined): readonly T[] {
    const collected: T[] = [];
    node.forEachChild(child => {
        const produced = cb(child);
        if (produced !== undefined) {
            for (const item of toArray(produced)) {
                collected.push(item);
            }
        }
    });
    return collected;
}
/** Whether `owner` is the construct the given break/continue statement targets. */
function ownsBreakOrContinueStatement(owner: Node, statement: BreakOrContinueStatement): boolean {
    const target = getBreakOrContinueOwner(statement);
    return target !== undefined && target === owner;
}
/**
 * Walks up from a break/continue statement to the loop or switch it targets,
 * honoring labels and stopping at function boundaries.
 */
function getBreakOrContinueOwner(statement: BreakOrContinueStatement): Node | undefined {
    return findAncestor(statement, node => {
        switch (node.kind) {
            case SyntaxKind.SwitchStatement:
                // `continue` cannot target a switch; only `break` can.
                if (statement.kind === SyntaxKind.ContinueStatement) {
                    return false;
                }
                // falls through
            case SyntaxKind.ForStatement:
            case SyntaxKind.ForInStatement:
            case SyntaxKind.ForOfStatement:
            case SyntaxKind.WhileStatement:
            case SyntaxKind.DoStatement:
                // An unlabeled statement targets the nearest enclosing construct;
                // a labeled one targets the construct carrying that label.
                return !statement.label || isLabeledBy(node, statement.label.escapedText);
            default:
                // Don't cross function boundaries.
                // TODO: GH#20090
                return isFunctionLike(node) && "quit";
        }
    });
}
/** Finds each occurrence of `modifier` across the declarations sharing this declaration's modifier scope. */
function getModifierOccurrences(modifier: Modifier["kind"], declaration: Node): Node[] {
    return mapDefined(getNodesToSearchForModifier(declaration, modifierToFlag(modifier)), node => findModifier(node, modifier));
}
/**
 * Returns the set of sibling declarations whose modifiers should be searched
 * when highlighting a modifier keyword, based on the declaration's container.
 */
function getNodesToSearchForModifier(declaration: Node, modifierFlag: ModifierFlags): readonly Node[] | undefined {
    // Types of node whose children might have modifiers.
    const container = declaration.parent as ModuleBlock | SourceFile | Block | CaseClause | DefaultClause | ConstructorDeclaration | MethodDeclaration | FunctionDeclaration | ObjectTypeDeclaration;
    switch (container.kind) {
        case SyntaxKind.ModuleBlock:
        case SyntaxKind.SourceFile:
        case SyntaxKind.Block:
        case SyntaxKind.CaseClause:
        case SyntaxKind.DefaultClause:
            // Container is either a class declaration or the declaration is a classDeclaration
            if (modifierFlag & ModifierFlags.Abstract && isClassDeclaration(declaration)) {
                // `abstract` on a class also pairs with `abstract` on its members.
                return [...declaration.members, declaration];
            }
            else {
                return container.statements;
            }
        case SyntaxKind.Constructor:
        case SyntaxKind.MethodDeclaration:
        case SyntaxKind.FunctionDeclaration:
            // Parameter modifiers; for class members, also sibling members.
            return [...container.parameters, ...(isClassLike(container.parent) ? container.parent.members : [])];
        case SyntaxKind.ClassDeclaration:
        case SyntaxKind.ClassExpression:
        case SyntaxKind.InterfaceDeclaration:
        case SyntaxKind.TypeLiteral:
            const nodes = container.members;
            // If we're an accessibility modifier, we're in an instance member and should search
            // the constructor's parameter list for instance members as well.
            if (modifierFlag & (ModifierFlags.AccessibilityModifier | ModifierFlags.Readonly)) {
                const constructor = find(container.members, isConstructorDeclaration);
                if (constructor) {
                    return [...nodes, ...constructor.parameters];
                }
            }
            else if (modifierFlag & ModifierFlags.Abstract) {
                return [...nodes, container];
            }
            return nodes;
        // Syntactically invalid positions that the parser might produce anyway
        default:
            return undefined;
    }
}
/** Appends `token` to `keywordList` when its kind is one of `expected`; returns whether it was added. */
function pushKeywordIf(keywordList: Node[], token: Node | undefined, ...expected: SyntaxKind[]): boolean {
    if (!token || !contains(expected, token.kind)) {
        return false;
    }
    keywordList.push(token);
    return true;
}
/** Collects the loop keyword(s) plus every break/continue that targets this loop. */
function getLoopBreakContinueOccurrences(loopNode: IterationStatement): Node[] {
    const keywords: Node[] = [];
    if (pushKeywordIf(keywords, loopNode.getFirstToken(), SyntaxKind.ForKeyword, SyntaxKind.WhileKeyword, SyntaxKind.DoKeyword)) {
        // If we succeeded and got a do-while loop, then start looking for a 'while' keyword.
        if (loopNode.kind === SyntaxKind.DoStatement) {
            const loopTokens = loopNode.getChildren();
            // The 'while' of a do-while is near the end, so scan backwards.
            for (let i = loopTokens.length - 1; i >= 0; i--) {
                if (pushKeywordIf(keywords, loopTokens[i], SyntaxKind.WhileKeyword)) {
                    break;
                }
            }
        }
    }
    forEach(aggregateAllBreakAndContinueStatements(loopNode.statement), statement => {
        if (ownsBreakOrContinueStatement(loopNode, statement)) {
            pushKeywordIf(keywords, statement.getFirstToken(), SyntaxKind.BreakKeyword, SyntaxKind.ContinueKeyword);
        }
    });
    return keywords;
}
/** From a break/continue, finds its owning loop or switch and highlights that construct's keywords. */
function getBreakOrContinueStatementOccurrences(breakOrContinueStatement: BreakOrContinueStatement): Node[] | undefined {
    const owner = getBreakOrContinueOwner(breakOrContinueStatement);
    if (owner) {
        switch (owner.kind) {
            case SyntaxKind.ForStatement:
            case SyntaxKind.ForInStatement:
            case SyntaxKind.ForOfStatement:
            case SyntaxKind.DoStatement:
            case SyntaxKind.WhileStatement:
                return getLoopBreakContinueOccurrences(owner as IterationStatement);
            case SyntaxKind.SwitchStatement:
                return getSwitchCaseDefaultOccurrences(owner as SwitchStatement);
        }
    }
    return undefined;
}
/** Collects 'switch', each clause's 'case'/'default', and every 'break' targeting this switch. */
function getSwitchCaseDefaultOccurrences(switchStatement: SwitchStatement): Node[] {
    const keywords: Node[] = [];
    pushKeywordIf(keywords, switchStatement.getFirstToken(), SyntaxKind.SwitchKeyword);
    // Go through each clause in the switch statement, collecting the 'case'/'default' keywords.
    forEach(switchStatement.caseBlock.clauses, clause => {
        pushKeywordIf(keywords, clause.getFirstToken(), SyntaxKind.CaseKeyword, SyntaxKind.DefaultKeyword);
        forEach(aggregateAllBreakAndContinueStatements(clause), statement => {
            if (ownsBreakOrContinueStatement(switchStatement, statement)) {
                pushKeywordIf(keywords, statement.getFirstToken(), SyntaxKind.BreakKeyword);
            }
        });
    });
    return keywords;
}
/** Highlights the try/catch/finally keywords of one try statement. */
function getTryCatchFinallyOccurrences(tryStatement: TryStatement, sourceFile: SourceFile): Node[] {
    const keywords: Node[] = [];
    pushKeywordIf(keywords, tryStatement.getFirstToken(), SyntaxKind.TryKeyword);
    if (tryStatement.catchClause) {
        pushKeywordIf(keywords, tryStatement.catchClause.getFirstToken(), SyntaxKind.CatchKeyword);
    }
    if (tryStatement.finallyBlock) {
        // The 'finally' keyword is a direct child of the try statement, not of the finally block.
        const finallyKeyword = findChildOfKind(tryStatement, SyntaxKind.FinallyKeyword, sourceFile)!;
        pushKeywordIf(keywords, finallyKeyword, SyntaxKind.FinallyKeyword);
    }
    return keywords;
}
/** Highlights every throw (and, inside a function, every return) sharing this throw's owner. */
function getThrowOccurrences(throwStatement: ThrowStatement, sourceFile: SourceFile): Node[] | undefined {
    const owner = getThrowStatementOwner(throwStatement);
    if (!owner) {
        return undefined;
    }
    const keywords: Node[] = [];
    forEach(aggregateOwnedThrowStatements(owner), throwStatement => {
        keywords.push(findChildOfKind(throwStatement, SyntaxKind.ThrowKeyword, sourceFile)!);
    });
    // If the "owner" is a function, then we equate 'return' and 'throw' statements in their
    // ability to "jump out" of the function, and include occurrences for both.
    if (isFunctionBlock(owner)) {
        forEachReturnStatement(owner as Block, returnStatement => {
            keywords.push(findChildOfKind(returnStatement, SyntaxKind.ReturnKeyword, sourceFile)!);
        });
    }
    return keywords;
}
/** Highlights every return in the containing function, plus throws that escape it. */
function getReturnOccurrences(returnStatement: ReturnStatement, sourceFile: SourceFile): Node[] | undefined {
    const func = getContainingFunction(returnStatement) as FunctionLikeDeclaration;
    if (!func) {
        return undefined;
    }
    const keywords: Node[] = [];
    forEachReturnStatement(cast(func.body, isBlock), returnStatement => {
        keywords.push(findChildOfKind(returnStatement, SyntaxKind.ReturnKeyword, sourceFile)!);
    });
    // Include 'throw' statements that do not occur within a try block.
    forEach(aggregateOwnedThrowStatements(func.body!), throwStatement => {
        keywords.push(findChildOfKind(throwStatement, SyntaxKind.ThrowKeyword, sourceFile)!);
    });
    return keywords;
}
/** Highlights the containing function's 'async' modifier together with its 'await's. */
function getAsyncAndAwaitOccurrences(node: Node): Node[] | undefined {
    const func = getContainingFunction(node) as FunctionLikeDeclaration;
    if (!func) {
        return undefined;
    }
    const keywords: Node[] = [];
    if (func.modifiers) {
        func.modifiers.forEach(modifier => {
            pushKeywordIf(keywords, modifier, SyntaxKind.AsyncKeyword);
        });
    }
    // Awaits inside nested functions belong to those functions, so don't cross boundaries.
    forEachChild(func, child => {
        traverseWithoutCrossingFunction(child, node => {
            if (isAwaitExpression(node)) {
                pushKeywordIf(keywords, node.getFirstToken(), SyntaxKind.AwaitKeyword);
            }
        });
    });
    return keywords;
}
/** Highlights every 'yield' in the containing generator, without crossing nested functions. */
function getYieldOccurrences(node: Node): Node[] | undefined {
    const func = getContainingFunction(node) as FunctionDeclaration;
    if (!func) {
        return undefined;
    }
    const keywords: Node[] = [];
    forEachChild(func, child => {
        traverseWithoutCrossingFunction(child, node => {
            if (isYieldExpression(node)) {
                pushKeywordIf(keywords, node.getFirstToken(), SyntaxKind.YieldKeyword);
            }
        });
    });
    return keywords;
}
// Do not cross function/class/interface/module/type boundaries.
/** Visits `node` and recurses into children, stopping at function/class/interface/module/type boundaries. */
function traverseWithoutCrossingFunction(node: Node, cb: (node: Node) => void) {
    cb(node);
    if (!isFunctionLike(node) && !isClassLike(node) && !isInterfaceDeclaration(node) && !isModuleDeclaration(node) && !isTypeAliasDeclaration(node) && !isTypeNode(node)) {
        forEachChild(node, child => traverseWithoutCrossingFunction(child, cb));
    }
}
/** Highlights an if/else-if chain, merging adjacent 'else if' into a single span. */
function getIfElseOccurrences(ifStatement: IfStatement, sourceFile: SourceFile): HighlightSpan[] {
    const keywords = getIfElseKeywords(ifStatement, sourceFile);
    const result: HighlightSpan[] = [];
    // We'd like to highlight else/ifs together if they are only separated by whitespace
    // (i.e. the keywords are separated by no comments, no newlines).
    for (let i = 0; i < keywords.length; i++) {
        if (keywords[i].kind === SyntaxKind.ElseKeyword && i < keywords.length - 1) {
            const elseKeyword = keywords[i];
            const ifKeyword = keywords[i + 1]; // this *should* always be an 'if' keyword.
            let shouldCombineElseAndIf = true;
            // Avoid recalculating getStart() by iterating backwards.
            for (let j = ifKeyword.getStart(sourceFile) - 1; j >= elseKeyword.end; j--) {
                if (!isWhiteSpaceSingleLine(sourceFile.text.charCodeAt(j))) {
                    shouldCombineElseAndIf = false;
                    break;
                }
            }
            if (shouldCombineElseAndIf) {
                result.push({
                    fileName: sourceFile.fileName,
                    textSpan: createTextSpanFromBounds(elseKeyword.getStart(), ifKeyword.end),
                    kind: HighlightSpanKind.reference,
                });
                i++; // skip the next keyword
                continue;
            }
        }
        // Ordinary case: just highlight the keyword.
        result.push(getHighlightSpanForNode(keywords[i], sourceFile));
    }
    return result;
}
/** Gathers the if/else keywords of the entire chain containing `ifStatement`, in source order. */
function getIfElseKeywords(ifStatement: IfStatement, sourceFile: SourceFile): Node[] {
    const keywords: Node[] = [];
    // Traverse upwards through all parent if-statements linked by their else-branches.
    while (isIfStatement(ifStatement.parent) && ifStatement.parent.elseStatement === ifStatement) {
        ifStatement = ifStatement.parent;
    }
    // Now traverse back down through the else branches, aggregating if/else keywords of if-statements.
    while (true) {
        const children = ifStatement.getChildren(sourceFile);
        pushKeywordIf(keywords, children[0], SyntaxKind.IfKeyword);
        // Generally the 'else' keyword is second-to-last, so we traverse backwards.
        for (let i = children.length - 1; i >= 0; i--) {
            if (pushKeywordIf(keywords, children[i], SyntaxKind.ElseKeyword)) {
                break;
            }
        }
        if (!ifStatement.elseStatement || !isIfStatement(ifStatement.elseStatement)) {
            break;
        }
        ifStatement = ifStatement.elseStatement;
    }
    return keywords;
}
/**
* Whether or not a 'node' is preceded by a label of the given string.
* Note: 'node' cannot be a SourceFile.
*/
/**
 * Whether or not a 'node' is preceded by a label of the given string.
 * Note: 'node' cannot be a SourceFile.
 */
function isLabeledBy(node: Node, labelName: __String): boolean {
    const labeled = findAncestor(node.parent, owner => isLabeledStatement(owner) ? owner.label.escapedText === labelName : "quit");
    return !!labeled;
}
} | typescript | github | https://github.com/microsoft/TypeScript | src/services/documentHighlights.ts |
import pygame
from pygame.locals import *
class Block(object):
    """A colored, labelled rectangle that scrolls horizontally on screen."""

    # Class-wide params shared by all blocks.
    height = 60
    # NOTE(review): this runs at import time, so pygame.font must already be
    # initialized when this module is imported -- confirm at the call site.
    font = pygame.font.Font(None, 42)

    # Per-instance params (set in __init__).
    size = None
    pos_x = None
    pos_y = None
    box_text = None
    color = None
    text_color = None
    cleared = False

    def __init__(self, **args):
        """Expects keys: 'size', 'start', 'text', 'color', 'text_color'."""
        self.size = args['size']
        self.pos_x = args['start']
        self.box_text = args['text']
        self.color = args['color']
        self.text_color = args['text_color']

    def move(self, amount):
        """Shift the block left by `amount` pixels."""
        self.pos_x = self.pos_x - amount

    def range(self):
        """Return the horizontal pixel range covered by the block."""
        # Bug fix: `size` was referenced as a bare name (NameError at
        # runtime); the instance attribute is `self.size`.
        return range(self.pos_x, self.pos_x + self.size)

    def right_edge(self):
        """Return the x coordinate of the block's right edge."""
        return self.pos_x + self.size

    def clear(self):
        """Mark the block as cleared and turn it green."""
        self.cleared = True
        self.color = (0, 255, 0)

    def draw(self, surface):
        """Draw the rectangle and its centered label onto `surface`."""
        # Draw the box
        pygame.draw.rect(surface, self.color,
                         (self.pos_x, self.pos_y, self.size, self.height))
        # Draw the text in the box
        text = self.font.render(self.box_text, True, self.text_color)
        textpos = text.get_rect()
        textpos.center = (self.pos_x + self.size / 2,
                          self.pos_y + self.height / 2)
        surface.blit(text, textpos)
# frozen_string_literal: true

# Compatibility shim: only the escape helpers (CGI.escape / CGI.unescape)
# remain available from the default install.
require "cgi/escape"

# Emit the bundled-gems deprecation notice only in verbose mode ($VERBOSE),
# attributed to the caller's require site via uplevel.
warn <<-WARNING, uplevel: Gem::BUNDLED_GEMS.uplevel if $VERBOSE
CGI library is removed from Ruby 4.0. Please use cgi/escape instead for CGI.escape and CGI.unescape features.
If you need to use the full features of CGI library, please add 'gem "cgi"' to your script
or use Bundler to ensure you are using the cgi gem instead of this file.
WARNING
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { Box } from "@chakra-ui/react";
import { forwardRef } from "react";
import type { PropsWithChildren } from "react";
import { ResizableBox } from "react-resizable";
import "react-resizable/css/styles.css";
import { usePersistentResizableState } from "src/utils/usePersistentResizableState";
// Diagonal-striped grab handle rendered in the bottom-right corner.
// react-resizable passes a ref to its handle element, hence forwardRef.
const ResizeHandle = forwardRef<HTMLDivElement>((props, ref) => (
  <Box
    background="linear-gradient(-45deg, transparent 6px, #ccc 6px, #ccc 8px, transparent 8px, transparent 12px, #ccc 12px, #ccc 14px, transparent 14px)"
    bottom={0}
    cursor="se-resize"
    height={5}
    position="absolute"
    ref={ref}
    right={0}
    width={5}
    {...props}
  />
));
// Props: optional initial size and resize upper bound, plus the localStorage
// key under which the user's chosen size is persisted.
type ResizableWrapperProps = {
  readonly defaultSize?: { height: number; width: number };
  readonly maxConstraints?: [width: number, height: number];
  readonly storageKey: string;
} & PropsWithChildren;

// Fallback size and maximum (width, height) bound, in pixels.
const DEFAULT_SIZE = { height: 400, width: 500 };
const MAX_SIZE: [number, number] = [1200, 800];

// Shared storage key used by the markdown dialog instance of this wrapper.
export const MARKDOWN_DIALOG_STORAGE_KEY = "airflow-markdown-dialog-size";
// Resizable container whose size is persisted per `storageKey` (see
// usePersistentResizableState) and restored on remount. Resizing is only
// allowed from the south-east handle, bounded between DEFAULT_SIZE and
// `maxConstraints`.
export const ResizableWrapper = ({
  children,
  defaultSize = DEFAULT_SIZE,
  maxConstraints = MAX_SIZE,
  storageKey,
}: ResizableWrapperProps) => {
  const { handleResize, handleResizeStop, size } = usePersistentResizableState(storageKey, defaultSize);

  return (
    <ResizableBox
      handle={<ResizeHandle />}
      height={size.height}
      maxConstraints={maxConstraints}
      minConstraints={[DEFAULT_SIZE.width, DEFAULT_SIZE.height]}
      onResize={handleResize}
      onResizeStop={handleResizeStop}
      resizeHandles={["se"]}
      style={{
        backgroundColor: "inherit",
        borderRadius: "inherit",
        display: "flex",
        flexDirection: "column",
        overflow: "hidden",
        position: "relative",
      }}
      width={size.width}
    >
      <div>{children}</div>
    </ResizableBox>
  );
};
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http, _
from odoo.addons.website.models.website import slug
from odoo.http import request
class WebsiteHrRecruitment(http.Controller):
    """Public website controllers for the recruitment pages (/jobs)."""

    @http.route([
        '/jobs',
        '/jobs/country/<model("res.country"):country>',
        '/jobs/department/<model("hr.department"):department>',
        '/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>',
        '/jobs/office/<int:office_id>',
        '/jobs/country/<model("res.country"):country>/office/<int:office_id>',
        '/jobs/department/<model("hr.department"):department>/office/<int:office_id>',
        '/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>/office/<int:office_id>',
    ], type='http', auth="public", website=True)
    def jobs(self, country=None, department=None, office_id=None, **kwargs):
        """Render the job list, optionally filtered by country/department/office.

        When no filter is given, defaults to the visitor's GeoIP country if
        at least one published job is located there.
        """
        env = request.env(context=dict(request.env.context, show_address=True, no_tag_br=True))
        Country = env['res.country']
        Jobs = env['hr.job']
        # List jobs available to current UID
        job_ids = Jobs.search([], order="website_published desc,no_of_recruitment desc").ids
        # Browse jobs as superuser, because address is restricted
        jobs = Jobs.sudo().browse(job_ids)

        # Default search by user country
        if not (country or department or office_id or kwargs.get('all_countries')):
            # Robustness fix: 'geoip' may be missing from the session (no GeoIP
            # database configured); the original subscript raised KeyError.
            country_code = request.session.get('geoip', {}).get('country_code')
            if country_code:
                countries_ = Country.search([('code', '=', country_code)])
                country = countries_[0] if countries_ else None
                # Only keep the GeoIP default if at least one job is there.
                if not any(j for j in jobs if j.address_id and j.address_id.country_id == country):
                    country = False

        # Filter job / office for country
        if country and not kwargs.get('all_countries'):
            jobs = [j for j in jobs if j.address_id is None or j.address_id.country_id and j.address_id.country_id.id == country.id]
            offices = set(j.address_id for j in jobs if j.address_id is None or j.address_id.country_id and j.address_id.country_id.id == country.id)
        else:
            offices = set(j.address_id for j in jobs if j.address_id)

        # Deduce departments and countries offices of those jobs
        departments = set(j.department_id for j in jobs if j.department_id)
        countries = set(o.country_id for o in offices if o.country_id)

        if department:
            jobs = (j for j in jobs if j.department_id and j.department_id.id == department.id)
        if office_id and office_id in map(lambda x: x.id, offices):
            jobs = (j for j in jobs if j.address_id and j.address_id.id == office_id)
        else:
            # Ignore an office filter that does not match any visible office.
            office_id = False

        # Render page
        return request.render("website_hr_recruitment.index", {
            'jobs': jobs,
            'countries': countries,
            'departments': departments,
            'offices': offices,
            'country_id': country,
            'department_id': department,
            'office_id': office_id,
        })

    @http.route('/jobs/add', type='http', auth="user", website=True)
    def jobs_add(self, **kwargs):
        """Create a placeholder job and open its page in edit mode."""
        job = request.env['hr.job'].create({
            'name': _('Job Title'),
        })
        return request.redirect("/jobs/detail/%s?enable_editor=1" % slug(job))

    @http.route('/jobs/detail/<model("hr.job"):job>', type='http', auth="public", website=True)
    def jobs_detail(self, job, **kwargs):
        """Render the detail page of a single job position."""
        return request.render("website_hr_recruitment.detail", {
            'job': job,
            'main_object': job,
        })

    @http.route('/jobs/apply/<model("hr.job"):job>', type='http', auth="public", website=True)
    def jobs_apply(self, job, **kwargs):
        """Render the application form, restoring any validation state left
        in the session by a previous failed submission."""
        error = {}
        default = {}
        if 'website_hr_recruitment_error' in request.session:
            error = request.session.pop('website_hr_recruitment_error')
            default = request.session.pop('website_hr_recruitment_default')
        return request.render("website_hr_recruitment.apply", {
            'job': job,
            'error': error,
            'default': default,
        })
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
  """Build zero-element float32 gradient/hessian constants.

  Each constant has shape [1] + the corresponding per-example shape.
  """
  grad_shape = [1] + gradient_shape.as_list()
  hess_shape = [1] + hessian_shape.as_list()
  no_gradients = constant_op.constant(
      [], dtype=dtypes.float32, shape=grad_shape)
  no_hessians = constant_op.constant(
      [], dtype=dtypes.float32, shape=hess_shape)
  return no_gradients, no_hessians
class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
  """Covers EqualitySplitHandler: split generation, sum-reduction losses,
  multiclass (full-hessian) mode, and empty/inactive handler states."""

  def testGenerateFeatureSplitCandidates(self):
    with self.test_session() as sess:
      # The data looks like the following:
      # Example |  Gradients   | Partition | Feature ID |
      # i0      | (0.2, 0.12)  | 0         | 1,2        |
      # i1      | (-0.5, 0.07) | 0         |            |
      # i2      | (1.2, 0.2)   | 0         | 2          |
      # i3      | (4.0, 0.13)  | 1         | 1          |
      gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
      hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
      partition_ids = [0, 0, 0, 1]
      indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
      values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
      gradient_shape = tensor_shape.scalar()
      hessian_shape = tensor_shape.scalar()
      class_id = -1
      split_handler = categorical_split_handler.EqualitySplitHandler(
          l1_regularization=0.1,
          l2_regularization=1,
          tree_complexity_regularization=0,
          min_node_weight=0,
          sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
          feature_column_group_id=0,
          gradient_shape=gradient_shape,
          hessian_shape=hessian_shape,
          multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
          init_stamp_token=0)
      resources.initialize_resources(resources.shared_resources()).run()
      empty_gradients, empty_hessians = get_empty_tensors(
          gradient_shape, hessian_shape)
      example_weights = array_ops.ones([4, 1], dtypes.float32)
      # Two identical updates: stats are accumulated, but with TREE_PER_CLASS
      # the resulting split is scale-invariant, so the expectations below
      # match a single pass.
      update_1 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([True, True]))
      update_2 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([True, True]))
      with ops.control_dependencies([update_1, update_2]):
        are_splits_ready, partitions, gains, splits = (
            split_handler.make_splits(0, 1, class_id))
        are_splits_ready, partitions, gains, splits = (sess.run(
            [are_splits_ready, partitions, gains, splits]))
    self.assertTrue(are_splits_ready)
    self.assertAllEqual([0, 1], partitions)
    # Check the split on partition 0.
    # -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
    expected_left_weight = -0.9848484848484846
    # (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
    expected_left_gain = 1.2803030303030298
    # -(-0.5 + 0.1) / (0.07 + 1)
    expected_right_weight = 0.37383177570093457
    # (-0.5 + 0.1) ** 2 / (0.07 + 1)
    expected_right_gain = 0.14953271028037385
    # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
    expected_bias_gain = 0.46043165467625885
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(2, split_node.feature_id)
    self.assertAllClose(
        expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
        0.00001)
    self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
    self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
    # Check the split on partition 1.
    # (-4 + 0.1) / (0.13 + 1)
    expected_left_weight = -3.4513274336283186
    # (-4 + 0.1) ** 2 / (0.13 + 1)
    expected_left_gain = 13.460176991150442
    expected_right_weight = 0
    expected_right_gain = 0
    # (-4 + 0.1) ** 2 / (0.13 + 1)
    expected_bias_gain = 13.460176991150442
    # Verify candidate for partition 1, there's only one active feature here
    # so zero gain is expected.
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertAllClose(0.0, gains[1], 0.00001)
    self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
    self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(1, split_node.feature_id)

  def testGenerateFeatureSplitCandidatesSumReduction(self):
    # Same data as above, but with loss_uses_sum_reduction=True, so the
    # doubled (accumulated) gradients/hessians show up in the expectations.
    with self.test_session() as sess:
      # The data looks like the following:
      # Example |  Gradients   | Partition | Feature ID |
      # i0      | (0.2, 0.12)  | 0         | 1,2        |
      # i1      | (-0.5, 0.07) | 0         |            |
      # i2      | (1.2, 0.2)   | 0         | 2          |
      # i3      | (4.0, 0.13)  | 1         | 1          |
      gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
      hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
      partition_ids = [0, 0, 0, 1]
      indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
      values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
      gradient_shape = tensor_shape.scalar()
      hessian_shape = tensor_shape.scalar()
      class_id = -1
      split_handler = categorical_split_handler.EqualitySplitHandler(
          l1_regularization=0.1,
          l2_regularization=1,
          tree_complexity_regularization=0,
          min_node_weight=0,
          sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
          feature_column_group_id=0,
          gradient_shape=gradient_shape,
          hessian_shape=hessian_shape,
          multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
          init_stamp_token=0,
          loss_uses_sum_reduction=True)
      resources.initialize_resources(resources.shared_resources()).run()
      empty_gradients, empty_hessians = get_empty_tensors(
          gradient_shape, hessian_shape)
      example_weights = array_ops.ones([4, 1], dtypes.float32)
      update_1 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([True, True]))
      update_2 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([True, True]))
      with ops.control_dependencies([update_1, update_2]):
        are_splits_ready, partitions, gains, splits = (
            split_handler.make_splits(0, 1, class_id))
        are_splits_ready, partitions, gains, splits = (
            sess.run([are_splits_ready, partitions, gains, splits]))
    self.assertTrue(are_splits_ready)
    self.assertAllEqual([0, 1], partitions)
    # Check the split on partition 0.
    # -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
    expected_left_weight = -1.6463414634146338
    # (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
    expected_left_gain = 4.445121951219511
    # -(-1 + 0.1) / (0.14 + 1)
    expected_right_weight = 0.789473684211
    # (-1 + 0.1) ** 2 / (0.14 + 1)
    expected_right_gain = 0.710526315789
    # (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
    expected_bias_gain = 1.6235955056179772
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(2, split_node.feature_id)
    self.assertAllClose(
        expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
        0.00001)
    self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
    self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
    # Check the split on partition 1.
    # (-8 + 0.1) / (0.26 + 1)
    expected_left_weight = -6.26984126984
    # (-8 + 0.1) ** 2 / (0.26 + 1)
    expected_left_gain = 49.5317460317
    expected_right_weight = 0
    expected_right_gain = 0
    # (-8 + 0.1) ** 2 / (0.26 + 1)
    expected_bias_gain = 49.5317460317
    # Verify candidate for partition 1, there's only one active feature here
    # so zero gain is expected.
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertAllClose(0.0, gains[1], 0.00001)
    self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
    self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(1, split_node.feature_id)

  def testGenerateFeatureSplitCandidatesMulticlass(self):
    with self.test_session() as sess:
      # Batch size is 4, 2 gradients per each instance.
      gradients = array_ops.constant(
          [[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
      # 2x2 matrix for each instance
      hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
      hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
      hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
      hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
      hessians = array_ops.constant(
          [hessian_0, hessian_1, hessian_2, hessian_3])
      partition_ids = [0, 0, 0, 1]
      indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
      values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
      # NOTE(review): hessians/partition_ids are re-assigned here; the
      # assignments above are redundant but kept as-is.
      hessians = array_ops.constant(
          [hessian_0, hessian_1, hessian_2, hessian_3])
      partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
      gradient_shape = tensor_shape.TensorShape([2])
      hessian_shape = tensor_shape.TensorShape([2, 2])
      class_id = -1
      split_handler = categorical_split_handler.EqualitySplitHandler(
          l1_regularization=0.1,
          l2_regularization=1,
          tree_complexity_regularization=0,
          min_node_weight=0,
          sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
          feature_column_group_id=0,
          gradient_shape=gradient_shape,
          hessian_shape=hessian_shape,
          multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
          init_stamp_token=0)
      resources.initialize_resources(resources.shared_resources()).run()
      empty_gradients, empty_hessians = get_empty_tensors(
          gradient_shape, hessian_shape)
      example_weights = array_ops.ones([4, 1], dtypes.float32)
      update_1 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([True, True]))
      with ops.control_dependencies([update_1]):
        are_splits_ready, partitions, gains, splits = (
            split_handler.make_splits(0, 1, class_id))
        are_splits_ready, partitions, gains, splits = (sess.run(
            [are_splits_ready, partitions, gains, splits]))
    self.assertTrue(are_splits_ready)
    self.assertAllEqual([0, 1], partitions)
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    # Each leaf has 2 element vector.
    self.assertEqual(2, len(left_child.value))
    self.assertEqual(2, len(right_child.value))
    self.assertEqual(1, split_node.feature_id)
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertEqual(2, len(left_child.value))
    self.assertEqual(0, len(right_child.value))
    self.assertEqual(1, split_node.feature_id)

  def testEmpty(self):
    # A sparse column with no occurrences must yield no split candidates.
    with self.test_session() as sess:
      gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
      hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
      partition_ids = [0, 0, 0, 1]
      indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2])
      values = array_ops.constant([], dtype=dtypes.int64)
      gradient_shape = tensor_shape.scalar()
      hessian_shape = tensor_shape.scalar()
      class_id = -1
      split_handler = categorical_split_handler.EqualitySplitHandler(
          l1_regularization=0.1,
          l2_regularization=1,
          tree_complexity_regularization=0,
          min_node_weight=0,
          sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
          feature_column_group_id=0,
          gradient_shape=gradient_shape,
          hessian_shape=hessian_shape,
          multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
          init_stamp_token=0)
      resources.initialize_resources(resources.shared_resources()).run()
      empty_gradients, empty_hessians = get_empty_tensors(
          gradient_shape, hessian_shape)
      example_weights = array_ops.ones([4, 1], dtypes.float32)
      update_1 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([True, True]))
      with ops.control_dependencies([update_1]):
        are_splits_ready, partitions, gains, splits = (
            split_handler.make_splits(0, 1, class_id))
        are_splits_ready, partitions, gains, splits = (sess.run(
            [are_splits_ready, partitions, gains, splits]))
    self.assertTrue(are_splits_ready)
    self.assertEqual(len(partitions), 0)
    self.assertEqual(len(gains), 0)
    self.assertEqual(len(splits), 0)

  def testInactive(self):
    # An inactive handler accumulates nothing, so no splits are produced.
    with self.test_session() as sess:
      gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
      hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
      partition_ids = [0, 0, 0, 1]
      indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
      values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
      gradient_shape = tensor_shape.scalar()
      hessian_shape = tensor_shape.scalar()
      class_id = -1
      split_handler = categorical_split_handler.EqualitySplitHandler(
          l1_regularization=0.1,
          l2_regularization=1,
          tree_complexity_regularization=0,
          min_node_weight=0,
          sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
          feature_column_group_id=0,
          gradient_shape=gradient_shape,
          hessian_shape=hessian_shape,
          multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
          init_stamp_token=0)
      resources.initialize_resources(resources.shared_resources()).run()
      empty_gradients, empty_hessians = get_empty_tensors(
          gradient_shape, hessian_shape)
      example_weights = array_ops.ones([4, 1], dtypes.float32)
      update_1 = split_handler.update_stats_sync(
          0,
          partition_ids,
          gradients,
          hessians,
          empty_gradients,
          empty_hessians,
          example_weights,
          is_active=array_ops.constant([False, False]))
      with ops.control_dependencies([update_1]):
        are_splits_ready, partitions, gains, splits = (
            split_handler.make_splits(0, 1, class_id))
        are_splits_ready, partitions, gains, splits = (sess.run(
            [are_splits_ready, partitions, gains, splits]))
    self.assertTrue(are_splits_ready)
    self.assertEqual(len(partitions), 0)
    self.assertEqual(len(gains), 0)
    self.assertEqual(len(splits), 0)
# Run the whole suite when executed directly.
if __name__ == "__main__":
  googletest.main()
# Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
import typing
from test import support
class TestIsInstanceExceptions(unittest.TestCase):
    # Test to make sure that an AttributeError when accessing the instance's
    # class's bases is masked.  This was actually a bug in Python 2.2 and
    # 2.2.1 where the exception wasn't caught but it also wasn't being cleared
    # (leading to an "undetected error" in the debug build).  Set up is,
    # isinstance(inst, cls) where:
    #
    # - cls isn't a type, or a tuple
    # - cls has a __bases__ attribute
    # - inst has a __class__ attribute
    # - inst.__class__ as no __bases__ attribute
    #
    # Sounds complicated, I know, but this mimics a situation where an
    # extension type raises an AttributeError when its __bases__ attribute is
    # gotten.  In that case, isinstance() should return False.
    def test_class_has_no_bases(self):
        class I(object):
            def getclass(self):
                # This must return an object that has no __bases__ attribute
                return None
            __class__ = property(getclass)

        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)

        self.assertEqual(False, isinstance(I(), C()))

    # Like above except that inst.__class__.__bases__ raises an exception
    # other than AttributeError
    def test_bases_raises_other_than_attribute_error(self):
        class E(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        class I(object):
            def getclass(self):
                return E()
            __class__ = property(getclass)

        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)

        self.assertRaises(RuntimeError, isinstance, I(), C())

    # Here's a situation where getattr(cls, '__bases__') raises an exception.
    # If that exception is not AttributeError, it should not get masked
    def test_dont_mask_non_attribute_error(self):
        class I: pass

        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        self.assertRaises(RuntimeError, isinstance, I(), C())

    # Like above, except that getattr(cls, '__bases__') raises an
    # AttributeError, which /should/ get masked as a TypeError
    def test_mask_attribute_error(self):
        class I: pass

        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)

        self.assertRaises(TypeError, isinstance, I(), C())

    # check that we don't mask non AttributeErrors
    # see: http://bugs.python.org/issue1574217
    def test_isinstance_dont_mask_non_attribute_error(self):
        class C(object):
            def getclass(self):
                raise RuntimeError
            __class__ = property(getclass)

        c = C()
        self.assertRaises(RuntimeError, isinstance, c, bool)

        # test another code path
        class D: pass
        self.assertRaises(RuntimeError, isinstance, c, D)
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
    def test_dont_mask_non_attribute_error(self):
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        class S(C): pass

        self.assertRaises(RuntimeError, issubclass, C(), S())

    def test_mask_attribute_error(self):
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)

        class S(C): pass

        self.assertRaises(TypeError, issubclass, C(), S())

    # Like above, but test the second branch, where the __bases__ of the
    # second arg (the cls arg) is tested.  This means the first arg must
    # return a valid __bases__, and it's okay for it to be a normal --
    # unrelated by inheritance -- class.
    def test_dont_mask_non_attribute_error_in_cls_arg(self):
        class B: pass

        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        self.assertRaises(RuntimeError, issubclass, B, C())

    def test_mask_attribute_error_in_cls_arg(self):
        class B: pass

        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)

        self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
    """Mimics an extension "class": exposes __bases__ only via a property
    and produces AbstractInstance objects when called."""
    def __init__(self, bases):
        self.bases = bases

    def getbases(self):
        return self.bases
    __bases__ = property(getbases)

    def __call__(self):
        # "Instantiating" an abstract class yields an abstract instance.
        return AbstractInstance(self)
class AbstractInstance(object):
    """Mimics an instance of an extension type: __class__ is a property
    returning the AbstractClass that created it."""
    def __init__(self, klass):
        self.klass = klass

    def getclass(self):
        return self.klass
    __class__ = property(getclass)
# abstract classes
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))

# normal classes
class Super:
    pass

class Child(Super):
    pass
class TestIsInstanceIsSubclass(unittest.TestCase):
    # Tests to ensure that isinstance and issubclass work on abstract
    # classes and instances.  Before the 2.2 release, TypeErrors were
    # raised when boolean values should have been returned.  The bug was
    # triggered by mixing 'normal' classes and instances were with
    # 'abstract' classes and instances.  This case tries to test all
    # combinations.

    def test_isinstance_normal(self):
        # normal instances
        self.assertEqual(True, isinstance(Super(), Super))
        self.assertEqual(False, isinstance(Super(), Child))
        self.assertEqual(False, isinstance(Super(), AbstractSuper))
        self.assertEqual(False, isinstance(Super(), AbstractChild))

        self.assertEqual(True, isinstance(Child(), Super))
        self.assertEqual(False, isinstance(Child(), AbstractSuper))

    def test_isinstance_abstract(self):
        # abstract instances
        self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
        self.assertEqual(False, isinstance(AbstractSuper(), Super))
        self.assertEqual(False, isinstance(AbstractSuper(), Child))

        self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractChild(), Super))
        self.assertEqual(False, isinstance(AbstractChild(), Child))

    def test_isinstance_with_or_union(self):
        # PEP 604 unions (X | Y) are accepted by isinstance(); parameterized
        # generics inside a union are not.
        self.assertTrue(isinstance(Super(), Super | int))
        self.assertFalse(isinstance(None, str | int))
        self.assertTrue(isinstance(3, str | int))
        self.assertTrue(isinstance("", str | int))
        self.assertTrue(isinstance([], typing.List | typing.Tuple))
        self.assertTrue(isinstance(2, typing.List | int))
        self.assertFalse(isinstance(2, typing.List | typing.Tuple))
        self.assertTrue(isinstance(None, int | None))
        self.assertFalse(isinstance(3.14, int | str))
        with self.assertRaises(TypeError):
            isinstance(2, list[int])
        with self.assertRaises(TypeError):
            isinstance(2, list[int] | int)
        with self.assertRaises(TypeError):
            isinstance(2, float | str | list[int] | int)

    def test_subclass_normal(self):
        # normal classes
        self.assertEqual(True, issubclass(Super, Super))
        self.assertEqual(False, issubclass(Super, AbstractSuper))
        self.assertEqual(False, issubclass(Super, Child))

        self.assertEqual(True, issubclass(Child, Child))
        self.assertEqual(True, issubclass(Child, Super))
        self.assertEqual(False, issubclass(Child, AbstractSuper))
        self.assertTrue(issubclass(typing.List, typing.List|typing.Tuple))
        self.assertFalse(issubclass(int, typing.List|typing.Tuple))

    def test_subclass_abstract(self):
        # abstract classes
        self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
        self.assertEqual(False, issubclass(AbstractSuper, Child))

        self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
        self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractChild, Super))
        self.assertEqual(False, issubclass(AbstractChild, Child))

    def test_subclass_tuple(self):
        # test with a tuple as the second argument classes
        self.assertEqual(True, issubclass(Child, (Child,)))
        self.assertEqual(True, issubclass(Child, (Super,)))
        self.assertEqual(False, issubclass(Super, (Child,)))
        self.assertEqual(True, issubclass(Super, (Child, Super)))
        self.assertEqual(False, issubclass(Child, ()))
        # Tuples may be nested arbitrarily.
        self.assertEqual(True, issubclass(Super, (Child, (Super,))))

        self.assertEqual(True, issubclass(int, (int, (float, int))))
        self.assertEqual(True, issubclass(str, (str, (Child, str))))

    @support.skip_wasi_stack_overflow()
    @support.skip_emscripten_stack_overflow()
    def test_subclass_recursion_limit(self):
        # make sure that issubclass raises RecursionError before the C stack is
        # blown
        self.assertRaises(RecursionError, blowstack, issubclass, str, str)

    @support.skip_wasi_stack_overflow()
    @support.skip_emscripten_stack_overflow()
    def test_isinstance_recursion_limit(self):
        # make sure that issubclass raises RecursionError before the C stack is
        # blown
        self.assertRaises(RecursionError, blowstack, isinstance, '', str)

    def test_subclass_with_union(self):
        self.assertTrue(issubclass(int, int | float | int))
        self.assertTrue(issubclass(str, str | Child | str))
        self.assertFalse(issubclass(dict, float|str))
        self.assertFalse(issubclass(object, float|str))
        with self.assertRaises(TypeError):
            issubclass(2, Child | Super)
        with self.assertRaises(TypeError):
            issubclass(int, list[int] | Child)

    def test_issubclass_refcount_handling(self):
        # bpo-39382: abstract_issubclass() didn't hold item reference while
        # peeking in the bases tuple, in the single inheritance case.
        class A:
            @property
            def __bases__(self):
                return (int, )

        class B:
            def __init__(self):
                # setting this here increases the chances of exhibiting the bug,
                # probably due to memory layout changes.
                self.x = 1

            @property
            def __bases__(self):
                return (A(), )

        self.assertEqual(True, issubclass(B(), int))

    def test_infinite_recursion_in_bases(self):
        class X:
            @property
            def __bases__(self):
                return self.__bases__

        with support.infinite_recursion(25):
            self.assertRaises(RecursionError, issubclass, X(), int)
            self.assertRaises(RecursionError, issubclass, int, X())
            self.assertRaises(RecursionError, isinstance, 1, X())

    @support.skip_if_unlimited_stack_size
    @support.skip_emscripten_stack_overflow()
    @support.skip_wasi_stack_overflow()
    def test_infinite_recursion_via_bases_tuple(self):
        """Regression test for bpo-30570."""
        class Failure(object):
            def __getattr__(self, attr):
                return (self, None)
        with support.infinite_recursion():
            with self.assertRaises(RecursionError):
                issubclass(Failure(), int)

    @support.skip_if_unlimited_stack_size
    @support.skip_emscripten_stack_overflow()
    @support.skip_wasi_stack_overflow()
    def test_infinite_cycle_in_bases(self):
        """Regression test for bpo-30570."""
        class X:
            @property
            def __bases__(self):
                return (self, self, self)
        with support.infinite_recursion():
            self.assertRaises(RecursionError, issubclass, X(), int)

    def test_infinitely_many_bases(self):
        """Regression test for bpo-30570."""
        class X:
            def __getattr__(self, attr):
                self.assertEqual(attr, "__bases__")
                class A:
                    pass
                class B:
                    pass
                A.__getattr__ = B.__getattr__ = X.__getattr__
                return (A(), B())
        with support.infinite_recursion(25):
            self.assertRaises(RecursionError, issubclass, X(), int)
def blowstack(fxn, arg, compare_to):
    """Call fxn(arg, nested) with an ever-deeper nested tuple until the
    interpreter raises RecursionError."""
    nested = (compare_to,)
    while True:
        # Deepen by 100 levels per attempt before retrying the call.
        for _ in range(100):
            nested = (nested,)
        fxn(arg, nested)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
'''
XML parser. One function for each top-level element in the schema.
Most functions just declare a new object and add it to the module.
For typedefs, eventcopies, xidtypes, and other aliases though,
we do not create a new type object, we just record the existing one under a new name.
'''
from os.path import join
from xml.etree.cElementTree import parse
from xcbgen.xtypes import *
def import_(node, module, namespace):
    '''
    For imports, we load the file, create a new namespace object,
    execute recursively, then record the import (for header files, etc.)
    '''
    # To avoid circular import error
    from xcbgen import state
    # Bump the nesting depth while the imported schema is being processed.
    module.import_level = module.import_level + 1
    new_file = join(namespace.dir, '%s.xml' % node.text)
    new_root = parse(new_file).getroot()
    # NOTE(review): new_root is unused below — Namespace presumably parses
    # the file itself; confirm before removing the parse() call.
    new_namespace = state.Namespace(new_file)
    execute(module, new_namespace)
    module.import_level = module.import_level - 1
    # Record the import once only (used for header-file generation, etc.).
    if not module.has_import(node.text):
        module.add_import(node.text, new_namespace)
def typedef(node, module, namespace):
    '''Register a <typedef>: alias an existing type under a new name.'''
    alias = node.get('newname')
    qualified = namespace.prefix + (alias,)
    existing = module.get_type(node.get('oldname'))
    module.add_type(alias, namespace.ns, qualified, existing)
def xidtype(node, module, namespace):
    '''Register an <xidtype>: an XID is represented as CARD32.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    module.add_type(ident, namespace.ns, qualified, module.get_type('CARD32'))
def xidunion(node, module, namespace):
    '''Register an <xidunion>: like xidtype, a CARD32-backed XID alias.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    module.add_type(ident, namespace.ns, qualified, module.get_type('CARD32'))
def enum(node, module, namespace):
    '''Register an <enum> element as a new Enum type.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    module.add_type(ident, namespace.ns, qualified, Enum(qualified, node))
def struct(node, module, namespace):
    '''Register a <struct> element as a new Struct type.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    module.add_type(ident, namespace.ns, qualified, Struct(qualified, node))
def union(node, module, namespace):
    '''Register a <union> element as a new Union type.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    module.add_type(ident, namespace.ns, qualified, Union(qualified, node))
def request(node, module, namespace):
    '''Register a <request> element as a new Request type.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    module.add_request(ident, qualified, Request(qualified, node))
def event(node, module, namespace):
    '''Register an <event>: build the Event type and record its main opcode.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    new_event = Event(qualified, node)
    new_event.add_opcode(node.get('number'), qualified, True)
    module.add_event(ident, qualified, new_event)
def eventcopy(node, module, namespace):
    '''Register an <eventcopy>: reuse an existing Event under a new
    name/opcode instead of creating a new type object.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    existing = module.get_event(node.get('ref'))
    existing.add_opcode(node.get('number'), qualified, False)
    module.add_event(ident, qualified, existing)
def error(node, module, namespace):
    '''Register an <error>: build the Error type and record its main opcode.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    new_error = Error(qualified, node)
    new_error.add_opcode(node.get('number'), qualified, True)
    module.add_error(ident, qualified, new_error)
def errorcopy(node, module, namespace):
    '''Register an <errorcopy>: reuse an existing Error under a new
    name/opcode instead of creating a new type object.'''
    ident = node.get('name')
    qualified = namespace.prefix + (ident,)
    existing = module.get_error(node.get('ref'))
    existing.add_opcode(node.get('number'), qualified, False)
    module.add_error(ident, qualified, existing)
# Dispatch table: top-level schema element tag -> handler function.
funcs = {'import' : import_,
         'typedef' : typedef,
         'xidtype' : xidtype,
         'xidunion' : xidunion,
         'enum' : enum,
         'struct' : struct,
         'union' : union,
         'request' : request,
         'event' : event,
         'eventcopy' : eventcopy,
         'error' : error,
         'errorcopy' : errorcopy}
def execute(module, namespace):
    '''Dispatch each top-level element of the schema to its handler.'''
    for element in list(namespace.root):
        funcs[element.tag](element, module, namespace)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible metadata describing the module's maturity and support level.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_portgroup_info
short_description: Gathers info about an ESXi host's Port Group configuration
description:
- This module can be used to gather information about an ESXi host's Port Group configuration when ESXi hostname or Cluster name is given.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
policies:
description:
- Gather information about Security, Traffic Shaping, as well as Teaming and failover.
- The property C(ts) stands for Traffic Shaping and C(lb) for Load Balancing.
type: bool
default: false
cluster_name:
description:
- Name of the cluster.
- Info will be returned for all hostsystem belonging to this cluster name.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather information from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather portgroup info about all ESXi Host in given Cluster
vmware_portgroup_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
- name: Gather portgroup info about ESXi Host system
vmware_portgroup_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
'''
RETURN = r'''
hosts_portgroup_info:
description: metadata about host's portgroup configuration
returned: on success
type: dict
sample: {
"esx01": [
{
"failback": true,
"failover_active": ["vmnic0", "vmnic1"],
"failover_standby": [],
"failure_detection": "link_status_only",
"lb": "loadbalance_srcid",
"notify": true,
"portgroup": "Management Network",
"security": [false, false, false],
"ts": "No override",
"vlan_id": 0,
"vswitch": "vSwitch0"
},
{
"failback": true,
"failover_active": ["vmnic2"],
"failover_standby": ["vmnic3"],
"failure_detection": "No override",
"lb": "No override",
"notify": true,
"portgroup": "vMotion",
"security": [false, false, false],
"ts": "No override",
"vlan_id": 33,
"vswitch": "vSwitch1"
}
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class PortgroupInfoManager(PyVmomi):
    """Gather standard vSwitch port group configuration from ESXi hosts.

    Hosts are selected via the module's C(cluster_name) or C(esxi_hostname)
    parameters; when C(policies) is true, per-portgroup policy details
    (security, traffic shaping, teaming/failover) are included.
    """

    def __init__(self, module):
        """Resolve target hosts and options from the module parameters."""
        super(PortgroupInfoManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")
        self.policies = self.params.get('policies')

    @staticmethod
    def normalize_pg_info(portgroup_obj, policy_info):
        """Return a flat dict describing one port group.

        :param portgroup_obj: host port group object whose ``spec`` is summarized.
        :param policy_info: when true, include security/shaping/teaming policy details.
        """
        pg_info_dict = dict()
        spec = portgroup_obj.spec
        pg_info_dict['portgroup'] = spec.name
        pg_info_dict['vlan_id'] = spec.vlanId
        pg_info_dict['vswitch'] = spec.vswitchName
        if policy_info:
            # Security info: a None field means the port group inherits ("No override").
            if spec.policy.security:
                promiscuous_mode = spec.policy.security.allowPromiscuous
                mac_changes = spec.policy.security.macChanges
                forged_transmits = spec.policy.security.forgedTransmits
                pg_info_dict['security'] = (
                    ["No override" if promiscuous_mode is None else promiscuous_mode,
                     "No override" if mac_changes is None else mac_changes,
                     "No override" if forged_transmits is None else forged_transmits]
                )
            else:
                pg_info_dict['security'] = ["No override", "No override", "No override"]
            # Traffic Shaping info
            if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None:
                pg_info_dict['ts'] = spec.policy.shapingPolicy.enabled
            else:
                pg_info_dict['ts'] = "No override"
            # Teaming and failover info
            nic_teaming = spec.policy.nicTeaming
            if nic_teaming:
                pg_info_dict['lb'] = "No override" if nic_teaming.policy is None else nic_teaming.policy
                pg_info_dict['notify'] = "No override" if nic_teaming.notifySwitches is None else nic_teaming.notifySwitches
                # rollingOrder is inverted relative to the reported "failback" value.
                pg_info_dict['failback'] = "No override" if nic_teaming.rollingOrder is None else not nic_teaming.rollingOrder
                if nic_teaming.nicOrder is None:
                    pg_info_dict['failover_active'] = "No override"
                    pg_info_dict['failover_standby'] = "No override"
                else:
                    pg_info_dict['failover_active'] = nic_teaming.nicOrder.activeNic
                    pg_info_dict['failover_standby'] = nic_teaming.nicOrder.standbyNic
                # Bug fix: the original code dereferenced failureCriteria.checkBeacon in
                # its else branch even when failureCriteria was None, raising
                # AttributeError. Treat a missing failureCriteria as "No override".
                if nic_teaming.failureCriteria is None or nic_teaming.failureCriteria.checkBeacon is None:
                    pg_info_dict['failure_detection'] = "No override"
                elif nic_teaming.failureCriteria.checkBeacon:
                    pg_info_dict['failure_detection'] = "beacon_probing"
                else:
                    pg_info_dict['failure_detection'] = "link_status_only"
            else:
                pg_info_dict['lb'] = "No override"
                pg_info_dict['notify'] = "No override"
                pg_info_dict['failback'] = "No override"
                pg_info_dict['failover_active'] = "No override"
                pg_info_dict['failover_standby'] = "No override"
                pg_info_dict['failure_detection'] = "No override"
        return pg_info_dict

    def gather_host_portgroup_info(self):
        """Return a dict mapping each host name to a list of port group summaries."""
        hosts_pg_info = dict()
        for host in self.hosts:
            pgs = host.config.network.portgroup
            hosts_pg_info[host.name] = []
            for portgroup in pgs:
                hosts_pg_info[host.name].append(
                    self.normalize_pg_info(portgroup_obj=portgroup, policy_info=self.policies)
                )
        return hosts_pg_info
def main():
    """Module entry point: build the argument spec, gather info and exit."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        policies=dict(type='bool', required=False, default=False),
    )
    # Check mode is safe: this module only reads configuration.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )
    manager = PortgroupInfoManager(module)
    module.exit_json(changed=False, hosts_portgroup_info=manager.gather_host_portgroup_info())
# Invoke the module entry point when executed directly (as Ansible does).
if __name__ == "__main__":
    main()
//===--- SILStage.swift ---------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2023 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
/// The stage of SIL processing that a module is in.
///
/// Each stage carries its own invariants, described on the individual cases.
public enum SILStage: Int {
  /// "Raw" SIL, emitted by SILGen, but not yet run through guaranteed
  /// optimization and diagnostic passes.
  ///
  /// Raw SIL does not have fully-constructed SSA and may contain undiagnosed
  /// dataflow errors.
  case raw

  /// Canonical SIL, which has been run through at least the guaranteed
  /// optimization and diagnostic passes.
  ///
  /// Canonical SIL has stricter invariants than raw SIL. It must not contain
  /// dataflow errors, and some instructions must be canonicalized to simpler
  /// forms.
  case canonical

  /// Lowered SIL, which has been prepared for IRGen and will no longer
  /// be passed to canonical SIL transform passes.
  ///
  /// In lowered SIL, the SILType of all SILValues is its SIL storage
  /// type. Explicit storage is required for all address-only and resilient
  /// types.
  ///
  /// Generating the initial Raw SIL is typically referred to as lowering (from
  /// the AST). To disambiguate, refer to the process of generating the lowered
  /// stage of SIL as "address lowering".
  case lowered
}
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Manage upstream ansible-core releases."""
from __future__ import annotations
import argparse
import contextlib
import dataclasses
import datetime
import enum
import functools
import gzip
import hashlib
import http.client
import inspect
import json
import math
import os
import pathlib
import re
import secrets
import shlex
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import typing as t
import urllib.error
import urllib.parse
import urllib.request
import venv
import webbrowser
import zipfile
import jinja2
from packaging.version import Version, InvalidVersion
# region CLI Framework
def path_to_str(value: t.Any) -> str:
    """Return the given value converted to a string suitable for use as a command line argument."""
    # Directories get a trailing slash so they are unambiguous on the command line.
    if isinstance(value, pathlib.Path) and value.is_dir():
        return f"{value}/"
    return str(value)
# The overloads below let type checkers narrow the return type from the literal
# value of capture_output: True -> CompletedProcess, False/omitted -> None.
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str, capture_output: t.Literal[True]) -> CompletedProcess: ...
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str, capture_output: t.Literal[False]) -> None: ...
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str, capture_output: bool) -> CompletedProcess | None: ...
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str) -> None: ...
def run(
    *args: t.Any,
    env: dict[str, t.Any] | None,
    cwd: pathlib.Path | str,
    capture_output: bool = False,
) -> CompletedProcess | None:
    """Run the specified command."""
    # Rewrite Path arguments relative to the working directory, then stringify
    # every argument (and env value) with path_to_str before echoing the command.
    args = [arg.relative_to(cwd) if isinstance(arg, pathlib.Path) else arg for arg in args]
    str_args = tuple(path_to_str(arg) for arg in args)
    str_env = {key: path_to_str(value) for key, value in env.items()} if env is not None else None
    display.show(f"--> {shlex.join(str_args)}", color=Display.CYAN)
    try:
        p = subprocess.run(str_args, check=True, text=True, env=str_env, cwd=cwd, capture_output=capture_output)
    except subprocess.CalledProcessError as ex:
        # improve type hinting and include stdout/stderr (if any) in the message
        raise CalledProcessError(
            message=str(ex),
            cmd=str_args,
            status=ex.returncode,
            stdout=ex.stdout,
            stderr=ex.stderr,
        ) from None
    if not capture_output:
        return None
    # improve type hinting
    return CompletedProcess(
        stdout=p.stdout,
        stderr=p.stderr,
    )
@contextlib.contextmanager
def suppress_when(error_as_warning: bool) -> t.Generator[None, None, None]:
    """Conditionally convert an ApplicationError in the provided context to a warning."""
    if not error_as_warning:
        # Pass-through: errors propagate unchanged.
        yield
        return
    try:
        yield
    except ApplicationError as ex:
        display.warning(ex)
class ApplicationError(Exception):
    """A fatal application error which will be shown without a traceback."""
    # Caught at the CLI top level, reported via display.fatal, then exit code 1.
class CalledProcessError(Exception):
    """Results from a failed process."""
    def __init__(self, message: str, cmd: tuple[str, ...], status: int, stdout: str | None, stderr: str | None) -> None:
        # Strip each captured stream; append it to the message only when
        # something remains after stripping.
        if stdout:
            stdout = stdout.strip()
            if stdout:
                message += f"\n>>> Standard Output\n{stdout}"
        if stderr:
            stderr = stderr.strip()
            if stderr:
                message += f"\n>>> Standard Error\n{stderr}"
        super().__init__(message)
        self.cmd = cmd
        self.status = status
        self.stdout = stdout
        self.stderr = stderr
@dataclasses.dataclass(frozen=True)
class CompletedProcess:
    """Results from a completed process."""
    stdout: str  # captured standard output
    stderr: str  # captured standard error
class Display:
"""Display interface for sending output to the console."""
CLEAR = "\033[0m"
RED = "\033[31m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
def fatal(self, message: t.Any) -> None:
"""Print a fatal message to the console."""
self.show(f"FATAL: {message}", color=self.RED)
def warning(self, message: t.Any) -> None:
"""Print a warning message to the console."""
self.show(f"WARNING: {message}", color=self.PURPLE)
def show(self, message: t.Any, color: str | None = None) -> None:
"""Print a message to the console."""
print(f"{color or self.CLEAR}{message}{self.CLEAR}", flush=True)
class CommandFramework:
    """
    Simple command line framework inspired by nox.
    Argument parsing is handled by argparse. Each function annotated with an instance of this class becomes a subcommand.
    Options are shared across all commands, and are defined by providing kwargs when creating an instance of this class.
    Options are only defined for commands which have a matching parameter.
    The name of each kwarg is the option name, which will be prefixed with `--` and with underscores converted to dashes.
    The value of each kwarg is passed as kwargs to ArgumentParser.add_argument. Passing None results in an internal only parameter.
    The following custom kwargs are recognized and are not passed to add_argument:
    name - Override the positional argument (option) passed to add_argument.
    exclusive - Put the argument in an exclusive group of the given name.
    """
    def __init__(self, **kwargs: dict[str, t.Any] | None) -> None:
        # Registered command functions, shared option definitions and the parsed CLI namespace.
        self.commands: list[t.Callable[..., None]] = []
        self.arguments = kwargs
        self.parsed_arguments: argparse.Namespace | None = None
    def __call__[T: t.Callable[..., None]](self, func: T) -> T:
        """Register the decorated function as a CLI command."""
        self.commands.append(func)
        return func
    def run(self, *args: t.Callable[..., None], **kwargs) -> None:
        """Run the specified command(s), using any provided internal args."""
        for arg in args:
            self._run(arg, **kwargs)
    def main(self) -> None:
        """Main program entry point."""
        parser = argparse.ArgumentParser(description=__doc__)
        subparsers = parser.add_subparsers(metavar="COMMAND", required=True)
        for func in self.commands:
            func_parser = subparsers.add_parser(self._format_command_name(func), description=func.__doc__, help=func.__doc__)
            func_parser.set_defaults(func=func)
            exclusive_groups = {}
            signature = inspect.signature(func)
            # Only define options for parameters the command actually declares.
            for name in signature.parameters:
                if name not in self.arguments:
                    raise RuntimeError(f"The '{name}' argument, used by '{func.__name__}', has not been defined.")
                if (arguments := self.arguments.get(name)) is None:
                    continue  # internal use
                # Copy before popping the custom 'exclusive'/'name' keys so the
                # shared definition is not mutated across commands.
                arguments = arguments.copy()
                exclusive = arguments.pop("exclusive", None)
                # noinspection PyProtectedMember, PyUnresolvedReferences
                command_parser: argparse._ActionsContainer
                if exclusive:
                    if exclusive not in exclusive_groups:
                        exclusive_groups[exclusive] = func_parser.add_mutually_exclusive_group()
                    command_parser = exclusive_groups[exclusive]
                else:
                    command_parser = func_parser
                if option_name := arguments.pop("name", None):
                    arguments.update(dest=name)
                else:
                    option_name = f"--{name.replace('_', '-')}"
                command_parser.add_argument(option_name, **arguments)
        # Tab completion is optional; silently skip when argcomplete is absent.
        try:
            # noinspection PyUnresolvedReferences
            import argcomplete
        except ImportError:
            pass
        else:
            argcomplete.autocomplete(parser)
        self.parsed_arguments = parser.parse_args()
        try:
            self.run(self.parsed_arguments.func)
        except ApplicationError as ex:
            display.fatal(ex)
            sys.exit(1)
    def _run(self, func: t.Callable[..., None], **kwargs) -> None:
        """Run the specified command, using any provided internal args."""
        signature = inspect.signature(func)
        # Pull arguments from the parsed CLI namespace, then let explicit kwargs override.
        func_args = {name: getattr(self.parsed_arguments, name) for name in signature.parameters if hasattr(self.parsed_arguments, name)}
        func_args.update({name: value for name, value in kwargs.items() if name in signature.parameters})
        printable_args = ", ".join(f"{name}={repr(value)}" for name, value in func_args.items())
        label = f"{self._format_command_name(func)}({printable_args})"
        display.show(f"==> {label}", color=Display.BLUE)
        try:
            func(**func_args)
        except BaseException:
            display.show(f"!!! {label}", color=Display.RED)
            raise
        display.show(f"<== {label}", color=Display.BLUE)
    @staticmethod
    def _format_command_name(func: t.Callable[..., None]) -> str:
        """Return the friendly name of the given command."""
        return func.__name__.replace("_", "-")
display = Display()
# endregion
# region Data Classes
@dataclasses.dataclass(frozen=True)
class GitHubRelease:
    """Details required to create a GitHub release."""
    user: str  # owner of the repository the release is created in
    repo: str  # repository name
    tag: str  # tag to create for the release
    target: str  # commit or branch the tag should point at
    title: str  # release title
    body: str  # release body text
    pre_release: bool  # whether to mark the release as a pre-release
@dataclasses.dataclass(frozen=True)
class PullRequest:
    """Details required to create a pull request."""
    upstream_user: str  # owner of the repository the PR targets
    upstream_repo: str  # repository the PR targets
    upstream_branch: str  # base branch for the PR
    user: str  # owner of the fork providing the changes
    repo: str  # fork repository name
    branch: str  # head branch in the fork
    title: str  # PR title
    body: str  # PR body text
@dataclasses.dataclass(frozen=True)
class Remote:
    """Details about a git remote."""
    name: str  # git remote name
    user: str  # GitHub user or org parsed from the remote URL
    repo: str  # repository name parsed from the remote URL
@dataclasses.dataclass(frozen=True)
class Remotes:
    """Details about git remotes."""
    fork: Remote  # the contributor's fork
    upstream: Remote  # the remote the fork was created from
@dataclasses.dataclass(frozen=True)
class GitState:
    """Details about the state of the git repository."""
    remotes: Remotes  # detected fork and upstream remotes
    branch: str | None  # current branch, or None when no branch name is reported
    commit: str  # commit hash of HEAD
@dataclasses.dataclass(frozen=True)
class ReleaseArtifact:
    """Information about a release artifact on PyPI."""
    package_type: str  # artifact type identifier
    package_label: str  # display label for the artifact
    url: str  # artifact download URL
    size: int  # artifact size
    digest: str  # artifact digest
    digest_algorithm: str  # algorithm used to produce the digest
# endregion
# region Utilities
# Repository layout: all paths are resolved relative to this script's checkout.
SCRIPT_DIR = pathlib.Path(__file__).parent.resolve()
CHECKOUT_DIR = SCRIPT_DIR.parent
ANSIBLE_LIB_DIR = CHECKOUT_DIR / "lib"
ANSIBLE_DIR = ANSIBLE_LIB_DIR / "ansible"
ANSIBLE_BIN_DIR = CHECKOUT_DIR / "bin"
ANSIBLE_RELEASE_FILE = ANSIBLE_DIR / "release.py"
ANSIBLE_REQUIREMENTS_FILE = CHECKOUT_DIR / "requirements.txt"
ANSIBLE_CHANGELOG_REQUIREMENTS_FILE = CHECKOUT_DIR / "test/lib/ansible_test/_data/requirements/sanity.changelog.txt"
ANSIBLE_PYPROJECT_TOML_FILE = CHECKOUT_DIR / "pyproject.toml"
DIST_DIR = CHECKOUT_DIR / "dist"
VENV_DIR = DIST_DIR / ".venv" / "release"
CHANGELOGS_DIR = CHECKOUT_DIR / "changelogs"
CHANGELOGS_FRAGMENTS_DIR = CHANGELOGS_DIR / "fragments"
# Pattern and template used to read/write __version__ in lib/ansible/release.py.
ANSIBLE_VERSION_PATTERN = re.compile("^__version__ = '(?P<version>.*)'$", re.MULTILINE)
ANSIBLE_VERSION_FORMAT = "__version__ = '{version}'"
DIGEST_ALGORITHM = "sha256"
# These endpoint names match those defined as defaults in twine.
# See: https://github.com/pypa/twine/blob/9c2c0a1c535155931c3d879359330cb836950c6a/twine/utils.py#L82-L85
PYPI_ENDPOINTS = dict(
    pypi="https://pypi.org/pypi",
    testpypi="https://test.pypi.org/pypi",
)
# Environment forced on pip invocations: require a venv, silence version checks.
PIP_ENV = dict(
    PIP_REQUIRE_VIRTUALENV="yes",
    PIP_DISABLE_PIP_VERSION_CHECK="yes",
)
class VersionMode(enum.Enum):
    """How to handle the ansible-core version."""
    DEFAULT = enum.auto()
    """Do not allow development versions. Do not allow post release versions."""
    STRIP_POST = enum.auto()
    """Do not allow development versions. Strip the post release from the version if present."""
    REQUIRE_POST = enum.auto()
    """Do not allow development versions. Require a post release version."""
    REQUIRE_DEV_POST = enum.auto()
    """Require a development or post release version."""
    ALLOW_DEV_POST = enum.auto()
    """Allow development and post release versions."""
    def apply(self, version: Version) -> Version:
        """Apply the mode to the given version and return the result."""
        original_version = version
        release_component_count = 3
        # Structural validation applied in every mode: exactly major.minor.micro,
        # no epoch, no local component, not both dev and post at once.
        # NOTE(review): the message below interpolates the release tuple itself,
        # not its length, so it reads slightly oddly; left as-is (runtime string).
        if len(version.release) != release_component_count:
            raise ApplicationError(f"Version {version} contains {version.release} release components instead of {release_component_count}.")
        if version.epoch:
            raise ApplicationError(f"Version {version} contains an epoch component: {version.epoch}")
        if version.local is not None:
            raise ApplicationError(f"Version {version} contains a local component: {version.local}")
        if version.is_devrelease and version.is_postrelease:
            raise ApplicationError(f"Version {version} is a development and post release version.")
        if self == VersionMode.ALLOW_DEV_POST:
            return version
        if self == VersionMode.REQUIRE_DEV_POST:
            if not version.is_devrelease and not version.is_postrelease:
                raise ApplicationError(f"Version {version} is not a development or post release version.")
            return version
        # The remaining modes never accept development releases.
        if version.is_devrelease:
            raise ApplicationError(f"Version {version} is a development release: {version.dev}")
        if self == VersionMode.STRIP_POST:
            if version.is_postrelease:
                version = Version(str(version).removesuffix(f".post{version.post}"))
                display.warning(f"Using version {version} by stripping the post release suffix from version {original_version}.")
            return version
        if self == VersionMode.REQUIRE_POST:
            if not version.is_postrelease:
                raise ApplicationError(f"Version {version} is not a post release version.")
            return version
        if version.is_postrelease:
            raise ApplicationError(f"Version {version} is a post release.")
        if self == VersionMode.DEFAULT:
            return version
        raise NotImplementedError(self)
# Overloads narrow the return type from capture_output, mirroring run().
@t.overload
def git(*args: t.Any, capture_output: t.Literal[True]) -> CompletedProcess: ...
@t.overload
def git(*args: t.Any, capture_output: t.Literal[False]) -> None: ...
@t.overload
def git(*args: t.Any) -> None: ...
def git(*args: t.Any, capture_output: bool = False) -> CompletedProcess | None:
    """Run the specified git command."""
    # Always run from the repository root with the caller's inherited environment.
    return run("git", *args, env=None, cwd=CHECKOUT_DIR, capture_output=capture_output)
def get_commit(rev: str | None = None) -> str:
    """Return the commit associated with the given rev, or HEAD if no rev is given."""
    target = f"{rev or 'HEAD'}^{{commit}}"
    try:
        result = git("rev-parse", "--quiet", "--verify", "--end-of-options", target, capture_output=True)
    except CalledProcessError as ex:
        # With --quiet, an unknown rev fails with status 1 and no output.
        if ex.status == 1 and not ex.stdout and not ex.stderr:
            raise ApplicationError(f"Could not find commit: {rev}") from None
        raise
    return result.stdout.strip()
def prepare_pull_request(version: Version, branch: str, title: str, add: t.Iterable[pathlib.Path | str], allow_stale: bool) -> PullRequest:
    """Return pull request parameters using the provided details."""
    git_state = get_git_state(version, allow_stale)
    # Refuse to create an empty PR when no tracked files have changed.
    if not git("status", "--porcelain", "--untracked-files=no", capture_output=True).stdout.strip():
        raise ApplicationError("There are no changes to commit. Did you skip a step?")
    upstream_branch = get_upstream_branch(version)
    body = create_pull_request_body(title)
    # Commit the given paths on a temporary branch, push it to the fork, then
    # restore the original checkout and delete the local temporary branch.
    git("checkout", "-b", branch)
    git("add", *add)
    git("commit", "-m", title)
    git("push", "--set-upstream", git_state.remotes.fork.name, branch)
    git("checkout", git_state.branch or git_state.commit)
    git("branch", "-d", branch)
    pr = PullRequest(
        upstream_user=git_state.remotes.upstream.user,
        upstream_repo=git_state.remotes.upstream.repo,
        upstream_branch=upstream_branch,
        user=git_state.remotes.fork.user,
        repo=git_state.remotes.fork.repo,
        branch=branch,
        title=title,
        body=body,
    )
    return pr
def create_github_release(release: GitHubRelease) -> None:
    """Open a browser tab for creating the given GitHub release."""
    # See: https://docs.github.com/en/repositories/releasing-projects-on-github/automation-for-release-forms-with-query-parameters
    query_string = urllib.parse.urlencode(dict(
        tag=release.tag,
        target=release.target,
        title=release.title,
        body=release.body,
        prerelease=1 if release.pre_release else 0,
    ))
    url = f"https://github.com/{release.user}/{release.repo}/releases/new?{query_string}"
    display.show("Opening release creation page in new tab using default browser ...")
    webbrowser.open_new_tab(url)
def create_pull_request(pr: PullRequest) -> None:
    """Open a browser tab for creating the given pull request."""
    # See: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/using-query-parameters-to-create-a-pull-request # noqa
    query_string = urllib.parse.urlencode(dict(
        quick_pull=1,
        title=pr.title,
        body=pr.body,
    ))
    url = f"https://github.com/{pr.upstream_user}/{pr.upstream_repo}/compare/{pr.upstream_branch}...{pr.user}:{pr.repo}:{pr.branch}?{query_string}"
    display.show("Opening pull request in new tab using default browser ...")
    webbrowser.open_new_tab(url)
def create_pull_request_body(title: str) -> str:
    """Return a simple pull request body created from the given title."""
    # Assemble the fixed section headers around the given title.
    sections = ["##### SUMMARY", title, "##### ISSUE TYPE", "Feature Pull Request"]
    return "\n".join(sections) + "\n"
def get_remote(name: str, push: bool) -> Remote:
    """Return details about the specified remote."""
    query = ["remote", "get-url"]
    if push:
        query.append("--push")
    remote_url = git(*query, name, capture_output=True).stdout.strip()
    match = re.search(r"[@/]github[.]com[:/](?P<user>[^/]+)/(?P<repo>[^.]+)(?:[.]git)?$", remote_url)
    if match is None:
        raise RuntimeError(f"Unable to identify the user and repo in the '{name}' remote: {remote_url}")
    return Remote(
        name=name,
        user=match.group("user"),
        repo=match.group("repo"),
    )
# Cached so repeated callers do not re-run the git remote detection below.
@functools.cache
def get_remotes() -> Remotes:
    """Return details about the remotes we need to use."""
    # assume the devel branch has its upstream remote pointing to the user's fork
    fork_remote_name = git("branch", "--list", "devel", "--format=%(upstream:remotename)", capture_output=True).stdout.strip()
    if not fork_remote_name:
        raise ApplicationError("Could not determine the remote for your fork of Ansible.")
    display.show(f"Detected '{fork_remote_name}' as the remote for your fork of Ansible.")
    # assume there is only one ansible org remote, which would allow release testing using another repo in the same org without special configuration
    all_remotes = git("remote", "-v", capture_output=True).stdout.strip().splitlines()
    ansible_remote_names = set(line.split()[0] for line in all_remotes if re.search(r"[@/]github[.]com[:/]ansible/", line))
    if not ansible_remote_names:
        raise ApplicationError(f"Could not determine the remote which '{fork_remote_name}' was forked from.")
    if len(ansible_remote_names) > 1:
        raise ApplicationError(f"Found multiple candidates for the remote from which '{fork_remote_name}' was forked from: {', '.join(ansible_remote_names)}")
    upstream_remote_name = ansible_remote_names.pop()
    display.show(f"Detected '{upstream_remote_name}' as the remote from which '{fork_remote_name}' was forked from.")
    if fork_remote_name == upstream_remote_name:
        raise ApplicationError("The remote for your fork of Ansible cannot be the same as the remote from which it was forked.")
    remotes = Remotes(
        fork=get_remote(fork_remote_name, push=True),
        upstream=get_remote(upstream_remote_name, push=False),
    )
    return remotes
def get_upstream_branch(version: Version) -> str:
    """Return the upstream branch name for the given version."""
    # Release branches are named after the major.minor series.
    return "stable-{0}.{1}".format(version.major, version.minor)
def get_git_state(version: Version, allow_stale: bool) -> GitState:
    """Return information about the current state of the git repository."""
    remotes = get_remotes()
    release_branch = get_upstream_branch(version)
    # Fetch the release branch so the freshness comparison below is current.
    git("fetch", remotes.upstream.name, release_branch)
    upstream_ref = f"{remotes.upstream.name}/{release_branch}"
    upstream_commit = get_commit(upstream_ref)
    local_commit = get_commit()
    if local_commit != upstream_commit:
        # A stale checkout is fatal unless the caller explicitly allows it.
        with suppress_when(allow_stale):
            raise ApplicationError(f"The current commit ({local_commit}) does not match {upstream_ref} ({upstream_commit}).")
    current_branch = git("branch", "--show-current", capture_output=True).stdout.strip() or None
    return GitState(
        remotes=remotes,
        branch=current_branch,
        commit=local_commit,
    )
@functools.cache
def ensure_venv(requirements_content: str) -> dict[str, t.Any]:
    """Ensure the release venv is ready and return the env vars needed to use it."""
    # The venv directory is keyed on the Python version and a hash of the
    # requirements, so changing either transparently builds a fresh environment.
    requirements_hash = hashlib.sha256(requirements_content.encode()).hexdigest()[:8]
    python_version = ".".join(map(str, sys.version_info[:2]))
    venv_dir = VENV_DIR / python_version / requirements_hash
    venv_bin_dir = venv_dir / "bin"
    venv_requirements_file = venv_dir / "requirements.txt"
    venv_marker_file = venv_dir / "marker.txt"
    env = os.environ.copy()
    env.pop("PYTHONPATH", None)  # avoid interference from ansible being injected into the environment
    env.update(
        PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])),
    )
    # The marker file is touched only after a fully successful build, so a
    # partial venv from an interrupted run is removed and rebuilt from scratch.
    if not venv_marker_file.exists():
        display.show(f"Creating a Python {python_version} virtual environment ({requirements_hash}) ...")
        if venv_dir.exists():
            shutil.rmtree(venv_dir)
        venv.create(venv_dir, with_pip=True)
        venv_requirements_file.write_text(requirements_content)
        run("pip", "install", "-r", venv_requirements_file, env=env | PIP_ENV, cwd=CHECKOUT_DIR)
        venv_marker_file.touch()
    return env
def get_pypi_project(repository: str, project: str, version: Version | None = None) -> dict[str, t.Any]:
    """Return the project JSON from PyPI for the specified repository, project and version (optional)."""
    endpoint = PYPI_ENDPOINTS[repository]
    if version:
        url = f"{endpoint}/{project}/{version}/json"
    else:
        url = f"{endpoint}/{project}/json"
    opener = urllib.request.build_opener()
    response: http.client.HTTPResponse
    try:
        with opener.open(url) as response:
            return json.load(response)
    except urllib.error.HTTPError as ex:
        target = f'{project!r} version {version}' if version else f'{project!r}'
        # A 404 means the project/version does not exist; anything else is unexpected.
        if ex.status == http.HTTPStatus.NOT_FOUND:
            raise ApplicationError(f"Could not find {target} on PyPI.") from None
        raise RuntimeError(f"Failed to get {target} from PyPI.") from ex
def get_ansible_version(version: str | None = None, /, commit: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version:
    """Parse and return the current ansible-core version, the provided version or the version from the provided commit."""
    if version and commit:
        raise ValueError("Specify only one of: version, commit")
    if version:
        source = ""
    else:
        # Read release.py either from the given commit or from the working tree.
        if commit:
            current = git("show", f"{commit}:{ANSIBLE_RELEASE_FILE.relative_to(CHECKOUT_DIR)}", capture_output=True).stdout
        else:
            current = ANSIBLE_RELEASE_FILE.read_text()
        if not (match := ANSIBLE_VERSION_PATTERN.search(current)):
            raise RuntimeError("Failed to get the ansible-core version.")
        version = match.group("version")
        source = f" in '{ANSIBLE_RELEASE_FILE}'"
    try:
        parsed_version = Version(version)
    except InvalidVersion:
        raise ApplicationError(f"Invalid version{source}: {version}") from None
    # Enforce the caller's constraints on dev/post release components.
    parsed_version = mode.apply(parsed_version)
    return parsed_version
def get_next_version(version: Version, /, final: bool = False, pre: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version:
    """Return the next version after the specified version."""
    # TODO: consider using development versions instead of post versions after a release is published
    pre = pre or ""
    micro = version.micro
    if version.is_devrelease:
        # The next version of a development release is the same version without the development component.
        if final:
            pre = ""
        elif not pre and version.pre is not None:
            pre = f"{version.pre[0]}{version.pre[1]}"
        elif not pre:
            pre = "b1"  # when there is no existing pre and none specified, advance to b1
    elif version.is_postrelease:
        # The next version of a post release is the next pre-release *or* micro release component.
        if final:
            pre = ""
        elif not pre and version.pre is not None:
            pre = f"{version.pre[0]}{version.pre[1] + 1}"
        elif not pre:
            pre = "rc1"  # when there is no existing pre and none specified, advance to rc1
        # A post of a final release (no pre component) advances the micro version.
        if version.pre is None:
            micro = version.micro + 1
    else:
        raise ApplicationError(f"Version {version} is not a development or post release version.")
    version = f"{version.major}.{version.minor}.{micro}{pre}"
    # Re-parse so the result is validated against the requested mode.
    return get_ansible_version(version, mode=mode)
def check_ansible_version(current_version: Version, requested_version: Version) -> None:
    """Verify the requested version is valid for the current version."""
    same_series = requested_version.release[:2] == current_version.release[:2]
    if not same_series:
        raise ApplicationError(f"Version {requested_version} does not match the major and minor portion of the current version: {current_version}")
    if requested_version < current_version:
        raise ApplicationError(f"Version {requested_version} is older than the current version: {current_version}")
    # TODO: consider additional checks to avoid mistakes when incrementing the release version
def set_ansible_version(current_version: Version, requested_version: Version) -> None:
    """Set the current ansible-core version."""
    check_ansible_version(current_version, requested_version)
    if requested_version == current_version:
        return
    display.show(f"Updating version {current_version} to {requested_version} ...")
    original = ANSIBLE_RELEASE_FILE.read_text()
    replacement = ANSIBLE_VERSION_FORMAT.format(version=requested_version)
    updated = ANSIBLE_VERSION_PATTERN.sub(replacement, original)
    if updated == original:
        raise RuntimeError("Failed to set the ansible-core version.")
    ANSIBLE_RELEASE_FILE.write_text(updated)
def get_latest_setuptools_version() -> Version:
    """Return the latest setuptools version found on PyPI."""
    # Always query the production 'pypi' endpoint for the setuptools metadata.
    return Version(get_pypi_project('pypi', 'setuptools')['info']['version'])
def set_setuptools_upper_bound(requested_version: Version) -> None:
    """Set the upper bound on setuptools in pyproject.toml."""
    # Matches the build-system 'requires' line, capturing everything around the upper bound.
    pattern = re.compile(r'^(?P<begin>requires = \["setuptools >= )(?P<lower>[^,]+)(?P<middle>, <= )(?P<upper>[^"]+)(?P<end>".*)$', re.MULTILINE)

    original = ANSIBLE_PYPROJECT_TOML_FILE.read_text()
    match = pattern.search(original)

    if match is None:
        raise ApplicationError(f"Unable to find the 'requires' entry in: {ANSIBLE_PYPROJECT_TOML_FILE.relative_to(CHECKOUT_DIR)}")

    current_version = Version(match.group('upper'))

    if requested_version == current_version:
        return  # already at the requested upper bound

    display.show(f"Updating setuptools upper bound from {current_version} to {requested_version} ...")

    updated = pattern.sub(fr'\g<begin>\g<lower>\g<middle>{requested_version}\g<end>', original)

    if updated == original:
        raise RuntimeError("Failed to set the setuptools upper bound.")

    ANSIBLE_PYPROJECT_TOML_FILE.write_text(updated)
def create_reproducible_sdist(original_path: pathlib.Path, output_path: pathlib.Path, mtime: int) -> None:
    """Read the specified sdist and write out a new copy with uniform file metadata at the specified location."""
    with tarfile.open(original_path) as original_archive, tempfile.TemporaryDirectory() as temp_dir:
        tar_file = pathlib.Path(temp_dir) / "sdist.tar"

        # First rewrite every member into an uncompressed tar with sanitized metadata.
        with tarfile.open(tar_file, mode="w") as tar_archive:
            for original_info in original_archive.getmembers():  # type: tarfile.TarInfo
                tar_archive.addfile(create_reproducible_tar_info(original_info, mtime), original_archive.extractfile(original_info))

        # Then gzip the tar with a fixed mtime so the compressed stream is reproducible too.
        with tar_file.open("rb") as tar_archive, gzip.GzipFile(output_path, "wb", mtime=mtime) as output_archive:
            shutil.copyfileobj(tar_archive, output_archive)
def create_reproducible_tar_info(original: tarfile.TarInfo, mtime: int) -> tarfile.TarInfo:
    """Return a copy of the given TarInfo with uniform file metadata."""
    # Clear the permission bits (keeping any special bits), then grant 0o644,
    # restoring "execute for all" when the original had any execute bit set.
    mode = original.mode & ~(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    mode |= stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH

    if original.mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
        mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH

    sanitized = tarfile.TarInfo()
    sanitized.name = original.name
    sanitized.size = original.size
    sanitized.mtime = mtime
    sanitized.mode = mode
    sanitized.type = original.type
    sanitized.linkname = original.linkname

    # Ownership is normalized to root so the archive is host-independent.
    sanitized.uid = 0
    sanitized.gid = 0
    sanitized.uname = "root"
    sanitized.gname = "root"

    return sanitized
def test_built_artifact(path: pathlib.Path) -> None:
    """Test the specified built artifact by installing it in a venv and running some basic commands."""
    with tempfile.TemporaryDirectory() as temp_dir_name:
        venv_dir = pathlib.Path(temp_dir_name) / "venv"
        venv_bin_dir = venv_dir / "bin"

        venv.create(venv_dir, with_pip=True)

        env = os.environ.copy()
        env.pop("PYTHONPATH", None)  # avoid interference from ansible being injected into the environment
        env["PATH"] = os.pathsep.join((str(venv_bin_dir), env["PATH"]))

        run("pip", "install", path, env=env | PIP_ENV, cwd=CHECKOUT_DIR)

        run("ansible", "--version", env=env, cwd=CHECKOUT_DIR)
        run("ansible-test", "--version", env=env, cwd=CHECKOUT_DIR)
def get_sdist_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path:
    """Return the path to the sdist file."""
    filename = f"ansible_core-{version}.tar.gz"
    return dist_dir / filename
def get_wheel_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path:
    """Return the path to the wheel file."""
    filename = f"ansible_core-{version}-py3-none-any.whl"
    return dist_dir / filename
def calculate_digest(path: pathlib.Path) -> str:
    """Return the digest for the specified file."""
    with path.open("rb") as f:
        return hashlib.file_digest(f, DIGEST_ALGORITHM).hexdigest()
@functools.cache
def get_release_artifact_details(repository: str, version: Version, validate: bool) -> list[ReleaseArtifact]:
    """Return information about the release artifacts hosted on PyPI."""
    data = get_pypi_project(repository, 'ansible-core', version)
    artifacts = [describe_release_artifact(version, item, validate) for item in data["urls"]]

    # A release must consist of exactly one wheel and one sdist.
    expected_artifact_types = {"bdist_wheel", "sdist"}
    found_artifact_types = {artifact.package_type for artifact in artifacts}

    if found_artifact_types != expected_artifact_types:
        raise RuntimeError(f"Expected {expected_artifact_types} artifact types, but found {found_artifact_types} instead.")

    return artifacts
def describe_release_artifact(version: Version, item: dict[str, t.Any], validate: bool) -> ReleaseArtifact:
"""Return release artifact details extracted from the given PyPI data."""
package_type = item["packagetype"]
# The artifact URL is documented as stable, so is safe to put in release notes.
# See: https://github.com/pypi/warehouse/blame/c95be4a1055f4b36a8852715eb80318c81fc00ca/docs/api-reference/integration-guide.rst#L86-L90
url = item["url"]
pypi_size = item["size"]
pypi_digest = item["digests"][DIGEST_ALGORITHM]
if package_type == "bdist_wheel":
local_artifact_file = get_wheel_path(version)
package_label = "Built Distribution"
elif package_type == "sdist":
local_artifact_file = get_sdist_path(version)
package_label = "Source Distribution"
else:
raise NotImplementedError(f"Package type '{package_type}' is not supported.")
if validate:
try:
local_size = local_artifact_file.stat().st_size
local_digest = calculate_digest(local_artifact_file)
except FileNotFoundError:
raise ApplicationError(f"Missing local artifact: {local_artifact_file.relative_to(CHECKOUT_DIR)}") from None
if local_size != pypi_size:
raise ApplicationError(f"The {version} local {package_type} size {local_size} does not match the PyPI size {pypi_size}.")
if local_digest != pypi_digest:
raise ApplicationError(f"The {version} local {package_type} digest '{local_digest}' does not match the PyPI digest '{pypi_digest}'.")
return ReleaseArtifact(
package_type=package_type,
package_label=package_label,
url=url,
size=pypi_size,
digest=pypi_digest,
digest_algorithm=DIGEST_ALGORITHM.upper(),
)
def get_next_release_date(start: datetime.date, step: int, after: datetime.date) -> datetime.date:
    """Return the next release date."""
    if start > after:
        raise ValueError(f"{start=} is greater than {after=}")

    # Count the full release periods elapsed since the start, then advance one more.
    elapsed_days = (after - start).days
    periods_completed = math.floor(elapsed_days / step)

    return start + datetime.timedelta(days=(periods_completed + 1) * step)
def create_template_environment() -> jinja2.Environment:
    """Create and return a jinja2 environment."""
    environment = jinja2.Environment()
    environment.filters["basename"] = os.path.basename  # lets templates display file names extracted from URLs
    return environment
def create_github_release_notes(upstream: Remote, repository: str, version: Version, validate: bool) -> str:
    """Create and return GitHub release notes."""
    changelog = f"https://github.com/{upstream.user}/{upstream.repo}/blob/v{version}/changelogs/CHANGELOG-v{version.major}.{version.minor}.rst"
    template = create_template_environment().from_string(GITHUB_RELEASE_NOTES_TEMPLATE)

    rendered = template.render(
        version=version,
        releases=get_release_artifact_details(repository, version, validate),
        changelog=changelog,
    )

    return rendered.strip()
# endregion
# region Templates
# Markdown template used to render the body of the GitHub release.
# (A stray zero-width non-joiner that preceded the size placeholder was removed;
# it produced an invisible character in the published release notes.)
GITHUB_RELEASE_NOTES_TEMPLATE = """
# Changelog
See the [full changelog]({{ changelog }}) for the changes included in this release.
# Release Artifacts
{%- for release in releases %}
* {{ release.package_label }}: [{{ release.url|basename }}]({{ release.url }}) - {{ release.size }} bytes
* {{ release.digest }} ({{ release.digest_algorithm }})
{%- endfor %}
"""
# endregion
# region Commands
# Shared option definitions for the @command-decorated sub-commands below.
# Options sharing exclusive="version" are mutually exclusive ways to choose the next version.
# Options with a name="--no-..." override expose store_false flags for opting out.
command = CommandFramework(
    repository=dict(metavar="REPO", choices=tuple(PYPI_ENDPOINTS), default="pypi", help="PyPI repository to use: %(choices)s [%(default)s]"),
    version=dict(exclusive="version", help="version to set"),
    pre=dict(exclusive="version", help="increment version to the specified pre-release (aN, bN, rcN)"),
    final=dict(exclusive="version", action="store_true", help="increment version to the next final release"),
    commit=dict(help="commit to tag"),
    validate=dict(name="--no-validate", action="store_false", help="disable validation of PyPI artifacts against local ones"),
    prompt=dict(name="--no-prompt", action="store_false", help="disable interactive prompt before publishing with twine"),
    setuptools=dict(name='--no-setuptools', action="store_false", help="disable updating setuptools upper bound"),
    allow_tag=dict(action="store_true", help="allow an existing release tag (for testing)"),
    allow_stale=dict(action="store_true", help="allow a stale checkout (for testing)"),
    allow_dirty=dict(action="store_true", help="allow untracked files and files with changes (for testing)"),
)
@command
def instructions() -> None:
    """Show instructions for the release process."""
    # NOTE(review): the internal indentation of this message (e.g. the "a."/"b."
    # sub-steps) may have been lost in formatting — verify against the rendered output.
    message = """
Releases must be performed using an up-to-date checkout of a fork of the Ansible repository.
1. Make sure your checkout is up-to-date.
2. Run the `prepare` command [1], then:
a. Submit the PR opened in the browser.
b. Wait for CI to pass.
c. Merge the PR.
3. Update your checkout to include the commit from the PR which was just merged.
4. Run the `complete` command [2], then:
a. Submit the GitHub release opened in the browser.
b. Submit the PR opened in the browser.
c. Wait for CI to pass.
d. Merge the PR.
[1] Use the `--final`, `--pre` or `--version` option for control over the version.
[2] During the `publish` step, `twine` may prompt for credentials.
"""
    display.show(message.strip())
@command
def show_version(final: bool = False, pre: str | None = None) -> None:
    """Show the current and next ansible-core version."""
    current_version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
    display.show(f"Current version: {current_version}")

    try:
        next_version = get_next_version(current_version, final=final, pre=pre)
    except ApplicationError as ex:
        # The next version cannot be calculated; report why instead of failing.
        display.show(f"   Next version: Unknown - {ex}")
        return

    display.show(f"   Next version: {next_version}")
    check_ansible_version(current_version, next_version)
@command
def check_state(allow_stale: bool = False) -> None:
    """Verify the git repository is in a usable state for creating a pull request."""
    # The returned state is discarded; get_git_state raises when the checkout is unusable.
    get_git_state(get_ansible_version(), allow_stale)
# noinspection PyUnusedLocal
@command
def prepare(final: bool = False, pre: str | None = None, version: str | None = None, setuptools: bool | None = None) -> None:
    """Prepare a release."""
    # The parameters are unused here; they are consumed by the sub-commands run below.
    # The steps run in order; each must succeed before the next begins.
    command.run(
        update_version,
        update_setuptools,
        check_state,
        generate_summary,
        generate_changelog,
        create_release_pr,
    )
@command
def update_version(final: bool = False, pre: str | None = None, version: str | None = None) -> None:
    """Update the version embedded in the source code."""
    current_version = get_ansible_version(mode=VersionMode.REQUIRE_DEV_POST)

    # An explicitly requested version wins; otherwise calculate the next one.
    if version:
        requested_version = get_ansible_version(version)
    else:
        requested_version = get_next_version(current_version, final=final, pre=pre)

    set_ansible_version(current_version, requested_version)
@command
def update_setuptools(setuptools: bool) -> None:
    """Update the setuptools upper bound in pyproject.toml."""
    if setuptools:
        # Pin the upper bound to the latest release currently on PyPI.
        set_setuptools_upper_bound(get_latest_setuptools_version())
@command
def generate_summary() -> None:
    """Generate a summary changelog fragment for this release."""
    version = get_ansible_version()
    release_date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d")
    summary_path = CHANGELOGS_FRAGMENTS_DIR / f"{version}_summary.yaml"
    major_minor = f"{version.major}.{version.minor}"

    # NOTE(review): the indentation of the template lines below may have been lost
    # in formatting; the fragment must remain a valid YAML block scalar — verify
    # the emitted file passes the changelog sanity checks.
    content = f"""
release_summary: |
| Release Date: {release_date}
| `Porting Guide <https://docs.ansible.com/ansible-core/{major_minor}/porting_guides/porting_guide_core_{major_minor}.html>`__
"""

    # lstrip drops the leading newline introduced by the triple-quoted string.
    summary_path.write_text(content.lstrip())
@command
def generate_changelog() -> None:
    """Generate the changelog and validate the results."""
    # Combine the changelog tooling requirements with the ansible requirements.
    changelog_requirements = (
        ANSIBLE_CHANGELOG_REQUIREMENTS_FILE.read_text()
        + ANSIBLE_REQUIREMENTS_FILE.read_text()  # TODO: consider pinning the ansible requirements and dependencies
    )

    env = ensure_venv(changelog_requirements)
    # Expose the in-tree ansible-core commands and library to the venv.
    env.update(
        PATH=os.pathsep.join((str(ANSIBLE_BIN_DIR), env["PATH"])),
        PYTHONPATH=ANSIBLE_LIB_DIR,
    )

    # TODO: consider switching back to the original changelog generator instead of using antsibull-changelog
    run("antsibull-changelog", "release", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR)
    run("antsibull-changelog", "generate", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR)

    # Sanity-check the generated files before they are committed.
    run("ansible-test", "sanity", CHANGELOGS_DIR, ANSIBLE_RELEASE_FILE, env=env, cwd=CHECKOUT_DIR)
@command
def create_release_pr(allow_stale: bool = False) -> None:
    """Create a branch and open a browser tab for creating a release pull request."""
    version = get_ansible_version()

    # A random branch suffix keeps repeated release attempts from colliding.
    pull_request = prepare_pull_request(
        version=version,
        branch=f"release-{version}-{secrets.token_hex(4)}",
        title=f"New release v{version}",
        add=(
            CHANGELOGS_DIR,
            ANSIBLE_RELEASE_FILE,
            ANSIBLE_PYPROJECT_TOML_FILE,
        ),
        allow_stale=allow_stale,
    )

    create_pull_request(pull_request)
# noinspection PyUnusedLocal
@command
def complete(repository: str, allow_dirty: bool = False) -> None:
    """Complete a release after the prepared changes have been merged."""
    # The parameters are unused here; they are consumed by the sub-commands run below.
    # The steps run in order; each must succeed before the next begins.
    command.run(
        check_state,
        build,
        test,
        publish,
        tag_release,
        post_version,
        create_post_pr,
    )
@command
def build(allow_dirty: bool = False) -> None:
    """Build the sdist and wheel."""
    version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)

    # TODO: consider pinning the build requirement and its dependencies
    build_requirements = """
build
"""

    env = ensure_venv(build_requirements)

    # Untracked or modified files are excluded from the worktree-based build below,
    # so fail early unless the caller explicitly allowed a dirty checkout.
    dirty = git("status", "--porcelain", "--untracked-files=all", capture_output=True).stdout.strip().splitlines()

    if dirty:
        with suppress_when(allow_dirty):
            raise ApplicationError(f"There are {len(dirty)} files which are untracked and/or have changes, which will be omitted from the build.")

    sdist_file = get_sdist_path(version)
    wheel_file = get_wheel_path(version)

    with tempfile.TemporaryDirectory(dir=DIST_DIR, prefix=f"build-{version}-", suffix=".tmp") as temp_dir_name:
        temp_dir = pathlib.Path(temp_dir_name)
        dist_dir = temp_dir / "dist"

        # SOURCE_DATE_EPOCH is set to the commit timestamp for reproducible builds.
        commit_time = int(git("show", "-s", "--format=%ct", capture_output=True).stdout)

        env.update(
            SOURCE_DATE_EPOCH=commit_time,
        )

        # Build from a detached worktree so only committed files end up in the artifacts.
        git("worktree", "add", "-d", temp_dir)

        try:
            run("python", "-m", "build", env=env, cwd=temp_dir)
            create_reproducible_sdist(get_sdist_path(version, dist_dir), sdist_file, commit_time)
            get_wheel_path(version, dist_dir).rename(wheel_file)
        finally:
            git("worktree", "remove", temp_dir)
@command
def test() -> None:
    """Test the sdist and wheel."""
    # Run both artifact tests as sub-commands so each is reported separately.
    command.run(
        test_sdist,
        test_wheel,
    )
@command
def test_sdist() -> None:
    """Test the sdist."""
    version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
    sdist_file = get_sdist_path(version)

    try:
        sdist = tarfile.open(sdist_file)
    except FileNotFoundError:
        raise ApplicationError(f"Missing sdist: {sdist_file.relative_to(CHECKOUT_DIR)}") from None

    with sdist, tempfile.TemporaryDirectory() as temp_dir_name:
        temp_dir = pathlib.Path(temp_dir_name)
        sdist.extractall(temp_dir, filter='data')

        # Compiled bytecode must never ship in the sdist.
        pyc_glob = "*.pyc*"
        pyc_files = sorted(path.relative_to(temp_dir) for path in temp_dir.rglob(pyc_glob))

        if pyc_files:
            raise ApplicationError(f"Found {len(pyc_files)} '{pyc_glob}' file(s): {', '.join(map(str, pyc_files))}")

    test_built_artifact(sdist_file)
@command
def test_wheel() -> None:
    """Test the wheel."""
    version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
    wheel_file = get_wheel_path(version)

    try:
        wheel = zipfile.ZipFile(wheel_file)
    except FileNotFoundError:
        raise ApplicationError(f"Missing wheel for version {version}: {wheel_file}") from None

    with wheel, tempfile.TemporaryDirectory() as temp_dir_name:
        # Extract the entire archive into a scratch directory.
        wheel.extractall(pathlib.Path(temp_dir_name))

    test_built_artifact(wheel_file)
@command
def publish(repository: str, prompt: bool = True) -> None:
    """Publish to PyPI."""
    version = get_ansible_version()
    sdist_file = get_sdist_path(version)
    wheel_file = get_wheel_path(version)

    # TODO: consider pinning the twine requirement and its dependencies
    publish_requirements = """
twine
"""

    env = ensure_venv(publish_requirements)

    if prompt:
        try:
            # Require the user to type the repository name, guarding against accidental uploads.
            while input(f"Do you want to publish {version} to the '{repository}' repository?\nEnter the repository name to confirm: ") != repository:
                pass
        except KeyboardInterrupt:
            display.show("")
            raise ApplicationError("Publishing was aborted by the user.") from None

    run("twine", "upload", "-r", repository, sdist_file, wheel_file, env=env, cwd=CHECKOUT_DIR)
@command
def tag_release(repository: str, commit: str | None = None, validate: bool = True, allow_tag: bool = False) -> None:
    """Create a GitHub release using the current or specified commit."""
    upstream = get_remotes().upstream

    if commit:
        git("fetch", upstream.name)  # fetch upstream to make sure the commit can be found

    commit = get_commit(commit)
    version = get_ansible_version(commit=commit)
    tag = f"v{version}"

    # Refuse to re-tag an existing release unless explicitly allowed (for testing).
    if upstream_tag := git("ls-remote", "--tags", upstream.name, tag, capture_output=True).stdout.strip():
        with suppress_when(allow_tag):
            raise ApplicationError(f"Version {version} has already been tagged: {upstream_tag}")

    # The commit being tagged must already exist on the upstream release branch.
    upstream_branch = get_upstream_branch(version)
    upstream_refs = git("branch", "-r", "--format=%(refname)", "--contains", commit, capture_output=True).stdout.strip().splitlines()
    upstream_ref = f"refs/remotes/{upstream.name}/{upstream_branch}"

    if upstream_ref not in upstream_refs:
        raise ApplicationError(f"Commit {upstream_ref} not found. Found {len(upstream_refs)} upstream ref(s): {', '.join(upstream_refs)}")

    body = create_github_release_notes(upstream, repository, version, validate)

    release = GitHubRelease(
        user=upstream.user,
        repo=upstream.repo,
        target=commit,
        tag=tag,
        title=tag,
        body=body,
        pre_release=version.pre is not None,
    )

    create_github_release(release)
@command
def post_version() -> None:
    """Set the post release version."""
    current_version = get_ansible_version()
    # Append ".post0" to the just-released version to mark the checkout as post-release.
    requested_version = get_ansible_version(f"{current_version}.post0", mode=VersionMode.REQUIRE_POST)
    set_ansible_version(current_version, requested_version)
@command
def create_post_pr(allow_stale: bool = False) -> None:
    """Create a branch and open a browser tab for creating a post release pull request."""
    version = get_ansible_version(mode=VersionMode.REQUIRE_POST)
    branch = f"release-{version}-{secrets.token_hex(4)}"  # random suffix avoids branch name collisions

    pull_request = prepare_pull_request(
        version=version,
        branch=branch,
        title=f"Update Ansible release version to v{version}.",
        add=(ANSIBLE_RELEASE_FILE,),
        allow_stale=allow_stale,
    )

    create_pull_request(pull_request)
# endregion
if __name__ == "__main__":
    # Dispatch to the selected sub-command via the shared CommandFramework instance.
    command.main()
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory;
import java.lang.annotation.Annotation;
import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.Field;
import java.lang.reflect.Member;
import java.util.Objects;
import org.jspecify.annotations.Nullable;
import org.springframework.core.MethodParameter;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
/**
 * A simple descriptor for an injection point, pointing to a method/constructor
 * parameter or a field.
 *
 * <p>Exposed by {@link UnsatisfiedDependencyException}. Also available as an
 * argument for factory methods, reacting to the requesting injection point
 * for building a customized bean instance.
 *
 * @author Juergen Hoeller
 * @since 4.3
 * @see UnsatisfiedDependencyException#getInjectionPoint()
 * @see org.springframework.beans.factory.config.DependencyDescriptor
 */
public class InjectionPoint {

	// Exactly one of 'methodParameter' or 'field' is non-null for a fully
	// initialized descriptor; both are null only for the serialization constructor.
	protected @Nullable MethodParameter methodParameter;

	protected @Nullable Field field;

	// Lazily computed cache of the field's annotations. Declared volatile so a
	// value written by one thread is visible to others; a benign race may cause
	// the annotations to be computed more than once.
	private volatile Annotation @Nullable [] fieldAnnotations;


	/**
	 * Create an injection point descriptor for a method or constructor parameter.
	 * @param methodParameter the MethodParameter to wrap
	 */
	public InjectionPoint(MethodParameter methodParameter) {
		Assert.notNull(methodParameter, "MethodParameter must not be null");
		this.methodParameter = methodParameter;
	}

	/**
	 * Create an injection point descriptor for a field.
	 * @param field the field to wrap
	 */
	public InjectionPoint(Field field) {
		Assert.notNull(field, "Field must not be null");
		this.field = field;
	}

	/**
	 * Copy constructor.
	 * @param original the original descriptor to create a copy from
	 */
	protected InjectionPoint(InjectionPoint original) {
		// MethodParameter is mutable, so take a defensive copy; Field is immutable and shared.
		this.methodParameter = (original.methodParameter != null ?
				new MethodParameter(original.methodParameter) : null);
		this.field = original.field;
		this.fieldAnnotations = original.fieldAnnotations;
	}

	/**
	 * Just available for serialization purposes in subclasses.
	 */
	protected InjectionPoint() {
	}


	/**
	 * Return the wrapped MethodParameter, if any.
	 * <p>Note: Either MethodParameter or Field is available.
	 * @return the MethodParameter, or {@code null} if none
	 */
	public @Nullable MethodParameter getMethodParameter() {
		return this.methodParameter;
	}

	/**
	 * Return the wrapped Field, if any.
	 * <p>Note: Either MethodParameter or Field is available.
	 * @return the Field, or {@code null} if none
	 */
	public @Nullable Field getField() {
		return this.field;
	}

	/**
	 * Return the wrapped MethodParameter, assuming it is present.
	 * @return the MethodParameter (never {@code null})
	 * @throws IllegalStateException if no MethodParameter is available
	 * @since 5.0
	 */
	protected final MethodParameter obtainMethodParameter() {
		Assert.state(this.methodParameter != null, "MethodParameter is not available");
		return this.methodParameter;
	}

	/**
	 * Obtain the annotations associated with the wrapped field or method/constructor parameter.
	 */
	public Annotation[] getAnnotations() {
		if (this.field != null) {
			// Read the volatile cache once into a local to avoid re-reads and racy null checks.
			Annotation[] fieldAnnotations = this.fieldAnnotations;
			if (fieldAnnotations == null) {
				fieldAnnotations = this.field.getAnnotations();
				this.fieldAnnotations = fieldAnnotations;
			}
			return fieldAnnotations;
		}
		else {
			return obtainMethodParameter().getParameterAnnotations();
		}
	}

	/**
	 * Retrieve a field/parameter annotation of the given type, if any.
	 * @param annotationType the annotation type to retrieve
	 * @return the annotation instance, or {@code null} if none found
	 * @since 4.3.9
	 */
	public <A extends Annotation> @Nullable A getAnnotation(Class<A> annotationType) {
		return (this.field != null ? this.field.getAnnotation(annotationType) :
				obtainMethodParameter().getParameterAnnotation(annotationType));
	}

	/**
	 * Return the type declared by the underlying field or method/constructor parameter,
	 * indicating the injection type.
	 */
	public Class<?> getDeclaredType() {
		return (this.field != null ? this.field.getType() : obtainMethodParameter().getParameterType());
	}

	/**
	 * Returns the wrapped member, containing the injection point.
	 * @return the Field / Method / Constructor as Member
	 */
	public Member getMember() {
		return (this.field != null ? this.field : obtainMethodParameter().getMember());
	}

	/**
	 * Return the wrapped annotated element.
	 * <p>Note: In case of a method/constructor parameter, this exposes
	 * the annotations declared on the method or constructor itself
	 * (i.e. at the method/constructor level, not at the parameter level).
	 * Use {@link #getAnnotations()} to obtain parameter-level annotations in
	 * such a scenario, transparently with corresponding field annotations.
	 * @return the Field / Method / Constructor as AnnotatedElement
	 */
	public AnnotatedElement getAnnotatedElement() {
		return (this.field != null ? this.field : obtainMethodParameter().getAnnotatedElement());
	}


	@Override
	public boolean equals(@Nullable Object other) {
		if (this == other) {
			return true;
		}
		// Strict class match (not instanceof) so subclasses are never equal to this type.
		if (other == null || getClass() != other.getClass()) {
			return false;
		}
		InjectionPoint otherPoint = (InjectionPoint) other;
		return (ObjectUtils.nullSafeEquals(this.field, otherPoint.field) &&
				ObjectUtils.nullSafeEquals(this.methodParameter, otherPoint.methodParameter));
	}

	@Override
	public int hashCode() {
		return Objects.hash(this.field, this.methodParameter);
	}

	@Override
	public String toString() {
		return (this.field != null ? "field '" + this.field.getName() + "'" : String.valueOf(this.methodParameter));
	}

}
"""
Grades related signals.
"""
from django.dispatch import Signal
# Signal that indicates that a user's grade for a course has been updated.
# This is a downstream signal of SUBSECTION_SCORE_CHANGED.
# Signal that indicates that a user's raw score for a problem has been updated.
# This signal is generated when a scoring event occurs within the core
# platform. Note that this signal will be triggered
# regardless of the new and previous values of the score (i.e. it may be the
# case that this signal is generated when a user re-attempts a problem but
# receives the same score).
PROBLEM_RAW_SCORE_CHANGED = Signal(
providing_args=[
'user_id', # Integer User ID
'course_id', # Unicode string representing the course
'usage_id', # Unicode string indicating the courseware instance
'raw_earned', # Score obtained by the user
'raw_possible', # Maximum score available for the exercise
'weight', # Weight of the problem
'only_if_higher', # Boolean indicating whether updates should be
# made only if the new score is higher than previous.
'modified', # A datetime indicating when the database representation of
# this the problem score was saved.
'score_db_table', # The database table that houses the score that changed.
'score_deleted', # Boolean indicating whether the score changed due to
# the user state being deleted.
]
)
# Signal that indicates that a user's weighted score for a problem has been updated.
# This signal is generated when a scoring event occurs in the Submissions module
# or a PROBLEM_RAW_SCORE_CHANGED event is handled in the core platform.
# Note that this signal will be triggered
# regardless of the new and previous values of the score (i.e. it may be the
# case that this signal is generated when a user re-attempts a problem but
# receives the same score).
PROBLEM_WEIGHTED_SCORE_CHANGED = Signal(
providing_args=[
'user_id', # Integer User ID
'anonymous_user_id', # Anonymous User ID
'course_id', # Unicode string representing the course
'usage_id', # Unicode string indicating the courseware instance
'weighted_earned', # Score obtained by the user
'weighted_possible', # Maximum score available for the exercise
'only_if_higher', # Boolean indicating whether updates should be
# made only if the new score is higher than previous.
'modified', # A datetime indicating when the database representation of
# this the problem score was saved.
'score_db_table', # The database table that houses the score that changed.
'score_deleted', # Boolean indicating whether the score changed due to
# the user state being deleted.
]
)
# Signal that indicates that a user's score for a problem has been published
# for possible persistence and update. Typically, most clients should listen
# to the PROBLEM_WEIGHTED_SCORE_CHANGED signal instead, since that is signalled
# only after the problem's score is changed.
SCORE_PUBLISHED = Signal(
providing_args=[
'block', # Course block object
'user', # User object
'raw_earned', # Score obtained by the user
'raw_possible', # Maximum score available for the exercise
'only_if_higher', # Boolean indicating whether updates should be
# made only if the new score is higher than previous.
'score_db_table', # The database table that houses the score that changed.
]
)
# Signal that indicates that a user's score for a subsection has been updated.
# This is a downstream signal of PROBLEM_WEIGHTED_SCORE_CHANGED sent for each
# affected containing subsection.
SUBSECTION_SCORE_CHANGED = Signal(
providing_args=[
'course', # Course object
'course_structure', # BlockStructure object
'user', # User object
'subsection_grade', # SubsectionGrade object
]
)
# Signal that indicates that a user's score for a subsection has been overridden.
# This signal is generated when a user's exam attempt state is set to rejected or
# to verified from rejected. This signal may also be sent by any other client
# using the GradesService to override subsections in the future.
SUBSECTION_OVERRIDE_CHANGED = Signal(
providing_args=[
'user_id', # Integer User ID
'course_id', # Unicode string representing the course
'usage_id', # Unicode string indicating the courseware instance
'only_if_higher', # Boolean indicating whether updates should be
# made only if the new score is higher than previous.
'modified', # A datetime indicating when the database representation of
# this subsection override score was saved.
'score_deleted', # Boolean indicating whether the override score was
# deleted in this event.
'score_db_table', # The database table that houses the subsection override
# score that was created.
]
) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""set_admin_state_up_not_null_ml2
Revision ID: d06e871c0d5
Revises: 2447ad0e9585
Create Date: 2014-03-21 17:22:20.545186
"""
# revision identifiers, used by Alembic.
revision = 'd06e871c0d5'
down_revision = '4eca4a84f08a'
# This migration will be executed only if the neutron DB schema
# contains the tables for the ML2 plugin brocade driver.
# This migration will be skipped when executed in offline mode.
import sqlalchemy as sa
from neutron.db import migration
@migration.skip_if_offline
def upgrade():
    """Make ml2_brocadeports.admin_state_up non-nullable, when the table exists."""
    migration.alter_column_if_exists(
        'ml2_brocadeports',
        'admin_state_up',
        nullable=False,
        existing_type=sa.Boolean,
    )
@migration.skip_if_offline
def downgrade():
    """Restore ml2_brocadeports.admin_state_up to nullable, when the table exists."""
    migration.alter_column_if_exists(
        'ml2_brocadeports',
        'admin_state_up',
        nullable=True,
        existing_type=sa.Boolean,
    )
from __future__ import print_function, unicode_literals, absolute_import, division
class Tableau(object):
    """Sparse tableau of linear expressions for a simplex-style solver.

    ``rows`` maps each basic variable to its LinearExpression; ``columns`` is
    the inverse index, mapping a parametric variable to the set of basic
    variables whose row expressions mention it.
    """

    def __init__(self):
        # Map of variable -> set of basic variables whose rows reference it.
        self.columns = {}
        # Map of basic variable -> LinearExpression.
        self.rows = {}
        # Basic variables whose rows are currently infeasible.
        self.infeasible_rows = set()
        # Basic variables that are external to the solver.
        self.external_rows = set()
        # Parametric variables that are external to the solver.
        self.external_parametric_vars = set()

    def __repr__(self):
        lines = [
            'Tableau info:',
            'Rows: %s (= %s constraints)' % (len(self.rows), len(self.rows) - 1),
            'Columns: %s' % len(self.columns),
            'Infeasible rows: %s' % len(self.infeasible_rows),
            'External basic variables: %s' % len(self.external_rows),
            'External parametric variables: %s' % len(self.external_parametric_vars),
        ]
        return '\n'.join(lines)

    def note_removed_variable(self, var, subject):
        """Record that ``var`` no longer appears in the row for ``subject``."""
        if subject:
            self.columns[var].remove(subject)

    def note_added_variable(self, var, subject):
        """Record that ``var`` now appears in the row for ``subject``."""
        if subject:
            self.columns.setdefault(var, set()).add(subject)

    def add_row(self, var, expr):
        """Install ``expr`` as the row for basic variable ``var`` and index its terms."""
        self.rows[var] = expr

        for term_var in expr.terms:
            self.columns.setdefault(term_var, set()).add(var)
            if term_var.is_external:
                self.external_parametric_vars.add(term_var)

        if var.is_external:
            self.external_rows.add(var)

    def remove_column(self, var):
        """Remove ``var`` from every row expression and from all indexes."""
        referencing_rows = self.columns.pop(var, None)

        if referencing_rows:
            for basic_var in referencing_rows:
                self.rows[basic_var].remove_variable(var)

        if var.is_external:
            self.external_rows.discard(var)
            self.external_parametric_vars.discard(var)

    def remove_row(self, var):
        """Remove and return the row expression for basic variable ``var``."""
        expr = self.rows.pop(var)

        for term_var in expr.terms.keys():
            referencing = self.columns[term_var]
            if referencing:
                referencing.remove(var)

        self.infeasible_rows.discard(var)

        if var.is_external:
            self.external_rows.discard(var)

        return expr

    def substitute_out(self, oldVar, expr):
        """Replace every occurrence of ``oldVar`` in the tableau with ``expr``."""
        for basic_var in self.columns[oldVar]:
            row = self.rows[basic_var]
            row.substitute_out(oldVar, expr, basic_var, self)
            # A restricted basic variable with a negative constant marks the row infeasible.
            if basic_var.is_restricted and row.constant < 0.0:
                self.infeasible_rows.add(basic_var)

        if oldVar.is_external:
            self.external_rows.add(oldVar)
            self.external_parametric_vars.discard(oldVar)

        del self.columns[oldVar]
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
const (
	// vdsoArrayMax is the byte-size of a maximally sized array on this architecture.
	// See cmd/compile/internal/arm/galign.go arch.MAXWIDTH initialization, but must also
	// be constrained to max +ve int.
	vdsoArrayMax = 1<<31 - 1
)

// vdsoLinuxVersion is the symbol-version key the runtime looks up in the
// vDSO. The hex value is presumably the precomputed hash of "LINUX_2.6" —
// NOTE(review): confirm against the vDSO lookup code in vdso_linux.go.
var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6}

// vdsoSymbolKeys lists the vDSO symbols to resolve (name plus hash values)
// and the variables that receive the resolved addresses.
var vdsoSymbolKeys = []vdsoSymbolKey{
	{"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym},
}
// initialize to fall back to syscall
var vdsoClockgettimeSym uintptr = 0 | go | github | https://github.com/golang/go | src/runtime/vdso_linux_arm.go |
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_what_rast.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
This Python module handles output for v.what.rast.* GRASS7 modules.
"""
__author__ = 'Médéric Ribreux'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
def removeOutput(alg):
    """Run the v.what.rast command with its 'output' temporarily detached.

    The 'output' entry is removed before the GRASS command is generated and
    restored afterwards, so the command itself never sees it.
    """
    saved = alg.getOutputFromName(u'output')
    alg.removeOutputFromName(u'output')
    alg.processCommand()
    alg.addOutput(saved)
def outputInput(alg):
    """Make output the initial point/polygon layer"""
    # Append a v.out.ogr command that exports the imported GRASS layer
    # (the 'map' parameter) as a Shapefile at the user's 'output' path.
    output = alg.getOutputValue(u'output')
    # NOTE(review): the [:-4] slice below assumes 'output' ends with a
    # 4-character extension (".shp") — confirm against callers.
    command = u"v.out.ogr -c type=auto -s -e input={} output=\"{}\" format=ESRI_Shapefile output_layer={}".format(
        alg.exportedLayers[alg.getParameterValue(u'map')],
        os.path.dirname(output),
        os.path.basename(output)[:-4]
    )
    alg.commands.append(command)
alg.outputCommands.append(command) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
from st2common.runners.base import ActionRunner
from st2tests.base import RunnerTestCase
from winrm_runner import winrm_command_runner
from winrm_runner.winrm_base import WinRmBaseRunner
class WinRmCommandRunnerTestCase(RunnerTestCase):
    """Unit tests for the WinRM command runner."""

    def setUp(self):
        # Bug fix: the original called super(...).setUpClass() here —
        # invoking the *class-level* fixture from the per-test fixture.
        # The per-test parent fixture is setUp().
        super(WinRmCommandRunnerTestCase, self).setUp()
        self._runner = winrm_command_runner.get_runner()

    def test_init(self):
        # The runner constructor must produce a properly-typed runner and
        # record the given runner id.
        runner = winrm_command_runner.WinRmCommandRunner("abcdef")
        self.assertIsInstance(runner, WinRmBaseRunner)
        self.assertIsInstance(runner, ActionRunner)
        self.assertEqual(runner.runner_id, "abcdef")

    @mock.patch("winrm_runner.winrm_command_runner.WinRmCommandRunner.run_cmd")
    def test_run(self, mock_run_cmd):
        # run() should delegate the 'cmd' runner parameter to run_cmd()
        # and return its result unchanged.
        mock_run_cmd.return_value = "expected"
        self._runner.runner_parameters = {"cmd": "ipconfig /all"}
        result = self._runner.run({})
        self.assertEqual(result, "expected")
mock_run_cmd.assert_called_with("ipconfig /all") | unknown | codeparrot/codeparrot-clean | ||
"""Classes to deal with output of simulation data.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Holds classes to deal with the output of different properties, trajectories
and the restart files.
Classes:
PropertyOutput: Deals with outputting properties.
TrajectoryOutput: Deals with outputting trajectories.
CheckpointOutput: Deals with outputting restart files.
"""
import os
import numpy as np
import ipi.inputs.simulation
from ipi.utils.depend import *
from ipi.utils.io.io_xml import *
from ipi.engine.properties import getkey
__all__ = [ 'PropertyOutput', 'TrajectoryOutput', 'CheckpointOutput' ]
class PropertyOutput(dobject):
"""Class dealing with outputting a set of properties to file.
Does not do any calculation, just manages opening a file, getting data
from a Properties object and outputting with the desired stride.
Attributes:
filename: The name of the file to output to.
outlist: A list of the properties to be output.
stride: The number of steps that should be taken between outputting the
data to file.
flush: How often we should flush to disk.
nout: Number of steps since data was last flushed.
out: The output stream on which to output the properties.
simul: The simulation object to get the data to be output from.
"""
def __init__(self, filename="out", stride=1, flush=1, outlist=None):
"""Initializes a property output stream opening the corresponding
file name.
Also writes out headers.
Args:
filename: A string giving the name of the file to be output to.
stride: An integer giving how many steps should be taken between
outputting the data to file.
flush: Number of writes to file between flushing data.
outlist: A list of all the properties that should be output.
"""
if outlist is None:
outlist = np.zeros(0,np.dtype('|S1024'))
self.filename = filename
self.outlist = np.asarray(outlist,np.dtype('|S1024'))
self.stride = stride
self.flush = flush
self.nout = 0
self.out = None
def bind(self, simul):
"""Binds output proxy to simulation object.
Args:
simul: A simulation object to be bound.
"""
self.simul = simul
# Checks as soon as possible if some asked-for properties are
# missing or mispelled
for what in self.outlist:
key = getkey(what)
if not key in self.simul.properties.property_dict.keys():
print "Computable properties list: ", self.simul.properties.property_dict.keys()
raise KeyError(key + " is not a recognized property")
self.open_stream()
def open_stream(self):
"""Opens the output stream."""
try:
self.out = open(self.filename, "a")
except:
raise ValueError("Could not open file " + self.filename + " for output")
# print nice header if information is available on the properties
if (self.simul.step == 0) :
icol = 1
for what in self.outlist:
ohead = "# "
key = getkey(what)
prop = self.simul.properties.property_dict[key]
if "size" in prop and prop["size"] > 1:
ohead += "cols. %3d-%-3d" % ( icol, icol+prop["size"] - 1 )
icol += prop["size"]
else:
ohead += "column %3d " % ( icol )
icol += 1
ohead += " --> %s " % (what)
if "help" in prop:
ohead += ": " + prop["help"]
self.out.write(ohead + "\n")
def close_stream():
"""Closes the output stream."""
self.out.close()
def write(self):
"""Outputs the required properties of the system.
Note that properties are outputted using the same format as for the
output to the xml checkpoint files, as specified in io_xml.
Raises:
KeyError: Raised if one of the properties specified in the output list
are not contained in the property_dict member of properties.
"""
if not (self.simul.step + 1) % self.stride == 0:
return
self.out.write(" ")
for what in self.outlist:
try:
quantity = self.simul.properties[what]
except KeyError:
raise KeyError(what + " is not a recognized property")
if not hasattr(quantity,"__len__") :
self.out.write(write_type(float, quantity) + " ")
else:
for el in quantity:
self.out.write(write_type(float, el) + " ")
self.out.write("\n")
self.nout += 1
if self.flush > 0 and self.nout >= self.flush :
self.out.flush()
os.fsync(self.out) # we REALLY want to print out! pretty please OS let us do it.
self.nout = 0
class TrajectoryOutput(dobject):
"""Class dealing with outputting atom-based properties as a
trajectory file.
Does not do any calculation, just manages opening a file, getting data
from a Trajectories object and outputting with the desired stride.
Attributes:
filename: The (base) name of the file to output to.
format: The format of the trajectory file to be created.
what: The trajectory that needs to be output.
stride: The number of steps that should be taken between outputting the
data to file.
out: The output stream on which to output the trajectories.
flush: How often we should flush to disk.
nout: Number of steps since data was last flushed.
ibead: Index of the replica to print the trajectory of.
cell_units: The units that the cell parameters are given in.
simul: The simulation object to get the data to be output from.
"""
def __init__(self, filename="out", stride=1, flush=1, what="", format="xyz", cell_units="atomic_unit", ibead=-1):
""" Initializes a property output stream opening the corresponding
file name.
Also writes out headers.
Args:
filename: A string giving the name of the file to be output to.
stride: An integer giving how many steps should be taken between
outputting the data to file.
flush: How often we should flush to disk
what: A string specifying what trajectory should be output.
format: A string specifying the type of trajectory file to be created.
cell_units: A string specifying the units that the cell parameters are
given in.
ibead: If positive, prints out only the selected bead. If negative, prints out one file per bead.
"""
self.filename = filename
self.what = what
self.stride = stride
self.flush = flush
self.ibead = ibead
self.format = format
self.cell_units = cell_units
self.out = None
self.nout = 0
def bind(self, simul):
"""Binds output proxy to simulation object.
Args:
simul: A simulation object to be bound.
"""
self.simul = simul
# Checks as soon as possible if some asked-for trajs are missing or mispelled
key = getkey(self.what)
if not key in self.simul.trajs.traj_dict.keys():
print "Computable trajectories list: ", self.simul.trajs.traj_dict.keys()
raise KeyError(key + " is not a recognized output trajectory")
self.open_stream()
def open_stream(self):
"""Opens the output stream(s)."""
if getkey(self.what) in [ "positions", "velocities", "forces", "extras" ]:
# must write out trajectories for each bead, so must create b streams
self.out = []
for b in range(self.simul.beads.nbeads):
# zero-padded bead number
padb = ( ("%0" + str(int(1 + np.floor(np.log(self.simul.beads.nbeads)/np.log(10)))) + "d") % (b) )
try:
if (self.ibead < 0 or self.ibead == b):
if getkey(self.what) == "extras":
self.out.append(open(self.filename + "_" + padb, "a"))
else:
self.out.append(open(self.filename + "_" + padb + "." + self.format, "a"))
else:
self.out.append(None) # creates null outputs if a
# single bead output is chosen
except:
raise ValueError("Could not open file " + self.filename + "_" + padb + "." + self.format + " for output")
else:
try:
self.out = ( open(self.filename + "." + self.format, "a") )
except:
raise ValueError("Could not open file " + self.filename + "." + self.format + " for output")
def close_stream():
"""Closes the output stream."""
if hasattr(self.out, "__getitem__"):
for o in self.out:
o.close()
else:
self.out.close()
def write(self):
"""Writes out the required trajectories."""
if not (self.simul.step + 1) % self.stride == 0:
return
doflush = False
self.nout += 1
if self.flush > 0 and self.nout >= self.flush :
doflush = True
self.nout = 0
# quick-and-dirty way to check if a trajectory is "global" or per-bead
# Checks to see if there is a list of files or just a single file.
if hasattr(self.out, "__getitem__"):
if self.ibead < 0:
for b in range(len(self.out)):
self.simul.trajs.print_traj(self.what, self.out[b], b, format=self.format, cell_units=self.cell_units, flush=doflush)
elif self.ibead < len(self.out):
self.simul.trajs.print_traj(self.what, self.out[self.ibead], self.ibead, format=self.format, cell_units=self.cell_units, flush=doflush)
else:
raise ValueError("Selected bead index " + str(self.ibead) + " does not exist for trajectory " + self.what)
else:
self.simul.trajs.print_traj(self.what, self.out, b=0, format=self.format, cell_units=self.cell_units, flush=doflush)
class CheckpointOutput(dobject):
    """Class dealing with outputting checkpoints.

    Saves the complete status of the simulation at regular intervals.

    Attributes:
       filename: The (base) name of the file to output to.
       step: the number of times a checkpoint has been written out.
       stride: The number of steps that should be taken between outputting the
          data to file.
       overwrite: If True, the checkpoint file is overwritten at each output.
          If False, will output to 'filename_step'. Note that no check is done
          on whether 'filename_step' exists already.
       simul: The simulation object to get the data to be output from.
       status: An input simulation object used to write out the checkpoint file.
    """

    def __init__(self, filename="restart", stride=1000, overwrite=True, step=0):
        """Initializes a checkpoint output proxy.

        Args:
           filename: A string giving the name of the file to be output to.
           stride: An integer giving how many steps should be taken between
              outputting the data to file.
           overwrite: If True, the checkpoint file is overwritten at each output.
              If False, will output to 'filename_step'. Note that no check is
              done on whether 'filename_step' exists already.
           step: The number of checkpoint files that have been created so far.
        """
        self.filename = filename
        self.step = step
        self.stride = stride
        self.overwrite = overwrite

    def bind(self, simul):
        """Binds output proxy to simulation object.

        Args:
           simul: A simulation object to be bound.
        """
        self.simul = simul
        # `status` holds a serializable snapshot of the bound simulation.
        self.status = ipi.inputs.simulation.InputSimulation()
        self.status.store(simul)

    def store(self):
        """Stores the current simulation status.

        Used so that, if halfway through a step a kill signal is received,
        we can output a checkpoint file corresponding to the beginning of the
        current step, which is the last time that both the velocities and
        positions would have been consistent.
        """
        self.status.store(self.simul)

    def write(self, store=True):
        """Writes out the required trajectories.

        Used for both the checkpoint files and the soft-exit restart file.
        We have slightly different behaviour for these two different types of
        checkpoint file, as the soft-exit files have their store() function
        called automatically, and we do not want this to be updated as the
        status of the simulation after a soft-exit call is unlikely to be in
        a consistent state. On the other hand, the standard checkpoint files
        are not automatically updated in this way, and we must manually store
        the current state of the system before writing them.

        Args:
           store: A boolean saying whether the state of the system should be
              stored before writing the checkpoint file.
        """
        # Only write every 'stride' steps.
        if not (self.simul.step + 1) % self.stride == 0:
            return
        if self.overwrite:
            filename = self.filename
        else:
            filename = self.filename + "_" + str(self.step)
        if store:
            # advances the step counter before saving, so next time the
            # correct index will be loaded.
            self.step += 1
            self.store()
        check_file = open(filename, "w")
        check_file.write(self.status.write(name="simulation"))
check_file.close() | unknown | codeparrot/codeparrot-clean | ||
# $Id: __init__.py 7621 2013-03-04 13:20:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This package contains directive implementation modules.
"""
__docformat__ = 'reStructuredText'
import codecs
import re
import sys
from docutils import nodes
from docutils.parsers.rst.languages import en as _fallback_language_module
if sys.version_info < (2,5):
from docutils._compat import __import__
_directive_registry = {
'attention': ('admonitions', 'Attention'),
'caution': ('admonitions', 'Caution'),
'code': ('body', 'CodeBlock'),
'danger': ('admonitions', 'Danger'),
'error': ('admonitions', 'Error'),
'important': ('admonitions', 'Important'),
'note': ('admonitions', 'Note'),
'tip': ('admonitions', 'Tip'),
'hint': ('admonitions', 'Hint'),
'warning': ('admonitions', 'Warning'),
'admonition': ('admonitions', 'Admonition'),
'sidebar': ('body', 'Sidebar'),
'topic': ('body', 'Topic'),
'line-block': ('body', 'LineBlock'),
'parsed-literal': ('body', 'ParsedLiteral'),
'math': ('body', 'MathBlock'),
'rubric': ('body', 'Rubric'),
'epigraph': ('body', 'Epigraph'),
'highlights': ('body', 'Highlights'),
'pull-quote': ('body', 'PullQuote'),
'compound': ('body', 'Compound'),
'container': ('body', 'Container'),
#'questions': ('body', 'question_list'),
'table': ('tables', 'RSTTable'),
'csv-table': ('tables', 'CSVTable'),
'list-table': ('tables', 'ListTable'),
'image': ('images', 'Image'),
'figure': ('images', 'Figure'),
'contents': ('parts', 'Contents'),
'sectnum': ('parts', 'Sectnum'),
'header': ('parts', 'Header'),
'footer': ('parts', 'Footer'),
#'footnotes': ('parts', 'footnotes'),
#'citations': ('parts', 'citations'),
'target-notes': ('references', 'TargetNotes'),
'meta': ('html', 'Meta'),
#'imagemap': ('html', 'imagemap'),
'raw': ('misc', 'Raw'),
'include': ('misc', 'Include'),
'replace': ('misc', 'Replace'),
'unicode': ('misc', 'Unicode'),
'class': ('misc', 'Class'),
'role': ('misc', 'Role'),
'default-role': ('misc', 'DefaultRole'),
'title': ('misc', 'Title'),
'date': ('misc', 'Date'),
'restructuredtext-test-directive': ('misc', 'TestDirective'),}
"""Mapping of directive name to (module name, class name). The
directive name is canonical & must be lowercase. Language-dependent
names are defined in the ``language`` subpackage."""
_directives = {}
"""Cache of imported directives."""
def directive(directive_name, language_module, document):
    """
    Locate and return a directive function from its language-dependent name.
    If not found in the current language, check English. Return None if the
    named directive cannot be found.

    Returns a ``(directive, messages)`` pair where ``messages`` collects any
    system messages generated while resolving the name.
    """
    normname = directive_name.lower()
    messages = []
    msg_text = []
    # Fast path: already imported and cached in the module-level registry.
    if normname in _directives:
        return _directives[normname], messages
    canonicalname = None
    try:
        canonicalname = language_module.directives[normname]
    except AttributeError as error:
        msg_text.append('Problem retrieving directive entry from language '
                        'module %r: %s.' % (language_module, error))
    except KeyError:
        msg_text.append('No directive entry for "%s" in module "%s".'
                        % (directive_name, language_module.__name__))
    if not canonicalname:
        # Fall back to the English mapping, then to the name itself.
        try:
            canonicalname = _fallback_language_module.directives[normname]
            msg_text.append('Using English fallback for directive "%s".'
                            % directive_name)
        except KeyError:
            msg_text.append('Trying "%s" as canonical directive name.'
                            % directive_name)
            # The canonical name should be an English name, but just in case:
            canonicalname = normname
    if msg_text:
        message = document.reporter.info(
            '\n'.join(msg_text), line=document.current_line)
        messages.append(message)
    try:
        modulename, classname = _directive_registry[canonicalname]
    except KeyError:
        # Error handling done by caller.
        return None, messages
    try:
        # Relative import (level=1) of the sibling implementation module.
        module = __import__(modulename, globals(), locals(), level=1)
    except ImportError as detail:
        messages.append(document.reporter.error(
            'Error importing directive module "%s" (directive "%s"):\n%s'
            % (modulename, directive_name, detail),
            line=document.current_line))
        return None, messages
    try:
        directive = getattr(module, classname)
        # Cache under the normalized (language-dependent) name.
        _directives[normname] = directive
    except AttributeError:
        messages.append(document.reporter.error(
            'No directive class "%s" in module "%s" (directive "%s").'
            % (classname, modulename, directive_name),
            line=document.current_line))
        return None, messages
    return directive, messages
def register_directive(name, directive):
    """
    Register a nonstandard application-defined directive function.
    Language lookups are not needed for such functions.
    """
    # Insert straight into the module-level cache consulted by directive().
    _directives[name] = directive
def flag(argument):
    """
    Check for a valid flag option (no argument) and return ``None``.
    (Directive option conversion function.)

    Raise ``ValueError`` if an argument is found.
    """
    # Any non-empty, non-whitespace argument is an error for a flag option.
    if argument and argument.strip():
        raise ValueError('no argument is allowed; "%s" supplied' % argument)
    return None
def unchanged_required(argument):
    """
    Return the argument text, unchanged.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    return argument  # unchanged!
def unchanged(argument):
    """
    Return the argument text, unchanged.
    (Directive option conversion function.)

    No argument implies empty string ("").
    """
    return '' if argument is None else argument
def path(argument):
    """
    Return the path argument unwrapped (with newlines removed).
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    # Join the stripped pieces of every physical line into one path string.
    return ''.join(piece.strip() for piece in argument.splitlines())
def uri(argument):
    """
    Return the URI argument with whitespace removed.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    # split() with no separator drops every run of whitespace.
    return ''.join(argument.split())
def nonnegative_int(argument):
    """
    Check for a nonnegative integer argument; raise ``ValueError`` if not.
    (Directive option conversion function.)
    """
    # int() itself raises ValueError for non-numeric text.
    value = int(argument)
    if value >= 0:
        return value
    raise ValueError('negative value; must be positive or zero')
def percentage(argument):
    """
    Check for an integer percentage value with optional percent sign.
    """
    # Strip trailing spaces/percent sign when given a string; non-string
    # input (no .rstrip) falls through to the integer check unchanged.
    try:
        argument = argument.rstrip(' %')
    except AttributeError:
        pass
    return nonnegative_int(argument)
length_units = ['em', 'ex', 'px', 'in', 'cm', 'mm', 'pt', 'pc']
def get_measure(argument, units):
    """
    Check for a positive argument of one of the units and return a
    normalized string of the form "<value><unit>" (without space in
    between).

    To be called from directive option conversion functions.
    """
    pattern = r'^([0-9.]+) *(%s)$' % '|'.join(units)
    match = re.match(pattern, argument)
    valid = match is not None
    if valid:
        # The numeric part must also parse as a float (rejects e.g. "..").
        try:
            float(match.group(1))
        except ValueError:
            valid = False
    if not valid:
        raise ValueError(
            'not a positive measure of one of the following units:\n%s'
            % ' '.join(['"%s"' % i for i in units]))
    return match.group(1) + match.group(2)
def length_or_unitless(argument):
    # Accept any CSS length unit, or a bare (unitless) number.
    return get_measure(argument, length_units + [''])
def length_or_percentage_or_unitless(argument, default=''):
    """
    Return normalized string of a length or percentage unit.

    Add <default> if there is no unit. Raise ValueError if the argument is not
    a positive measure of one of the valid CSS units (or without unit).

    >>> length_or_percentage_or_unitless('3 pt')
    '3pt'
    >>> length_or_percentage_or_unitless('3%', 'em')
    '3%'
    >>> length_or_percentage_or_unitless('3')
    '3'
    >>> length_or_percentage_or_unitless('3', 'px')
    '3px'
    """
    try:
        return get_measure(argument, length_units + ['%'])
    except ValueError:
        try:
            # Unitless value: normalize and append the caller's default unit.
            return get_measure(argument, ['']) + default
        except ValueError:
            # raise ValueError with list of valid units:
            return get_measure(argument, length_units + ['%'])
def class_option(argument):
    """
    Convert the argument into a list of ID-compatible strings and return it.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    class_names = []
    for name in argument.split():
        # nodes.make_id returns '' for names with no usable characters.
        class_name = nodes.make_id(name)
        if not class_name:
            raise ValueError('cannot make "%s" into a class name' % name)
        class_names.append(class_name)
    return class_names
unicode_pattern = re.compile(
r'(?:0x|x|\\x|U\+?|\\u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
def unicode_code(code):
    r"""
    Convert a Unicode character code to a Unicode character.
    (Directive option conversion function.)

    Codes may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
    ``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style
    numeric character entities (e.g. ``&#x262E;``). Other text remains as-is.
    Raise ValueError for illegal Unicode code values.
    """
    try:
        # Plain decimal number.
        if code.isdigit():
            return chr(int(code))
        # Hexadecimal forms recognized by unicode_pattern.
        match = unicode_pattern.match(code)
        if match is None:
            return code  # not a code at all; pass through unchanged
        return chr(int(match.group(1) or match.group(2), 16))
    except OverflowError as detail:
        raise ValueError('code too large (%s)' % detail)
def single_char_or_unicode(argument):
    """
    A single character is returned as-is. Unicode characters codes are
    converted as in `unicode_code`. (Directive option conversion function.)
    """
    char = unicode_code(argument)
    if len(char) <= 1:
        return char
    raise ValueError('%r invalid; must be a single character or '
                     'a Unicode code' % char)
def single_char_or_whitespace_or_unicode(argument):
    """
    As with `single_char_or_unicode`, but "tab" and "space" are also
    supported. (Directive option conversion function.)
    """
    # Map the two named whitespace options; anything else goes through the
    # single-character/Unicode-code conversion.
    named = {'tab': '\t', 'space': ' '}
    if argument in named:
        return named[argument]
    return single_char_or_unicode(argument)
def positive_int(argument):
    """
    Converts the argument into an integer. Raises ValueError for negative,
    zero, or non-integer values. (Directive option conversion function.)
    """
    value = int(argument)
    if value >= 1:
        return value
    raise ValueError('negative or zero value; must be positive')
def positive_int_list(argument):
    """
    Converts a space- or comma-separated list of values into a Python list
    of integers.
    (Directive option conversion function.)

    Raises ValueError for non-positive-integer values.
    """
    # Comma-separated if any comma is present, otherwise whitespace-split
    # (split(None) == split()).
    sep = ',' if ',' in argument else None
    return [positive_int(entry) for entry in argument.split(sep)]
def encoding(argument):
    """
    Verifies the encoding argument by lookup.
    (Directive option conversion function.)

    Raises ValueError for unknown encodings.
    """
    # codecs.lookup succeeds for any codec Python knows about.
    try:
        codecs.lookup(argument)
    except LookupError:
        raise ValueError('unknown encoding: "%s"' % argument)
    return argument
def choice(argument, values):
    """
    Directive option utility function, supplied to enable options whose
    argument must be a member of a finite set of possible values (must be
    lower case). A custom conversion function must be written to use it. For
    example::

        from docutils.parsers.rst import directives
        def yesno(argument):
            return directives.choice(argument, ('yes', 'no'))

    Raise ``ValueError`` if no argument is found or if the argument's value
    is not valid (not an entry in the supplied list).
    """
    # Non-string arguments (including None) have no .lower() and are
    # reported as "must supply an argument".
    try:
        value = argument.lower().strip()
    except AttributeError:
        raise ValueError('must supply an argument; choose from %s'
                         % format_values(values))
    if value not in values:
        raise ValueError('"%s" unknown; choose from %s'
                         % (argument, format_values(values)))
    return value
def format_values(values):
    # Render a choice list as: "a", "b", or "c" (quoted, comma-separated,
    # with an "or" before the final entry).
    return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
values[-1]) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_facts
version_added: "2.2"
author: "Nathaniel Case (@qalthos)"
short_description: Collect facts from remote devices running VyOS
description:
- Collects a base set of device facts from a remote device that
is running VyOS. This module prepends all of the
base network fact keys with U(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, default, config, and neighbors. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: "!config"
"""
EXAMPLES = """
- name: collect all facts from the device
vyos_facts:
gather_subset: all
- name: collect only the config and default facts
vyos_facts:
gather_subset: config
- name: collect everything exception the config
vyos_facts:
gather_subset: "!config"
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.vyos.vyos import run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
class FactsBase(object):
    """Base class for VyOS fact collectors.

    Subclasses declare COMMANDS; populate() runs them on the device and
    stores the raw replies in self.responses for parsing.
    """

    COMMANDS = frozenset()

    def __init__(self, module):
        self.module = module
        self.facts = {}
        self.responses = None

    def populate(self):
        # Execute this collector's command set on the remote device.
        self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
    """Collect baseline device facts: version, serial number, model and
    hostname.
    """

    COMMANDS = [
        'show version',
        'show host name',
    ]

    def populate(self):
        super(Default, self).populate()
        output = self.responses[0]
        self.facts['version'] = self.parse_version(output)
        self.facts['serialnum'] = self.parse_serialnum(output)
        self.facts['model'] = self.parse_model(output)
        # 'show host name' prints the hostname verbatim.
        self.facts['hostname'] = self.responses[1]

    def parse_version(self, data):
        # e.g. "Version:      VyOS 1.1.7"
        found = re.search(r'Version:\s*(.*)', data)
        return found.group(1) if found else None

    def parse_model(self, data):
        found = re.search(r'HW model:\s*(\S+)', data)
        return found.group(1) if found else None

    def parse_serialnum(self, data):
        found = re.search(r'HW S/N:\s+(\S+)', data)
        return found.group(1) if found else None
class Config(FactsBase):
    """Collect the running configuration and the commit history."""

    COMMANDS = [
        'show configuration commands',
        'show system commit',
    ]

    def populate(self):
        super(Config, self).populate()

        # Raw output of both commands, exposed as ansible_net_config.
        self.facts['config'] = self.responses

        commits = self.responses[1]
        entries = list()
        entry = None

        for line in commits.split('\n'):
            match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
            if match:
                # A new revision line: flush the previous entry first.
                if entry:
                    entries.append(entry)

                entry = dict(revision=match.group(1),
                             datetime=match.group(2),
                             by=str(match.group(3)).strip(),
                             via=str(match.group(4)).strip(),
                             comment=None)
            elif entry:
                # A continuation line carries the commit comment. Guarding
                # on `entry` avoids a crash when header/blank lines appear
                # before the first revision line.
                entry['comment'] = line.strip()

        # Flush the final entry; the loop only appends an entry when the
        # *next* revision line is seen, so the last one was dropped before.
        if entry:
            entries.append(entry)

        self.facts['commits'] = entries
class Neighbors(FactsBase):
    """Collect LLDP neighbor information, keyed by local interface."""

    COMMANDS = [
        'show lldp neighbors',
        'show lldp neighbors detail',
    ]

    def populate(self):
        super(Neighbors, self).populate()

        summary = self.responses[0]
        if 'LLDP not configured' not in summary:
            detail_blocks = self.parse(self.responses[1])
            self.facts['neighbors'] = self.parse_neighbors(detail_blocks)

    def parse(self, data):
        """Split the detail output into one multi-line string per neighbor."""
        blocks = list()
        current = None
        for line in data.split('\n'):
            if not line:
                continue
            if line.startswith('Interface'):
                # A new neighbor section begins; stash the finished one.
                if current:
                    blocks.append(current)
                current = line
            elif line[0] == ' ':
                # Indented lines continue the current section.
                current += '\n%s' % line
        if current:
            blocks.append(current)
        return blocks

    def parse_neighbors(self, data):
        """Group (host, port) dicts by the local interface name."""
        facts = dict()
        for block in data:
            iface = self.parse_interface(block)
            if iface not in facts:
                facts[iface] = list()
            facts[iface].append(dict(host=self.parse_host(block),
                                     port=self.parse_port(block)))
        return facts

    def parse_interface(self, data):
        # Each detail block is expected to open with "Interface: <name>,".
        found = re.search(r'^Interface:\s+(\S+),', data)
        return found.group(1)

    def parse_host(self, data):
        found = re.search(r'SysName:\s+(.+)$', data, re.M)
        return found.group(1) if found else None

    def parse_port(self, data):
        found = re.search(r'PortDescr:\s+(.+)$', data, re.M)
        return found.group(1) if found else None
# Map each gather_subset keyword to its collector class.
FACT_SUBSETS = dict(
    default=Default,
    neighbors=Neighbors,
    config=Config
)

# Valid values accepted by the gather_subset module option.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Entry point for the vyos_facts module.

    Resolves the requested ``gather_subset`` selection into collector
    classes, runs each collector, and exits with the merged facts
    namespaced under ``ansible_net_`` keys.
    """
    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )
    argument_spec.update(vyos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()

    requested = module.params['gather_subset']

    to_run = set()
    to_skip = set()

    for name in requested:
        if name == 'all':
            to_run.update(VALID_SUBSETS)
            continue

        if name.startswith('!'):
            # Leading '!' negates a subset; '!all' excludes everything.
            name = name[1:]
            if name == 'all':
                to_skip.update(VALID_SUBSETS)
                continue
            negated = True
        else:
            negated = False

        if name not in VALID_SUBSETS:
            module.fail_json(msg='Subset must be one of [%s], got %s' %
                             (', '.join(VALID_SUBSETS), name))

        if negated:
            to_skip.add(name)
        else:
            to_run.add(name)

    # An empty (or purely negative) selection means "everything".
    if not to_run:
        to_run.update(VALID_SUBSETS)
    to_run.difference_update(to_skip)
    # The default subset is always gathered, even when excluded.
    to_run.add('default')

    facts = dict()
    facts['gather_subset'] = list(to_run)

    # Instantiate every selected collector, then populate each one.
    collectors = [FACT_SUBSETS[name](module) for name in to_run]

    for collector in collectors:
        collector.populate()
        facts.update(collector.facts)

    # Namespace every fact key the way Ansible network modules expect.
    ansible_facts = dict()
    for key, value in iteritems(facts):
        ansible_facts['ansible_net_%s' % key] = value

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)


if __name__ == '__main__':
    main()
# frozen_string_literal: true
require "cases/helper"
require "models/topic"
require "models/reply"
require "models/warehouse_thing"
require "models/guid"
require "models/event"
require "models/dashboard"
require "models/uuid_item"
require "models/author"
require "models/person"
require "models/essay"
require "models/keyboard"
require "models/cpk"
# Abstract base class; the name-uniqueness rule is inherited by every
# concrete wizard subclass.
class Wizard < ActiveRecord::Base
  self.abstract_class = true

  validates :name, uniqueness: true
end
# Adds a city-uniqueness rule on top of the inherited name rule.
class IneptWizard < Wizard
  validates :city, uniqueness: true
end
# Concrete STI subclass used to verify validations are shared down the
# inheritance chain.
class Conjurer < IneptWizard
end
# Sibling STI subclass of Conjurer; uniqueness must hold across both.
class Thaumaturgist < IneptWizard
end
# Plain value object returned by ReplyWithTitleObject#title below.
class ReplyTitle; end
# Reply whose :title scope resolves to a non-column object, exercising
# uniqueness validation with a composed scope attribute.
class ReplyWithTitleObject < Reply
  validates :content, uniqueness: { scope: :title }

  def title
    ReplyTitle.new
  end
  alias heading title
end
# Topic associated to an Event through parent_id; no uniqueness rule.
class TopicWithEvent < Topic
  belongs_to :event, foreign_key: :parent_id
end
# Like TopicWithEvent, but each event may belong to at most one topic.
class TopicWithUniqEvent < Topic
  belongs_to :event, foreign_key: :parent_id

  validates_uniqueness_of :event
end
# Declares uniqueness and inclusion in a single validates call; paired
# with BigIntReverseTest to show validator ordering does not matter.
class BigIntTest < ActiveRecord::Base
  INT_MAX_VALUE = 2147483647
  self.table_name = "cars"
  validates :engines_count, uniqueness: true, inclusion: { in: 0..INT_MAX_VALUE }
end
# Same validations as BigIntTest but registered in the opposite order;
# the order is intentional, do not merge these calls.
class BigIntReverseTest < ActiveRecord::Base
  INT_MAX_VALUE = 2147483647
  self.table_name = "cars"
  validates :engines_count, inclusion: { in: 0..INT_MAX_VALUE }
  validates :engines_count, uniqueness: true
end
# Validates uniqueness of the primary key itself.
class CoolTopic < Topic
  validates :id, uniqueness: true
end
# Topic that saves itself again from an after_create hook; used to prove
# the uniqueness check does not collide with the record's own new row.
class TopicWithAfterCreate < Topic
  after_create :set_author

  def set_author
    update!(author_name: "#{title} #{id}")
  end
end
# Association uniqueness through a non-id primary key (keyboards.name).
class LessonWithUniqKeyboard < ActiveRecord::Base
  self.table_name = "lessons"
  belongs_to :keyboard, primary_key: :name, foreign_key: :name

  validates_uniqueness_of :keyboard
end
# Covers validates_uniqueness_of behavior: scopes (symbols, arrays,
# associations, composed objects), case sensitivity, conditional
# validation, STI inheritance, adapter collation differences, and
# primary-key edge cases.
class UniquenessValidationTest < ActiveRecord::TestCase
  INT_MAX_VALUE = 2147483647
  fixtures :topics, "warehouse-things"
  # Remove any validators these tests add to the shared Topic/Reply models.
  repair_validations(Topic, Reply)
  def test_validate_uniqueness
    Topic.validates_uniqueness_of(:title)
    t = Topic.new("title" => "I'm uniqué!")
    assert t.save, "Should save t as unique"
    t.content = "Remaining unique"
    assert t.save, "Should still save t as unique"
    t2 = Topic.new("title" => "I'm uniqué!")
    assert_not t2.valid?, "Shouldn't be valid"
    assert_not t2.save, "Shouldn't save t2 as unique"
    assert_equal ["has already been taken"], t2.errors[:title]
    t2.title = "Now I am really also unique"
    assert t2.save, "Should now save t2 as unique"
  end
  # Validators added on the singleton class apply to that instance only.
  def test_validate_uniqueness_with_singleton_class
    Topic.create!(title: "abc")
    t2 = Topic.new(title: "abc")
    t2.singleton_class.validates(:title, uniqueness: true)
    assert_not_predicate t2, :valid?
    t3 = Topic.new(title: "abc")
    assert_predicate t3, :valid?
  end
  def test_validate_uniqueness_with_alias_attribute
    Topic.alias_attribute :new_title, :title
    Topic.validates_uniqueness_of(:new_title)
    topic = Topic.new(new_title: "abc")
    assert_predicate topic, :valid?
  end
  # nil participates in uniqueness checks unless allow_nil is given.
  def test_validates_uniqueness_with_nil_value
    Topic.validates_uniqueness_of(:title)
    t = Topic.new("title" => nil)
    assert t.save, "Should save t as unique"
    t2 = Topic.new("title" => nil)
    assert_not t2.valid?, "Shouldn't be valid"
    assert_not t2.save, "Shouldn't save t2 as unique"
    assert_equal ["has already been taken"], t2.errors[:title]
  end
  def test_validates_uniqueness_with_validates
    Topic.validates :title, uniqueness: true
    Topic.create!("title" => "abc")
    t2 = Topic.new("title" => "abc")
    assert_not_predicate t2, :valid?
    assert t2.errors[:title]
  end
  def test_validate_uniqueness_when_integer_out_of_range
    entry = BigIntTest.create(engines_count: INT_MAX_VALUE + 1)
    assert_equal ["is not included in the list"], entry.errors[:engines_count]
  end
  def test_validate_uniqueness_when_integer_out_of_range_show_order_does_not_matter
    entry = BigIntReverseTest.create(engines_count: INT_MAX_VALUE + 1)
    assert_equal ["is not included in the list"], entry.errors[:engines_count]
  end
  def test_validates_uniqueness_with_newline_chars
    Topic.validates_uniqueness_of(:title, case_sensitive: false)
    t = Topic.new("title" => "new\nline")
    assert t.save, "Should save t as unique"
  end
  def test_validate_uniqueness_with_scope
    Reply.validates_uniqueness_of(:content, scope: "parent_id")
    t = Topic.create("title" => "I'm unique!")
    r1 = t.replies.create "title" => "r1", "content" => "hello world"
    assert_predicate r1, :valid?, "Saving r1"
    r2 = t.replies.create "title" => "r2", "content" => "hello world"
    assert_not r2.valid?, "Saving r2 first time"
    r2.content = "something else"
    assert r2.save, "Saving r2 second time"
    t2 = Topic.create("title" => "I'm unique too!")
    r3 = t2.replies.create "title" => "r3", "content" => "hello world"
    assert_predicate r3, :valid?, "Saving r3"
  end
  def test_validate_uniqueness_with_aliases
    Reply.validates_uniqueness_of(:new_content, scope: :new_parent_id)
    t = Topic.create(title: "I'm unique!")
    r1 = t.replies.create(title: "r1", content: "hello world")
    assert_predicate r1, :valid?, "Saving r1"
    r2 = t.replies.create(title: "r2", content: "hello world")
    assert_not_predicate r2, :valid?, "Saving r2 first time"
    r2.content = "something else"
    assert r2.save, "Saving r2 second time"
    t2 = Topic.create("title" => "I'm unique too!")
    r3 = t2.replies.create(title: "r3", content: "hello world")
    assert_predicate r3, :valid?, "Saving r3"
  end
  def test_validate_uniqueness_with_scope_invalid_syntax
    error = assert_raises(ArgumentError) do
      Reply.validates_uniqueness_of(:content, scope: { parent_id: false })
    end
    assert_match(/Pass a symbol or an array of symbols instead/, error.to_s)
  end
  def test_validate_uniqueness_with_object_scope
    Reply.validates_uniqueness_of(:content, scope: :topic)
    t = Topic.create("title" => "I'm unique!")
    r1 = t.replies.create "title" => "r1", "content" => "hello world"
    assert_predicate r1, :valid?, "Saving r1"
    r2 = t.replies.create "title" => "r2", "content" => "hello world"
    assert_not r2.valid?, "Saving r2 first time"
  end
  # Polymorphic scope: same essay name under different writer types is fine.
  def test_validate_uniqueness_with_polymorphic_object_scope
    repair_validations(Essay) do
      Essay.validates_uniqueness_of(:name, scope: :writer)
      a = Author.create(name: "Sergey")
      p = Person.create(first_name: "Sergey")
      e1 = a.essays.create(name: "Essay")
      assert_predicate e1, :valid?, "Saving e1"
      e2 = p.essays.create(name: "Essay")
      assert_predicate e2, :valid?, "Saving e2"
    end
  end
  def test_validate_uniqueness_with_composed_attribute_scope
    r1 = ReplyWithTitleObject.create "title" => "r1", "content" => "hello world"
    assert_predicate r1, :valid?, "Saving r1"
    r2 = ReplyWithTitleObject.create "title" => "r1", "content" => "hello world"
    assert_not r2.valid?, "Saving r2 first time"
  end
  def test_validate_uniqueness_with_object_arg
    Reply.validates_uniqueness_of(:topic)
    t = Topic.create("title" => "I'm unique!")
    r1 = t.replies.create "title" => "r1", "content" => "hello world"
    assert_predicate r1, :valid?, "Saving r1"
    r2 = t.replies.create "title" => "r2", "content" => "hello world"
    assert_not r2.valid?, "Saving r2 first time"
  end
  def test_validate_uniqueness_scoped_to_defining_class
    t = Topic.create("title" => "What, me worry?")
    r1 = t.unique_replies.create "title" => "r1", "content" => "a barrel of fun"
    assert_predicate r1, :valid?, "Saving r1"
    r2 = t.silly_unique_replies.create "title" => "r2", "content" => "a barrel of fun"
    assert_not r2.valid?, "Saving r2"
    # Should succeed as validates_uniqueness_of only applies to
    # UniqueReply and its subclasses
    r3 = t.replies.create "title" => "r2", "content" => "a barrel of fun"
    assert_predicate r3, :valid?, "Saving r3"
  end
  def test_validate_uniqueness_with_scope_array
    Reply.validates_uniqueness_of(:author_name, scope: [:author_email_address, :parent_id])
    t = Topic.create("title" => "The earth is actually flat!")
    r1 = t.replies.create "author_name" => "jeremy", "author_email_address" => "jeremy@rubyonrails.com", "title" => "You're joking!", "content" => "Silly reply"
    assert_predicate r1, :valid?, "Saving r1"
    r2 = t.replies.create "author_name" => "jeremy", "author_email_address" => "jeremy@rubyonrails.com", "title" => "You're joking!", "content" => "Silly reply again..."
    assert_not r2.valid?, "Saving r2. Double reply by same author."
    r2.author_email_address = "jeremy_alt_email@rubyonrails.com"
    assert r2.save, "Saving r2 the second time."
    r3 = t.replies.create "author_name" => "jeremy", "author_email_address" => "jeremy_alt_email@rubyonrails.com", "title" => "You're wrong", "content" => "It's cubic"
    assert_not r3.valid?, "Saving r3"
    r3.author_name = "jj"
    assert r3.save, "Saving r3 the second time."
    r3.author_name = "jeremy"
    assert_not r3.save, "Saving r3 the third time."
  end
  def test_validate_case_insensitive_uniqueness
    Topic.validates_uniqueness_of(:title, :parent_id, case_sensitive: false, allow_nil: true)
    t = Topic.new("title" => "I'm unique!", :parent_id => 2)
    assert t.save, "Should save t as unique"
    t.content = "Remaining unique"
    assert t.save, "Should still save t as unique"
    t2 = Topic.new("title" => "I'm UNIQUE!", :parent_id => 1)
    assert_not t2.valid?, "Shouldn't be valid"
    assert_not t2.save, "Shouldn't save t2 as unique"
    assert_predicate t2.errors[:title], :any?
    assert_predicate t2.errors[:parent_id], :any?
    assert_equal ["has already been taken"], t2.errors[:title]
    t2.title = "I'm truly UNIQUE!"
    assert_not t2.valid?, "Shouldn't be valid"
    assert_not t2.save, "Shouldn't save t2 as unique"
    assert_empty t2.errors[:title]
    assert_predicate t2.errors[:parent_id], :any?
    t2.parent_id = 4
    assert t2.save, "Should now save t2 as unique"
    t2.parent_id = nil
    t2.title = nil
    assert_predicate t2, :valid?, "should validate with nil"
    assert t2.save, "should save with nil"
    t_utf8 = Topic.new("title" => "Я тоже уникальный!")
    assert t_utf8.save, "Should save t_utf8 as unique"
    # If the database doesn't use a UTF-8 character set, this test fails
    if Topic.all.merge!(select: "LOWER(title) AS title").find(t_utf8.id).title == "я тоже уникальный!"
      t2_utf8 = Topic.new("title" => "я тоже УНИКАЛЬНЫЙ!")
      assert_not t2_utf8.valid?, "Shouldn't be valid"
      assert_not t2_utf8.save, "Shouldn't save t2_utf8 as unique"
    end
  end
  # % and _ are SQL LIKE wildcards; they must be treated literally.
  def test_validate_case_sensitive_uniqueness_with_special_sql_like_chars
    Topic.validates_uniqueness_of(:title, case_sensitive: true)
    t = Topic.new("title" => "I'm unique!")
    assert t.save, "Should save t as unique"
    t2 = Topic.new("title" => "I'm %")
    assert t2.save, "Should save t2 as unique"
    t3 = Topic.new("title" => "I'm uniqu_!")
    assert t3.save, "Should save t3 as unique"
  end
  def test_validate_case_insensitive_uniqueness_with_special_sql_like_chars
    Topic.validates_uniqueness_of(:title, case_sensitive: false)
    t = Topic.new("title" => "I'm unique!")
    assert t.save, "Should save t as unique"
    t2 = Topic.new("title" => "I'm %")
    assert t2.save, "Should save t2 as unique"
    t3 = Topic.new("title" => "I'm uniqu_!")
    assert t3.save, "Should save t3 as unique"
  end
  def test_validate_uniqueness_by_default_database_collation
    Topic.validates_uniqueness_of(:author_email_address)
    topic1 = Topic.new(author_email_address: "david@loudthinking.com")
    topic2 = Topic.new(author_email_address: "David@loudthinking.com")
    assert_equal 1, Topic.where(author_email_address: "david@loudthinking.com").count
    assert_not topic1.valid?
    assert_not topic1.save
    if current_adapter?(:Mysql2Adapter, :TrilogyAdapter)
      # Case insensitive collation (utf8mb4_0900_ai_ci) by default.
      # Should not allow "David" if "david" exists.
      assert_not topic2.valid?
      assert_not topic2.save
    else
      assert_predicate topic2, :valid?
      assert topic2.save
    end
    assert_equal 1, Topic.where(author_email_address: "david@loudthinking.com").count
    assert_equal 1, Topic.where(author_email_address: "David@loudthinking.com").count
  end
  def test_validate_case_sensitive_uniqueness
    Topic.validates_uniqueness_of(:title, case_sensitive: true, allow_nil: true)
    t = Topic.new("title" => "I'm unique!")
    assert t.save, "Should save t as unique"
    t.content = "Remaining unique"
    assert t.save, "Should still save t as unique"
    t2 = Topic.new("title" => "I'M UNIQUE!")
    assert_predicate t2, :valid?, "Should be valid"
    assert t2.save, "Should save t2 as unique"
    assert_empty t2.errors[:title]
    assert_empty t2.errors[:parent_id]
    assert_not_equal ["has already been taken"], t2.errors[:title]
    t3 = Topic.new("title" => "I'M uNiQUe!")
    assert_predicate t3, :valid?, "Should be valid"
    assert t3.save, "Should save t2 as unique"
    assert_empty t3.errors[:title]
    assert_empty t3.errors[:parent_id]
    assert_not_equal ["has already been taken"], t3.errors[:title]
  end
  def test_validate_case_sensitive_uniqueness_with_attribute_passed_as_integer
    Topic.validates_uniqueness_of(:title, case_sensitive: true)
    Topic.create!("title" => 101)
    t2 = Topic.new("title" => 101)
    assert_not_predicate t2, :valid?
    assert t2.errors[:title]
  end
  # "warehouse-things" has a dashed table name and non-standard quoting.
  def test_validate_uniqueness_with_non_standard_table_names
    i1 = WarehouseThing.create(value: 1000)
    assert_not i1.valid?, "i1 should not be valid"
    assert_predicate i1.errors[:value], :any?, "Should not be empty"
  end
  # The uniqueness query must not inherit the current default scope.
  def test_validates_uniqueness_inside_scoping
    Topic.validates_uniqueness_of(:title)
    Topic.where(author_name: "David").scoping do
      t1 = Topic.new("title" => "I'm unique!", "author_name" => "Mary")
      assert t1.save
      t2 = Topic.new("title" => "I'm unique!", "author_name" => "David")
      assert_not_predicate t2, :valid?
    end
  end
  def test_validate_uniqueness_with_columns_which_are_sql_keywords
    repair_validations(Guid) do
      Guid.validates_uniqueness_of :key
      g = Guid.new
      g.key = "foo"
      assert_nothing_raised { !g.valid? }
    end
  end
  def test_validate_uniqueness_with_limit
    if current_adapter?(:SQLite3Adapter)
      # Event.title has limit 5, but SQLite doesn't truncate.
      e1 = Event.create(title: "abcdefgh")
      assert_predicate e1, :valid?, "Could not create an event with a unique 8 characters title"
      e2 = Event.create(title: "abcdefgh")
      assert_not e2.valid?, "Created an event whose title is not unique"
    elsif current_adapter?(:Mysql2Adapter, :TrilogyAdapter, :PostgreSQLAdapter)
      assert_raise(ActiveRecord::ValueTooLong) do
        Event.create(title: "abcdefgh")
      end
    else
      assert_raise(ActiveRecord::StatementInvalid) do
        Event.create(title: "abcdefgh")
      end
    end
  end
  def test_validate_uniqueness_with_limit_and_utf8
    if current_adapter?(:SQLite3Adapter)
      # Event.title has limit 5, but SQLite doesn't truncate.
      e1 = Event.create(title: "一二三四五六七八")
      assert_predicate e1, :valid?, "Could not create an event with a unique 8 characters title"
      e2 = Event.create(title: "一二三四五六七八")
      assert_not e2.valid?, "Created an event whose title is not unique"
    elsif current_adapter?(:Mysql2Adapter, :TrilogyAdapter, :PostgreSQLAdapter)
      assert_raise(ActiveRecord::ValueTooLong) do
        Event.create(title: "一二三四五六七八")
      end
    else
      assert_raise(ActiveRecord::StatementInvalid) do
        Event.create(title: "一二三四五六七八")
      end
    end
  end
  def test_validate_straight_inheritance_uniqueness
    w1 = IneptWizard.create(name: "Rincewind", city: "Ankh-Morpork")
    assert_predicate w1, :valid?, "Saving w1"
    # Should use validation from base class (which is abstract)
    w2 = IneptWizard.new(name: "Rincewind", city: "Quirm")
    assert_not w2.valid?, "w2 shouldn't be valid"
    assert_predicate w2.errors[:name], :any?, "Should have errors for name"
    assert_equal ["has already been taken"], w2.errors[:name], "Should have uniqueness message for name"
    w3 = Conjurer.new(name: "Rincewind", city: "Quirm")
    assert_not w3.valid?, "w3 shouldn't be valid"
    assert_predicate w3.errors[:name], :any?, "Should have errors for name"
    assert_equal ["has already been taken"], w3.errors[:name], "Should have uniqueness message for name"
    w4 = Conjurer.create(name: "The Amazing Bonko", city: "Quirm")
    assert_predicate w4, :valid?, "Saving w4"
    w5 = Thaumaturgist.new(name: "The Amazing Bonko", city: "Lancre")
    assert_not w5.valid?, "w5 shouldn't be valid"
    assert_predicate w5.errors[:name], :any?, "Should have errors for name"
    assert_equal ["has already been taken"], w5.errors[:name], "Should have uniqueness message for name"
    w6 = Thaumaturgist.new(name: "Mustrum Ridcully", city: "Quirm")
    assert_not w6.valid?, "w6 shouldn't be valid"
    assert_predicate w6.errors[:city], :any?, "Should have errors for city"
    assert_equal ["has already been taken"], w6.errors[:city], "Should have uniqueness message for city"
  end
  def test_validate_uniqueness_with_conditions
    Topic.validates_uniqueness_of :title, conditions: -> { where(approved: true) }
    Topic.create("title" => "I'm a topic", "approved" => true)
    Topic.create("title" => "I'm an unapproved topic", "approved" => false)
    t3 = Topic.new("title" => "I'm a topic", "approved" => true)
    assert_not t3.valid?, "t3 shouldn't be valid"
    t4 = Topic.new("title" => "I'm an unapproved topic", "approved" => false)
    assert_predicate t4, :valid?, "t4 should be valid"
  end
  def test_validate_uniqueness_with_non_callable_conditions_is_not_supported
    assert_raises(ArgumentError) {
      Topic.validates_uniqueness_of :title, conditions: Topic.where(approved: true)
    }
  end
  def test_validate_uniqueness_with_conditions_with_record_arg
    Topic.validates_uniqueness_of :title, conditions: ->(record) {
      where(written_on: record.written_on.beginning_of_day..record.written_on.end_of_day)
    }
    today_midday = Time.current.midday
    todays_topic = Topic.new(title: "Highlights of the Day", written_on: today_midday)
    assert todays_topic.save, "1st topic written today with this title should save"
    todays_topic_duplicate = Topic.new(title: "Highlights of the Day", written_on: today_midday + 1.minute)
    assert_predicate todays_topic_duplicate, :invalid?, "2nd topic written today with this title should be invalid"
    tomorrows_topic = Topic.new(title: "Highlights of the Day", written_on: today_midday + 1.day)
    assert_predicate tomorrows_topic, :valid?, "1st topic written tomorrow with this title should be valid"
  end
  def test_validate_uniqueness_on_existing_relation
    event = Event.create
    assert_predicate TopicWithUniqEvent.create(event: event), :valid?
    topic = TopicWithUniqEvent.new(event: event)
    assert_not_predicate topic, :valid?
    assert_equal ["has already been taken"], topic.errors[:event]
  end
  def test_validate_uniqueness_on_empty_relation
    topic = TopicWithUniqEvent.new
    assert_predicate topic, :valid?
  end
  def test_validate_uniqueness_of_custom_primary_key
    klass = Class.new(ActiveRecord::Base) do
      self.table_name = "keyboards"
      self.primary_key = :key_number
      validates_uniqueness_of :key_number
      def self.name
        "Keyboard"
      end
    end
    klass.create!(key_number: 10)
    key2 = klass.create!(key_number: 11)
    key2.key_number = 10
    assert_not_predicate key2, :valid?
  end
  def test_validate_uniqueness_without_primary_key
    klass = Class.new(ActiveRecord::Base) do
      self.table_name = "dashboards"
      validates_uniqueness_of :dashboard_id
      def self.name; "Dashboard" end
    end
    abc = klass.create!(dashboard_id: "abc")
    assert_predicate klass.new(dashboard_id: "xyz"), :valid?
    assert_not_predicate klass.new(dashboard_id: "abc"), :valid?
    abc.dashboard_id = "def"
    e = assert_raises ActiveRecord::UnknownPrimaryKey do
      abc.save!
    end
    assert_match(/\AUnknown primary key for table dashboards in model/, e.message)
    assert_match(/Cannot validate uniqueness for persisted record without primary key.\z/, e.message)
  end
  def test_validate_uniqueness_ignores_itself_when_primary_key_changed
    Topic.validates_uniqueness_of(:title)
    t = Topic.new("title" => "This is a unique title")
    assert t.save, "Should save t as unique"
    t.id += 1
    assert_predicate t, :valid?, "Should be valid"
    assert t.save, "Should still save t as unique"
  end
  def test_validate_uniqueness_with_after_create_performing_save
    TopicWithAfterCreate.validates_uniqueness_of(:title)
    topic = TopicWithAfterCreate.create!(title: "Title1")
    assert topic.author_name.start_with?("Title1")
    topic2 = TopicWithAfterCreate.new(title: "Title1")
    assert_not_predicate topic2, :valid?
    assert_equal(["has already been taken"], topic2.errors[:title])
  end
  def test_validate_uniqueness_uuid
    skip unless current_adapter?(:PostgreSQLAdapter)
    item = UuidItem.create!(uuid: SecureRandom.uuid, title: "item1")
    item.update(title: "item1-title2")
    assert_empty item.errors
    item2 = UuidValidatingItem.create!(uuid: SecureRandom.uuid, title: "item2")
    item2.update(title: "item2-title2")
    assert_empty item2.errors
  end
  def test_validate_uniqueness_regular_id
    item = CoolTopic.create!(title: "MyItem")
    assert_empty item.errors
    item2 = CoolTopic.new(id: item.id, title: "MyItem2")
    assert_not_predicate item2, :valid?
    assert_equal(["has already been taken"], item2.errors[:id])
  end
end
# Verifies the validator's index-aware fast path: when a unique index
# covers the validated columns and none of them changed, the uniqueness
# query is skipped entirely (assert_no_queries); otherwise exactly one
# query runs. Non-transactional because it alters schema (indexes).
class UniquenessValidationWithIndexTest < ActiveRecord::TestCase
  self.use_transactional_tests = false
  def setup
    @connection = Topic.lease_connection
    # Clear cached index metadata so each test sees its own index setup.
    @connection.schema_cache.clear!
    Topic.delete_all
    Event.delete_all
  end
  def teardown
    Topic.clear_validators!
    @connection.remove_index(:topics, name: :topics_index, if_exists: true)
  end
  def test_new_record
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, :title, unique: true, name: :topics_index)
    t = Topic.new(title: "abc")
    assert_queries_count(1) do
      t.valid?
    end
  end
  # The indexed attribute did not change, so no query is needed.
  def test_changing_non_unique_attribute
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, :title, unique: true, name: :topics_index)
    t = Topic.create!(title: "abc")
    t.author_name = "John"
    assert_no_queries do
      t.valid?
    end
  end
  def test_changing_unique_attribute
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, :title, unique: true, name: :topics_index)
    t = Topic.create!(title: "abc")
    t.title = "abc v2"
    assert_queries_count(1) do
      t.valid?
    end
  end
  # NULLs are not covered by unique indexes, so a query must run.
  def test_changing_non_unique_attribute_and_unique_attribute_is_nil
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, :title, unique: true, name: :topics_index)
    t = Topic.create!
    assert_nil t.title
    t.author_name = "John"
    assert_queries_count(1) do
      t.valid?
    end
  end
  # Custom conditions disable the fast path; the index can't prove them.
  def test_conditions
    Topic.validates_uniqueness_of(:title, conditions: -> { where.not(author_name: nil) })
    @connection.add_index(:topics, :title, unique: true, name: :topics_index)
    t = Topic.create!(title: "abc")
    t.title = "abc v2"
    assert_queries_count(1) do
      t.valid?
    end
  end
  def test_case_sensitive
    Topic.validates_uniqueness_of(:title, case_sensitive: true)
    @connection.add_index(:topics, :title, unique: true, name: :topics_index)
    t = Topic.create!(title: "abc")
    t.title = "abc v2"
    assert_queries_count(1) do
      t.valid?
    end
  end
  # Partial indexes cannot be used for the fast path.
  def test_partial_index
    skip unless @connection.supports_partial_index?
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, :title, unique: true, where: "approved", name: :topics_index)
    t = Topic.create!(title: "abc")
    t.author_name = "John"
    assert_queries_count(1) do
      t.valid?
    end
  end
  def test_non_unique_index
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, :title, name: :topics_index)
    t = Topic.create!(title: "abc")
    t.author_name = "John"
    assert_queries_count(1) do
      t.valid?
    end
  end
  def test_scope
    Topic.validates_uniqueness_of(:title, scope: :author_name)
    @connection.add_index(:topics, [:author_name, :title], unique: true, name: :topics_index)
    t = Topic.create!(title: "abc", author_name: "John")
    t.content = "hello world"
    assert_no_queries do
      t.valid?
    end
    t.author_name = "Amy"
    assert_queries_count(1) do
      t.valid?
    end
  end
  def test_uniqueness_on_relation
    TopicWithEvent.validates_uniqueness_of(:event)
    @connection.add_index(:topics, :parent_id, unique: true, name: :topics_index)
    e1 = Event.create!(title: "abc")
    e2 = Event.create!(title: "cde")
    t = TopicWithEvent.create!(event: e1)
    t.content = "hello world"
    assert_no_queries do
      t.valid?
    end
    t.event = e2
    assert_queries_count(1) do
      t.valid?
    end
  ensure
    TopicWithEvent.clear_validators!
    Event.delete_all
  end
  def test_uniqueness_on_custom_relation_primary_key
    Keyboard.create!(name: "Keyboard #1")
    LessonWithUniqKeyboard.create!(name: "Keyboard #1")
    another = LessonWithUniqKeyboard.new(name: "Keyboard #1")
    assert_not_predicate another, :valid?
    assert_equal ["has already been taken"], another.errors[:keyboard]
  end
  # An index on a subset of the validated columns still enables the
  # fast path when none of the indexed columns changed.
  def test_index_of_sublist_of_columns
    Topic.validates_uniqueness_of(:title, scope: :author_name)
    @connection.add_index(:topics, :author_name, unique: true, name: :topics_index)
    t = Topic.create!(title: "abc", author_name: "John")
    t.content = "hello world"
    assert_no_queries do
      t.valid?
    end
    t.author_name = "Amy"
    assert_queries_count(1) do
      t.valid?
    end
  end
  # An index over MORE columns than validated proves nothing; query runs.
  def test_index_of_columns_list_and_extra_columns
    Topic.validates_uniqueness_of(:title)
    @connection.add_index(:topics, [:title, :author_name], unique: true, name: :topics_index)
    t = Topic.create!(title: "abc", author_name: "John")
    t.content = "hello world"
    assert_queries_count(1) do
      t.valid?
    end
  end
  if current_adapter?(:PostgreSQLAdapter)
    # Expression indexes cannot be matched to attributes; query runs.
    def test_expression_index
      Topic.validates_uniqueness_of(:title)
      @connection.add_index(:topics, "LOWER(title)", unique: true, name: :topics_index)
      t = Topic.create!(title: "abc", author_name: "John")
      t.content = "hello world"
      assert_queries_count(1) do
        t.valid?
      end
    end
  end
end
# Uniqueness validation on a model with a composite primary key.
class UniquenessWithCompositeKey < ActiveRecord::TestCase
  class BookWithUniqueRevision < Cpk::Book
    validates :revision, uniqueness: true
  end
  def test_uniqueness_validation_for_model_with_composite_key
    book_one = BookWithUniqueRevision.create!(id: [1, 42], title: "Author 1's book", revision: 36)
    book_two = BookWithUniqueRevision.create!(id: [2, 42], title: "Author 2's book", revision: 37)
    assert_not_equal book_one.revision, book_two.revision
    # Copying book_one's revision onto book_two must flip it to invalid.
    assert_changes("book_two.valid?", from: true, to: false) do
      book_two.revision = book_one.revision
    end
  ensure
    BookWithUniqueRevision.delete_all
  end
end
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import signal
import netaddr
from neutron_lib import constants
from neutron_lib.utils import net
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import uuidutils
from pyroute2.iproute import linux as iproute_linux
import testscenarios
import testtools
from neutron.agent.common import async_process
from neutron.agent.linux import ip_lib
from neutron.common import utils
from neutron.conf.agent import common as config
from neutron.privileged.agent.linux import ip_lib as priv_ip_lib
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux.bin import ip_monitor
from neutron.tests.functional import base as functional_base
LOG = logging.getLogger(__name__)
# Lightweight description of a test interface to be created.
Device = collections.namedtuple('Device',
                                'name ip_cidrs mac_address namespace')

# Test addresses drawn from 240.0.0.0/4 (reserved) and fd00::/8 (ULA)
# so the functional tests never collide with real networks.
WRONG_IP = '0.0.0.0'
TEST_IP = '240.0.0.1'
TEST_IP_NEIGH = '240.0.0.2'
TEST_IP_SECONDARY = '240.0.0.3'
TEST_IP6_NEIGH = 'fd00::2'
TEST_IP6_SECONDARY = 'fd00::3'
# (address, expected NUD state) pairs used by neighbour-table tests.
TEST_IP_NUD_STATES = ((TEST_IP_NEIGH, 'permanent'),
                      (TEST_IP_SECONDARY, 'reachable'),
                      (TEST_IP6_NEIGH, 'permanent'),
                      (TEST_IP6_SECONDARY, 'reachable'))
class IpLibTestFramework(functional_base.BaseSudoTestCase):
    """Shared fixture for ip_lib functional tests.

    Configures the OVS interface driver and provides helpers to
    describe and create throwaway tuntap devices inside their own
    network namespaces.
    """

    def setUp(self):
        super(IpLibTestFramework, self).setUp()
        self._configure()

    def _configure(self):
        # Register the interface options before overriding the driver,
        # then instantiate the driver from the resulting config.
        config.register_interface_driver_opts_helper(cfg.CONF)
        cfg.CONF.set_override(
            'interface_driver',
            'neutron.agent.linux.interface.OVSInterfaceDriver')
        config.register_interface_opts()
        self.driver = importutils.import_object(cfg.CONF.interface_driver,
                                                cfg.CONF)

    def generate_device_details(self, name=None, ip_cidrs=None,
                                mac_address=None, namespace=None):
        # Build a Device description, filling any omitted attribute with
        # a random (collision-free) default.
        if ip_cidrs is None:
            ip_cidrs = ["%s/24" % TEST_IP]

        return Device(name or utils.get_rand_name(),
                      ip_cidrs,
                      mac_address or
                      net.get_random_mac('fa:16:3e:00:00:00'.split(':')),
                      namespace or utils.get_rand_name())

    def _safe_delete_device(self, device):
        # Best-effort cleanup; the device may already be gone.
        try:
            device.link.delete()
        except RuntimeError:
            LOG.debug('Could not delete %s, was it already deleted?', device)

    def manage_device(self, attr):
        """Create a tuntap with the specified attributes.
        The device is cleaned up at the end of the test.
        :param attr: A Device namedtuple
        :return: A tuntap ip_lib.IPDevice
        """
        ip = ip_lib.IPWrapper(namespace=attr.namespace)
        if attr.namespace:
            # Create the namespace first and schedule its removal; the
            # device cleanup below must run before the namespace goes.
            ip.netns.add(attr.namespace)
            self.addCleanup(ip.netns.delete, attr.namespace)
        tap_device = ip.add_tuntap(attr.name)
        self.addCleanup(self._safe_delete_device, tap_device)
        tap_device.link.set_address(attr.mac_address)
        self.driver.init_l3(attr.name, attr.ip_cidrs,
                            namespace=attr.namespace)
        tap_device.link.set_up()
        return tap_device
class IpLibTestCase(IpLibTestFramework):
    """Functional tests exercising ip_lib device, address, route, neighbour
    and rule operations against real interfaces in dedicated namespaces.
    """
    def _check_routes(self, expected_routes, actual_routes):
        # Compare only the keys present in the expected routes; the kernel
        # reports more attributes than the tests care about.
        actual_routes = [{key: route[key] for key in expected_routes[0].keys()}
                         for route in actual_routes]
        self.assertEqual(expected_routes, actual_routes)
    def test_rules_lifecycle(self):
        """Add IPv4/IPv6 policy rules, verify listing, delete and re-check."""
        PRIORITY = 32768
        TABLE = 16
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        test_cases = {
            constants.IP_VERSION_4: [
                {
                    'ip': '1.1.1.1',
                    'to': '8.8.8.0/24'
                },
                {
                    'ip': '1.1.1.1',
                    'iif': device.name,
                    'to': '7.7.7.0/24'
                }
            ],
            constants.IP_VERSION_6: [
                {
                    'ip': 'abcd::1',
                    'to': '1234::/64'
                },
                {
                    'ip': 'abcd::1',
                    'iif': device.name,
                    'to': '4567::/64'
                }
            ]
        }
        # When 'iif' is given the 'from' address is expected to be the
        # any-address ('0.0.0.0/0' / '::/0') in the listed rule.
        expected_rules = {
            constants.IP_VERSION_4: [
                {
                    'from': '1.1.1.1',
                    'to': '8.8.8.0/24',
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast'
                }, {
                    'from': '0.0.0.0/0',
                    'to': '7.7.7.0/24',
                    'iif': device.name,
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast'
                }
            ],
            constants.IP_VERSION_6: [
                {
                    'from': 'abcd::1',
                    'to': '1234::/64',
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast'
                },
                {
                    'from': '::/0',
                    'to': '4567::/64',
                    'iif': device.name,
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast',
                }
            ]
        }
        for ip_version, test_case in test_cases.items():
            for rule in test_case:
                ip_lib.add_ip_rule(namespace=device.namespace, table=TABLE,
                                   priority=PRIORITY, **rule)
            rules = ip_lib.list_ip_rules(device.namespace, ip_version)
            for expected_rule in expected_rules[ip_version]:
                self.assertIn(expected_rule, rules)
            for rule in test_case:
                ip_lib.delete_ip_rule(device.namespace, table=TABLE,
                                      priority=PRIORITY, **rule)
            # NOTE(review): listing after deletion goes through priv_ip_lib
            # directly, while the listing above uses ip_lib — presumably
            # both return the same rule format; confirm.
            rules = priv_ip_lib.list_ip_rules(device.namespace, ip_version)
            for expected_rule in expected_rules[ip_version]:
                self.assertNotIn(expected_rule, rules)
    def test_device_exists(self):
        attr = self.generate_device_details()
        self.assertFalse(
            ip_lib.device_exists(attr.name, namespace=attr.namespace))
        device = self.manage_device(attr)
        self.assertTrue(
            ip_lib.device_exists(device.name, namespace=attr.namespace))
        # The same device name must not be visible from another namespace.
        self.assertFalse(
            ip_lib.device_exists(attr.name, namespace='wrong_namespace'))
        device.link.delete()
        self.assertFalse(
            ip_lib.device_exists(attr.name, namespace=attr.namespace))
    def test_ipdevice_exists(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        self.assertTrue(device.exists())
        device.link.delete()
        self.assertFalse(device.exists())
    def test_vlan_exists(self):
        attr = self.generate_device_details()
        ip = ip_lib.IPWrapper(namespace=attr.namespace)
        ip.netns.add(attr.namespace)
        self.addCleanup(ip.netns.delete, attr.namespace)
        # A dummy parent device is needed to hang the VLAN interface off.
        priv_ip_lib.create_interface(attr.name, attr.namespace, 'dummy')
        self.assertFalse(ip_lib.vlan_in_use(1999, namespace=attr.namespace))
        device = ip.add_vlan('vlan1999', attr.name, 1999)
        self.assertTrue(ip_lib.vlan_in_use(1999, namespace=attr.namespace))
        device.link.delete()
        self.assertFalse(ip_lib.vlan_in_use(1999, namespace=attr.namespace))
    def test_vxlan_exists(self):
        attr = self.generate_device_details()
        ip = ip_lib.IPWrapper(namespace=attr.namespace)
        ip.netns.add(attr.namespace)
        self.addCleanup(ip.netns.delete, attr.namespace)
        self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
        device = ip.add_vxlan(attr.name, 9999)
        self.addCleanup(self._safe_delete_device, device)
        self.assertTrue(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
        device.link.delete()
        self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
    def test_ipwrapper_get_device_by_ip_None(self):
        ip_wrapper = ip_lib.IPWrapper(namespace=None)
        self.assertIsNone(ip_wrapper.get_device_by_ip(ip=None))
    def test_ipwrapper_get_device_by_ip(self):
        # We need to pass both IP and cidr values to get_device_by_ip()
        # to make sure it filters correctly.
        test_ip = "%s/24" % TEST_IP
        test_ip_secondary = "%s/24" % TEST_IP_SECONDARY
        attr = self.generate_device_details(
            ip_cidrs=[test_ip, test_ip_secondary]
        )
        self.manage_device(attr)
        ip_wrapper = ip_lib.IPWrapper(namespace=attr.namespace)
        self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(TEST_IP).name)
        self.assertEqual(attr.name,
                         ip_wrapper.get_device_by_ip(TEST_IP_SECONDARY).name)
        self.assertIsNone(ip_wrapper.get_device_by_ip(TEST_IP_NEIGH))
        # this is in the same subnet, so will match if we pass as cidr
        test_ip_neigh = "%s/24" % TEST_IP_NEIGH
        self.assertEqual(attr.name,
                         ip_wrapper.get_device_by_ip(test_ip_neigh).name)
        self.assertIsNone(ip_wrapper.get_device_by_ip(WRONG_IP))
    def test_device_exists_with_ips_and_mac(self):
        # The check must fail when any single attribute (name, IPs, MAC or
        # namespace) differs from the real device.
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        self.assertTrue(
            ip_lib.device_exists_with_ips_and_mac(*attr))
        wrong_ip_cidr = '10.0.0.1/8'
        wrong_mac_address = 'aa:aa:aa:aa:aa:aa'
        attr = self.generate_device_details(name='wrong_name')
        self.assertFalse(
            ip_lib.device_exists_with_ips_and_mac(*attr))
        attr = self.generate_device_details(ip_cidrs=[wrong_ip_cidr])
        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
        attr = self.generate_device_details(mac_address=wrong_mac_address)
        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
        attr = self.generate_device_details(namespace='wrong_namespace')
        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
        device.link.delete()
    def test_get_device_mac(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        mac_address = ip_lib.get_device_mac(attr.name,
                                            namespace=attr.namespace)
        self.assertEqual(attr.mac_address, mac_address)
        device.link.delete()
    def test_get_device_mac_too_long_name(self):
        # Names longer than DEVICE_NAME_MAX_LEN must still be resolvable.
        name = utils.get_rand_name(
            max_length=constants.DEVICE_NAME_MAX_LEN + 5)
        attr = self.generate_device_details(name=name)
        device = self.manage_device(attr)
        mac_address = ip_lib.get_device_mac(attr.name,
                                            namespace=attr.namespace)
        self.assertEqual(attr.mac_address, mac_address)
        device.link.delete()
    def test_gateway_lifecycle(self):
        """Add, read back and delete an IPv4 and an IPv6 default gateway."""
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        metric = 1000
        device = self.manage_device(attr)
        gateways = {
            constants.IP_VERSION_4: attr.ip_cidrs[0].split('/')[0],
            constants.IP_VERSION_6: "fd00::ff"
        }
        expected_gateways = {
            constants.IP_VERSION_4: {
                'metric': metric,
                'via': gateways[constants.IP_VERSION_4]},
            constants.IP_VERSION_6: {
                'metric': metric,
                'via': gateways[constants.IP_VERSION_6]}}
        for ip_version, gateway_ip in gateways.items():
            device.route.add_gateway(gateway_ip, metric)
            self._check_routes(
                [expected_gateways[ip_version]],
                [device.route.get_gateway(ip_version=ip_version)])
            device.route.delete_gateway(gateway_ip)
            self.assertIsNone(
                device.route.get_gateway(ip_version=ip_version))
    def test_gateway_flush(self):
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        device = self.manage_device(attr)
        gateways = {
            constants.IP_VERSION_4: attr.ip_cidrs[0].split('/')[0],
            constants.IP_VERSION_6: "fd00::ff"
        }
        for ip_version, gateway_ip in gateways.items():
            # Ensure that there is no gateway configured
            self.assertIsNone(
                device.route.get_gateway(ip_version=ip_version))
            # Now lets add gateway
            device.route.add_gateway(gateway_ip, table="main")
            self.assertIsNotNone(
                device.route.get_gateway(ip_version=ip_version))
            # Flush gateway and check that there is no any gateway configured
            device.route.flush(ip_version, table="main")
            self.assertIsNone(
                device.route.get_gateway(ip_version=ip_version))
    def test_get_neigh_entries(self):
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        device = self.manage_device(attr)
        device.neigh.add(TEST_IP_NEIGH, mac_address)
        expected_neighs = [{'dst': TEST_IP_NEIGH,
                            'lladdr': mac_address,
                            'device': attr.name,
                            'state': 'permanent'}]
        neighs = device.neigh.dump(4)
        self.assertCountEqual(expected_neighs, neighs)
        self.assertIsInstance(neighs, list)
        device.neigh.delete(TEST_IP_NEIGH, mac_address)
        neighs = device.neigh.dump(4, dst=TEST_IP_NEIGH, lladdr=mac_address)
        self.assertEqual([], neighs)
    def test_get_neigh_entries_no_namespace(self):
        with testtools.ExpectedException(ip_lib.NetworkNamespaceNotFound):
            ip_lib.dump_neigh_entries(4, namespace="nonexistent-netns")
    def test_get_neigh_entries_no_interface(self):
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        self.manage_device(attr)
        with testtools.ExpectedException(ip_lib.NetworkInterfaceNotFound):
            ip_lib.dump_neigh_entries(4, device="nosuchdevice",
                                      namespace=attr.namespace)
    def test_delete_neigh_entries(self):
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        device = self.manage_device(attr)
        # trying to delete a non-existent entry shouldn't raise an error
        device.neigh.delete(TEST_IP_NEIGH, mac_address)
    def test_flush_neigh_ipv4(self):
        # Entry with state "reachable" deleted.
        self._flush_neigh(constants.IP_VERSION_4, TEST_IP_SECONDARY,
                          {TEST_IP_NEIGH})
        # Entries belong to "ip_to_flush" passed CIDR, but "permanent" entry
        # is not deleted.
        self._flush_neigh(constants.IP_VERSION_4, '240.0.0.0/28',
                          {TEST_IP_NEIGH})
        # "all" passed, but "permanent" entry is not deleted.
        self._flush_neigh(constants.IP_VERSION_4, 'all', {TEST_IP_NEIGH})
    def test_flush_neigh_ipv6(self):
        # Entry with state "reachable" deleted.
        self._flush_neigh(constants.IP_VERSION_6, TEST_IP6_SECONDARY,
                          {TEST_IP6_NEIGH})
        # Entries belong to "ip_to_flush" passed CIDR, but "permanent" entry
        # is not deleted.
        self._flush_neigh(constants.IP_VERSION_6, 'fd00::0/64',
                          {TEST_IP6_NEIGH})
        # "all" passed, but "permanent" entry is not deleted.
        self._flush_neigh(constants.IP_VERSION_6, 'all', {TEST_IP6_NEIGH})
    def _flush_neigh(self, version, ip_to_flush, ips_expected):
        """Seed neighbour entries, flush "ip_to_flush" and compare survivors.

        :param version: IP version to flush/dump (4 or 6)
        :param ip_to_flush: address, CIDR or 'all' passed to neigh.flush()
        :param ips_expected: set of destination IPs expected to remain
        """
        attr = self.generate_device_details(
            ip_cidrs=['%s/24' % TEST_IP, 'fd00::1/64'],
            namespace=utils.get_rand_name(20, 'ns-'))
        device = self.manage_device(attr)
        for test_ip, nud_state in TEST_IP_NUD_STATES:
            mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
            device.neigh.add(test_ip, mac_address, nud_state)
        device.neigh.flush(version, ip_to_flush)
        ips = {e['dst'] for e in device.neigh.dump(version)}
        self.assertEqual(ips_expected, ips)
    def _check_for_device_name(self, ip, name, should_exist):
        # Assert whether a device with "name" is (not) visible through the
        # given IPWrapper.
        exist = any(d for d in ip.get_devices() if d.name == name)
        self.assertEqual(should_exist, exist)
    def test_veth_exists(self):
        namespace1 = self.useFixture(net_helpers.NamespaceFixture())
        namespace2 = self.useFixture(net_helpers.NamespaceFixture())
        dev_name1 = utils.get_rand_name()
        dev_name2 = utils.get_rand_name()
        device1, device2 = namespace1.ip_wrapper.add_veth(
            dev_name1, dev_name2, namespace2.name)
        self.addCleanup(self._safe_delete_device, device1)
        self.addCleanup(self._safe_delete_device, device2)
        self._check_for_device_name(namespace1.ip_wrapper, dev_name1, True)
        self._check_for_device_name(namespace2.ip_wrapper, dev_name2, True)
        self._check_for_device_name(namespace1.ip_wrapper, dev_name2, False)
        self._check_for_device_name(namespace2.ip_wrapper, dev_name1, False)
        # As it is veth pair, remove of device1 should be enough to remove
        # both devices
        device1.link.delete()
        self._check_for_device_name(namespace1.ip_wrapper, dev_name1, False)
        self._check_for_device_name(namespace2.ip_wrapper, dev_name2, False)
    def test_macvtap_exists(self):
        namespace = self.useFixture(net_helpers.NamespaceFixture())
        src_dev_name = utils.get_rand_name()
        src_dev = namespace.ip_wrapper.add_dummy(src_dev_name)
        self.addCleanup(self._safe_delete_device, src_dev)
        dev_name = utils.get_rand_name()
        device = namespace.ip_wrapper.add_macvtap(dev_name, src_dev_name)
        self.addCleanup(self._safe_delete_device, device)
        self._check_for_device_name(namespace.ip_wrapper, dev_name, True)
        device.link.delete()
        self._check_for_device_name(namespace.ip_wrapper, dev_name, False)
    def test_dummy_exists(self):
        namespace = self.useFixture(net_helpers.NamespaceFixture())
        dev_name = utils.get_rand_name()
        device = namespace.ip_wrapper.add_dummy(dev_name)
        self.addCleanup(self._safe_delete_device, device)
        self._check_for_device_name(namespace.ip_wrapper, dev_name, True)
        device.link.delete()
        self._check_for_device_name(namespace.ip_wrapper, dev_name, False)
    def test_set_link_mtu(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        device.link.set_mtu(1450)
        self.assertEqual(1450, device.link.mtu)
        # Check if proper exception will be raised when wrong MTU value is
        # provided
        self.assertRaises(ip_lib.InvalidArgument, device.link.set_mtu, 1)
    def test_set_link_allmulticast_on(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        self.assertFalse(device.link.allmulticast)
        device.link.set_allmulticast_on()
        self.assertTrue(device.link.allmulticast)
    def test_set_link_netns(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        original_namespace = device.namespace
        original_ip_wrapper = ip_lib.IPWrapper(namespace=original_namespace)
        new_namespace = self.useFixture(net_helpers.NamespaceFixture())
        device.link.set_netns(new_namespace.name)
        self.assertEqual(new_namespace.name, device.namespace)
        self._check_for_device_name(
            new_namespace.ip_wrapper, device.name, True)
        self._check_for_device_name(
            original_ip_wrapper, device.name, False)
    def test_set_link_name(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        ip_wrapper = ip_lib.IPWrapper(namespace=device.namespace)
        original_name = device.name
        new_name = utils.get_rand_name()
        # device has to be DOWN to rename it
        device.link.set_down()
        device.link.set_name(new_name)
        self.assertEqual(new_name, device.name)
        self._check_for_device_name(ip_wrapper, new_name, True)
        self._check_for_device_name(ip_wrapper, original_name, False)
    def test_set_link_alias(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        alias = utils.get_rand_name()
        device.link.set_alias(alias)
        self.assertEqual(alias, device.link.alias)
    def _add_and_check_ips(self, device, ip_addresses):
        """Add (cidr, scope, expected_broadcast) triples and verify listing."""
        for cidr, scope, expected_broadcast in ip_addresses:
            # For IPv4 address add_broadcast flag will be set to True only
            # if expected_broadcast is given.
            # For IPv6 add_broadcast flag can be set to True always but
            # broadcast address will not be set, so expected_broadcast for
            # IPv6 should be always given as None.
            add_broadcast = True
            if cidr.version == constants.IP_VERSION_4:
                add_broadcast = bool(expected_broadcast)
            device.addr.add(str(cidr), scope, add_broadcast)
        device_ips_info = [
            (netaddr.IPNetwork(ip_info['cidr']),
             ip_info['scope'],
             ip_info['broadcast']) for
            ip_info in device.addr.list()]
        self.assertCountEqual(ip_addresses, device_ips_info)
    def _flush_ips(self, device, ip_version):
        # After a flush no address of the flushed IP version may remain.
        device.addr.flush(ip_version)
        for ip_address in device.addr.list():
            cidr = netaddr.IPNetwork(ip_address['cidr'])
            self.assertNotEqual(ip_version, cidr.version)
    def test_add_ip_address(self):
        ip_addresses = [
            (netaddr.IPNetwork("10.10.10.10/30"), "global", '10.10.10.11'),
            (netaddr.IPNetwork("11.11.11.11/28"), "link", None),
            (netaddr.IPNetwork("2801::1/120"), "global", None),
            (netaddr.IPNetwork("fe80::/64"), "link", None)]
        attr = self.generate_device_details(ip_cidrs=[])
        device = self.manage_device(attr)
        self._add_and_check_ips(device, ip_addresses)
        # Now let's check if adding already existing IP address will raise
        # RuntimeError
        ip_address = ip_addresses[0]
        self.assertRaises(RuntimeError,
                          device.addr.add, str(ip_address[0]), ip_address[1])
    def test_delete_ip_address(self):
        attr = self.generate_device_details()
        cidr = attr.ip_cidrs[0]
        device = self.manage_device(attr)
        device_cidrs = [ip_info['cidr'] for ip_info in device.addr.list()]
        self.assertIn(cidr, device_cidrs)
        device.addr.delete(cidr)
        device_cidrs = [ip_info['cidr'] for ip_info in device.addr.list()]
        self.assertNotIn(cidr, device_cidrs)
        # Try to delete not existing IP address, it should be just fine and
        # finish without any error raised
        device.addr.delete(cidr)
    def test_flush_ip_addresses(self):
        ip_addresses = [
            (netaddr.IPNetwork("10.10.10.10/30"), "global", '10.10.10.11'),
            (netaddr.IPNetwork("11.11.11.11/28"), "link", None),
            (netaddr.IPNetwork("2801::1/120"), "global", None),
            (netaddr.IPNetwork("fe80::/64"), "link", None)]
        attr = self.generate_device_details(ip_cidrs=[])
        device = self.manage_device(attr)
        self._add_and_check_ips(device, ip_addresses)
        self._flush_ips(device, constants.IP_VERSION_4)
        self._flush_ips(device, constants.IP_VERSION_6)
class TestSetIpNonlocalBind(functional_base.BaseSudoTestCase):
    """Check get/set of the ip_nonlocal_bind sysctl inside a namespace."""
    def test_assigned_value(self):
        namespace = self.useFixture(net_helpers.NamespaceFixture())
        for expected in (0, 1):
            failed = ip_lib.set_ip_nonlocal_bind(expected, namespace.name)
            try:
                observed = ip_lib.get_ip_nonlocal_bind(namespace.name)
            except RuntimeError as rte:
                # Some kernels do not expose this knob per network
                # namespace; skip the test there instead of failing.
                stat_message = (
                    'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind')
                if stat_message in str(rte):
                    raise self.skipException(
                        "This kernel doesn't support %s in network "
                        "namespaces." % ip_lib.IP_NONLOCAL_BIND)
                raise
            self.assertFalse(failed)
            self.assertEqual(expected, observed)
class NamespaceTestCase(functional_base.BaseSudoTestCase):
    """Verify ip_lib.network_namespace_exists() against a live namespace."""
    def setUp(self):
        super(NamespaceTestCase, self).setUp()
        # Create a uniquely named namespace and make sure it is removed
        # again when the test finishes.
        self.namespace = 'test_ns_' + uuidutils.generate_uuid()
        ip_lib.create_network_namespace(self.namespace)
        self.addCleanup(self._delete_namespace)
    def _delete_namespace(self):
        # Cleanup counterpart of the namespace created in setUp().
        ip_lib.delete_network_namespace(self.namespace)
    def test_network_namespace_exists_ns_exists(self):
        exists = ip_lib.network_namespace_exists(self.namespace)
        self.assertTrue(exists)
    def test_network_namespace_exists_ns_doesnt_exists(self):
        exists = ip_lib.network_namespace_exists('another_ns')
        self.assertFalse(exists)
    def test_network_namespace_exists_ns_exists_try_is_ready(self):
        exists = ip_lib.network_namespace_exists(self.namespace,
                                                 try_is_ready=True)
        self.assertTrue(exists)
    def test_network_namespace_exists_ns_doesnt_exists_try_is_ready(self):
        exists = ip_lib.network_namespace_exists('another_ns',
                                                 try_is_ready=True)
        self.assertFalse(exists)
class IpMonitorTestCase(testscenarios.WithScenarios,
                        functional_base.BaseLoggingTestCase):
    """Run the ip_monitor helper as root and verify that it writes address
    events (added/removed) to a file, with and without a namespace.
    """
    scenarios = [
        ('namespace', {'namespace': 'ns_' + uuidutils.generate_uuid()}),
        ('no_namespace', {'namespace': None})
    ]
    def setUp(self):
        super(IpMonitorTestCase, self).setUp()
        self.addCleanup(self._cleanup)
        if self.namespace:
            priv_ip_lib.create_netns(self.namespace)
        self.devices = [('int_' + uuidutils.generate_uuid())[
            :constants.DEVICE_NAME_MAX_LEN] for _ in range(5)]
        self.ip_wrapper = ip_lib.IPWrapper(self.namespace)
        # Events are written to this file by the monitor process.
        self.temp_file = self.get_temp_file_path('out_' + self.devices[0] +
                                                 '.tmp')
        self.proc = self._run_ip_monitor(ip_monitor)
    def _cleanup(self):
        self.proc.stop(kill_timeout=10, kill_signal=signal.SIGTERM)
        if self.namespace:
            # Deleting the namespace removes its interfaces too.
            priv_ip_lib.remove_netns(self.namespace)
        else:
            for device in self.devices:
                try:
                    priv_ip_lib.delete_interface(device, self.namespace)
                except priv_ip_lib.NetworkInterfaceNotFound:
                    pass
    @staticmethod
    def _normalize_module_name(name):
        # Map a compiled module path (.pyc/.pyo) back to its .py source so
        # it can be executed directly as a script.
        for suf in ['.pyc', '.pyo']:
            if name.endswith(suf):
                return name[:-len(suf)] + '.py'
        return name
    def _run_ip_monitor(self, module):
        # Launch the ip_monitor script as root, pointing it at the output
        # file and the (possibly "None") namespace.
        executable = self._normalize_module_name(module.__file__)
        proc = async_process.AsyncProcess(
            [executable, self.temp_file, str(self.namespace)],
            run_as_root=True)
        proc.start(block=True)
        return proc
    def _read_file(self, ip_addresses):
        """Return True once every expected event appears in the output file.

        Any read/parse error is treated as "not yet" so callers can poll.
        """
        try:
            registers = []
            with open(self.temp_file, 'r') as f:
                data = f.read()
            for line in data.splitlines():
                register = jsonutils.loads(line)
                registers.append({'name': register['name'],
                                  'cidr': register['cidr'],
                                  'event': register['event']})
            for ip_address in ip_addresses:
                if ip_address not in registers:
                    return False
            return True
        except (OSError, IOError, ValueError):
            return False
    def _check_read_file(self, ip_addresses):
        # Poll for the expected events; on timeout fail with the file's
        # actual content for easier debugging.
        try:
            utils.wait_until_true(lambda: self._read_file(ip_addresses),
                                  timeout=30)
        except utils.WaitTimeout:
            with open(self.temp_file, 'r') as f:
                registers = f.read()
            self.fail('Defined IP addresses: %s, IP addresses registered: %s' %
                      (ip_addresses, registers))
    def _handle_ip_addresses(self, event, ip_addresses):
        # Apply the 'added'/'removed' entries of ip_addresses to the
        # corresponding devices, generating the events under test.
        for ip_address in (_ip for _ip in ip_addresses
                           if _ip['event'] == event):
            ip_device = ip_lib.IPDevice(ip_address['name'], self.namespace)
            if event == 'removed':
                ip_device.addr.delete(ip_address['cidr'])
            if event == 'added':
                ip_device.addr.add(ip_address['cidr'])
    def test_add_remove_ip_address_and_interface(self):
        for device in self.devices:
            self.ip_wrapper.add_dummy(device)
        # Wait until the monitor is up (output file readable).
        utils.wait_until_true(lambda: self._read_file({}), timeout=30)
        ip_addresses = [
            {'cidr': '192.168.250.1/24', 'event': 'added',
             'name': self.devices[0]},
            {'cidr': '192.168.250.2/24', 'event': 'added',
             'name': self.devices[1]},
            {'cidr': '192.168.250.3/24', 'event': 'added',
             'name': self.devices[2]},
            {'cidr': '192.168.250.10/24', 'event': 'added',
             'name': self.devices[3]},
            {'cidr': '192.168.250.10/24', 'event': 'removed',
             'name': self.devices[3]},
            {'cidr': '2001:db8::1/64', 'event': 'added',
             'name': self.devices[4]},
            {'cidr': '2001:db8::2/64', 'event': 'added',
             'name': self.devices[4]}]
        self._handle_ip_addresses('added', ip_addresses)
        self._handle_ip_addresses('removed', ip_addresses)
        self._check_read_file(ip_addresses)
        # Deleting the interface should generate 'removed' events for all
        # its addresses.
        ip_device = ip_lib.IPDevice(self.devices[4], self.namespace)
        ip_device.link.delete()
        ip_addresses = [
            {'cidr': '2001:db8::1/64', 'event': 'removed',
             'name': self.devices[4]},
            {'cidr': '2001:db8::2/64', 'event': 'removed',
             'name': self.devices[4]}]
        self._check_read_file(ip_addresses)
    def test_interface_added_after_initilization(self):
        # NOTE(review): method name typo ("initilization") kept to avoid
        # changing the externally visible test id.
        for device in self.devices[:len(self.devices) - 1]:
            self.ip_wrapper.add_dummy(device)
        utils.wait_until_true(lambda: self._read_file({}), timeout=30)
        ip_addresses = [
            {'cidr': '192.168.251.21/24', 'event': 'added',
             'name': self.devices[0]},
            {'cidr': '192.168.251.22/24', 'event': 'added',
             'name': self.devices[1]}]
        self._handle_ip_addresses('added', ip_addresses)
        self._check_read_file(ip_addresses)
        # A device created after the monitor started must also be watched.
        self.ip_wrapper.add_dummy(self.devices[-1])
        ip_addresses.append({'cidr': '192.168.251.23/24', 'event': 'added',
                             'name': self.devices[-1]})
        self._handle_ip_addresses('added', [ip_addresses[-1]])
        self._check_read_file(ip_addresses)
    def test_add_and_remove_multiple_ips(self):
        # NOTE(ralonsoh): testing [1], adding multiple IPs.
        # [1] https://bugs.launchpad.net/neutron/+bug/1832307
        utils.wait_until_true(lambda: self._read_file({}), timeout=30)
        self.ip_wrapper.add_dummy(self.devices[0])
        ip_addresses = []
        for i in range(100):
            _cidr = str(netaddr.IPNetwork('192.168.252.1/32').ip + i) + '/32'
            ip_addresses.append({'cidr': _cidr, 'event': 'added',
                                 'name': self.devices[0]})
        self._handle_ip_addresses('added', ip_addresses)
        self._check_read_file(ip_addresses)
        for i in range(100):
            _cidr = str(netaddr.IPNetwork('192.168.252.1/32').ip + i) + '/32'
            ip_addresses.append({'cidr': _cidr, 'event': 'removed',
                                 'name': self.devices[0]})
        self._handle_ip_addresses('removed', ip_addresses)
        self._check_read_file(ip_addresses)
class IpRouteCommandTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for IPDevice.route: add/list/delete/flush routes."""
    def setUp(self):
        super(IpRouteCommandTestCase, self).setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        ip_lib.IPWrapper(self.namespace).add_dummy('test_device')
        self.device = ip_lib.IPDevice('test_device', namespace=self.namespace)
        self.device.link.set_up()
        self.device_cidr_ipv4 = '192.168.100.1/24'
        self.device_cidr_ipv6 = '2020::1/64'
        self.device.addr.add(self.device_cidr_ipv4)
        self.device.addr.add(self.device_cidr_ipv6)
        self.cidrs = ['192.168.0.0/24', '10.0.0.0/8', '2001::/64', 'faaa::/96']
    def _assert_route(self, ip_version, table=None, source_prefix=None,
                      cidr=None, scope=None, via=None, metric=None,
                      not_in=False):
        """Wait (up to 5s) until the described route is (not) listed.

        Unspecified attributes are normalized to the kernel defaults
        before comparison: default metric and table, 'global'/'link'
        scope, and the version-specific "any" CIDR.

        :param not_in: when True, assert the route is absent instead.
        """
        # Fix: previously the predicates were lambdas assigned before the
        # comparison dict ("cmp") even existed, relying on late binding,
        # and the failure path used "raise self.fail(...)" although
        # TestCase.fail() never returns.  Build the expected dict first,
        # then define the predicate, and call fail() directly.
        if cidr:
            ip_version = utils.get_ip_version(cidr)
        else:
            ip_version = utils.get_ip_version(via)
            cidr = constants.IP_ANY[ip_version]
        if constants.IP_VERSION_6 == ip_version:
            scope = ip_lib.IP_ADDRESS_SCOPE[0]
        elif not scope:
            scope = 'global' if via else 'link'
        if not metric:
            metric = ip_lib.IP_ROUTE_METRIC_DEFAULT[ip_version]
        table = table or iproute_linux.DEFAULT_TABLE
        table = ip_lib.IP_RULE_TABLES_NAMES.get(table, table)
        expected = {'table': table,
                    'cidr': cidr,
                    'source_prefix': source_prefix,
                    'scope': scope,
                    'device': 'test_device',
                    'via': via,
                    'metric': metric,
                    'proto': 'static'}
        def _route_listed():
            return expected in self.device.route.list_routes(ip_version,
                                                             table=table)
        if not_in:
            def predicate():
                return not _route_listed()
            msg = 'Route found: %s'
        else:
            predicate = _route_listed
            msg = 'Route not found: %s'
        try:
            utils.wait_until_true(predicate, timeout=5)
        except utils.WaitTimeout:
            self.fail(msg % expected)
    def test_add_route_table(self):
        """Routes can be added to the default and to specific tables."""
        tables = (None, 1, 253, 254, 255)
        for cidr in self.cidrs:
            for table in tables:
                self.device.route.add_route(cidr, table=table)
                ip_version = utils.get_ip_version(cidr)
                self._assert_route(ip_version, cidr=cidr, table=table)
    def test_add_route_via(self):
        """Routes can be added with an explicit (per-version) gateway."""
        gateway_ipv4 = str(netaddr.IPNetwork(self.device_cidr_ipv4).ip)
        gateway_ipv6 = str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1)
        for cidr in self.cidrs:
            ip_version = utils.get_ip_version(cidr)
            gateway = (gateway_ipv4 if ip_version == constants.IP_VERSION_4
                       else gateway_ipv6)
            self.device.route.add_route(cidr, via=gateway)
            self._assert_route(ip_version, cidr=cidr, via=gateway)
    def test_add_route_metric(self):
        """Routes can carry an explicit metric."""
        metrics = (None, 1, 10, 255)
        for cidr in self.cidrs:
            for metric in metrics:
                self.device.route.add_route(cidr, metric=metric)
                ip_version = utils.get_ip_version(cidr)
                self._assert_route(ip_version, cidr=cidr, metric=metric)
    def test_add_route_scope(self):
        """Routes can be added with any supported address scope."""
        for cidr in self.cidrs:
            for scope in ip_lib.IP_ADDRESS_SCOPE_NAME:
                self.device.route.add_route(cidr, scope=scope)
                ip_version = utils.get_ip_version(cidr)
                self._assert_route(ip_version, cidr=cidr, scope=scope)
    def test_add_route_gateway(self):
        """Default gateways are listed as routes with the 'any' CIDR."""
        gateways = (str(netaddr.IPNetwork(self.device_cidr_ipv4).ip),
                    str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1))
        for gateway in gateways:
            ip_version = utils.get_ip_version(gateway)
            self.device.route.add_gateway(gateway)
            self._assert_route(ip_version, cidr=None, via=gateway,
                               scope='global')
    def test_list_onlink_routes_ipv4(self):
        """Only IPv4 onlink routes are added and subsequently listed."""
        cidr_ipv4 = []
        for cidr in self.cidrs:
            if utils.get_ip_version(cidr) == constants.IP_VERSION_4:
                cidr_ipv4.append(cidr)
                self.device.route.add_onlink_route(cidr)
        for cidr in cidr_ipv4:
            self._assert_route(constants.IP_VERSION_4, cidr=cidr)
        routes = self.device.route.list_onlink_routes(constants.IP_VERSION_4)
        self.assertEqual(len(cidr_ipv4), len(routes))
    def test_get_and_delete_gateway(self):
        """Gateways round-trip through add/get/delete for every combination
        of scope, metric and table.
        """
        gateways = (str(netaddr.IPNetwork(self.device_cidr_ipv4).ip),
                    str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1))
        scopes = ('global', 'site', 'link')
        metrics = (None, 1, 255)
        tables = (None, 1, 254, 255)
        for gateway, scope, metric, table in itertools.product(
                gateways, scopes, metrics, tables):
            ip_version = utils.get_ip_version(gateway)
            self.device.route.add_gateway(gateway, scope=scope, metric=metric,
                                          table=table)
            self._assert_route(ip_version, cidr=None, via=gateway, scope=scope,
                               metric=metric, table=table)
            self.assertEqual(gateway, self.device.route.get_gateway(
                ip_version=ip_version, table=table)['via'])
            self.device.route.delete_gateway(gateway, table=table, scope=scope)
            self.assertIsNone(self.device.route.get_gateway(
                ip_version=ip_version, table=table))
    def test_delete_route(self):
        """Deleted routes disappear from the listing (asserted not_in)."""
        scopes = ('global', 'site', 'link')
        tables = (None, 1, 254, 255)
        for cidr, scope, table in itertools.product(
                self.cidrs, scopes, tables):
            ip_version = utils.get_ip_version(cidr)
            self.device.route.add_route(cidr, table=table, scope=scope)
            self._assert_route(ip_version, cidr=cidr, scope=scope, table=table)
            self.device.route.delete_route(cidr, table=table, scope=scope)
            self._assert_route(ip_version, cidr=cidr, scope=scope, table=table,
                               not_in=True)
    def test_flush(self):
        """flush() empties a table for the given IP version."""
        tables = (None, 1, 200)
        ip_versions = (constants.IP_VERSION_4, constants.IP_VERSION_6)
        for cidr, table in itertools.product(self.cidrs, tables):
            self.device.route.add_route(cidr, table=table)
        for ip_version, table in itertools.product(ip_versions, tables):
            routes = self.device.route.list_routes(ip_version, table=table)
            self.assertGreater(len(routes), 0)
            self.device.route.flush(ip_version, table=table)
            routes = self.device.route.list_routes(ip_version, table=table)
            self.assertEqual([], routes)
class IpAddrCommandTestCase(functional_base.BaseSudoTestCase):
    """Exercise IPDevice.addr.list() with and without the 'scope' filter."""
    def setUp(self):
        super(IpAddrCommandTestCase, self).setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        ip_lib.IPWrapper(self.namespace).add_dummy('test_device')
        self.device = ip_lib.IPDevice('test_device', namespace=self.namespace)
        self.device.link.set_up()
    def test_list_with_scope(self):
        scope_ip = [
            ('global', '192.168.100.1/24'),
            ('global', '2001:db8::1/64'),
            ('link', '192.168.101.1/24'),
            ('link', 'fe80::1:1/64'),
            ('site', 'fec0:0:0:f101::1/64'),
            ('host', '192.168.102.1/24')]
        for scope, cidr in scope_ip:
            self.device.addr.add(cidr, scope=scope)
        # The unfiltered listing must contain every address just added.
        all_cidrs = {entry['cidr'] for entry in self.device.addr.list()}
        for _scope, cidr in scope_ip:
            self.assertIn(cidr, all_cidrs)
        # Filtering by scope must return the matching address.
        for scope, cidr in scope_ip:
            filtered_cidrs = {entry['cidr'] for entry
                              in self.device.addr.list(scope=scope)}
            self.assertIn(cidr, filtered_cidrs)
class GetDevicesWithIpTestCase(functional_base.BaseSudoTestCase):
    """Tests for ip_lib.get_devices_with_ip(), with and without a name
    filter.  Only the first ``num_devices_with_ip`` devices get addresses.
    """
    def setUp(self):
        super().setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        self.devices = []
        self.num_devices = 5
        self.num_devices_with_ip = 3
        for idx in range(self.num_devices):
            dev_name = 'test_device_%s' % idx
            ip_lib.IPWrapper(self.namespace).add_dummy(dev_name)
            device = ip_lib.IPDevice(dev_name, namespace=self.namespace)
            device.link.set_up()
            self.devices.append(device)
        self.cidrs = [netaddr.IPNetwork('10.10.0.0/24'),
                      netaddr.IPNetwork('10.20.0.0/24'),
                      netaddr.IPNetwork('2001:db8:1234:1111::/64'),
                      netaddr.IPNetwork('2001:db8:1234:2222::/64')]
        # Device idx receives address <network address + idx>/<prefixlen>
        # from each test CIDR.
        for idx in range(self.num_devices_with_ip):
            for cidr in self.cidrs:
                self.devices[idx].addr.add(str(cidr.ip + idx) + '/' +
                                           str(cidr.netmask.netmask_bits()))
    @staticmethod
    def _remove_loopback_interface(ip_addresses):
        # Addresses on "lo" exist in every namespace and are irrelevant
        # to these assertions.
        return [ipa for ipa in ip_addresses if
                ipa['name'] != ip_lib.LOOPBACK_DEVNAME]
    @staticmethod
    def _remove_ipv6_scope_link(ip_addresses):
        # Remove all IPv6 addresses with scope link (fe80::...).
        # Fix: the previous filter tested the bare return value of
        # get_ip_version(), which is always truthy (4 or 6), so it removed
        # *every* link-scope address regardless of IP version.  Compare
        # against IP_VERSION_6 explicitly to match the stated intent.
        return [ipa for ipa in ip_addresses if not (
            ipa['scope'] == 'link' and
            utils.get_ip_version(ipa['cidr']) == constants.IP_VERSION_6)]
    @staticmethod
    def _pop_ip_address(ip_addresses, cidr):
        # Remove, in place, the first entry whose 'cidr' matches (if any).
        # Fix: no deep copy is needed to iterate here — at most one element
        # is popped and the function returns immediately afterwards.
        for idx, ip_address in enumerate(ip_addresses):
            if cidr == ip_address['cidr']:
                ip_addresses.pop(idx)
                return
    def test_get_devices_with_ip(self):
        ip_addresses = ip_lib.get_devices_with_ip(self.namespace)
        ip_addresses = self._remove_loopback_interface(ip_addresses)
        ip_addresses = self._remove_ipv6_scope_link(ip_addresses)
        self.assertEqual(self.num_devices_with_ip * len(self.cidrs),
                         len(ip_addresses))
        # Every configured address must be present exactly once.
        for idx in range(self.num_devices_with_ip):
            for cidr in self.cidrs:
                cidr = (str(cidr.ip + idx) + '/' +
                        str(cidr.netmask.netmask_bits()))
                self._pop_ip_address(ip_addresses, cidr)
        self.assertEqual(0, len(ip_addresses))
    def test_get_devices_with_ip_name(self):
        # Devices with addresses: filtering by name returns exactly their
        # configured addresses.
        for idx in range(self.num_devices_with_ip):
            dev_name = 'test_device_%s' % idx
            ip_addresses = ip_lib.get_devices_with_ip(self.namespace,
                                                      name=dev_name)
            ip_addresses = self._remove_loopback_interface(ip_addresses)
            ip_addresses = self._remove_ipv6_scope_link(ip_addresses)
            for cidr in self.cidrs:
                cidr = (str(cidr.ip + idx) + '/' +
                        str(cidr.netmask.netmask_bits()))
                self._pop_ip_address(ip_addresses, cidr)
            self.assertEqual(0, len(ip_addresses))
        # Devices without addresses: filtering by name returns nothing.
        for idx in range(self.num_devices_with_ip, self.num_devices):
            dev_name = 'test_device_%s' % idx
            ip_addresses = ip_lib.get_devices_with_ip(self.namespace,
                                                      name=dev_name)
            ip_addresses = self._remove_loopback_interface(ip_addresses)
            ip_addresses = self._remove_ipv6_scope_link(ip_addresses)
            self.assertEqual(0, len(ip_addresses))
class ListIpRoutesTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for ip_lib route listing inside a fresh namespace."""

    def setUp(self):
        super().setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        self.device_names = ['test_device1', 'test_device2']
        self.device_ips = ['10.0.0.1/24', '10.0.1.1/24']
        self.device_cidrs = [netaddr.IPNetwork(ip_address).cidr for ip_address
                             in self.device_ips]
        # Create one dummy interface per name, bring it up and address it.
        for idx, dev in enumerate(self.device_names):
            ip_lib.IPWrapper(self.namespace).add_dummy(dev)
            device = ip_lib.IPDevice(dev, namespace=self.namespace)
            device.link.set_up()
            device.addr.add(self.device_ips[idx])

    def test_list_ip_routes_multipath(self):
        """A multipath route is listed back with all of its next hops."""
        multipath = [
            {'device': self.device_names[0],
             'via': str(self.device_cidrs[0].ip + 100), 'weight': 10},
            {'device': self.device_names[1],
             'via': str(self.device_cidrs[1].ip + 100), 'weight': 20},
            {'via': str(self.device_cidrs[1].ip + 101), 'weight': 30},
            {'via': str(self.device_cidrs[1].ip + 102)}]
        ip_lib.add_ip_route(self.namespace, '1.2.3.0/24',
                            constants.IP_VERSION_4, via=multipath)
        routes = ip_lib.list_ip_routes(self.namespace, constants.IP_VERSION_4)
        # Adjust expectations to what is listed back: hops 2 and 3 gain an
        # egress device, and the last hop gets a weight of 1 (presumably the
        # kernel defaults — verify against ip_lib.list_ip_routes output).
        multipath[2]['device'] = self.device_names[1]
        multipath[3]['device'] = self.device_names[1]
        multipath[3]['weight'] = 1
        for route in (route for route in routes if
                      route['cidr'] == '1.2.3.0/24'):
            # Only a multipath route stores its next hops as a list.
            if not isinstance(route['via'], list):
                continue
            self.assertEqual(len(multipath), len(route['via']))
            for nexthop in multipath:
                # for/else: fail unless this next hop appears in the route.
                for mp in route['via']:
                    if nexthop != mp:
                        continue
                    break
                else:
                    self.fail('Not matching route, routes: %s' % routes)
            return
        self.fail('Not matching route, routes: %s' % routes)
module.exports = 4; | javascript | github | https://github.com/webpack/webpack | test/cases/chunks/context-weak/dir/four.js |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals.events;
public class ShareAcknowledgementCommitCallbackRegistrationEvent extends ApplicationEvent {
boolean isCallbackRegistered;
public ShareAcknowledgementCommitCallbackRegistrationEvent(boolean isCallbackRegistered) {
super(Type.SHARE_ACKNOWLEDGEMENT_COMMIT_CALLBACK_REGISTRATION);
this.isCallbackRegistered = isCallbackRegistered;
}
public boolean isCallbackRegistered() {
return isCallbackRegistered;
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgementCommitCallbackRegistrationEvent.java |
# -*- coding: utf-8 -*-
import xbmcgui, xbmc, xbmcplugin
enable_debug = False
#######################################
# Xbmc Helpers
#######################################
def select(title, menuItems):
    """Show a selection dialog; return the chosen entry, or None on cancel."""
    choice = xbmcgui.Dialog().select(title, menuItems)
    return menuItems[choice] if choice != -1 else None
def getKeyboard(default = '', heading = '', hidden = False):
    """Prompt with the on-screen keyboard; return '' when not confirmed."""
    keyboard = xbmc.Keyboard(default, heading, hidden)
    keyboard.doModal()
    if not keyboard.isConfirmed():
        return ''
    return keyboard.getText()
def getImage(title):
    """Open a file browser limited to .jpg/.png pictures; return the pick."""
    return xbmcgui.Dialog().browse(1, title, 'pictures', '.jpg|.png', True)
def showMessage(msg):
    # Pop a Kodi notification toast titled "SportsDevil".
    # NOTE(review): str(msg.encode(...)) only makes sense under Python 2;
    # on Python 3 it would render as "b'...'" — confirm target interpreter.
    xbmc.executebuiltin('Notification(SportsDevil,' + str(msg.encode('utf-8', 'ignore')) + ')')
def showBusyAnimation():
    # Open Kodi's built-in busy/spinner dialog.
    xbmc.executebuiltin( 'ActivateWindow(busydialog)' )

def hideBusyAnimation():
    # Close the busy dialog ("true" forces the close).
    xbmc.executebuiltin( 'Dialog.Close(busydialog,true)' )

def closeAllDialogs():
    # Force-close every open dialog window.
    xbmc.executebuiltin('Dialog.Close(all, true)')
def log(msg):
    """Write ``msg`` to the Kodi log when ``enable_debug`` is set.

    ``xbmc.log`` can choke on non-ASCII unicode input (Python 2), so we retry
    with an explicit UTF-8 encode.  The original used a bare ``except:``,
    which also swallowed unrelated errors (including KeyboardInterrupt);
    narrow it to the failures the retry can actually fix.
    """
    if enable_debug:
        try:
            xbmc.log(msg)
        except (UnicodeError, TypeError):
            xbmc.log(msg.encode('utf-8'))
def setSortMethodsForCurrentXBMCList(handle, sortKeys):
    """Register xbmcplugin sort methods for the directory listing ``handle``.

    ``sortKeys`` is an iterable of symbolic names ('name', 'size', ...);
    unknown keys are ignored.  A falsy value registers 'unsorted' only.
    """
    def addSortMethod(method):
        xbmcplugin.addSortMethod(handle = handle, sortMethod = method)

    # ``not sortKeys`` already covers None and every empty sequence; the
    # original's extra ``sortKeys == []`` test was redundant.
    if not sortKeys:
        addSortMethod(xbmcplugin.SORT_METHOD_UNSORTED)
        return

    # Table-driven mapping replaces the original if-chain; the original
    # registration order is preserved.
    methods = (
        ('name', xbmcplugin.SORT_METHOD_LABEL),
        ('size', xbmcplugin.SORT_METHOD_SIZE),
        ('duration', xbmcplugin.SORT_METHOD_DURATION),
        ('genre', xbmcplugin.SORT_METHOD_GENRE),
        ('rating', xbmcplugin.SORT_METHOD_VIDEO_RATING),
        ('date', xbmcplugin.SORT_METHOD_DATE),
        ('file', xbmcplugin.SORT_METHOD_FILE),
    )
    for key, method in methods:
        if key in sortKeys:
            addSortMethod(method)
def getContainerFolderPath():
    # Folder path of the current container (Kodi infolabel).
    return xbmc.getInfoLabel('Container.FolderPath')

def getListItemPath():
    # Path of the currently focused list item.
    return xbmc.getInfoLabel('ListItem.Path')

def getCurrentWindow():
    # Name of the active Kodi window.
    return xbmc.getInfoLabel('System.CurrentWindow')

def getCurrentControl():
    # Focused control in the active window.
    return xbmc.getInfoLabel('System.CurrentControl')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Menu.
Base class for any menu.
"""
# Import: std
import sys
# Import: OpenGL
from OpenGL.GLUT import *
# Import: PyCEGUI
import PyCEGUI
# Import: User
from errors import InitializationError
# Menu
class Menu(object):
    """Base class for a CEGUI menu screen.

    Builds the widget tree (from a .layout file or through code), wires up
    event handlers, and implements activate/deactivate navigation between
    menus.
    """

    # Initialize
    def initialize(self, name, title=None, layout=None, background=None):
        """Create this menu's window hierarchy and return its root window.

        name       -- unique window-name prefix for all created widgets.
        title      -- title text (a placeholder is used when omitted).
        layout     -- optional CEGUI .layout file to load instead of
                      building the tree in code.
        background -- optional imageset name used as full-screen background.
        """
        # If specified, load the layout.
        if layout:
            window = PyCEGUI.WindowManager.getSingleton().loadWindowLayout(layout + ".layout")
        # Otherwise, build it up through code.
        else:
            # 1) Menu root window = background image.
            winMgr = PyCEGUI.WindowManager.getSingleton()
            if background:
                window = winMgr.createWindow('CEGUIDemo/StaticImage', name)
                # 1.a) Disable standard background
                window.setProperty("BackgroundEnabled", "false")
                # 1.b) Set the background image
                window.setProperty("Image", "set:%s image:full_image" % background)
            else:
                window = winMgr.createWindow('CEGUIDemo/FrameWindow', name)
            # 1.d) Set area rectangle
            window.setArea(PyCEGUI.UDim(0, 0), PyCEGUI.UDim(0, 0),
                           PyCEGUI.UDim(1, 0), PyCEGUI.UDim(1, 0))
            # 1.e) Disable frame and standard background
            window.setProperty("FrameEnabled", "false")
            # 2) Logo image
            imgSetMgr = PyCEGUI.ImagesetManager.getSingleton()
            imgSetMgr.createFromImageFile("Logo", "sd-logo.png")
            imgLogo = winMgr.createWindow('CEGUIDemo/StaticImage', name + "/ImgLogo")
            imgLogo.setArea(PyCEGUI.UDim(0, 0), PyCEGUI.UDim(0, 0),
                            PyCEGUI.UDim(0.3, 0), PyCEGUI.UDim(0.25, 0))
            imgLogo.setProperty("FrameEnabled", "false")
            imgLogo.setProperty("BackgroundEnabled", "false")
            imgLogo.setProperty("Image", "set:Logo image:full_image")
            window.addChildWindow(imgLogo)
            # 3) Title
            txtTitle = winMgr.createWindow('CEGUIDemo/PageTitle', name + "/TxtTitle")
            txtTitle.setArea(PyCEGUI.UDim(0.2, 0), PyCEGUI.UDim(0.1, 0),
                             PyCEGUI.UDim(0.7, 0), PyCEGUI.UDim(0.1, 0))
            txtTitle.setText(title or "<undefined title>")
            txtTitle.setTooltipText("Yeah, this is the title of the menu !")
            txtTitle.setProperty("Font", "MenuTitle")
            txtTitle.setProperty("FrameEnabled", "false")
            txtTitle.setProperty("BackgroundEnabled", "true")
            window.addChildWindow(txtTitle)
            # 4) Frame rate indicator
            txtFrameRate = winMgr.createWindow('CEGUIDemo/StaticText', name + "/TxtFrameRate")
            txtFrameRate.setArea(PyCEGUI.UDim(0.95, 0), PyCEGUI.UDim(0.01, 0),
                                 PyCEGUI.UDim(0.04, 0), PyCEGUI.UDim(0.03, 0))
            txtFrameRate.setText("--.-")
            txtFrameRate.setTooltipText("Frame rate (F/s)")
            txtFrameRate.setProperty("Font", "TextSmall")
            txtFrameRate.setProperty("FrameEnabled", "false")
            txtFrameRate.setProperty("BackgroundEnabled", "false")
            txtFrameRate.setProperty("HorzFormatting", "RightAligned")
            txtFrameRate.setProperty("VertFormatting", "TopAligned")
            txtFrameRate.setAlwaysOnTop(True)  # stray ';' removed
            window.addChildWindow(txtFrameRate)
        # Retrieve the root window and its children.
        # NOTE(review): when loaded from a .layout file the children are
        # assumed to follow the same naming scheme — confirm per layout.
        self.window = window
        self.imgLogo = window.getChild(name + "/ImgLogo")
        self.txtTitle = window.getChild(name + "/TxtTitle")
        self.txtFrameRate = window.getChild(name + "/TxtFrameRate")
        # Trace info. about children.
        #print("Menu: Children (n=%d) :" % self.window.getChildCount())
        #for chldInd in range(self.window.getChildCount()):
        #    print(" #%d : name=%r" % (chldInd, self.window.getChildAtIdx(chldInd).getName()))
        # Setup animations.
        self.animFadeIn = PyCEGUI.AnimationManager.getSingleton().instantiateAnimation("MenuFadeIn")
        self.animFadeIn.setTargetWindow(self.window)
        #self.animFadeIn.start()
        #self.animFadeOut = PyCEGUI.AnimationManager.getSingleton().instantiateAnimation("MenuFadeOut")
        #self.animFadeOut.setTargetWindow(self.window)
        #self.animFadeOut.start()
        return window

    # - Wrapper method to define the subscription/listener relationships.
    # - If there are a lot, it may behoove the coder to encapsulate them in methods, then call those methods here.
    def connectHandlers(self):
        """Subscribe the always-on handlers (keyboard + per-frame update)."""
        # Event subscriptions :
        # * keyboard.
        self.window.subscribeEvent(PyCEGUI.Window.EventKeyDown, self, 'onKeyDown')
        # * window update (for the frame rate indicator).
        self.window.subscribeEvent(PyCEGUI.Window.EventWindowUpdated, self, 'onUpdate')

    #def disconnectHandlers(self):
    #    self.window.removeEvent(PyCEGUI.Window.EventKeyDown)
    #    self.window.removeEvent(PyCEGUI.Window.EventWindowUpdated)

    # Setup
    def setup(self):
        """Wire up all event subscriptions for this menu."""
        self.connectHandlers()
        self.window.subscribeEvent(PyCEGUI.Window.EventActivated, self, 'onActivated')
        # Bug fix: 'onDeactivated' was also subscribed to EventActivated,
        # which is why the original debug note observed EventActivated
        # "only fired once ever".  Subscribe it to the deactivation event
        # it was written for.
        self.window.subscribeEvent(PyCEGUI.Window.EventDeactivated, self, 'onDeactivated')

    # Deactivate
    def deactivate(self):
        """Mute, hide and drop focus from this menu's window."""
        print("%s.deactivate" % self.__class__.__name__)
        # Detach.
        #self.disconnectHandlers()
        self.window.setMutedState(True)
        self.window.hide()
        self.window.deactivate()

    # Activate
    def activate(self, previous=None):
        """Make this menu the active GUI sheet and give it the focus.

        previous -- optional menu to remember so back() can return to it.
        """
        print("%s.activate" % self.__class__.__name__)
        # Attach new.
        PyCEGUI.System.getSingleton().setGUISheet(self.window)
        #self.connectHandlers()
        self.window.setMutedState(False)
        self.window.show()
        self.window.activate()  # Set focus (needed for EventKeyDown being received).
        # Save previous menu if specified (in case we need to return to this one).
        if previous:
            self.prevMenu = previous

    # Switch to another menu, remembering this one as the previous menu.
    def switchTo(self, menu):
        self.deactivate()
        menu.activate(previous=self)

    # Return to the previous menu.
    def back(self):
        # Bug fix: ``self.prevMenu`` is only ever assigned in activate(), so
        # reading it directly raised AttributeError when back() was called
        # before any switchTo(); use getattr with a default instead.
        prev = getattr(self, 'prevMenu', None)
        if prev:
            self.deactivate()
            prev.activate()
        else:
            print("Warning: No previous menu to return to ; ignoring.")

    # Update frame rate indicator.
    def onUpdate(self, winArgs):  # Bug: Not an UpdateEventArgs, but a WindowEventArgs !
        self.currTime = glutGet(GLUT_ELAPSED_TIME)
        elapsed = (self.currTime - self.lastTime) / 1000.0
        self.nFrames += 1
        if elapsed >= 1.0:  # Skidding mean for each second
            self.txtFrameRate.setText("%4.1f" % (self.nFrames / elapsed))
            self.nFrames = 0
            self.lastTime = self.currTime

    def onKeyDown(self, keyArgs):
        # Just in case not specialised in actual class.
        print("Menu.onKeyDown: sc=", keyArgs.scancode)
        return False

    def onActivated(self, args):
        print("%s.onActivated" % self.__class__.__name__)
        #self.connectHandlers()
        # Initialize frame rate data.
        self.nFrames = 0
        self.currTime = self.lastTime = glutGet(GLUT_ELAPSED_TIME)
        return False

    def onDeactivated(self, args):
        print("%s.onDeactivated" % self.__class__.__name__)
        #self.disconnectHandlers()
        return False
# $Id: __init__.py 7648 2013-04-18 07:36:22Z milde $
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Reader modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import utils, parsers, Component
from docutils.transforms import universal
if sys.version_info < (2,5):
from docutils._compat import __import__
class Reader(Component):

    """
    Abstract base class for docutils Readers.

    Each reader module or package must export a subclass also called 'Reader'.

    The two steps of a Reader's responsibility are `scan()` and
    `parse()`. Call `read()` to process a document.
    """

    component_type = 'reader'
    config_section = 'readers'

    def get_transforms(self):
        # Extend the transforms inherited from Component with the
        # universal ones every reader applies.
        return Component.get_transforms(self) + [
            universal.Decorations,
            universal.ExposeInternals,
            universal.StripComments,]

    def __init__(self, parser=None, parser_name=None):
        """
        Initialize the Reader instance.

        Several instance attributes are defined with dummy initial values.
        Subclasses may use these attributes as they wish.
        """

        self.parser = parser
        """A `parsers.Parser` instance shared by all doctrees.  May be left
        unspecified if the document source determines the parser."""

        if parser is None and parser_name:
            self.set_parser(parser_name)

        self.source = None
        """`docutils.io` IO object, source of input data."""

        self.input = None
        """Raw text input; either a single string or, for more complex cases,
        a collection of strings."""

    def set_parser(self, parser_name):
        """Set `self.parser` by name."""
        parser_class = parsers.get_parser_class(parser_name)
        self.parser = parser_class()

    def read(self, source, parser, settings):
        # The ``parser`` argument is only used when no parser was supplied
        # (or resolved from ``parser_name``) at construction time.
        self.source = source
        if not self.parser:
            self.parser = parser
        self.settings = settings
        self.input = self.source.read()
        self.parse()
        return self.document

    def parse(self):
        """Parse `self.input` into a document tree."""
        self.document = document = self.new_document()
        self.parser.parse(self.input, document)
        # Clear transient parse-position bookkeeping once parsing is done.
        document.current_source = document.current_line = None

    def new_document(self):
        """Create and return a new empty document tree (root node)."""
        document = utils.new_document(self.source.source_path, self.settings)
        return document
class ReReader(Reader):

    """
    A reader which rereads an existing document tree (e.g. a
    deserializer).

    Often used in conjunction with `writers.UnfilteredWriter`.
    """

    def get_transforms(self):
        # Do not add any transforms.  They have already been applied
        # by the reader which originally created the document.
        return Component.get_transforms(self)
# Maps alternative reader names to canonical reader module names.
_reader_aliases = {}

def get_reader_class(reader_name):
    """Return the Reader class from the `reader_name` module."""
    # Canonicalize and resolve aliases before importing.
    reader_name = reader_name.lower()
    if reader_name in _reader_aliases:
        reader_name = _reader_aliases[reader_name]
    try:
        # Try a package-relative import first (reader modules normally live
        # in this package)...
        module = __import__(reader_name, globals(), locals(), level=1)
    except ImportError:
        # ...and fall back to an absolute import for external readers.
        module = __import__(reader_name, globals(), locals(), level=0)
    return module.Reader
/*
SPDX-License-Identifier: MIT
Source: https://github.com/grafana/grafana/blob/main/public/vendor/flot/jquery.flot.time.js
*/
/* eslint-disable prefer-rest-params */
/* eslint-disable no-useless-concat */
/* eslint-disable default-case */
/* eslint-disable prefer-spread */
/* eslint-disable no-loop-func */
/* eslint-disable @typescript-eslint/no-this-alias */
/* eslint-disable no-redeclare */
/* eslint-disable no-useless-escape */
/* eslint-disable prefer-const */
/* eslint-disable @typescript-eslint/explicit-function-return-type */
/* eslint-disable @typescript-eslint/no-use-before-define */
/* eslint-disable eqeqeq */
/* eslint-disable no-var */
/* Pretty handling of time axes.
Copyright (c) 2007-2013 IOLA and Ole Laursen.
Licensed under the MIT license.
Set axis.mode to "time" to enable. See the section "Time series data" in
API.txt for details.
*/
(function($) {
  // Plugin defaults; Flot merges these into each axis' options.
  const options = {
    xaxis: {
      timezone: null, // "browser" for local to the client or timezone for timezone-js
      timeformat: null, // format string to use
      twelveHourClock: false, // 12 or 24 time in time mode
      monthNames: null, // list of names of months
    },
  };
// round to nearby lower multiple of base
function floorInBase(n, base) {
return base * Math.floor(n / base);
}
// Returns a string with the date d formatted according to fmt.
// A subset of the Open Group's strftime format is supported.
function formatDate(d, fmt, monthNames, dayNames) {
if (typeof d.strftime == 'function') {
return d.strftime(fmt);
}
const leftPad = function(n, pad) {
n = '' + n;
pad = '' + (pad == null ? '0' : pad);
return n.length == 1 ? pad + n : n;
};
const r = [];
let escape = false;
const hours = d.getHours();
const isAM = hours < 12;
if (monthNames == null) {
monthNames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
}
if (dayNames == null) {
dayNames = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
}
let hours12;
if (hours > 12) {
hours12 = hours - 12;
} else if (hours == 0) {
hours12 = 12;
} else {
hours12 = hours;
}
for (let i = 0; i < fmt.length; ++i) {
let c = fmt.charAt(i);
if (escape) {
switch (c) {
case 'a':
c = '' + dayNames[d.getDay()];
break;
case 'b':
c = '' + monthNames[d.getMonth()];
break;
case 'd':
c = leftPad(d.getDate(), '');
break;
case 'e':
c = leftPad(d.getDate(), ' ');
break;
case 'h': // For back-compat with 0.7; remove in 1.0
case 'H':
c = leftPad(hours);
break;
case 'I':
c = leftPad(hours12);
break;
case 'l':
c = leftPad(hours12, ' ');
break;
case 'm':
c = leftPad(d.getMonth() + 1, '');
break;
case 'M':
c = leftPad(d.getMinutes());
break;
// quarters not in Open Group's strftime specification
case 'q':
c = '' + (Math.floor(d.getMonth() / 3) + 1);
break;
case 'S':
c = leftPad(d.getSeconds());
break;
case 'y':
c = leftPad(d.getFullYear() % 100);
break;
case 'Y':
c = '' + d.getFullYear();
break;
case 'p':
c = isAM ? '' + 'am' : '' + 'pm';
break;
case 'P':
c = isAM ? '' + 'AM' : '' + 'PM';
break;
case 'w':
c = '' + d.getDay();
break;
}
r.push(c);
escape = false;
} else {
if (c == '%') {
escape = true;
} else {
r.push(c);
}
}
}
return r.join('');
}
// To have a consistent view of time-based data independent of which time
// zone the client happens to be in we need a date-like object independent
// of time zones. This is done through a wrapper that only calls the UTC
// versions of the accessor methods.
function makeUtcWrapper(d) {
function addProxyMethod(sourceObj, sourceMethod, targetObj, targetMethod) {
sourceObj[sourceMethod] = function() {
return targetObj[targetMethod].apply(targetObj, arguments);
};
}
const utc = {
date: d,
};
// support strftime, if found
if (d.strftime != undefined) {
addProxyMethod(utc, 'strftime', d, 'strftime');
}
addProxyMethod(utc, 'getTime', d, 'getTime');
addProxyMethod(utc, 'setTime', d, 'setTime');
const props = ['Date', 'Day', 'FullYear', 'Hours', 'Milliseconds', 'Minutes', 'Month', 'Seconds'];
for (let p = 0; p < props.length; p++) {
addProxyMethod(utc, 'get' + props[p], d, 'getUTC' + props[p]);
addProxyMethod(utc, 'set' + props[p], d, 'setUTC' + props[p]);
}
return utc;
}
// select time zone strategy. This returns a date-like object tied to the
// desired timezone
function dateGenerator(ts, opts) {
if (opts.timezone == 'browser') {
return new Date(ts);
} else if (!opts.timezone || opts.timezone == 'utc') {
return makeUtcWrapper(new Date(ts));
}
// } else if (typeof timezoneJS != 'undefined' && typeof timezoneJS.Date != 'undefined') {
// const d = new timezoneJS.Date();
// // timezone-js is fickle, so be sure to set the time zone before
// // setting the time.
// d.setTimezone(opts.timezone);
// d.setTime(ts);
// return d;
// }
return makeUtcWrapper(new Date(ts));
}
// map of app. size of time units in milliseconds
const timeUnitSize = {
second: 1000,
minute: 60 * 1000,
hour: 60 * 60 * 1000,
day: 24 * 60 * 60 * 1000,
month: 30 * 24 * 60 * 60 * 1000,
quarter: 3 * 30 * 24 * 60 * 60 * 1000,
year: 365.2425 * 24 * 60 * 60 * 1000,
};
// the allowed tick sizes, after 1 year we use
// an integer algorithm
const baseSpec = [
[1, 'second'],
[2, 'second'],
[5, 'second'],
[10, 'second'],
[30, 'second'],
[1, 'minute'],
[2, 'minute'],
[5, 'minute'],
[10, 'minute'],
[30, 'minute'],
[1, 'hour'],
[2, 'hour'],
[4, 'hour'],
[8, 'hour'],
[12, 'hour'],
[1, 'day'],
[2, 'day'],
[3, 'day'],
[0.25, 'month'],
[0.5, 'month'],
[1, 'month'],
[2, 'month'],
];
// we don't know which variant(s) we'll need yet, but generating both is
// cheap
const specMonths = baseSpec.concat([[3, 'month'], [6, 'month'], [1, 'year']]);
const specQuarters = baseSpec.concat([[1, 'quarter'], [2, 'quarter'], [1, 'year']]);
function init(plot) {
plot.hooks.processOptions.push(function(plot) {
$.each(plot.getAxes(), function(axisName, axis) {
const opts = axis.options;
if (opts.mode == 'time') {
axis.tickGenerator = function(axis) {
const ticks = [];
const d = dateGenerator(axis.min, opts);
let minSize = 0;
// make quarter use a possibility if quarters are
// mentioned in either of these options
const spec =
(opts.tickSize && opts.tickSize[1] === 'quarter') || (opts.minTickSize && opts.minTickSize[1] === 'quarter')
? specQuarters
: specMonths;
if (opts.minTickSize != null) {
if (typeof opts.tickSize == 'number') {
minSize = opts.tickSize;
} else {
minSize = opts.minTickSize[0] * timeUnitSize[opts.minTickSize[1]];
}
}
for (var i = 0; i < spec.length - 1; ++i) {
if (
axis.delta < (spec[i][0] * timeUnitSize[spec[i][1]] + spec[i + 1][0] * timeUnitSize[spec[i + 1][1]]) / 2 &&
spec[i][0] * timeUnitSize[spec[i][1]] >= minSize
) {
break;
}
}
let size = spec[i][0];
let unit = spec[i][1];
// special-case the possibility of several years
if (unit == 'year') {
// if given a minTickSize in years, just use it,
// ensuring that it's an integer
if (opts.minTickSize != null && opts.minTickSize[1] == 'year') {
size = Math.floor(opts.minTickSize[0]);
} else {
const magn = Math.pow(10, Math.floor(Math.log(axis.delta / timeUnitSize.year) / Math.LN10));
const norm = axis.delta / timeUnitSize.year / magn;
if (norm < 1.5) {
size = 1;
} else if (norm < 3) {
size = 2;
} else if (norm < 7.5) {
size = 5;
} else {
size = 10;
}
size *= magn;
}
// minimum size for years is 1
if (size < 1) {
size = 1;
}
}
axis.tickSize = opts.tickSize || [size, unit];
const tickSize = axis.tickSize[0];
unit = axis.tickSize[1];
const step = tickSize * timeUnitSize[unit];
if (unit == 'second') {
d.setSeconds(floorInBase(d.getSeconds(), tickSize));
} else if (unit == 'minute') {
d.setMinutes(floorInBase(d.getMinutes(), tickSize));
} else if (unit == 'hour') {
d.setHours(floorInBase(d.getHours(), tickSize));
} else if (unit == 'month') {
d.setMonth(floorInBase(d.getMonth(), tickSize));
} else if (unit == 'quarter') {
d.setMonth(3 * floorInBase(d.getMonth() / 3, tickSize));
} else if (unit == 'year') {
d.setFullYear(floorInBase(d.getFullYear(), tickSize));
}
// reset smaller components
d.setMilliseconds(0);
if (step >= timeUnitSize.minute) {
d.setSeconds(0);
}
if (step >= timeUnitSize.hour) {
d.setMinutes(0);
}
if (step >= timeUnitSize.day) {
d.setHours(0);
}
if (step >= timeUnitSize.day * 4) {
d.setDate(1);
}
if (step >= timeUnitSize.month * 2) {
d.setMonth(floorInBase(d.getMonth(), 3));
}
if (step >= timeUnitSize.quarter * 2) {
d.setMonth(floorInBase(d.getMonth(), 6));
}
if (step >= timeUnitSize.year) {
d.setMonth(0);
}
let carry = 0;
let v = Number.NaN;
let prev;
do {
prev = v;
v = d.getTime();
ticks.push(v);
if (unit == 'month' || unit == 'quarter') {
if (tickSize < 1) {
// a bit complicated - we'll divide the
// month/quarter up but we need to take
// care of fractions so we don't end up in
// the middle of a day
d.setDate(1);
const start = d.getTime();
d.setMonth(d.getMonth() + (unit == 'quarter' ? 3 : 1));
const end = d.getTime();
d.setTime(v + carry * timeUnitSize.hour + (end - start) * tickSize);
carry = d.getHours();
d.setHours(0);
} else {
d.setMonth(d.getMonth() + tickSize * (unit == 'quarter' ? 3 : 1));
}
} else if (unit == 'year') {
d.setFullYear(d.getFullYear() + tickSize);
} else {
d.setTime(v + step);
}
} while (v < axis.max && v != prev);
return ticks;
};
axis.tickFormatter = function(v, axis) {
const d = dateGenerator(v, axis.options);
// first check global format
if (opts.timeformat != null) {
return formatDate(d, opts.timeformat, opts.monthNames, opts.dayNames);
}
// possibly use quarters if quarters are mentioned in
// any of these places
const useQuarters =
(axis.options.tickSize && axis.options.tickSize[1] == 'quarter') ||
(axis.options.minTickSize && axis.options.minTickSize[1] == 'quarter');
const t = axis.tickSize[0] * timeUnitSize[axis.tickSize[1]];
const span = axis.max - axis.min;
const suffix = opts.twelveHourClock ? ' %p' : '';
const hourCode = opts.twelveHourClock ? '%I' : '%H';
let fmt;
if (t < timeUnitSize.minute) {
fmt = hourCode + ':%M:%S' + suffix;
} else if (t < timeUnitSize.day) {
if (span < 2 * timeUnitSize.day) {
fmt = hourCode + ':%M' + suffix;
} else {
fmt = '%b %d ' + hourCode + ':%M' + suffix;
}
} else if (t < timeUnitSize.month) {
fmt = '%b %d';
} else if ((useQuarters && t < timeUnitSize.quarter) || (!useQuarters && t < timeUnitSize.year)) {
if (span < timeUnitSize.year) {
fmt = '%b';
} else {
fmt = '%b %Y';
}
} else if (useQuarters && t < timeUnitSize.year) {
if (span < timeUnitSize.year) {
fmt = 'Q%q';
} else {
fmt = 'Q%q %Y';
}
} else {
fmt = '%Y';
}
const rt = formatDate(d, fmt, opts.monthNames, opts.dayNames);
return rt;
};
}
});
});
}
  // Register with Flot's plugin registry.
  $.plot.plugins.push({
    init: init,
    options: options,
    name: 'time',
    version: '1.0',
  });
  // Time-axis support used to be in Flot core, which exposed the
  // formatDate function on the plot object. Various plugins depend
  // on the function, so we need to re-expose it here.
  $.plot.formatDate = formatDate;
})(window.jQuery);
name: vllm-test
on:
push:
branches:
- main
- release/*
tags:
- ciflow/vllm/*
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
build:
name: vllm-x-pytorch-build
if: github.repository_owner == 'pytorch'
uses: ./.github/workflows/_linux-build.yml
with:
# When building vLLM, uv doesn't like that we rename wheel without changing the wheel metadata
allow-reuse-old-whl: false
build-additional-packages: "vision audio"
build-external-packages: "vllm"
build-environment: linux-jammy-cuda12.9-py3.12-gcc11
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.9-cudnn9-py3.12-gcc11-vllm
cuda-arch-list: '8.0 8.9 9.0 10.0'
runner: linux.24xlarge.memory
test-matrix: |
{ include: [
{ config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_entrypoints_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_multi_model_processor_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.dgx.b200" },
{ config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
{ config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
{ config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_lora_test", shard: 2, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_lora_test", shard: 3, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_lora_tp_test_distributed", shard: 1, num_shards: 1, runner: "linux.g6.12xlarge.nvidia.gpu"},
{ config: "vllm_distributed_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.12xlarge.nvidia.gpu"}
]}
secrets: inherit
test:
name: vllm-x-pytorch-test
uses: ./.github/workflows/_linux-test.yml
needs: build
with:
build-environment: linux-jammy-cuda12.9-py3.12-gcc11
docker-image: ${{ needs.build.outputs.docker-image }}
test-matrix: ${{ needs.build.outputs.test-matrix }}
secrets: inherit | unknown | github | https://github.com/pytorch/pytorch | .github/workflows/vllm.yml |
/*
* Copyright (C) 2014 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.graph;
import com.google.common.annotations.Beta;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
/**
* A subinterface of {@link Network} which adds mutation methods. When mutation is not required,
* users should prefer the {@link Network} interface.
*
* @author James Sexton
* @author Joshua O'Madadhain
* @param <N> Node parameter type
* @param <E> Edge parameter type
* @since 20.0
*/
@Beta
public interface MutableNetwork<N, E> extends Network<N, E> {
/**
* Adds {@code node} if it is not already present.
*
* <p><b>Nodes must be unique</b>, just as {@code Map} keys must be. They must also be non-null.
*
* @return {@code true} if the network was modified as a result of this call
*/
@CanIgnoreReturnValue
boolean addNode(N node);
/**
* Adds {@code edge} connecting {@code nodeU} to {@code nodeV}.
*
* <p>If the graph is directed, {@code edge} will be directed in this graph; otherwise, it will be
* undirected.
*
* <p><b>{@code edge} must be unique to this graph</b>, just as a {@code Map} key must be. It must
* also be non-null.
*
* <p>If {@code nodeU} and {@code nodeV} are not already present in this graph, this method will
* silently {@link #addNode(Object) add} {@code nodeU} and {@code nodeV} to the graph.
*
* <p>If {@code edge} already connects {@code nodeU} to {@code nodeV} (in the specified order if
* this network {@link #isDirected()}, else in any order), then this method will have no effect.
*
* @return {@code true} if the network was modified as a result of this call
* @throws IllegalArgumentException if {@code edge} already exists in the graph and does not
* connect {@code nodeU} to {@code nodeV}
* @throws IllegalArgumentException if the introduction of the edge would violate {@link
* #allowsParallelEdges()} or {@link #allowsSelfLoops()}
*/
@CanIgnoreReturnValue
boolean addEdge(N nodeU, N nodeV, E edge);
  /**
   * Adds {@code edge} connecting {@code endpoints}. In an undirected network, {@code edge} will
   * also connect the second endpoint to the first endpoint.
   *
   * <p>If this graph is directed, {@code edge} will be directed in this graph; if it is undirected,
   * {@code edge} will be undirected in this graph.
   *
   * <p>If this graph is directed, {@code endpoints} must be ordered.
   *
   * <p><b>{@code edge} must be unique to this graph</b>, just as a {@code Map} key must be. It must
   * also be non-null.
   *
   * <p>If either or both endpoints are not already present in this graph, this method will silently
   * {@link #addNode(Object) add} each missing endpoint to the graph.
   *
   * <p>If {@code edge} already connects an endpoint pair equal to {@code endpoints}, then this
   * method will have no effect.
   *
   * @return {@code true} if the network was modified as a result of this call
   * @throws IllegalArgumentException if {@code edge} already exists in the graph and connects some
   *     other endpoint pair that is not equal to {@code endpoints}
   * @throws IllegalArgumentException if the introduction of the edge would violate {@link
   *     #allowsParallelEdges()} or {@link #allowsSelfLoops()}
   * @throws IllegalArgumentException if the endpoints are unordered and the graph is directed
   * @since 27.1
   */
  @CanIgnoreReturnValue
  boolean addEdge(EndpointPair<N> endpoints, E edge);
  /**
   * Removes {@code node} if it is present; all edges incident to {@code node} will also be removed.
   *
   * <p>If {@code node} is not present, this method is a no-op.
   *
   * @return {@code true} if the network was modified as a result of this call
   */
  @CanIgnoreReturnValue
  boolean removeNode(N node);
  /**
   * Removes {@code edge} from this network, if it is present.
   *
   * <p>If {@code edge} is not present, this method is a no-op.
   *
   * @return {@code true} if the network was modified as a result of this call
   */
  @CanIgnoreReturnValue
  boolean removeEdge(E edge);
} | java | github | https://github.com/google/guava | android/guava/src/com/google/common/graph/MutableNetwork.java |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_vpc
short_description: configure AWS virtual private clouds
description:
- Create or terminates AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
deprecated: >-
Deprecated in 2.3. Use M(ec2_vpc_net) along with supporting modules including
M(ec2_vpc_igw), M(ec2_vpc_route_table), M(ec2_vpc_subnet), M(ec2_vpc_dhcp_options),
M(ec2_vpc_nat_gateway), M(ec2_vpc_nacl).
options:
cidr_block:
description:
- "The cidr block representing the VPC, e.g. C(10.0.0.0/16), required when I(state=present)."
required: false
instance_tenancy:
description:
- "The supported tenancy options for instances launched into the VPC."
required: false
default: "default"
choices: [ "default", "dedicated" ]
dns_support:
description:
- Toggles the "Enable DNS resolution" flag.
required: false
default: "yes"
choices: [ "yes", "no" ]
dns_hostnames:
description:
- Toggles the "Enable DNS hostname support for instances" flag.
required: false
default: "yes"
choices: [ "yes", "no" ]
subnets:
description:
- 'A dictionary array of subnets to add of the form C({ cidr: ..., az: ... , resource_tags: ... }).'
- Where C(az) is the desired availability zone of the subnet, optional.
- 'Tags C(resource_tags) use dictionary form C({ "Environment":"Dev", "Tier":"Web", ...}), optional.'
- C(resource_tags) see resource_tags for VPC below. The main difference is subnet tags not specified here will be deleted.
- All VPC subnets not in this list will be removed as well.
      - As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.
required: false
default: null
vpc_id:
description:
- A VPC id to terminate when I(state=absent).
required: false
default: null
resource_tags:
description:
      - 'A dictionary array of resource tags of the form C({ tag1: value1, tag2: value2 }).'
      - 'Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
required: true
version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC.
required: false
default: "no"
choices: [ "yes", "no" ]
route_tables:
description:
- 'A dictionary array of route tables to add of the form: C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids, interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }). This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
required: false
default: null
wait:
description:
- Wait for the VPC to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
state:
description:
- Create or terminate the VPC.
required: true
choices: [ "present", "absent" ]
author: "Carson Gee (@carsongee)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic creation example:
- ec2_vpc:
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
- ec2_vpc:
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
- 172.22.2.0/24
- 172.22.3.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
- subnets:
- 172.22.1.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
region: us-west-2
register: vpc
# Removal of a VPC by id
- ec2_vpc:
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2
# If you have added elements not managed by this module, e.g. instances, NATs, etc then
# the delete will fail until those dependencies are removed.
'''
import time
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_vpc_info(vpc):
    """
    Summarize a boto VPC object as a plain dictionary of its id,
    CIDR block, DHCP options id, region name and state.
    """
    return {
        'id': vpc.id,
        'cidr_block': vpc.cidr_block,
        'dhcp_options_id': vpc.dhcp_options_id,
        'region': vpc.region.name,
        'state': vpc.state,
    }
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
    """
    Find the single VPC matching either an explicit id, or a CIDR block
    plus the module's resource_tags.

    module  : AnsibleModule object
    vpc_conn: authenticated VPCConnection connection object

    Returns the matching VPC object, or None when no unique match exists.
    Fails the module when neither criterion is given or when more than one
    VPC matches.
    """
    if vpc_id is None and cidr is None:
        module.fail_json(
            msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
        )
    resource_tags = module.params.get('resource_tags')
    matches = []
    if vpc_id is not None:
        matches = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available'})
    else:
        candidates = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
        for candidate in candidates:
            # Pull every tag on the candidate and check that the requested
            # tags form a subset of them.
            tags = dict((t.name, t.value)
                        for t in vpc_conn.get_all_tags(filters={'resource-id': candidate.id}))
            if resource_tags and set(resource_tags.items()).issubset(set(tags.items())):
                matches.append(candidate)
    if len(matches) > 1:
        module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
    return matches[0] if len(matches) == 1 else None
def routes_match(rt_list=None, rt=None, igw=None):
    """
    Check whether every route in the given list is present in a route table.

    rt_list : list of route dicts ({'gw': ..., 'dest': ...}) from the module
    rt      : the remote route table object
    igw     : the internet gateway object for this vpc (used when gw == 'igw')

    Returns:
        True when every provided route matches a remote route.
        False when at least one provided route has no remote counterpart.
    """
    # Original implementation accumulated the string literals "true"/"false"
    # in a list and tested `'false' in match`; use real booleans instead.
    local_routes = []
    for route in rt_list:
        route_kwargs = {
            'gateway_id': None,
            'instance_id': None,
            'interface_id': None,
            'vpc_peering_connection_id': None,
            'state': 'active'
        }
        gw = route['gw']
        # Route targets are dispatched on the id prefix, mirroring boto's
        # create_route keyword arguments.
        if gw == 'igw':
            route_kwargs['gateway_id'] = igw.id
        elif gw.startswith('i-'):
            route_kwargs['instance_id'] = gw
        elif gw.startswith('eni-'):
            route_kwargs['interface_id'] = gw
        elif gw.startswith('pcx-'):
            route_kwargs['vpc_peering_connection_id'] = gw
        else:
            route_kwargs['gateway_id'] = gw
        route_kwargs['destination_cidr_block'] = route['dest']
        local_routes.append(route_kwargs)
    remote_routes = [remote.__dict__ for remote in rt.routes]
    # A local route "matches" when some remote route carries all of its
    # key/value pairs; every local route must match.
    return all(
        any(set(local.items()).issubset(set(remote.items())) for remote in remote_routes)
        for local in local_routes
    )
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
    """
    Checks if the remote routes match the local routes.

    route_tables : route_tables parameter in the module
    vpc_conn : The VPC connection object
    module : The module object
    vpc : The vpc object for this route table
    igw : The internet gateway object for this vpc

    Returns:
        True when there is difference between the provided routes and remote routes
        and if subnet associations are different.
        False when both routes and subnet associations matched.
    """
    # BUG FIX: the previous version ended the outer loop body with an
    # unconditional `return True`, so any play that defined route tables was
    # always reported as changed, breaking idempotence. We now only return
    # True when an actual difference is detected.
    # We add a one for the main table
    rtb_len = len(route_tables) + 1
    remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
    if remote_rtb_len != rtb_len:
        return True
    for rt in route_tables:
        rt_id = None
        for sn in rt['subnets']:
            rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
            if len(rsn) != 1:
                module.fail_json(
                    msg='The subnet {0} to associate with route_table {1} '
                        'does not exist, aborting'.format(sn, rt)
                )
            nrt = vpc_conn.get_all_route_tables(
                filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
            if not nrt:
                # Subnet has no explicit route table association yet.
                return True
            nrt = nrt[0]
            if rt_id is None:
                # First subnet of this table: remember the table id and
                # compare the routes themselves.
                rt_id = nrt.id
                if not routes_match(rt['routes'], nrt, igw):
                    return True
            elif rt_id != nrt.id:
                # Subsequent subnets must be associated with the same table.
                return True
    return False
def create_vpc(module, vpc_conn):
    """
    Creates a new or modifies an existing VPC.
    module : AnsibleModule object
    vpc_conn: authenticated VPCConnection connection object
    Returns:
        A tuple (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
        with information about the VPC and subnets that were launched
    """
    # NOTE(review): `id` shadows the builtin; rename when next touched.
    id = module.params.get('vpc_id')
    cidr_block = module.params.get('cidr_block')
    instance_tenancy = module.params.get('instance_tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    subnets = module.params.get('subnets')
    internet_gateway = module.params.get('internet_gateway')
    route_tables = module.params.get('route_tables')
    vpc_spec_tags = module.params.get('resource_tags')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False
    # Check for existing VPC by cidr_block + tags or id
    previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
    if previous_vpc is not None:
        changed = False
        vpc = previous_vpc
    else:
        changed = True
        try:
            vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
            # wait here until the vpc is available
            pending = True
            # `wait_timeout` is repurposed from a duration into an absolute deadline.
            wait_timeout = time.time() + wait_timeout
            while wait and wait_timeout > time.time() and pending:
                try:
                    pvpc = vpc_conn.get_all_vpcs(vpc.id)
                    if hasattr(pvpc, 'state'):
                        if pvpc.state == "available":
                            pending = False
                    elif hasattr(pvpc[0], 'state'):
                        if pvpc[0].state == "available":
                            pending = False
                # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
                # when that happens, just wait a bit longer and try again
                except boto.exception.BotoServerError as e:
                    if e.error_code != 'InvalidVpcID.NotFound':
                        raise
                if pending:
                    time.sleep(5)
            if wait and wait_timeout <= time.time():
                # waiting took too long
                module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    # Done with base VPC, now change to attributes and features.
    # Add resource tags
    vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
    if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
        new_tags = {}
        for (key, value) in set(vpc_spec_tags.items()):
            if (key, value) not in set(vpc_tags.items()):
                new_tags[key] = value
        if new_tags:
            vpc_conn.create_tags(vpc.id, new_tags)
    # boto doesn't appear to have a way to determine the existing
    # value of the dns attributes, so we just set them.
    # It also must be done one at a time.
    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
    # Process all subnet properties
    if subnets is not None:
        if not isinstance(subnets, list):
            module.fail_json(msg='subnets needs to be a list of cidr blocks')
        current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
        # First add all new subnets
        for subnet in subnets:
            add_subnet = True
            # NOTE(review): subnet_tags_current is assigned but never read.
            subnet_tags_current = True
            new_subnet_tags = subnet.get('resource_tags', {})
            subnet_tags_delete = []
            for csn in current_subnets:
                if subnet['cidr'] == csn.cidr_block:
                    add_subnet = False
                    # Check if AWS subnet tags are in playbook subnet tags
                    existing_tags_subset_of_new_tags = (set(csn.tags.items()).issubset(set(new_subnet_tags.items())))
                    # Check if subnet tags in playbook are in AWS subnet tags
                    new_tags_subset_of_existing_tags = (set(new_subnet_tags.items()).issubset(set(csn.tags.items())))
                    # Remove tags present on AWS but absent from the playbook.
                    if existing_tags_subset_of_new_tags is False:
                        try:
                            for item in csn.tags.items():
                                if item not in new_subnet_tags.items():
                                    subnet_tags_delete.append(item)
                            subnet_tags_delete = [key[0] for key in subnet_tags_delete]
                            delete_subnet_tag = vpc_conn.delete_tags(csn.id, subnet_tags_delete)
                            changed = True
                        except EC2ResponseError as e:
                            module.fail_json(msg='Unable to delete resource tag, error {0}'.format(e))
                    # Add new subnet tags if not current
                    if new_tags_subset_of_existing_tags is False:
                        try:
                            changed = True
                            create_subnet_tag = vpc_conn.create_tags(csn.id, new_subnet_tags)
                        except EC2ResponseError as e:
                            module.fail_json(msg='Unable to create resource tag, error: {0}'.format(e))
            if add_subnet:
                try:
                    new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
                    new_subnet_tags = subnet.get('resource_tags', {})
                    if new_subnet_tags:
                        # Sometimes AWS takes its time to create a subnet and so using new subnets's id
                        # to create tags results in exception.
                        # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
                        # so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
                        while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
                            time.sleep(0.1)
                        vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
                    changed = True
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
        # Now delete all absent subnets
        for csubnet in current_subnets:
            delete_subnet = True
            for subnet in subnets:
                if csubnet.cidr_block == subnet['cidr']:
                    delete_subnet = False
            if delete_subnet:
                try:
                    vpc_conn.delete_subnet(csubnet.id)
                    changed = True
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
    # Handle Internet gateway (create/delete igw)
    igw = None
    igw_id = None
    igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
    if len(igws) > 1:
        module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
    if internet_gateway:
        if len(igws) != 1:
            try:
                igw = vpc_conn.create_internet_gateway()
                vpc_conn.attach_internet_gateway(igw.id, vpc.id)
                changed = True
            except EC2ResponseError as e:
                module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
        else:
            # Set igw variable to the current igw instance for use in route tables.
            igw = igws[0]
    else:
        if len(igws) > 0:
            try:
                vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
                vpc_conn.delete_internet_gateway(igws[0].id)
                changed = True
            except EC2ResponseError as e:
                module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
    if igw is not None:
        igw_id = igw.id
    # Handle route tables - this may be worth splitting into a
    # different module but should work fine here. The strategy to stay
    # idempotent is to basically build all the route tables as
    # defined, track the route table ids, and then run through the
    # remote list of route tables and delete any that we didn't
    # create. This shouldn't interrupt traffic in theory, but is the
    # only way to really work with route tables over time that I can
    # think of without using painful aws ids. Hopefully boto will add
    # the replace-route-table API to make this smoother and
    # allow control of the 'main' routing table.
    if route_tables is not None:
        rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
    # NOTE(review): rtb_needs_change is only bound when route_tables is not
    # None; the short-circuit below relies on that ordering.
    if route_tables is not None and rtb_needs_change:
        if not isinstance(route_tables, list):
            module.fail_json(msg='route tables need to be a list of dictionaries')
        # Work through each route table and update/create to match dictionary array
        all_route_tables = []
        for rt in route_tables:
            try:
                new_rt = vpc_conn.create_route_table(vpc.id)
                new_rt_tags = rt.get('resource_tags', None)
                if new_rt_tags:
                    vpc_conn.create_tags(new_rt.id, new_rt_tags)
                for route in rt['routes']:
                    route_kwargs = {}
                    if route['gw'] == 'igw':
                        if not internet_gateway:
                            module.fail_json(
                                msg='You asked for an Internet Gateway ' \
                                    '(igw) route, but you have no Internet Gateway'
                            )
                        route_kwargs['gateway_id'] = igw.id
                    elif route['gw'].startswith('i-'):
                        route_kwargs['instance_id'] = route['gw']
                    elif route['gw'].startswith('eni-'):
                        route_kwargs['interface_id'] = route['gw']
                    elif route['gw'].startswith('pcx-'):
                        route_kwargs['vpc_peering_connection_id'] = route['gw']
                    else:
                        route_kwargs['gateway_id'] = route['gw']
                    vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)
                # Associate with subnets
                for sn in rt['subnets']:
                    rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
                    if len(rsn) != 1:
                        module.fail_json(
                            msg='The subnet {0} to associate with route_table {1} ' \
                                'does not exist, aborting'.format(sn, rt)
                        )
                    rsn = rsn[0]
                    # Disassociate then associate since we don't have replace
                    old_rt = vpc_conn.get_all_route_tables(
                        filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
                    )
                    old_rt = [ x for x in old_rt if x.id is not None ]
                    if len(old_rt) == 1:
                        old_rt = old_rt[0]
                        association_id = None
                        for a in old_rt.associations:
                            if a.subnet_id == rsn.id:
                                association_id = a.id
                        vpc_conn.disassociate_route_table(association_id)
                    vpc_conn.associate_route_table(new_rt.id, rsn.id)
                all_route_tables.append(new_rt)
                changed = True
            except EC2ResponseError as e:
                module.fail_json(
                    msg='Unable to create and associate route table {0}, error: ' \
                        '{1}'.format(rt, e)
                )
        # Now that we are good to go on our new route tables, delete the
        # old ones except the 'main' route table as boto can't set the main
        # table yet.
        all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
        for rt in all_rts:
            if rt.id is None:
                continue
            delete_rt = True
            for newrt in all_route_tables:
                if newrt.id == rt.id:
                    delete_rt = False
                    break
            if delete_rt:
                rta = rt.associations
                is_main = False
                for a in rta:
                    if a.main:
                        is_main = True
                        break
                try:
                    if not is_main:
                        vpc_conn.delete_route_table(rt.id)
                        changed = True
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
    vpc_dict = get_vpc_info(vpc)
    created_vpc_id = vpc.id
    returned_subnets = []
    current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
    for sn in current_subnets:
        returned_subnets.append({
            'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
            'cidr': sn.cidr_block,
            'az': sn.availability_zone,
            'id': sn.id,
        })
    if subnets is not None:
        # Sort subnets by the order they were listed in the play
        order = {}
        for idx, val in enumerate(subnets):
            order[val['cidr']] = idx
        # Number of subnets in the play
        subnets_in_play = len(subnets)
        returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
    return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
    """
    Terminate a VPC and the resources this module manages inside it.

    module  : AnsibleModule object
    vpc_conn: authenticated VPCConnection connection object
    vpc_id  : a vpc id to terminate
    cidr    : the CIDR block of the VPC - can be used in lieu of an ID

    Returns a tuple (changed, vpc_dict, terminated_vpc_id); changed is True
    only when an available VPC was found and deleted.
    """
    vpc_dict = {}
    terminated_vpc_id = ''
    changed = False
    target = find_vpc(module, vpc_conn, vpc_id, cidr)
    if target is not None and target.state == 'available':
        terminated_vpc_id = target.id
        vpc_dict = get_vpc_info(target)
        try:
            # Dependencies must be removed before the VPC itself:
            # subnets first...
            for sn in vpc_conn.get_all_subnets(filters={'vpc_id': target.id}):
                vpc_conn.delete_subnet(sn.id)
            # ...then any attached internet gateways (detach before delete)...
            attached_igws = vpc_conn.get_all_internet_gateways(
                filters={'attachment.vpc-id': target.id}
            )
            for igw in attached_igws:
                vpc_conn.detach_internet_gateway(igw.id, target.id)
                vpc_conn.delete_internet_gateway(igw.id)
            # ...then every route table except the un-deletable 'main' one.
            for rt in vpc_conn.get_all_route_tables(filters={'vpc_id': target.id}):
                if not any(a.main for a in rt.associations):
                    vpc_conn.delete_route_table(rt.id)
            vpc_conn.delete_vpc(target.id)
        except EC2ResponseError as e:
            module.fail_json(
                msg='Unable to delete VPC {0}, error: {1}'.format(target.id, e)
            )
        changed = True
        vpc_dict['state'] = "terminated"
    return (changed, vpc_dict, terminated_vpc_id)
def main():
    """
    Module entry point: parse arguments, connect to the regional EC2/VPC
    endpoint, then create or terminate a VPC depending on `state`, and exit
    via module.exit_json()/fail_json().
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            cidr_block = dict(),
            instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
            wait = dict(type='bool', default=False),
            wait_timeout = dict(default=300),
            dns_support = dict(type='bool', default=True),
            dns_hostnames = dict(type='bool', default=True),
            subnets = dict(type='list'),
            vpc_id = dict(),
            internet_gateway = dict(type='bool', default=False),
            resource_tags = dict(type='dict', required=True),
            route_tables = dict(type='list'),
            state = dict(choices=['present', 'absent'], default='present'),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    # NOTE(review): this local is never used; the branches below re-read
    # module.params directly.
    state = module.params.get('state')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg = str(e))
    else:
        module.fail_json(msg="region must be specified")
    # igw_id stays None on the 'absent' path.
    igw_id = None
    if module.params.get('state') == 'absent':
        vpc_id = module.params.get('vpc_id')
        cidr = module.params.get('cidr_block')
        (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
        subnets_changed = None
    elif module.params.get('state') == 'present':
        # Changed is always set to true when provisioning a new VPC
        (vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
    module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Run the module only when executed directly.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
__all__ = ["test_tutorial"]
import numpy as np
from .. import kernels
from ..gp import GP
from ..basic import BasicSolver
from ..hodlr import HODLRSolver
def test_tutorial():
    """
    Run the tutorial model end-to-end and check that the basic and HODLR
    solvers agree on the log-probability of the same synthetic data set.
    """
    def model(params, t):
        _, _, amp, loc, sig2 = params
        return amp * np.exp(-0.5 * (t - loc) ** 2 / sig2)

    def lnlike(p, t, y, yerr, solver=BasicSolver):
        a, tau = np.exp(p[:2])
        gp = GP(a * kernels.Matern32Kernel(tau) + 0.001, solver=solver)
        gp.compute(t, yerr)
        return gp.lnlikelihood(y - model(p, t))

    def lnprior(p):
        lna, lntau, amp, loc, sig2 = p
        in_bounds = (-5 < lna < 5 and -5 < lntau < 5 and -10 < amp < 10
                     and -5 < loc < 5 and 0 < sig2 < 3)
        return 0.0 if in_bounds else -np.inf

    def lnprob(p, x, y, yerr, **kwargs):
        lp = lnprior(p)
        if not np.isfinite(lp):
            return -np.inf
        return lp + lnlike(p, x, y, yerr, **kwargs)

    # Deterministic synthetic data set.
    np.random.seed(1234)
    x = np.sort(np.random.rand(50))
    yerr = 0.05 + 0.01 * np.random.rand(len(x))
    y = np.sin(x) + yerr * np.random.randn(len(x))

    p = [0, 0, -1.0, 0.1, 0.4]
    assert np.isfinite(lnprob(p, x, y, yerr)), "Incorrect result"
    assert np.allclose(lnprob(p, x, y, yerr),
                       lnprob(p, x, y, yerr, solver=HODLRSolver)), \
        "Inconsistent results"
"""
MongoDB/GridFS-level code for the contentstore.
"""
import os
import json
import pymongo
import gridfs
from gridfs.errors import NoFile
from fs.osfs import OSFS
from bson.son import SON
from mongodb_proxy import autoretry_read
from opaque_keys.edx.keys import AssetKey
from xmodule.contentstore.content import XASSET_LOCATION_TAG
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.util.misc import escape_invalid_characters
from xmodule.mongo_connection import connect_to_mongodb
from .content import StaticContent, ContentStore, StaticContentStream
class MongoContentStore(ContentStore):
"""
MongoDB-backed ContentStore.
"""
    # pylint: disable=unused-argument, bad-continuation
    def __init__(
        self, host, db,
        port=27017, tz_aware=True, user=None, password=None, bucket='fs', collection=None, **kwargs
    ):
        """
        Establish the connection with the mongo backend and connect to the collections

        :param host: mongo host (or replica-set connection string)
        :param db: database name to use
        :param bucket: GridFS bucket name; files land in `<bucket>.files`
        :param collection: ignored, but provided for consistency w/ other doc_store_config patterns
        """
        # GridFS will throw an exception if the Database is wrapped in a MongoProxy. So don't wrap it.
        # The appropriate methods below are marked as autoretry_read - those methods will handle
        # the AutoReconnect errors.
        proxy = False
        mongo_db = connect_to_mongodb(
            db, host,
            port=port, tz_aware=tz_aware, user=user, password=password, proxy=proxy, **kwargs
        )
        self.fs = gridfs.GridFS(mongo_db, bucket)  # pylint: disable=invalid-name
        self.fs_files = mongo_db[bucket + ".files"]  # the underlying collection GridFS uses
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.fs_files.database.connection.close()
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
self.close_connections()
self.fs_files.database.connection.drop_database(self.fs_files.database)
    def save(self, content):
        """
        Persist `content` (a StaticContent) to GridFS, replacing any existing
        file stored under the same location, and return `content`.
        """
        content_id, content_son = self.asset_db_key(content.location)
        # The way to version files in gridFS is to not use the file id as the _id but just as the filename.
        # Then you can upload as many versions as you like and access by date or version. Because we use
        # the location as the _id, we must delete before adding (there's no replace method in gridFS)
        self.delete(content_id)  # delete is a noop if the entry doesn't exist; so, don't waste time checking
        thumbnail_location = content.thumbnail_location.to_deprecated_list_repr() if content.thumbnail_location else None
        with self.fs.new_file(_id=content_id, filename=unicode(content.location), content_type=content.content_type,
                              displayname=content.name, content_son=content_son,
                              thumbnail_location=thumbnail_location,
                              import_path=content.import_path,
                              # getattr b/c caching may mean some pickled instances don't have attr
                              locked=getattr(content, 'locked', False)) as fp:
            # Stream iterable data chunk-by-chunk; write byte strings at once.
            if hasattr(content.data, '__iter__'):
                for chunk in content.data:
                    fp.write(chunk)
            else:
                fp.write(content.data)
        return content
def delete(self, location_or_id):
"""
Delete an asset.
"""
if isinstance(location_or_id, AssetKey):
location_or_id, _ = self.asset_db_key(location_or_id)
# Deletes of non-existent files are considered successful
self.fs.delete(location_or_id)
    @autoretry_read()
    def find(self, location, throw_on_not_found=True, as_stream=False):
        """
        Look up the asset at `location`.

        Returns a StaticContentStream (open file handle, caller must consume/
        close it) when as_stream is True, otherwise a StaticContent with the
        data fully read into memory. When the asset is missing, raises
        NotFoundError if throw_on_not_found, else returns None.
        """
        content_id, __ = self.asset_db_key(location)
        try:
            if as_stream:
                # Hand the open GridFS file to the stream wrapper; it is NOT
                # closed here.
                fp = self.fs.get(content_id)
                thumbnail_location = getattr(fp, 'thumbnail_location', None)
                if thumbnail_location:
                    thumbnail_location = location.course_key.make_asset_key(
                        'thumbnail',
                        thumbnail_location[4]
                    )
                return StaticContentStream(
                    location, fp.displayname, fp.content_type, fp, last_modified_at=fp.uploadDate,
                    thumbnail_location=thumbnail_location,
                    import_path=getattr(fp, 'import_path', None),
                    length=fp.length, locked=getattr(fp, 'locked', False)
                )
            else:
                # Read the whole file and close it before returning.
                with self.fs.get(content_id) as fp:
                    thumbnail_location = getattr(fp, 'thumbnail_location', None)
                    if thumbnail_location:
                        thumbnail_location = location.course_key.make_asset_key(
                            'thumbnail',
                            thumbnail_location[4]
                        )
                    return StaticContent(
                        location, fp.displayname, fp.content_type, fp.read(), last_modified_at=fp.uploadDate,
                        thumbnail_location=thumbnail_location,
                        import_path=getattr(fp, 'import_path', None),
                        length=fp.length, locked=getattr(fp, 'locked', False)
                    )
        except NoFile:
            if throw_on_not_found:
                raise NotFoundError(content_id)
            else:
                return None
def export(self, location, output_directory):
content = self.find(location)
filename = content.name
if content.import_path is not None:
output_directory = output_directory + '/' + os.path.dirname(content.import_path)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Escape invalid char from filename.
export_name = escape_invalid_characters(name=filename, invalid_char_list=['/', '\\'])
disk_fs = OSFS(output_directory)
with disk_fs.open(export_name, 'wb') as asset_file:
asset_file.write(content.data)
    def export_all_for_course(self, course_key, output_directory, assets_policy_file):
        """
        Export all of this course's assets to the output_directory. Export all of the assets'
        attributes to the policy file.

        Args:
            course_key (CourseKey): the :class:`CourseKey` identifying the course
            output_directory: the directory under which to put all the asset files
            assets_policy_file: the filename for the policy file which should be in the same
                directory as the other policy files.
        """
        policy = {}
        assets, __ = self.get_all_content_for_course(course_key)
        for asset in assets:
            # TODO: On 6/19/14, I had to put a try/except around this
            # to export a course. The course failed on JSON files in
            # the /static/ directory placed in it with an import.
            #
            # If this hasn't been looked at in a while, remove this comment.
            #
            # When debugging course exports, this might be a good place
            # to look. -- pmitros
            self.export(asset['asset_key'], output_directory)
            # Everything except Mongo/GridFS bookkeeping fields goes into the
            # policy file, keyed by asset name.
            for attr, value in asset.iteritems():
                if attr not in ['_id', 'md5', 'uploadDate', 'length', 'chunkSize', 'asset_key']:
                    policy.setdefault(asset['asset_key'].name, {})[attr] = value
        with open(assets_policy_file, 'w') as f:
            json.dump(policy, f, sort_keys=True, indent=4)
def get_all_content_thumbnails_for_course(self, course_key):
return self._get_all_content_for_course(course_key, get_thumbnails=True)[0]
def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
return self._get_all_content_for_course(
course_key, start=start, maxresults=maxresults, get_thumbnails=False, sort=sort, filter_params=filter_params
)
def remove_redundant_content_for_courses(self):
    """
    Finds and removes all redundant files (Mac OS metadata files with filename ".DS_Store"
    or filename starts with "._") for all courses.

    Returns the number of assets that matched (and were deleted).
    """
    assets_to_delete = 0
    # Assets may be keyed the old way (structured '_id') or the new way
    # ('content_son' field); check both locations.
    for prefix in ['_id', 'content_son']:
        query = SON([
            ('{}.tag'.format(prefix), XASSET_LOCATION_TAG),
            ('{}.category'.format(prefix), 'asset'),
            ('{}.name'.format(prefix), {'$regex': ASSET_IGNORE_REGEX}),
        ])
        items = self.fs_files.find(query)
        # NOTE: cursor.count() and collection.remove() are legacy pymongo APIs.
        assets_to_delete = assets_to_delete + items.count()
        for asset in items:
            # Delete the gridFS content chunks for each matched file.
            self.fs.delete(asset[prefix])
        # Then delete the file documents themselves.
        self.fs_files.remove(query)
    return assets_to_delete
@autoretry_read()
def _get_all_content_for_course(self,
                                course_key,
                                get_thumbnails=False,
                                start=0,
                                maxresults=-1,
                                sort=None,
                                filter_params=None):
    '''
    Returns a list of all static assets for a course. The return format is a list of asset data dictionary elements.

    The asset data dictionaries have the following keys:
        asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
        displayname: The human-readable name of the asset
        uploadDate (datetime.datetime): The date and time that the file was uploaded
        contentType: The mimetype string of the asset
        md5: An md5 hash of the asset content

    Returns a (assets, count) tuple where count is the total number of matches
    (before skip/limit paging is applied).
    '''
    query = query_for_course(course_key, "asset" if not get_thumbnails else "thumbnail")
    find_args = {"sort": sort}
    # maxresults <= 0 means "no paging": return everything.
    if maxresults > 0:
        find_args.update({
            "skip": start,
            "limit": maxresults,
        })
    if filter_params:
        query.update(filter_params)
    items = self.fs_files.find(query, **find_args)
    # NOTE: cursor.count() is a legacy pymongo API; it counts all matches,
    # ignoring skip/limit.
    count = items.count()
    assets = list(items)
    # We're constructing the asset key immediately after retrieval from the database so that
    # callers are insulated from knowing how our identifiers are stored.
    for asset in assets:
        # Prefer the 'content_son' form of the id when present; fall back to '_id'.
        asset_id = asset.get('content_son', asset['_id'])
        asset['asset_key'] = course_key.make_asset_key(asset_id['category'], asset_id['name'])
    return assets, count
def set_attr(self, asset_key, attr, value=True):
    """
    Add/set the given attr on the asset at the given location. Does not allow overwriting gridFS built-in
    attrs such as _id, md5, uploadDate, length. Value can be any type which pymongo accepts.

    Returns nothing

    Raises NotFoundError if no such item exists
    Raises AttributeError if attr is one of the built-in attrs.

    :param asset_key: an AssetKey
    :param attr: which attribute to set
    :param value: the value to set it to (any type pymongo accepts such as datetime, number, string)
    """
    # Delegate to the bulk setter, which also does the protected-attr check.
    self.set_attrs(asset_key, {attr: value})
def get_attr(self, location, attr, default=None):
    """
    Get the value of attr set on location. If attr is unset, it returns default. Unlike set, this accessor
    does allow getting the value of reserved keywords.

    :param location: a c4x asset location
    :param attr: the attribute name to look up
    :param default: value returned when the attribute is not present
    """
    return self.get_attrs(location).get(attr, default)
def set_attrs(self, location, attr_dict):
    """
    Like set_attr but sets multiple key value pairs.

    Returns nothing.

    Raises NotFoundError if no such item exists
    Raises AttributeError if attr_dict has any attrs which are one of the built-in attrs.

    :param location: a c4x asset location
    :param attr_dict: mapping of attribute name -> new value
    """
    # Refuse to overwrite gridFS bookkeeping fields.
    # NOTE: iterkeys() is Python 2 only.
    for attr in attr_dict.iterkeys():
        if attr in ['_id', 'md5', 'uploadDate', 'length']:
            raise AttributeError("{} is a protected attribute.".format(attr))
    asset_db_key, __ = self.asset_db_key(location)
    # catch upsert error and raise NotFoundError if asset doesn't exist
    result = self.fs_files.update({'_id': asset_db_key}, {"$set": attr_dict}, upsert=False)
    # With upsert=False, 'updatedExisting' is False when nothing matched.
    if not result.get('updatedExisting', True):
        raise NotFoundError(asset_db_key)
@autoretry_read()
def get_attrs(self, location):
    """
    Gets all of the attributes associated with the given asset. Note, returns even built-in attrs
    such as md5 which you cannot resubmit in an update; so, don't call set_attrs with the result of this
    but only with the set of attrs you want to explicitly update.

    The attrs will be a superset of _id, contentType, chunkSize, filename, uploadDate, & md5

    :param location: a c4x asset location

    Raises NotFoundError if no document exists for the location.
    """
    asset_db_key, __ = self.asset_db_key(location)
    item = self.fs_files.find_one({'_id': asset_db_key})
    if item is None:
        raise NotFoundError(asset_db_key)
    return item
def copy_all_course_assets(self, source_course_key, dest_course_key):
    """
    See :meth:`.ContentStore.copy_all_course_assets`

    This implementation fairly expensively copies all of the data: every
    asset's bytes are pulled from gridFS and re-inserted under a key built
    for the destination course.
    """
    source_query = query_for_course(source_course_key)
    # it'd be great to figure out how to do all of this on the db server and not pull the bits over
    for asset in self.fs_files.find(source_query):
        asset_key = self.make_id_son(asset)
        # don't convert from string until fs access
        source_content = self.fs.get(asset_key)
        # NOTE: basestring is Python 2 only.
        if isinstance(asset_key, basestring):
            asset_key = AssetKey.from_string(asset_key)
        # Rebuild the SON in canonical field order for the destination.
        __, asset_key = self.asset_db_key(asset_key)
        asset_key['org'] = dest_course_key.org
        asset_key['course'] = dest_course_key.course
        if getattr(dest_course_key, 'deprecated', False):  # remove the run if exists
            if 'run' in asset_key:
                del asset_key['run']
            asset_id = asset_key
        else:  # add the run, since it's the last field, we're golden
            asset_key['run'] = dest_course_key.run
            # NOTE: unicode() is Python 2 only.
            asset_id = unicode(
                dest_course_key.make_asset_key(asset_key['category'], asset_key['name']).for_branch(None)
            )
        self.fs.put(
            source_content.read(),
            _id=asset_id, filename=asset['filename'], content_type=asset['contentType'],
            displayname=asset['displayname'], content_son=asset_key,
            # thumbnail is not technically correct but will be functionally correct as the code
            # only looks at the name which is not course relative.
            thumbnail_location=asset['thumbnail_location'],
            import_path=asset['import_path'],
            # getattr b/c caching may mean some pickled instances don't have attr
            locked=asset.get('locked', False)
        )
def delete_all_course_assets(self, course_key):
    """
    Delete all assets identified via this course_key. Dangerous operation which may remove assets
    referenced by other runs or other courses.

    :param course_key: the course whose assets should all be deleted
    """
    course_query = query_for_course(course_key)
    matching_assets = self.fs_files.find(course_query)
    for asset in matching_assets:
        # Normalize the id into the canonical SON/string form, then delete
        # both the gridFS chunks and the file document.
        asset_key = self.make_id_son(asset)
        self.fs.delete(asset_key)
# codifying the original order which pymongo used for the dicts coming out of location_to_dict
# stability of order is more important than sanity of order as any changes to order make things
# unfindable
# NOTE: Mongo compares SON documents key-by-key in order, so reordering these
# fields would orphan every existing asset document.
ordered_key_fields = ['category', 'name', 'course', 'tag', 'org', 'revision']
@classmethod
def asset_db_key(cls, location):
    """
    Returns the database _id and son structured lookup to find the given asset location.

    Returns a (content_id, dbkey) tuple: content_id is the value stored in
    '_id' (the SON itself for deprecated keys, otherwise the unicode string
    form of the location), and dbkey is the ordered SON of key fields.
    """
    dbkey = SON((field_name, getattr(location, field_name)) for field_name in cls.ordered_key_fields)
    if getattr(location, 'deprecated', False):
        content_id = dbkey
    else:
        # NOTE, there's no need to state that run doesn't exist in the negative case b/c access via
        # SON requires equivalence (same keys and values in exact same order)
        dbkey['run'] = location.run
        # NOTE: unicode() is Python 2 only.
        content_id = unicode(location.for_branch(None))
    return content_id, dbkey
def make_id_son(self, fs_entry):
    """
    Change the _id field in fs_entry into the properly ordered SON or string

    Args:
        fs_entry: the element returned by self.fs_files.find

    Returns the normalized id (string for new-style keys, ordered SON for
    deprecated ones) and also writes it back into fs_entry['_id'].
    """
    # If fs_entry has no '_id' key, treat the whole dict as the id itself.
    _id_field = fs_entry.get('_id', fs_entry)
    # NOTE: basestring is Python 2 only.
    if isinstance(_id_field, basestring):
        return _id_field
    dbkey = SON((field_name, _id_field.get(field_name)) for field_name in self.ordered_key_fields)
    if 'run' in _id_field:
        # NOTE, there's no need to state that run doesn't exist in the negative case b/c access via
        # SON requires equivalence (same keys and values in exact same order)
        dbkey['run'] = _id_field['run']
    fs_entry['_id'] = dbkey
    return dbkey
def ensure_indexes(self):
    """
    Create the MongoDB indexes used by asset queries.

    Index needed thru 'category' by `_get_all_content_for_course` and others.
    That query also takes a sort which can be `uploadDate`, `display_name`.

    Each index exists in two flavors: one over the structured '_id'
    (deprecated keys) and one over 'content_son' (new-style keys).
    """
    # (field, direction) pair lists, one per index; all are created sparse
    # and built in the background. Kept in the original creation order.
    index_specs = [
        [
            ('_id.tag', pymongo.ASCENDING),
            ('_id.org', pymongo.ASCENDING),
            ('_id.course', pymongo.ASCENDING),
            ('_id.category', pymongo.ASCENDING),
        ],
        [
            ('content_son.org', pymongo.ASCENDING),
            ('content_son.course', pymongo.ASCENDING),
            ('uploadDate', pymongo.DESCENDING),
        ],
        [
            ('_id.org', pymongo.ASCENDING),
            ('_id.course', pymongo.ASCENDING),
            ('_id.name', pymongo.ASCENDING),
        ],
        [
            ('content_son.org', pymongo.ASCENDING),
            ('content_son.course', pymongo.ASCENDING),
            ('content_son.name', pymongo.ASCENDING),
        ],
        [
            ('_id.org', pymongo.ASCENDING),
            ('_id.course', pymongo.ASCENDING),
            ('uploadDate', pymongo.ASCENDING),
        ],
        [
            ('_id.org', pymongo.ASCENDING),
            ('_id.course', pymongo.ASCENDING),
            ('display_name', pymongo.ASCENDING),
        ],
        [
            ('content_son.org', pymongo.ASCENDING),
            ('content_son.course', pymongo.ASCENDING),
            ('uploadDate', pymongo.ASCENDING),
        ],
        [
            ('content_son.org', pymongo.ASCENDING),
            ('content_son.course', pymongo.ASCENDING),
            ('display_name', pymongo.ASCENDING),
        ],
    ]
    for fields in index_specs:
        self.fs_files.create_index(fields, sparse=True, background=True)
def query_for_course(course_key, category=None):
"""
Construct a SON object that will query for all assets possibly limited to the given type
(thumbnail v assets) in the course using the index in mongo_indexes.md
"""
if getattr(course_key, 'deprecated', False):
prefix = '_id'
else:
prefix = 'content_son'
dbkey = SON([
('{}.tag'.format(prefix), XASSET_LOCATION_TAG),
('{}.org'.format(prefix), course_key.org),
('{}.course'.format(prefix), course_key.course),
])
if category:
dbkey['{}.category'.format(prefix)] = category
if getattr(course_key, 'deprecated', False):
dbkey['{}.run'.format(prefix)] = {'$exists': False}
else:
dbkey['{}.run'.format(prefix)] = course_key.run
return dbkey | unknown | codeparrot/codeparrot-clean | ||
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: ssl-no-revoke
Help: Disable cert revocation checks (Schannel)
Added: 7.44.0
Protocols: TLS
Category: tls
Multi: boolean
See-also:
- crlfile
Example:
- --ssl-no-revoke $URL
---
# `--ssl-no-revoke`
(Schannel) Disable certificate revocation checks. WARNING: this option loosens
the SSL security, and by using this flag you ask for exactly that. | unknown | github | https://github.com/curl/curl | docs/cmdline-opts/ssl-no-revoke.md |
#!/bin/sh
#
# Copyright (c) 2012 Mozilla Foundation
#

test_description='diff.context configuration'

. ./test-lib.sh

# Build a file with enough lines around TARGET that varying diff.context / -U
# visibly changes which context lines land in the hunk. The heredoc body is
# flush-left so <<- tab-stripping has nothing to remove.
test_expect_success 'setup' '
cat >template <<-\EOF &&
firstline
b
c
d
e
f
preline
TARGET
postline
i
j
k
l
m
n
EOF
sed "/TARGET/d" >x <template &&
git update-index --add x &&
git commit -m initial &&
sed "s/TARGET/ADDED/" >x <template &&
git update-index --add x &&
git commit -m next &&
sed "s/TARGET/MODIFIED/" >x <template
'

# With the default of 3 context lines, "e" and "j" are inside the hunk,
# "d" and "k" are just outside.
test_expect_success 'the default number of context lines is 3' '
git diff >output &&
test_grep ! "^ d" output &&
test_grep "^ e" output &&
test_grep "^ j" output &&
test_grep ! "^ k" output
'

# diff.context=8 is wide enough to pull "firstline" into the hunk.
test_expect_success 'diff.context honored by "log"' '
git log -1 -p >output &&
test_grep ! firstline output &&
test_config diff.context 8 &&
git log -1 -p >output &&
test_grep "^ firstline" output
'

# An explicit -U on the command line must win over the config value.
test_expect_success 'The -U option overrides diff.context' '
test_config diff.context 8 &&
git log -U4 -1 >output &&
test_grep ! "^ firstline" output
'

test_expect_success 'diff.context honored by "diff"' '
test_config diff.context 8 &&
git diff >output &&
test_grep "^ firstline" output
'

# Plumbing commands (diff-files) must ignore diff.context.
test_expect_success 'plumbing not affected' '
test_config diff.context 8 &&
git diff-files -p >output &&
test_grep ! "^ firstline" output
'

# Invalid config values must be rejected with a clear error.
test_expect_success 'non-integer config parsing' '
test_config diff.context no &&
test_must_fail git diff 2>output &&
test_grep "bad numeric config value" output
'

test_expect_success 'negative integer config parsing' '
test_config diff.context -1 &&
test_must_fail git diff 2>output &&
test_grep "bad config variable" output
'

# Zero context is legal: the hunk shows only the changed lines.
test_expect_success '-U0 is valid, so is diff.context=0' '
test_config diff.context 0 &&
git diff >output &&
test_grep "^-ADDED" output &&
test_grep "^+MODIFIED" output
'

# INT_MAX context must not overflow and should show the whole file.
test_expect_success '-U2147483647 works' '
echo APPENDED >>x &&
test_line_count = 16 x &&
git diff -U2147483647 >output &&
test_line_count = 22 output &&
test_grep "^-ADDED" output &&
test_grep "^+MODIFIED" output &&
test_grep "^+APPENDED" output
'

test_done
import { execSync } from "child_process";
import * as fs from "fs";
import * as path from "path";
import {
combinePaths,
createGetCanonicalFileName,
getDirectoryPath,
MapLike,
normalizePath,
normalizeSlashes,
sys,
toPath,
version,
} from "../typescript/typescript.js";
import * as ts from "../typescript/typescript.js";
/**
 * Append-only file logger for the typings installer. Logging is enabled
 * only while `logFile` holds a path; the first write failure permanently
 * disables logging instead of crashing the installer.
 */
class FileLog implements ts.server.typingsInstaller.Log {
    constructor(private logFile: string | undefined) {
    }

    isEnabled = (): boolean => typeof this.logFile === "string";

    writeLine = (text: string): void => {
        const file = this.logFile;
        if (typeof file !== "string") {
            return;
        }
        try {
            const line = `[${ts.server.nowString()}] ${text}${sys.newLine}`;
            fs.appendFileSync(file, line);
        }
        catch {
            // Disk full / permission error: silently stop logging.
            this.logFile = undefined;
        }
    };
}
/** Used if `--npmLocation` is not passed. */
function getDefaultNPMLocation(processName: string, validateDefaultNpmLocation: boolean, host: ts.server.InstallTypingHost): string {
if (path.basename(processName).indexOf("node") === 0) {
const npmPath = path.join(path.dirname(process.argv[0]), "npm");
if (!validateDefaultNpmLocation) {
return npmPath;
}
if (host.fileExists(npmPath)) {
return `"${npmPath}"`;
}
}
return "npm";
}
/** On-disk shape of the types-registry index.json: package name -> version map. */
interface TypesRegistryFile {
    entries: MapLike<MapLike<string>>;
}
/**
 * Read and parse the types-registry index file. Any failure (missing file,
 * unreadable content, malformed JSON) is logged and degrades to an empty
 * registry rather than throwing.
 */
function loadTypesRegistryFile(typesRegistryFilePath: string, host: ts.server.InstallTypingHost, log: ts.server.typingsInstaller.Log): Map<string, MapLike<string>> {
    const emptyRegistry = () => new Map<string, MapLike<string>>();

    if (!host.fileExists(typesRegistryFilePath)) {
        if (log.isEnabled()) {
            log.writeLine(`Types registry file '${typesRegistryFilePath}' does not exist`);
        }
        return emptyRegistry();
    }

    try {
        const content = JSON.parse(host.readFile(typesRegistryFilePath)!) as TypesRegistryFile;
        return new Map(Object.entries(content.entries));
    }
    catch (e) {
        if (log.isEnabled()) {
            log.writeLine(`Error when loading types registry file '${typesRegistryFilePath}': ${(e as Error).message}, ${(e as Error).stack}`);
        }
        return emptyRegistry();
    }
}
// npm package that ships the registry of known @types packages.
const typesRegistryPackageName = "types-registry";

/** Path of the registry's index.json inside the global typings cache. */
function getTypesRegistryFileLocation(globalTypingsCacheLocation: string): string {
    return combinePaths(normalizeSlashes(globalTypingsCacheLocation), `node_modules/${typesRegistryPackageName}/index.json`);
}
/** Subset of child_process.execSync options this module uses. */
interface ExecSyncOptions {
    cwd: string;
    encoding: "utf-8";
}
/**
 * Node-process implementation of the typings installer: shells out to npm
 * to install @types packages and talks to the parent tsserver process over
 * the IPC channel (process.send / "message" events).
 */
class NodeTypingsInstaller extends ts.server.typingsInstaller.TypingsInstaller {
    // Path (possibly quoted) of the npm executable used for installs.
    private readonly npmPath: string;
    // Known @types packages, loaded from the types-registry index file.
    readonly typesRegistry: Map<string, MapLike<string>>;

    // Error captured during construction; reported lazily on the first
    // request so the parent is guaranteed to be listening.
    private delayedInitializationError: ts.server.InitializationFailedResponse | undefined;

    constructor(globalTypingsCacheLocation: string, typingSafeListLocation: string, typesMapLocation: string, npmLocation: string | undefined, validateDefaultNpmLocation: boolean, throttleLimit: number, log: ts.server.typingsInstaller.Log) {
        const libDirectory = getDirectoryPath(normalizePath(sys.getExecutingFilePath()));
        super(
            sys,
            globalTypingsCacheLocation,
            typingSafeListLocation ? toPath(typingSafeListLocation, "", createGetCanonicalFileName(sys.useCaseSensitiveFileNames)) : toPath("typingSafeList.json", libDirectory, createGetCanonicalFileName(sys.useCaseSensitiveFileNames)),
            typesMapLocation ? toPath(typesMapLocation, "", createGetCanonicalFileName(sys.useCaseSensitiveFileNames)) : toPath("typesMap.json", libDirectory, createGetCanonicalFileName(sys.useCaseSensitiveFileNames)),
            throttleLimit,
            log,
        );
        this.npmPath = npmLocation !== undefined ? npmLocation : getDefaultNPMLocation(process.argv[0], validateDefaultNpmLocation, this.installTypingHost);

        // If the NPM path contains spaces and isn't wrapped in quotes, do so.
        if (this.npmPath.includes(" ") && this.npmPath[0] !== `"`) {
            this.npmPath = `"${this.npmPath}"`;
        }
        if (this.log.isEnabled()) {
            this.log.writeLine(`Process id: ${process.pid}`);
            this.log.writeLine(`NPM location: ${this.npmPath} (explicit '${ts.server.Arguments.NpmLocation}' ${npmLocation === undefined ? "not " : ""} provided)`);
            this.log.writeLine(`validateDefaultNpmLocation: ${validateDefaultNpmLocation}`);
        }

        this.ensurePackageDirectoryExists(globalTypingsCacheLocation);

        try {
            if (this.log.isEnabled()) {
                this.log.writeLine(`Updating ${typesRegistryPackageName} npm package...`);
            }
            this.execSyncAndLog(`${this.npmPath} install --ignore-scripts ${typesRegistryPackageName}@${this.latestDistTag}`, { cwd: globalTypingsCacheLocation });
            if (this.log.isEnabled()) {
                this.log.writeLine(`Updated ${typesRegistryPackageName} npm package`);
            }
        }
        catch (e) {
            if (this.log.isEnabled()) {
                this.log.writeLine(`Error updating ${typesRegistryPackageName} package: ${(e as Error).message}`);
            }
            // store error info to report it later when it is known that server is already listening to events from typings installer
            this.delayedInitializationError = {
                kind: "event::initializationFailed",
                message: (e as Error).message,
                stack: (e as Error).stack,
            };
        }

        this.typesRegistry = loadTypesRegistryFile(getTypesRegistryFileLocation(globalTypingsCacheLocation), this.installTypingHost, this.log);
    }

    override handleRequest(req: ts.server.TypingInstallerRequestUnion): void {
        if (this.delayedInitializationError) {
            // report initializationFailed error
            this.sendResponse(this.delayedInitializationError);
            this.delayedInitializationError = undefined;
        }
        super.handleRequest(req);
    }

    // Send a response back to the parent tsserver process over IPC.
    protected sendResponse(response: ts.server.TypingInstallerResponseUnion): void {
        if (this.log.isEnabled()) {
            this.log.writeLine(`Sending response:${ts.server.stringifyIndented(response)}`);
        }
        process.send!(response); // TODO: GH#18217
        if (this.log.isEnabled()) {
            this.log.writeLine(`Response has been sent.`);
        }
    }

    // Run `npm install` for the given packages; reports success via callback.
    protected installWorker(requestId: number, packageNames: string[], cwd: string, onRequestCompleted: ts.server.typingsInstaller.RequestCompletedAction): void {
        if (this.log.isEnabled()) {
            this.log.writeLine(`#${requestId} with cwd: ${cwd} arguments: ${JSON.stringify(packageNames)}`);
        }
        const start = Date.now();
        const hasError = ts.server.typingsInstaller.installNpmPackages(this.npmPath, version, packageNames, command => this.execSyncAndLog(command, { cwd }));
        if (this.log.isEnabled()) {
            this.log.writeLine(`npm install #${requestId} took: ${Date.now() - start} ms`);
        }
        onRequestCompleted(!hasError);
    }

    /** Returns 'true' in case of error. */
    private execSyncAndLog(command: string, options: Pick<ExecSyncOptions, "cwd">): boolean {
        if (this.log.isEnabled()) {
            this.log.writeLine(`Exec: ${command}`);
        }
        try {
            const stdout = execSync(command, { ...options, encoding: "utf-8" });
            if (this.log.isEnabled()) {
                this.log.writeLine(`    Succeeded. stdout:${indent(sys.newLine, stdout)}`);
            }
            return false;
        }
        catch (error) {
            // execSync errors carry the child's captured output.
            const { stdout, stderr } = error;
            this.log.writeLine(`    Failed. stdout:${indent(sys.newLine, stdout)}${sys.newLine}    stderr:${indent(sys.newLine, stderr)}`);
            return true;
        }
    }
}
// Parse command-line arguments forwarded by the parent tsserver process.
const logFilePath = ts.server.findArgument(ts.server.Arguments.LogFile);
const globalTypingsCacheLocation = ts.server.findArgument(ts.server.Arguments.GlobalCacheLocation);
const typingSafeListLocation = ts.server.findArgument(ts.server.Arguments.TypingSafeListLocation);
const typesMapLocation = ts.server.findArgument(ts.server.Arguments.TypesMapLocation);
const npmLocation = ts.server.findArgument(ts.server.Arguments.NpmLocation);
const validateDefaultNpmLocation = ts.server.hasArgument(ts.server.Arguments.ValidateDefaultNpmLocation);

const log = new FileLog(logFilePath);
if (log.isEnabled()) {
    process.on("uncaughtException", (e: Error) => {
        log.writeLine(`Unhandled exception: ${e} at ${e.stack}`);
    });
}
// Exit when the parent tsserver process goes away.
process.on("disconnect", () => {
    if (log.isEnabled()) {
        log.writeLine(`Parent process has exited, shutting down...`);
    }
    process.exit(0);
});

// Installer is created lazily on the first IPC message (its constructor
// runs `npm install` for the types registry, which is expensive).
let installer: NodeTypingsInstaller | undefined;
process.on("message", (req: ts.server.TypingInstallerRequestUnion) => {
    installer ??= new NodeTypingsInstaller(globalTypingsCacheLocation!, typingSafeListLocation!, typesMapLocation!, npmLocation, validateDefaultNpmLocation, /*throttleLimit*/ 5, log); // TODO: GH#18217
    installer.handleRequest(req);
});
function indent(newline: string, str: string | undefined): string {
return str && str.length
? `${newline} ` + str.replace(/\r?\n/, `${newline} `)
: "";
} | typescript | github | https://github.com/microsoft/TypeScript | src/typingsInstaller/nodeTypingsInstaller.ts |
"""
/***************************************************************************
Plugin Installer module
Plugin version comparison functions
-------------------
Date : 2008-11-24
Copyright : (C) 2008 by Borys Jurgiel
Email : info at borysjurgiel dot pl
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Here is Python function for comparing version numbers. It's case insensitive
and recognizes all major notations, prefixes (ver. and version), delimiters
(. - and _) and suffixes (alpha, beta, rc, preview and trunk).
Usage: compareVersions(version1, version2)
The function accepts arguments of any type convertible to Unicode string
and returns integer value:
0 - the versions are equal
1 - version 1 is higher
2 - version 2 is higher
-----------------------------------------------------------------------------
HOW DOES IT WORK...
First, both arguments are converted to uppercase Unicode and stripped of
'VERSION' or 'VER.' prefix. Then they are chopped into a list of particular
numeric and alphabetic elements. The dots, dashes and underlines are recognized
as delimiters. Also numbers and non numbers are separated. See example below:
'Ver 0.03-120_rc7foo' is converted to ['0','03','120','RC','7','FOO']
Then every pair of elements, from left to right, is compared as string
or as number to provide the best result (you know, 11>9 but also '03'>'007').
The comparing stops when one of elements is greater. If comparing achieves
the end of the shorter list and the matter is still unresolved, the longer
list is usually recognized as higher, except following suffixes:
ALPHA, BETA, RC, PREVIEW and TRUNK which make the version number lower.
"""
from builtins import str
from builtins import range
from qgis.core import Qgis
import re
# ------------------------------------------------------------------------ #
def normalizeVersion(s):
    """Strip any leading version/revision prefix from the given string and
    return it uppercased and whitespace-trimmed.

    Returns '' for falsy input.
    """
    prefixes = ['VERSION', 'VER.', 'VER', 'V.', 'V', 'REVISION', 'REV.', 'REV', 'R.', 'R']
    if not s:
        return str()
    s = str(s).upper()
    for i in prefixes:
        if s.startswith(i):
            # Remove only the *leading* prefix. The previous str.replace()
            # deleted every occurrence, so e.g. 'v1.0-preview' became
            # '1.0-PREIEW' (the 'V' in PREVIEW was stripped too).
            s = s[len(i):]
    s = s.strip()
    return s
# ------------------------------------------------------------------------ #
def classifyCharacter(c):
    """Return 0 for delimiter, 1 for digit and 2 for any other character."""
    delimiters = (".", "-", "_", " ")
    if c in delimiters:
        return 0
    return 1 if c.isdigit() else 2
# ------------------------------------------------------------------------ #
def chopString(s):
    """Convert a version string into a list of numeric and alphabetic runs.

    Delimiters (classifyCharacter == 0) are dropped; consecutive characters
    of the same class are merged into one element.
    e.g. '0.03-120RC7FOO' -> ['0', '03', '120', 'RC', '7', 'FOO']
    """
    if not s:
        # Guard: the original indexed s[0] and raised IndexError on '',
        # which is reachable via compareVersions('ver', ...) after the
        # prefix is stripped away.
        return []
    # NOTE: a leading delimiter character is kept as the first element,
    # matching the original behavior.
    l = [s[0]]
    for i in range(1, len(s)):
        if classifyCharacter(s[i]) == 0:
            pass  # drop delimiters
        elif classifyCharacter(s[i]) == classifyCharacter(s[i - 1]):
            # same class as previous char: extend the current run
            l[len(l) - 1] += s[i]
        else:
            l += [s[i]]
    return l
# ------------------------------------------------------------------------ #
def compareElements(s1, s2):
    """Compare two version fragments.

    Returns 0 when equal, 1 when s1 is higher, 2 when s2 is higher.
    Purely numeric fragments (not starting with '0') compare numerically;
    everything else compares as strings, with the pre-release tags
    ALPHA/BETA/PREVIEW/RC/TRUNK sorting below any other value.
    """
    if s1 == s2:
        return 0
    # Numeric comparison, but only when neither side has a leading zero
    # (so '03' vs '007' still compares as strings: '03' > '007').
    numeric = s1 and s2 and s1.isnumeric() and s2.isnumeric()
    if numeric and s1[0] != '0' and s2[0] != '0':
        n1, n2 = float(s1), float(s2)
        if n1 == n2:
            return 0
        return 1 if n1 > n2 else 2
    # String comparison: prefix non-prerelease fragments with 'Z' so that
    # ALPHA < BETA < PREVIEW < RC < TRUNK < '' (nothing) < anything else.
    prerelease = ['ALPHA', 'BETA', 'PREVIEW', 'RC', 'TRUNK']
    if s1 not in prerelease:
        s1 = 'Z' + s1
    if s2 not in prerelease:
        s2 = 'Z' + s2
    return 1 if s1 > s2 else 2
# ------------------------------------------------------------------------ #
def compareVersions(a, b):
    """Compare two version numbers.

    Returns 0 if a == b (or on unusable input), 1 if a > b and 2 if b > a.
    """
    if not a or not b:
        return 0
    a = normalizeVersion(a)
    b = normalizeVersion(b)
    if a == b:
        return 0
    # convert the strings to lists of numeric/alphabetic runs
    v1 = chopString(a)
    v2 = chopString(b)
    # compare within the common length
    l = min(len(v1), len(v2))
    for i in range(l):
        # Compute once per pair; the original called compareElements()
        # twice (once for the test, once for the return value).
        result = compareElements(v1[i], v2[i])
        if result:
            return result
    # if the lists are identical till the end of the shorter string, try to compare the odd tail
    # with the simple space (because the 'alpha', 'beta', 'preview' and 'rc' are LESS than nothing)
    if len(v1) > l:
        return compareElements(v1[l], u' ')
    if len(v2) > l:
        return compareElements(u' ', v2[l])
    # if everything else fails...
    if a > b:
        return 1
    return 2
"""
COMPARE CURRENT QGIS VERSION WITH qgisMinimumVersion AND qgisMaximumVersion
ALLOWED FORMATS ARE: major.minor OR major.minor.bugfix, where each segment must be 0..99
"""
def splitVersion(s):
    """Split 'X.Y' or 'X.Y.Z' into its numeric string segments.

    Returns the list of segments, or None when the input is not a string,
    has a non-numeric segment, a segment above 99, or the wrong number of
    segments.
    """
    if not s or type(s) != str:
        return None
    segments = str(s).split('.')
    for segment in segments:
        if not segment.isnumeric() or int(segment) > 99:
            return None
    if len(segments) in (2, 3):
        return segments
    return None
def isCompatible(curVer, minVer, maxVer):
    """Check whether the current QGIS version falls within [minVer, maxVer]."""
    if not minVer or not curVer or not maxVer:
        return False

    def parse(v):
        # Drop any non-numeric/non-dot characters before splitting.
        return splitVersion(re.sub(r'[^0-9.]+', '', v))

    minVer = parse(minVer)
    maxVer = parse(maxVer)
    curVer = parse(curVer)
    if not minVer or not curVer or not maxVer:
        return False
    # Pad two-segment versions: the minimum/current default the bugfix
    # segment to 0, the maximum to 99 (most permissive).
    if len(minVer) < 3:
        minVer.append("0")
    if len(curVer) < 3:
        curVer.append("0")
    if len(maxVer) < 3:
        maxVer.append("99")

    def sort_key(v):
        # Zero-padded fixed-width string makes lexicographic == numeric order.
        return "{:04n}{:04n}{:04n}".format(int(v[0]), int(v[1]), int(v[2]))

    return sort_key(minVer) <= sort_key(curVer) <= sort_key(maxVer)
def pyQgisVersion():
    """Return current QGIS version number as X.Y.Z for testing plugin compatibility.

    If Y = 99, bump up to (X+1.0.0), so e.g. 2.99 becomes 3.0.0.
    This way QGIS X.99 is only compatible with plugins for the upcoming
    major release.
    """
    major, minor, patch = re.findall(r'^(\d*).(\d*).(\d*)', Qgis.QGIS_VERSION)[0]
    if minor == '99':
        # Development series: report it as the upcoming major release.
        major = str(int(major) + 1)
        minor = patch = '0'
    return '{}.{}.{}'.format(major, minor, patch)
# Matrix-suite mapping: run replica_sets_jscore_passthrough_base with the
# multiversion overrides for the last-continuous "new-new-old" replica
# fixture topology (and the temporary FCV-upgrade exclusions).
base_suite: replica_sets_jscore_passthrough_base
overrides:
  - "multiversion.replica_sets_temporarily_disable_due_to_fcv_upgrade"
  - "multiversion.replica_fixture_last_continuous_new_new_old"
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v34.multiple_stats_cloudwatch.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "red",
"name": "CloudWatch Annotation Single Statistic",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-123456"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Average"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "blue",
"name": "CloudWatch Annotation Multiple Statistics - Maximum",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-789012"
},
"namespace": "AWS/RDS",
"prefixMatching": false,
"region": "us-west-2",
"statistic": "Maximum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "green",
"name": "CloudWatch Annotation Empty Statistics",
"legacyOptions": {
"dimensions": {
"LoadBalancer": "my-lb"
},
"namespace": "AWS/ApplicationELB",
"prefixMatching": false,
"region": "us-west-1"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - InvalidStat",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "InvalidStat"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "orange",
"name": "CloudWatch Annotation with Null in Statistics - null",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-null-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "pink",
"name": "CloudWatch Annotation Only Invalid Statistics - 123",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-invalid-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": 123
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "purple",
"name": "Non-CloudWatch Annotation"
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "blue",
"name": "CloudWatch Annotation Multiple Statistics - Minimum",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-789012"
},
"namespace": "AWS/RDS",
"prefixMatching": false,
"region": "us-west-2",
"statistic": "Minimum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "blue",
"name": "CloudWatch Annotation Multiple Statistics - Sum",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-789012"
},
"namespace": "AWS/RDS",
"prefixMatching": false,
"region": "us-west-2",
"statistic": "Sum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - Sum",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Sum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - null",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - Average",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Average"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "orange",
"name": "CloudWatch Annotation with Null in Statistics - Average",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-null-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Average"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "orange",
"name": "CloudWatch Annotation with Null in Statistics - ",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-null-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": ""
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "pink",
"name": "CloudWatch Annotation Only Invalid Statistics - true",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-invalid-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": true
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "pink",
"name": "CloudWatch Annotation Only Invalid Statistics - [object Object]",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-invalid-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": {}
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "CloudWatch Single Query Multiple Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-123456"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"period": "300",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-123456"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"period": "300",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-123456"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"period": "300",
"region": "us-east-1",
"statistic": "Minimum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-10": {
"kind": "Panel",
"spec": {
"id": 10,
"title": "CloudWatch Query Missing Editor Fields",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-missing-fields"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "CloudWatch Query with Expression (Code Mode)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-with-expression"
},
"expression": "SEARCH('{AWS/EC2,InstanceId} MetricName=\"CPUUtilization\"', 'Average', 300)",
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-with-expression"
},
"expression": "SEARCH('{AWS/EC2,InstanceId} MetricName=\"CPUUtilization\"', 'Average', 300)",
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-12": {
"kind": "Panel",
"spec": {
"id": 12,
"title": "CloudWatch Insights Query Missing Editor Mode",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-insights"
},
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 1,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "CloudWatch Query with Null Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": ""
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-14": {
"kind": "Panel",
"spec": {
"id": 14,
"title": "CloudWatch Query with Only Invalid Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": 123
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": true
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": {}
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": []
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-15": {
"kind": "Panel",
"spec": {
"id": 15,
"title": "Non-CloudWatch Panel",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"expr": "cpu_usage"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "CloudWatch Single Query Single Statistic",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"LoadBalancer": "my-load-balancer"
},
"metricEditorMode": 0,
"metricName": "RequestCount",
"metricQueryType": 0,
"namespace": "AWS/ApplicationELB",
"region": "us-west-2",
"statistic": "Sum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "CloudWatch Query No Statistics Array",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"DBInstanceIdentifier": "my-db"
},
"metricEditorMode": 0,
"metricName": "DatabaseConnections",
"metricQueryType": 0,
"namespace": "AWS/RDS",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Mixed CloudWatch and Non-CloudWatch Queries",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"QueueName": "my-queue"
},
"metricEditorMode": 0,
"metricName": "ApproximateNumberOfMessages",
"metricQueryType": 0,
"namespace": "AWS/SQS",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"expr": "up"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"TopicName": "my-topic"
},
"metricEditorMode": 0,
"metricName": "NumberOfMessagesPublished",
"metricQueryType": 0,
"namespace": "AWS/SNS",
"region": "us-west-1",
"statistic": "Sum"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"QueueName": "my-queue"
},
"metricEditorMode": 0,
"metricName": "ApproximateNumberOfMessages",
"metricQueryType": 0,
"namespace": "AWS/SQS",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "CloudWatch Query Empty Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"BucketName": "my-bucket"
},
"metricEditorMode": 0,
"metricName": "BucketSizeBytes",
"metricQueryType": 0,
"namespace": "AWS/S3",
"region": "us-east-1"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "CloudWatch Query Invalid Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": "InvalidStat"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": ""
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "E",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "Nested CloudWatch Query Multiple Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"StreamName": "my-stream"
},
"metricEditorMode": 0,
"metricName": "IncomingRecords",
"metricQueryType": 0,
"namespace": "AWS/Kinesis",
"region": "us-east-1",
"statistic": "Sum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"StreamName": "my-stream"
},
"metricEditorMode": 0,
"metricName": "IncomingRecords",
"metricQueryType": 0,
"namespace": "AWS/Kinesis",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"StreamName": "my-stream"
},
"metricEditorMode": 0,
"metricName": "IncomingRecords",
"metricQueryType": 0,
"namespace": "AWS/Kinesis",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "CloudWatch Query with Existing Editor Mode",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"ClusterName": "my-cluster"
},
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 1,
"namespace": "AWS/ECS",
"period": "300",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"ClusterName": "my-cluster"
},
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 1,
"namespace": "AWS/ECS",
"period": "300",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Collapsed Row with CloudWatch",
"collapse": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-10"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-12"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-14"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-15"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "CloudWatch Multiple Statistics Test Dashboard",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v34.multiple_stats_cloudwatch.v42.v2alpha1.json |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
print "DJANGO_ROOT" + DJANGO_ROOT
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
#DEBUG = False
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
#TEMPLATE_DEBUG = DEBUG
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'db.sqlite3')),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
#SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = 'y3s*z3x_r*^u*2rdkdapcain-ys1rb255pvtfpbi3#0o0l91=k'
#SECRET_KEY = r"{{ secret_key }}"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## SOUTH CONFIGURATION
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
# Don't need to use South when setting up a test database.
SOUTH_TESTS_MIGRATE = False
########## END SOUTH CONFIGURATION | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import copy
from lxml import etree, html
from openerp import SUPERUSER_ID, api
from openerp.addons.website.models import website
from openerp.http import request
from openerp.osv import osv, fields
class view(osv.osv):
    """Website extension of ``ir.ui.view``.

    Adds page/SEO metadata columns and the helpers used by the website
    editor to walk view inheritance trees and save edited field content
    back to the originating records.
    """
    _inherit = "ir.ui.view"
    _columns = {
        # True when this view is a complete, standalone web page template
        # (as opposed to a snippet or an inheriting extension).
        'page': fields.boolean("Whether this view is a web page template (complete)"),
        # SEO metadata rendered into the page <head>; sizes follow common
        # search-engine display limits (70 / 160 chars).
        'website_meta_title': fields.char("Website meta title", size=70, translate=True),
        'website_meta_description': fields.text("Website meta description", size=160, translate=True),
        'website_meta_keywords': fields.char("Website meta keywords", translate=True),
        # When True, the view is listed as a toggleable option in the
        # website "Customize" menu.
        'customize_show': fields.boolean("Show As Optional Inherit"),
    }
    _defaults = {
        'page': False,
        'customize_show': False,
    }
def _view_obj(self, cr, uid, view_id, context=None):
if isinstance(view_id, basestring):
return self.pool['ir.model.data'].xmlid_to_object(
cr, uid, view_id, raise_if_not_found=True, context=context
)
elif isinstance(view_id, (int, long)):
return self.browse(cr, uid, view_id, context=context)
# assume it's already a view object (WTF?)
return view_id
# Returns all views (called and inherited) related to a view
# Used by translation mechanism, SEO and optional templates
    # Returns all views (called and inherited) related to a view
    # Used by translation mechanism, SEO and optional templates
    def _views_get(self, cr, uid, view_id, options=True, context=None, root=True):
        """ For a given view ``view_id``, should return:
        * the view itself
        * all views inheriting from it, enabled or not
          - but not the optional children of a non-enabled child
        * all views called from it (via t-call)

        :param options: when False, only active inheriting children are
                        traversed; when True all children are included.
        :param root: when True (first call), climb to the root of the
                     inheritance chain before collecting, so the whole
                     tree is returned regardless of the starting view.
        :return: list of view browse records, deduplicated, in traversal
                 order (possibly empty when ``view_id`` cannot be resolved).
        """
        try:
            view = self._view_obj(cr, uid, view_id, context=context)
        except ValueError:
            # unresolvable xml id: silently return nothing.
            # Shall we log that ?
            return []
        # climb to the top-most ancestor so the full tree is collected
        while root and view.inherit_id:
            view = view.inherit_id
        result = [view]
        # follow every t-call in the arch to pull in the called templates
        # (and, recursively, everything they depend on)
        node = etree.fromstring(view.arch)
        for child in node.xpath("//t[@t-call]"):
            try:
                called_view = self._view_obj(cr, uid, child.get('t-call'), context=context)
            except ValueError:
                continue
            if called_view not in result:
                result += self._views_get(cr, uid, called_view, options=options, context=context)
        extensions = view.inherit_children_ids
        if not options:
            # only active children
            extensions = (v for v in view.inherit_children_ids if v.active)
        # Keep options in a deterministic order regardless of their applicability
        for extension in sorted(extensions, key=lambda v: v.id):
            for r in self._views_get(
                    cr, uid, extension,
                    # only return optional grandchildren if this child is enabled
                    options=extension.active,
                    context=context, root=False):
                if r not in result:
                    result.append(r)
        return result
def extract_embedded_fields(self, cr, uid, arch, context=None):
return arch.xpath('//*[@data-oe-model != "ir.ui.view"]')
    def save_embedded_field(self, cr, uid, el, context=None):
        """ Persist an embedded field node back to its owning record.

        ``el`` carries ``data-oe-model``/``data-oe-field``/``data-oe-id``/
        ``data-oe-type`` attributes identifying the record, field and the
        converter to use; the converter turns the HTML back into a field
        value which is then written to the record.
        """
        Model = self.pool[el.get('data-oe-model')]
        field = el.get('data-oe-field')
        # pick the html->field converter matching the field's display type
        converter = self.pool['website.qweb'].get_converter_for(el.get('data-oe-type'))
        value = converter.from_html(cr, uid, Model, Model._fields[field], el)
        # None means "nothing to write" (converter could not extract a value)
        if value is not None:
            # TODO: batch writes?
            Model.write(cr, uid, [int(el.get('data-oe-id'))], {
                field: value
            }, context=context)
def to_field_ref(self, cr, uid, el, context=None):
# filter out meta-information inserted in the document
attributes = dict((k, v) for k, v in el.items()
if not k.startswith('data-oe-'))
attributes['t-field'] = el.get('data-oe-expression')
out = html.html_parser.makeelement(el.tag, attrib=attributes)
out.tail = el.tail
return out
    def replace_arch_section(self, cr, uid, view_id, section_xpath, replacement, context=None):
        """ Return the arch of ``view_id`` with the section at
        ``section_xpath`` replaced by ``replacement``'s content.

        :param section_xpath: xpath of the section to replace; falsy means
            the whole arch root
        :param replacement: lxml element whose text/tail/children overwrite
            the matched section's
        :return: the modified arch as an lxml element (not written back)
        :raises ValueError: if ``section_xpath`` matches zero or several
            nodes (via the single-element unpacking)
        """
        # the root of the arch section shouldn't actually be replaced as it's
        # not really editable itself, only the content truly is editable.
        [view] = self.browse(cr, uid, [view_id], context=context)
        arch = etree.fromstring(view.arch.encode('utf-8'))
        # => get the replacement root
        if not section_xpath:
            root = arch
        else:
            # ensure there's only one match
            [root] = arch.xpath(section_xpath)
        root.text = replacement.text
        root.tail = replacement.tail
        # replace all children
        del root[:]
        for child in replacement:
            # deepcopy so nodes are not moved out of `replacement`
            root.append(copy.deepcopy(child))
        return arch
    @api.cr_uid_ids_context
    def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
        """ Render the template, switching to the website qweb engine and
        injecting website-specific rendering context (company, url helpers,
        editability flags, menus) when serving a website request.
        """
        if request and getattr(request, 'website_enabled', False):
            engine='website.qweb'
            if isinstance(id_or_xml_id, list):
                id_or_xml_id = id_or_xml_id[0]
            if not context:
                context = {}
            # NOTE(review): browsed as SUPERUSER_ID so rendering works for
            # public users without read access to res.company
            company = self.pool['res.company'].browse(cr, SUPERUSER_ID, request.website.company_id.id, context=context)
            qcontext = dict(
                context.copy(),
                website=request.website,
                url_for=website.url_for,
                slug=website.slug,
                res_company=company,
                user_id=self.pool.get("res.users").browse(cr, uid, uid),
                translatable=context.get('lang') != request.website.default_lang_code,
                editable=request.website.is_publisher(),
                menu_data=self.pool['ir.ui.menu'].load_menus_root(cr, uid, context=context) if request.website.is_user() else None,
            )
            # add some values
            if values:
                qcontext.update(values)
            # in edit mode ir.ui.view will tag nodes
            if not qcontext.get('rendering_bundle'):
                if qcontext.get('editable'):
                    context = dict(context, inherit_branding=True)
                elif request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher'):
                    context = dict(context, inherit_branding_auto=True)
            view_obj = request.website.get_template(id_or_xml_id)
            if 'main_object' not in qcontext:
                qcontext['main_object'] = view_obj
            values = qcontext
        # outside a website request, fall through with the original engine
        return super(view, self).render(cr, uid, id_or_xml_id, values=values, engine=engine, context=context)
def _pretty_arch(self, arch):
# remove_blank_string does not seem to work on HTMLParser, and
# pretty-printing with lxml more or less requires stripping
# whitespace: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
# so serialize to XML, parse as XML (remove whitespace) then serialize
# as XML (pretty print)
arch_no_whitespace = etree.fromstring(
etree.tostring(arch, encoding='utf-8'),
parser=etree.XMLParser(encoding='utf-8', remove_blank_text=True))
return etree.tostring(
arch_no_whitespace, encoding='unicode', pretty_print=True)
    def save(self, cr, uid, res_id, value, xpath=None, context=None):
        """ Update a view section. The view section may embed fields to write

        :param int res_id: id of the view to update
        :param str value: HTML of the edited section (or of a lone embedded
            field when ``xpath`` is None)
        :param str xpath: valid xpath to the tag to replace; None means
            ``value`` is a single embedded field, not a view section
        """
        res_id = int(res_id)
        arch_section = html.fromstring(
            value, parser=html.HTMLParser(encoding='utf-8'))
        if xpath is None:
            # value is an embedded field on its own, not a view section
            self.save_embedded_field(cr, uid, arch_section, context=context)
            return
        for el in self.extract_embedded_fields(cr, uid, arch_section, context=context):
            self.save_embedded_field(cr, uid, el, context=context)
            # transform embedded field back to t-field
            el.getparent().replace(el, self.to_field_ref(cr, uid, el, context=context))
        arch = self.replace_arch_section(cr, uid, res_id, xpath, arch_section, context=context)
        self.write(cr, uid, res_id, {
            'arch': self._pretty_arch(arch)
        }, context=context)
        # flag the backing ir.model.data as noupdate so module upgrades do
        # not overwrite the user's edits
        view = self.browse(cr, SUPERUSER_ID, res_id, context=context)
        if view.model_data_id:
            view.model_data_id.write({'noupdate': True})
    def customize_template_get(self, cr, uid, xml_id, full=False, bundles=False , context=None):
        """ Get inherit view's informations of the template ``key``. By default, only
        returns ``customize_show`` templates (which can be active or not), if
        ``full=True`` returns inherit view's informations of the template ``key``.
        ``bundles=True`` returns also the asset bundles

        NOTE(review): ``bundles`` is accepted but never read in this body —
        presumably handled by an override elsewhere; confirm before removing.

        :return: list of dicts with keys ``name``/``id``/``xml_id``/
            ``inherit_id``/``header``/``active``; header entries group the
            customizable views by their parent.
        """
        imd = request.registry['ir.model.data']
        view_model, view_theme_id = imd.get_object_reference(cr, uid, 'website', 'theme')
        user = request.registry['res.users'].browse(cr, uid, uid, context)
        user_groups = set(user.groups_id)
        # active_test=False: collect disabled inheriting views too
        views = self._views_get(cr, uid, xml_id, context=dict(context or {}, active_test=False))
        done = set()
        result = []
        for v in views:
            # skip views restricted to groups the user doesn't have
            if not user_groups.issuperset(v.groups_id):
                continue
            if full or (v.customize_show and v.inherit_id.id != view_theme_id):
                # emit one header row per distinct parent view
                if v.inherit_id not in done:
                    result.append({
                        'name': v.inherit_id.name,
                        'id': v.id,
                        'xml_id': v.xml_id,
                        'inherit_id': v.inherit_id.id,
                        'header': True,
                        'active': False
                    })
                    done.add(v.inherit_id)
                result.append({
                    'name': v.name,
                    'id': v.id,
                    'xml_id': v.xml_id,
                    'inherit_id': v.inherit_id.id,
                    'header': False,
                    'active': v.active,
                })
        return result
def get_view_translations(self, cr, uid, xml_id, lang, field=['id', 'res_id', 'value', 'state', 'gengo_translation'], context=None):
views = self.customize_template_get(cr, uid, xml_id, full=True, context=context)
views_ids = [view.get('id') for view in views if view.get('active')]
domain = [('type', '=', 'view'), ('res_id', 'in', views_ids), ('lang', '=', lang)]
irt = request.registry.get('ir.translation')
return irt.search_read(cr, uid, domain, field, context=context) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP encoder: internal header.
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_ENC_VP8I_ENC_H_
#define WEBP_ENC_VP8I_ENC_H_
#include <string.h> // for memcpy()
#include "src/dec/common_dec.h"
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/utils/bit_writer_utils.h"
#include "src/utils/thread_utils.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#include "src/webp/types.h"
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
// Various defines and enums
// version numbers
#define ENC_MAJ_VERSION 1
#define ENC_MIN_VERSION 6
#define ENC_REV_VERSION 0
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
MAX_LEVEL = 2047 // max level (note: max codable is 2047 + 67)
};
// Rate-distortion optimization levels: how much effort is spent searching
// for the best modes/levels. Deduced from the encoding 'method' (see the
// 'rd_opt_level' field of VP8Encoder). Higher = slower but better quality.
typedef enum {   // Rate-distortion optimization levels
  RD_OPT_NONE        = 0,  // no rd-opt
  RD_OPT_BASIC       = 1,  // basic scoring (no trellis)
  RD_OPT_TRELLIS     = 2,  // perform trellis-quant on the final decision only
  RD_OPT_TRELLIS_ALL = 3   // trellis-quant for every scoring (much slower)
} VP8RDLevel;
// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
// The original or reconstructed samples can be accessed using VP8Scan[].
// The predicted blocks can be accessed using offsets to 'yuv_p' and
// the arrays VP8*ModeOffsets[].
// * YUV Samples area ('yuv_in'/'yuv_out'/'yuv_out2')
// (see VP8Scan[] for accessing the blocks, along with
// Y_OFF_ENC/U_OFF_ENC/V_OFF_ENC):
// +----+----+
// Y_OFF_ENC |YYYY|UUVV|
// U_OFF_ENC |YYYY|UUVV|
// V_OFF_ENC |YYYY|....| <- 25% wasted U/V area
// |YYYY|....|
// +----+----+
// * Prediction area ('yuv_p', size = PRED_SIZE_ENC)
// Intra16 predictions (16x16 block each, two per row):
// |I16DC16|I16TM16|
// |I16VE16|I16HE16|
// Chroma U/V predictions (16x8 block each, two per row):
// |C8DC8|C8TM8|
// |C8VE8|C8HE8|
// Intra 4x4 predictions (4x4 block each)
// |I4DC4 I4TM4 I4VE4 I4HE4|I4RD4 I4VR4 I4LD4 I4VL4|
// |I4HD4 I4HU4 I4TMP .....|.......................| <- ~31% wasted
#define YUV_SIZE_ENC (BPS * 16)
#define PRED_SIZE_ENC (32 * BPS + 16 * BPS + 8 * BPS) // I16+Chroma+I4 preds
#define Y_OFF_ENC (0)
#define U_OFF_ENC (16)
#define V_OFF_ENC (16 + 8)
extern const uint16_t VP8Scan[16];
extern const uint16_t VP8UVModeOffsets[4];
extern const uint16_t VP8I16ModeOffsets[4];
// Layout of prediction blocks
// intra 16x16
#define I16DC16 (0 * 16 * BPS)
#define I16TM16 (I16DC16 + 16)
#define I16VE16 (1 * 16 * BPS)
#define I16HE16 (I16VE16 + 16)
// chroma 8x8, two U/V blocks side by side (hence: 16x8 each)
#define C8DC8 (2 * 16 * BPS)
#define C8TM8 (C8DC8 + 1 * 16)
#define C8VE8 (2 * 16 * BPS + 8 * BPS)
#define C8HE8 (C8VE8 + 1 * 16)
// intra 4x4
#define I4DC4 (3 * 16 * BPS + 0)
#define I4TM4 (I4DC4 + 4)
#define I4VE4 (I4DC4 + 8)
#define I4HE4 (I4DC4 + 12)
#define I4RD4 (I4DC4 + 16)
#define I4VR4 (I4DC4 + 20)
#define I4LD4 (I4DC4 + 24)
#define I4VL4 (I4DC4 + 28)
#define I4HD4 (3 * 16 * BPS + 4 * BPS)
#define I4HU4 (I4HD4 + 4)
#define I4TMP (I4HD4 + 8)
typedef int64_t score_t; // type used for scores, rate, distortion
// Note that MAX_COST is not the maximum allowed by sizeof(score_t),
// in order to allow overflowing computations.
#define MAX_COST ((score_t)0x7fffffffffffffLL)
#define QFIX 17
#define BIAS(b) ((b) << (QFIX - 8))
// Fun fact: this is the _only_ line where we're actually being lossy and
// discarding bits.
// Fixed-point "division": quantizes coefficient 'n' using the pre-computed
// reciprocal 'iQ' of the quantizer step and the rounding bias 'B', i.e.
// (n * iQ + B) >> QFIX. The right-shift discards the fractional bits.
static WEBP_INLINE int QUANTDIV(uint32_t n, uint32_t iQ, uint32_t B) {
  const uint32_t biased = n * iQ + B;
  return (int)(biased >> QFIX);
}
// Uncomment the following to remove token-buffer code:
// #define DISABLE_TOKEN_BUFFER
// quality below which error-diffusion is enabled
#define ERROR_DIFFUSION_QUALITY 98
//------------------------------------------------------------------------------
// Headers
typedef uint32_t proba_t; // 16b + 16b
typedef uint8_t ProbaArray[NUM_CTX][NUM_PROBAS];
typedef proba_t StatsArray[NUM_CTX][NUM_PROBAS];
typedef uint16_t CostArray[NUM_CTX][MAX_VARIABLE_LEVEL + 1];
typedef const uint16_t* (*CostArrayPtr)[NUM_CTX]; // for easy casting
typedef const uint16_t* CostArrayMap[16][NUM_CTX];
typedef double LFStats[NUM_MB_SEGMENTS][MAX_LF_LEVELS]; // filter stats
typedef struct VP8Encoder VP8Encoder;
// segment features
// Per-frame segmentation setup, written in the frame header.
typedef struct {
  int num_segments;   // Actual number of segments. 1 segment only = unused.
  int update_map;     // whether to update the segment map or not.
                      // must be 0 if there's only 1 segment.
  int size;           // bit-cost for transmitting the segment map
} VP8EncSegmentHeader;
// Struct collecting all frame-persistent probabilities.
// All frame-persistent probabilities plus the statistics and level-cost
// tables derived from them.
typedef struct {
  uint8_t segments[3];     // probabilities for segment tree
  uint8_t skip_proba;      // final probability of being skipped.
  ProbaArray coeffs[NUM_TYPES][NUM_BANDS];      // 1056 bytes
  StatsArray stats[NUM_TYPES][NUM_BANDS];       // 4224 bytes
  CostArray level_cost[NUM_TYPES][NUM_BANDS];   // 13056 bytes
  CostArrayMap remapped_costs[NUM_TYPES];       // 1536 bytes
  int dirty;               // if true, need to call VP8CalculateLevelCosts()
  int use_skip_proba;      // Note: we always use skip_proba for now.
  int nb_skip;             // number of skipped blocks
} VP8EncProba;
// Filter parameters. Not actually used in the code (we don't perform
// the in-loop filtering), but filled from user's config
// Loop-filter parameters written into the bitstream header (the encoder
// itself does not perform in-loop filtering; values come from user config).
typedef struct {
  int simple;             // filtering type: 0=complex, 1=simple
  int level;              // base filter level [0..63]
  int sharpness;          // [0..7]
  int i4x4_lf_delta;      // delta filter level for i4x4 relative to i16x16
} VP8EncFilterHeader;
//------------------------------------------------------------------------------
// Informations about the macroblocks.
// Compact per-macroblock decision record (bit-fields keep it small since
// one is stored per macroblock of the frame).
typedef struct {
  // block type
  unsigned int type:2;     // 0=i4x4, 1=i16x16
  unsigned int uv_mode:2;
  unsigned int skip:1;
  unsigned int segment:2;
  uint8_t alpha;           // quantization-susceptibility
} VP8MBInfo;
// Quantization matrix: one entry per coefficient position. 'iq' holds the
// QFIX fixed-point reciprocals used by QUANTDIV so quantization avoids a
// hardware division.
typedef struct VP8Matrix {
  uint16_t q[16];        // quantizer steps
  uint16_t iq[16];       // reciprocals, fixed point.
  uint32_t bias[16];     // rounding bias
  uint32_t zthresh[16];  // value below which a coefficient is zeroed
  uint16_t sharpen[16];  // frequency boosters for slight sharpening
} VP8Matrix;
// Per-segment quantization matrices, filtering strength and the lambda
// multipliers used when scoring rate-distortion trade-offs.
typedef struct {
  VP8Matrix y1, y2, uv;  // quantization matrices
  int alpha;      // quant-susceptibility, range [-127,127]. Zero is neutral.
                  // Lower values indicate a lower risk of blurriness.
  int beta;       // filter-susceptibility, range [0,255].
  int quant;      // final segment quantizer.
  int fstrength;  // final in-loop filtering strength
  int max_edge;   // max edge delta (for filtering strength)
  int min_disto;  // minimum distortion required to trigger filtering record
  // reactivities
  int lambda_i16, lambda_i4, lambda_uv;
  int lambda_mode, lambda_trellis, tlambda;
  int lambda_trellis_i16, lambda_trellis_i4, lambda_trellis_uv;
  // lambda values for distortion-based evaluation
  score_t i4_penalty;   // penalty for using Intra4
} VP8SegmentInfo;
typedef int8_t DError[2 /* u/v */][2 /* top or left */];
// Handy transient struct to accumulate score and info during RD-optimization
// and mode evaluation.
// Scratch record accumulating the score and quantized levels for one
// candidate mode decision of a macroblock.
typedef struct {
  score_t D, SD;              // Distortion, spectral distortion
  score_t H, R, score;        // header bits, rate, score.
  int16_t y_dc_levels[16];    // Quantized levels for luma-DC, luma-AC, chroma.
  int16_t y_ac_levels[16][16];
  int16_t uv_levels[4 + 4][16];
  int mode_i16;               // mode number for intra16 prediction
  uint8_t modes_i4[16];       // mode numbers for intra4 predictions
  int mode_uv;                // mode number of chroma prediction
  uint32_t nz;                // non-zero blocks
  int8_t derr[2][3];          // DC diffusion errors for U/V for blocks #1/2/3
} VP8ModeScore;
// Iterator structure to iterate through macroblocks, pointing to the
// right neighbouring data (samples, predictions, contexts, ...)
// Cursor over the frame's macroblocks carrying the neighbouring context
// (left/top samples, non-zero contexts) plus scratch buffers for
// prediction and reconstruction. See the YUV-cache layout comment above.
typedef struct {
  int x, y;                  // current macroblock
  uint8_t* yuv_in;           // input samples
  uint8_t* yuv_out;          // output samples
  uint8_t* yuv_out2;         // secondary buffer swapped with yuv_out.
  uint8_t* yuv_p;            // scratch buffer for prediction
  VP8Encoder* enc;           // back-pointer
  VP8MBInfo* mb;             // current macroblock
  VP8BitWriter* bw;          // current bit-writer
  uint8_t* preds;            // intra mode predictors (4x4 blocks)
  uint32_t* nz;              // non-zero pattern
#if WEBP_AARCH64 && BPS == 32
  uint8_t i4_boundary[40];   // 32+8 boundary samples needed by intra4x4
#else
  uint8_t i4_boundary[37];   // 32+5 boundary samples needed by intra4x4
#endif
  uint8_t* i4_top;           // pointer to the current top boundary sample
  int i4;                    // current intra4x4 mode being tested
  int top_nz[9];             // top-non-zero context.
  int left_nz[9];            // left-non-zero. left_nz[8] is independent.
  uint64_t bit_count[4][3];  // bit counters for coded levels.
  uint64_t luma_bits;        // macroblock bit-cost for luma
  uint64_t uv_bits;          // macroblock bit-cost for chroma
  LFStats* lf_stats;         // filter stats (borrowed from enc)
  int do_trellis;            // if true, perform extra level optimisation
  int count_down;            // number of mb still to be processed
  int count_down0;           // starting counter value (for progress)
  int percent0;              // saved initial progress percent
  DError left_derr;          // left error diffusion (u/v)
  DError* top_derr;          // top diffusion error - NULL if disabled
  uint8_t* y_left;           // left luma samples (addressable from index -1 to 15).
  uint8_t* u_left;           // left u samples (addressable from index -1 to 7)
  uint8_t* v_left;           // left v samples (addressable from index -1 to 7)
  uint8_t* y_top;            // top luma samples at position 'x'
  uint8_t* uv_top;           // top u/v samples at position 'x', packed as 16 bytes
  // memory for storing y/u/v_left
  // (the *_left pointers above alias into this, WEBP_ALIGN_CST-aligned)
  uint8_t yuv_left_mem[17 + 16 + 16 + 8 + WEBP_ALIGN_CST];
  // memory for yuv* (backing store for yuv_in/out/out2/p above)
  uint8_t yuv_mem[3 * YUV_SIZE_ENC + PRED_SIZE_ENC + WEBP_ALIGN_CST];
} VP8EncIterator;
// in iterator.c
// must be called first
void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it);
// reset iterator position to row 'y'
void VP8IteratorSetRow(VP8EncIterator* const it, int y);
// set count down (=number of iterations to go)
void VP8IteratorSetCountDown(VP8EncIterator* const it, int count_down);
// return true if iteration is finished
int VP8IteratorIsDone(const VP8EncIterator* const it);
// Import uncompressed samples from source.
// If tmp_32 is not NULL, import boundary samples too.
// tmp_32 is a 32-bytes scratch buffer that must be aligned in memory.
void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32);
// export decimated samples
void VP8IteratorExport(const VP8EncIterator* const it);
// go to next macroblock. Returns false if not finished.
int VP8IteratorNext(VP8EncIterator* const it);
// save the 'yuv_out' boundary values to 'top'/'left' arrays for next
// iterations.
void VP8IteratorSaveBoundary(VP8EncIterator* const it);
// Report progression based on macroblock rows. Return 0 for user-abort request.
int VP8IteratorProgress(const VP8EncIterator* const it, int delta);
// Intra4x4 iterations
void VP8IteratorStartI4(VP8EncIterator* const it);
// returns true if not done.
int VP8IteratorRotateI4(VP8EncIterator* const it,
const uint8_t* const yuv_out);
// Non-zero context setup/teardown
void VP8IteratorNzToBytes(VP8EncIterator* const it);
void VP8IteratorBytesToNz(VP8EncIterator* const it);
// Helper functions to set mode properties
void VP8SetIntra16Mode(const VP8EncIterator* const it, int mode);
void VP8SetIntra4Mode(const VP8EncIterator* const it, const uint8_t* modes);
void VP8SetIntraUVMode(const VP8EncIterator* const it, int mode);
void VP8SetSkip(const VP8EncIterator* const it, int skip);
void VP8SetSegment(const VP8EncIterator* const it, int segment);
//------------------------------------------------------------------------------
// Paginated token buffer
typedef struct VP8Tokens VP8Tokens; // struct details in token.c
// Paginated token buffer: tokens are recorded page by page before the
// probabilities are final, then emitted with VP8EmitTokens().
typedef struct {
#if !defined(DISABLE_TOKEN_BUFFER)
  VP8Tokens* pages;        // first page
  VP8Tokens** last_page;   // last page
  uint16_t* tokens;        // set to (*last_page)->tokens
  int left;                // how many free tokens left before the page is full
  int page_size;           // number of tokens per page
#endif
  int error;               // true in case of malloc error
} VP8TBuffer;
// initialize an empty buffer
void VP8TBufferInit(VP8TBuffer* const b, int page_size);
void VP8TBufferClear(VP8TBuffer* const b); // de-allocate pages memory
#if !defined(DISABLE_TOKEN_BUFFER)
// Finalizes bitstream when probabilities are known.
// Deletes the allocated token memory if final_pass is true.
int VP8EmitTokens(VP8TBuffer* const b, VP8BitWriter* const bw,
const uint8_t* const probas, int final_pass);
// record the coding of coefficients without knowing the probabilities yet
int VP8RecordCoeffTokens(int ctx, const struct VP8Residual* const res,
VP8TBuffer* const tokens);
// Estimate the final coded size given a set of 'probas'.
size_t VP8EstimateTokenSize(VP8TBuffer* const b, const uint8_t* const probas);
#endif // !DISABLE_TOKEN_BUFFER
//------------------------------------------------------------------------------
// VP8Encoder
// Top-level encoder state: configuration, headers, per-segment quantizers,
// probabilities/statistics and all the frame-wide working memory.
struct VP8Encoder {
  const WebPConfig* config;    // user configuration and parameters
  WebPPicture* pic;            // input / output picture

  // headers
  VP8EncFilterHeader filter_hdr;     // filtering information
  VP8EncSegmentHeader segment_hdr;   // segment information

  int profile;                      // VP8's profile, deduced from Config.

  // dimension, in macroblock units.
  int mb_w, mb_h;
  int preds_w;   // stride of the *preds prediction plane (=4*mb_w + 1)

  // number of partitions (1, 2, 4 or 8 = MAX_NUM_PARTITIONS)
  int num_parts;

  // per-partition boolean decoders.
  VP8BitWriter bw;                         // part0
  VP8BitWriter parts[MAX_NUM_PARTITIONS];  // token partitions
  VP8TBuffer tokens;                       // token buffer

  int percent;                             // for progress

  // transparency blob
  int has_alpha;
  uint8_t* alpha_data;       // non-NULL if transparency is present
  uint32_t alpha_data_size;
  WebPWorker alpha_worker;

  // quantization info (one set of DC/AC dequant factor per segment)
  VP8SegmentInfo dqm[NUM_MB_SEGMENTS];
  int base_quant;                  // nominal quantizer value. Only used
                                   // for relative coding of segments' quant.
  int alpha;                       // global susceptibility (<=> complexity)
  int uv_alpha;                    // U/V quantization susceptibility
  // global offset of quantizers, shared by all segments
  int dq_y1_dc;
  int dq_y2_dc, dq_y2_ac;
  int dq_uv_dc, dq_uv_ac;

  // probabilities and statistics
  VP8EncProba proba;
  uint64_t sse[4];        // sum of Y/U/V/A squared errors for all macroblocks
  uint64_t sse_count;     // pixel count for the sse[] stats
  int coded_size;
  int residual_bytes[3][4];
  int block_count[3];

  // quality/speed settings
  int method;               // 0=fastest, 6=best/slowest.
  VP8RDLevel rd_opt_level;  // Deduced from method.
  int max_i4_header_bits;   // partition #0 safeness factor
  int mb_header_limit;      // rough limit for header bits per MB
  int thread_level;         // derived from config->thread_level
  int do_search;            // derived from config->target_XXX
  int use_tokens;           // if true, use token buffer

  // Memory
  VP8MBInfo* mb_info;   // contextual macroblock infos (mb_w + 1)
  uint8_t* preds;       // predictions modes: (4*mb_w+1) * (4*mb_h+1)
  uint32_t* nz;         // non-zero bit context: mb_w+1
  uint8_t* y_top;       // top luma samples.
  uint8_t* uv_top;      // top u/v samples.
                        // U and V are packed into 16 bytes (8 U + 8 V)
  LFStats* lf_stats;    // autofilter stats (if NULL, autofilter is off)
  DError* top_derr;     // diffusion error (NULL if disabled)
};
//------------------------------------------------------------------------------
// internal functions. Not public.
// in tree.c
extern const uint8_t VP8CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS];
extern const uint8_t
VP8CoeffsUpdateProba[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS];
// Reset the token probabilities to their initial (default) values
void VP8DefaultProbas(VP8Encoder* const enc);
// Write the token probabilities
void VP8WriteProbas(VP8BitWriter* const bw, const VP8EncProba* const probas);
// Writes the partition #0 modes (that is: all intra modes)
void VP8CodeIntraModes(VP8Encoder* const enc);
// in syntax.c
// Generates the final bitstream by coding the partition0 and headers,
// and appending an assembly of all the pre-coded token partitions.
// Return true if everything is ok.
int VP8EncWrite(VP8Encoder* const enc);
// Release memory allocated for bit-writing in VP8EncLoop & seq.
void VP8EncFreeBitWriters(VP8Encoder* const enc);
// in frame.c
extern const uint8_t VP8Cat3[];
extern const uint8_t VP8Cat4[];
extern const uint8_t VP8Cat5[];
extern const uint8_t VP8Cat6[];
// Form all the four Intra16x16 predictions in the 'yuv_p' cache
void VP8MakeLuma16Preds(const VP8EncIterator* const it);
// Form all the four Chroma8x8 predictions in the 'yuv_p' cache
void VP8MakeChroma8Preds(const VP8EncIterator* const it);
// Rate calculation
int VP8GetCostLuma16(VP8EncIterator* const it, const VP8ModeScore* const rd);
int VP8GetCostLuma4(VP8EncIterator* const it, const int16_t levels[16]);
int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd);
// Main coding calls
int VP8EncLoop(VP8Encoder* const enc);
int VP8EncTokenLoop(VP8Encoder* const enc);
// in webpenc.c
// Assign an error code to a picture. Return false for convenience.
int WebPEncodingSetError(const WebPPicture* const pic, WebPEncodingError error);
int WebPReportProgress(const WebPPicture* const pic,
int percent, int* const percent_store);
// in analysis.c
// Main analysis loop. Decides the segmentations and complexity.
// Assigns a first guess for Intra16 and 'uvmode' prediction modes.
int VP8EncAnalyze(VP8Encoder* const enc);
// in quant.c
// Sets up segment's quantization values, 'base_quant' and filter strengths.
void VP8SetSegmentParams(VP8Encoder* const enc, float quality);
// Pick best modes and fills the levels. Returns true if skipped.
int VP8Decimate(VP8EncIterator* WEBP_RESTRICT const it,
VP8ModeScore* WEBP_RESTRICT const rd,
VP8RDLevel rd_opt);
// in alpha.c
void VP8EncInitAlpha(VP8Encoder* const enc); // initialize alpha compression
int VP8EncStartAlpha(VP8Encoder* const enc); // start alpha coding process
int VP8EncFinishAlpha(VP8Encoder* const enc); // finalize compressed data
int VP8EncDeleteAlpha(VP8Encoder* const enc); // delete compressed data
// autofilter
void VP8InitFilter(VP8EncIterator* const it);
void VP8StoreFilterStats(VP8EncIterator* const it);
void VP8AdjustFilterStrength(VP8EncIterator* const it);
// returns the approximate filtering strength needed to smooth a edge
// step of 'delta', given a sharpness parameter 'sharpness'.
int VP8FilterStrengthFromDelta(int sharpness, int delta);
// misc utils for picture_*.c:
// Returns true if 'picture' is non-NULL and dimensions/colorspace are within
// their valid ranges. If returning false, the 'error_code' in 'picture' is
// updated.
int WebPValidatePicture(const WebPPicture* const picture);
// Remove reference to the ARGB/YUVA buffer (doesn't free anything).
void WebPPictureResetBuffers(WebPPicture* const picture);
// Allocates ARGB buffer according to set width/height (previous one is
// always free'd). Preserves the YUV(A) buffer. Returns false in case of error
// (invalid param, out-of-memory).
int WebPPictureAllocARGB(WebPPicture* const picture);
// Allocates YUVA buffer according to set width/height (previous one is always
// free'd). Uses picture->csp to determine whether an alpha buffer is needed.
// Preserves the ARGB buffer.
// Returns false in case of error (invalid param, out-of-memory).
int WebPPictureAllocYUVA(WebPPicture* const picture);
// Replace samples that are fully transparent by 'color' to help compressibility
// (no guarantee, though). Assumes pic->use_argb is true.
void WebPReplaceTransparentPixels(WebPPicture* const pic, uint32_t color);
//------------------------------------------------------------------------------
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_ENC_VP8I_ENC_H_ | c | github | https://github.com/opencv/opencv | 3rdparty/libwebp/src/enc/vp8i_enc.h |
from __future__ import unicode_literals
import re
try:
# Python 3
from urllib.parse import urlencode
except ImportError:
# Python 2
from urllib import urlencode
from django.contrib.admin import AdminSite
from django.contrib.admin.options import InlineModelAdmin
from django.contrib.sites.models import Site
from django.core import mail
from django.core.urlresolvers import reverse
from django.db import models
from django.forms import Textarea
from django.forms.models import modelform_factory
from django.templatetags.static import static
from django.test.utils import override_settings
from django.utils.html import strip_tags
from django.utils.unittest import skipUnless
from zhiliao.conf import settings
from .admin import BaseDynamicInlineAdmin
from .fields import RichTextField
from .managers import DisplayableManager
from .models import (CONTENT_STATUS_DRAFT,
CONTENT_STATUS_PUBLISHED)
from zhiliao.forms.admin import FieldAdmin
from zhiliao.forms.models import Form
from zhiliao.pages.models import RichTextPage
from zhiliao.utils.importing import import_dotted_path
from zhiliao.utils.tests import (TestCase, run_pyflakes_for_package,
run_pep8_for_package)
from zhiliao.utils.html import TagCloser
class CoreTests(TestCase):
def test_tagcloser(self):
"""
Test tags are closed, and tags that shouldn't be closed aren't.
"""
self.assertEqual(TagCloser("<p>Unclosed paragraph").html,
"<p>Unclosed paragraph</p>")
self.assertEqual(TagCloser("Line break<br>").html,
"Line break<br>")
@skipUnless("zhiliao.mobile" in settings.INSTALLED_APPS and
"zhiliao.pages" in settings.INSTALLED_APPS,
"mobile and pages apps required")
def test_device_specific_template(self):
"""
Test that an alternate template is rendered when a mobile
device is used.
"""
ua = settings.DEVICE_USER_AGENTS[0][1][0]
kwargs = {"slug": "device-test"}
url = reverse("page", kwargs=kwargs)
kwargs["status"] = CONTENT_STATUS_PUBLISHED
RichTextPage.objects.get_or_create(**kwargs)
default = self.client.get(url)
mobile = self.client.get(url, HTTP_USER_AGENT=ua)
self.assertNotEqual(default.template_name[0], mobile.template_name[0])
def test_syntax(self):
"""
Run pyflakes/pep8 across the code base to check for potential errors.
"""
warnings = []
warnings.extend(run_pyflakes_for_package("zhiliao"))
warnings.extend(run_pep8_for_package("zhiliao"))
if warnings:
self.fail("Syntax warnings!\n\n%s" % "\n".join(warnings))
    def test_utils(self):
        """
        Miscellaneous tests for the ``zhiliao.utils`` package.
        """
        # bogus dotted paths must raise ImportError at every depth
        self.assertRaises(ImportError, import_dotted_path, "zhiliao")
        self.assertRaises(ImportError, import_dotted_path, "zhiliao.NO")
        self.assertRaises(ImportError, import_dotted_path, "zhiliao.core.NO")
        # ... while a valid module path must import cleanly
        try:
            import_dotted_path("zhiliao.core")
        except ImportError:
            self.fail("zhiliao.utils.imports.import_dotted_path"
                      "could not import \"zhiliao.core\"")
@skipUnless("zhiliao.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_description(self):
"""
Test generated description is text version of the first line
of content.
"""
description = "<p>How now brown cow</p>"
page = RichTextPage.objects.create(title="Draft",
content=description * 3)
self.assertEqual(page.description, strip_tags(description))
@skipUnless("zhiliao.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_draft(self):
"""
Test a draft object as only being viewable by a staff member.
"""
self.client.logout()
draft = RichTextPage.objects.create(title="Draft",
status=CONTENT_STATUS_DRAFT)
response = self.client.get(draft.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 404)
self.client.login(username=self._username, password=self._password)
response = self.client.get(draft.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
def test_searchable_manager_search_fields(self):
"""
Test that SearchableManager can get appropriate params.
"""
manager = DisplayableManager()
self.assertFalse(manager._search_fields)
manager = DisplayableManager(search_fields={'foo': 10})
self.assertTrue(manager._search_fields)
    @skipUnless("zhiliao.pages" in settings.INSTALLED_APPS,
                "pages app required")
    def test_search(self):
        """
        Objects with status "Draft" should not be within search results.
        Also exercises the +/-/"..." search operators and result ordering.
        """
        RichTextPage.objects.all().delete()
        published = {"status": CONTENT_STATUS_PUBLISHED}
        first = RichTextPage.objects.create(title="test page",
                                            status=CONTENT_STATUS_DRAFT).id
        second = RichTextPage.objects.create(title="test another test page",
                                             **published).id
        # Draft shouldn't be a result.
        results = RichTextPage.objects.search("test")
        self.assertEqual(len(results), 1)
        # Publishing the draft makes it searchable too.
        RichTextPage.objects.filter(id=first).update(**published)
        results = RichTextPage.objects.search("test")
        self.assertEqual(len(results), 2)
        # Either word.
        results = RichTextPage.objects.search("another test")
        self.assertEqual(len(results), 2)
        # Must include first word.
        results = RichTextPage.objects.search("+another test")
        self.assertEqual(len(results), 1)
        # Mustn't include first word.
        results = RichTextPage.objects.search("-another test")
        self.assertEqual(len(results), 1)
        if results:
            self.assertEqual(results[0].id, first)
        # Exact phrase.
        results = RichTextPage.objects.search('"another test"')
        self.assertEqual(len(results), 1)
        if results:
            self.assertEqual(results[0].id, second)
        # Test ordering: the page matching "test" more often ranks first.
        results = RichTextPage.objects.search("test")
        self.assertEqual(len(results), 2)
        if results:
            self.assertEqual(results[0].id, second)
        # Test the actual search view.
        response = self.client.get(reverse("search") + "?q=test")
        self.assertEqual(response.status_code, 200)
    def _create_page(self, title, status):
        # Shortcut creating a RichTextPage with the given title/status.
        return RichTextPage.objects.create(title=title, status=status)
def _test_site_pages(self, title, status, count):
# test _default_manager
pages = RichTextPage._default_manager.all()
self.assertEqual(pages.count(), count)
self.assertTrue(title in [page.title for page in pages])
# test objects manager
pages = RichTextPage.objects.all()
self.assertEqual(pages.count(), count)
self.assertTrue(title in [page.title for page in pages])
# test response status code
code = 200 if status == CONTENT_STATUS_PUBLISHED else 404
pages = RichTextPage.objects.filter(status=status)
response = self.client.get(pages[0].get_absolute_url(), follow=True)
self.assertEqual(response.status_code, code)
@skipUnless("zhiliao.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_multisite(self):
from django.conf import settings
# setup
try:
old_site_id = settings.SITE_ID
except:
old_site_id = None
site1 = Site.objects.create(domain="site1.com")
site2 = Site.objects.create(domain="site2.com")
# create pages under site1, which should be only accessible
# when SITE_ID is site1
settings.SITE_ID = site1.pk
site1_page = self._create_page("Site1", CONTENT_STATUS_PUBLISHED)
self._test_site_pages("Site1", CONTENT_STATUS_PUBLISHED, count=1)
# create pages under site2, which should only be accessible
# when SITE_ID is site2
settings.SITE_ID = site2.pk
self._create_page("Site2", CONTENT_STATUS_PUBLISHED)
self._test_site_pages("Site2", CONTENT_STATUS_PUBLISHED, count=1)
# original page should 404
response = self.client.get(site1_page.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 404)
# change back to site1, and only the site1 pages should be retrieved
settings.SITE_ID = site1.pk
self._test_site_pages("Site1", CONTENT_STATUS_PUBLISHED, count=1)
# insert a new record, see the count change
self._create_page("Site1 Draft", CONTENT_STATUS_DRAFT)
self._test_site_pages("Site1 Draft", CONTENT_STATUS_DRAFT, count=2)
self._test_site_pages("Site1 Draft", CONTENT_STATUS_PUBLISHED, count=2)
# change back to site2, and only the site2 pages should be retrieved
settings.SITE_ID = site2.pk
self._test_site_pages("Site2", CONTENT_STATUS_PUBLISHED, count=1)
# insert a new record, see the count change
self._create_page("Site2 Draft", CONTENT_STATUS_DRAFT)
self._test_site_pages("Site2 Draft", CONTENT_STATUS_DRAFT, count=2)
self._test_site_pages("Site2 Draft", CONTENT_STATUS_PUBLISHED, count=2)
# tear down
if old_site_id:
settings.SITE_ID = old_site_id
else:
del settings.SITE_ID
site1.delete()
site2.delete()
def _static_proxy(self, querystring):
self.client.login(username=self._username, password=self._password)
proxy_url = '%s?%s' % (reverse('static_proxy'), querystring)
response = self.client.get(proxy_url)
self.assertEqual(response.status_code, 200)
@override_settings(STATIC_URL='/static/')
def test_static_proxy(self):
querystring = urlencode([('u', static("test/image.jpg"))])
self._static_proxy(querystring)
@override_settings(STATIC_URL='http://testserver/static/')
def test_static_proxy_with_host(self):
querystring = urlencode(
[('u', static("test/image.jpg"))])
self._static_proxy(querystring)
    @override_settings(STATIC_URL='http://testserver:8000/static/')
    def test_static_proxy_with_static_url_with_full_host(self):
        """Proxy a static asset when STATIC_URL carries host and port."""
        # NOTE(review): this local import shadows whatever module-level
        # ``static`` the sibling tests use -- presumably they resolve to
        # the same helper, but confirm before removing either.
        from django.templatetags.static import static
        querystring = urlencode([('u', static("test/image.jpg"))])
        self._static_proxy(querystring)
def _get_csrftoken(self, response):
csrf = re.findall(
b'\<input type\=\'hidden\' name\=\'csrfmiddlewaretoken\' '
b'value\=\'([^"\']+)\' \/\>',
response.content
)
self.assertEqual(len(csrf), 1, 'No csrfmiddlewaretoken found!')
return csrf[0]
def _get_formurl(self, response):
action = re.findall(
b'\<form action\=\"([^\"]*)\" method\=\"post\"\>',
response.content
)
self.assertEqual(len(action), 1, 'No form with action found!')
if action[0] == b'':
action = response.request['PATH_INFO']
return action
@skipUnless('zhiliao.pages' in settings.INSTALLED_APPS,
'pages app required')
@override_settings(LANGUAGE_CODE="en")
def test_password_reset(self):
"""
Test sending of password-reset mails and evaluation of the links.
"""
self.client.logout()
del mail.outbox[:]
# Go to admin-login, search for reset-link
response = self.client.get('/admin/', follow=True)
self.assertContains(response, u'Forgot password?')
url = re.findall(
b'\<a href\=["\']([^\'"]+)["\']\>Forgot password\?\<\/a\>',
response.content
)
self.assertEqual(len(url), 1)
url = url[0]
# Go to reset-page, submit form
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
csrf = self._get_csrftoken(response)
url = self._get_formurl(response)
response = self.client.post(url, {
'csrfmiddlewaretoken': csrf,
'email': self._emailaddress
})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
# Get reset-link, submit form
url = re.findall(
r'http://example.com((?:/\w{2,3})?/reset/[^/]+/[^/]+/)',
mail.outbox[0].body
)[0]
response = self.client.get(url)
csrf = self._get_csrftoken(response)
url = self._get_formurl(response)
response = self.client.post(url, {
'csrfmiddlewaretoken': csrf,
'new_password1': 'newdefault',
'new_password2': 'newdefault'
}, follow=True)
self.assertEqual(response.status_code, 200)
    def test_richtext_widget(self):
        """
        Test that the RichTextField gets its widget type correctly from
        settings, and is able to be overridden in a form's Meta.
        """
        # Throwaway model: one field relying on the default widget, one
        # whose widget is overridden via modelform_factory.
        class RichTextModel(models.Model):
            text_default = RichTextField()
            text_overridden = RichTextField()
        form_class = modelform_factory(
            RichTextModel,
            fields=('text_default', 'text_overridden'),
            widgets={'text_overridden': Textarea})
        form = form_class()
        # The default widget class is resolved from the dotted path in
        # settings.RICHTEXT_WIDGET_CLASS.
        richtext_widget = import_dotted_path(settings.RICHTEXT_WIDGET_CLASS)
        self.assertIsInstance(form.fields['text_default'].widget,
                              richtext_widget)
        self.assertIsInstance(form.fields['text_overridden'].widget,
                              Textarea)
def test_admin_sites_dropdown(self):
"""
Ensures the site selection dropdown appears in the admin.
"""
self.client.login(username=self._username, password=self._password)
response = self.client.get('/admin/', follow=True)
set_site_url = reverse("set_site")
# Set site URL shouldn't appear without multiple sites.
self.assertNotContains(response, set_site_url)
site1 = Site.objects.create(domain="test-site-dropdown1.com",
name="test-site-dropdown1.com")
site2 = Site.objects.create(domain="test-site-dropdown2.com",
name="test-site-dropdown2.com")
response = self.client.get('/admin/', follow=True)
self.assertContains(response, set_site_url)
self.assertContains(response, site1.name)
self.assertContains(response, site2.name)
site1.delete()
site2.delete()
def test_dynamic_inline_admins(self):
"""
Verifies that ``BaseDynamicInlineAdmin`` properly adds the ``_order``
field for admins of ``Orderable`` subclasses.
"""
request = self._request_factory.get('/admin/')
request.user = self._user
field_admin = FieldAdmin(Form, AdminSite())
fieldsets = field_admin.get_fieldsets(request)
self.assertEqual(fieldsets[0][1]['fields'][-1], '_order')
fields = field_admin.get_fields(request)
self.assertEqual(fields[-1], '_order')
def test_dynamic_inline_admins_fields_tuple(self):
"""
Checks if moving the ``_order`` field works with immutable sequences.
"""
class MyModelInline(BaseDynamicInlineAdmin, InlineModelAdmin):
# Any model would work since we're only instantiating the class and
# not actually using it.
model = RichTextPage
fields = ('a', '_order', 'b')
request = self._request_factory.get('/admin/')
inline = MyModelInline(None, AdminSite())
fields = inline.get_fieldsets(request)[0][1]['fields']
self.assertSequenceEqual(fields, ('a', 'b', '_order'))
def test_dynamic_inline_admins_fields_without_order(self):
"""
Checks that ``_order`` field will be added if ``fields`` are listed
without it.
"""
class MyModelInline(BaseDynamicInlineAdmin, InlineModelAdmin):
model = RichTextPage
fields = ('a', 'b')
request = self._request_factory.get('/admin/')
inline = MyModelInline(None, AdminSite())
fields = inline.get_fieldsets(request)[0][1]['fields']
self.assertSequenceEqual(fields, ('a', 'b', '_order'))
    def test_dynamic_inline_admins_fieldsets(self):
        """
        Tests if ``_order`` is moved to the end of the last fieldsets fields.
        """
        class MyModelInline(BaseDynamicInlineAdmin, InlineModelAdmin):
            model = RichTextPage
            # NOTE(review): ('c') is a plain string, not a 1-tuple; the
            # assertions below pass with it, but ('c',) was likely the
            # intent -- confirm before relying on this fixture.
            fieldsets = (("Fieldset 1", {'fields': ('a',)}),
                         ("Fieldset 2", {'fields': ('_order', 'b')}),
                         ("Fieldset 3", {'fields': ('c')}))
        request = self._request_factory.get('/admin/')
        inline = MyModelInline(None, AdminSite())
        fieldsets = inline.get_fieldsets(request)
        # ``_order`` must end up last in the final fieldset and be removed
        # from the fieldset it was originally declared in.
        self.assertEqual(fieldsets[-1][1]["fields"][-1], '_order')
        self.assertNotIn('_order', fieldsets[1][1]["fields"])
@skipUnless("zhiliao.pages" in settings.INSTALLED_APPS,
            "pages app required")
class SiteRelatedTestCase(TestCase):

    def test_update_site(self):
        """
        New site-related objects get the current site on save; explicit
        assignments are honoured, and ``update_site=True/False`` forces
        or suppresses re-assignment. ``SITE_ID`` is restored on teardown.
        """
        from django.conf import settings
        from zhiliao.utils.sites import current_site_id
        # setup: a sentinel replaces the original bare ``except:`` (which
        # swallowed every exception) and the truthiness teardown check
        # (which would have deleted a legitimately falsy SITE_ID).
        _unset = object()
        old_site_id = getattr(settings, "SITE_ID", _unset)
        site1 = Site.objects.create(domain="site1.com")
        site2 = Site.objects.create(domain="site2.com")
        # default behaviour, page gets assigned current site
        settings.SITE_ID = site2.pk
        self.assertEqual(settings.SITE_ID, current_site_id())
        page = RichTextPage()
        page.save()
        self.assertEqual(page.site_id, site2.pk)
        # Subsequent saves do not update site to current site
        page.site = site1
        page.save()
        self.assertEqual(page.site_id, site1.pk)
        # resave w/ update_site=True, page gets assigned current site
        settings.SITE_ID = site1.pk
        page.site = site2
        page.save(update_site=True)
        self.assertEqual(page.site_id, site1.pk)
        # resave w/ update_site=False, page does not update site
        settings.SITE_ID = site2.pk
        page.save(update_site=False)
        self.assertEqual(page.site_id, site1.pk)
        # When update_site=True, new page gets assigned current site
        settings.SITE_ID = site2.pk
        page = RichTextPage()
        page.site = site1
        page.save(update_site=True)
        self.assertEqual(page.site_id, site2.pk)
        # When update_site=False, new page keeps current site
        settings.SITE_ID = site2.pk
        page = RichTextPage()
        page.site = site1
        page.save(update_site=False)
        self.assertEqual(page.site_id, site1.pk)
        # When site explicitly assigned, new page keeps assigned site
        settings.SITE_ID = site2.pk
        page = RichTextPage()
        page.site = site1
        page.save()
        self.assertEqual(page.site_id, site1.pk)
        # tear down: restore or remove SITE_ID, then delete the sites.
        if old_site_id is not _unset:
            settings.SITE_ID = old_site_id
        else:
            del settings.SITE_ID
        site1.delete()
        site2.delete()
import numpy.testing as npt
import numpy as np
import nose
from scipy import stats
DECIMAL_meanvar = 0#1 # was 0
distdiscrete = [
['bernoulli',(0.3,)],
['binom', (5, 0.4)],
['boltzmann',(1.4, 19)],
['dlaplace', (0.8,)], #0.5
['geom', (0.5,)],
['hypergeom',(30, 12, 6)],
['hypergeom',(21,3,12)], #numpy.random (3,18,12) numpy ticket:921
['hypergeom',(21,18,11)], #numpy.random (18,3,11) numpy ticket:921
['logser', (0.6,)], # reenabled, numpy ticket:921
['nbinom', (5, 0.5)],
['nbinom', (0.4, 0.4)], #from tickets: 583
['planck', (0.51,)], #4.1
['poisson', (0.6,)],
['randint', (7, 31)],
['skellam', (15, 8)]]
# ['zipf', (4,)] ] # arg=4 is ok,
# Zipf broken for arg = 2, e.g. weird .stats
# looking closer, mean, var should be inf for arg=2
#@npt.dec.slow
def test_discrete_basic():
    # Nose-style generator test: for each discrete distribution in
    # ``distdiscrete``, draw a fixed-seed sample and yield one check per
    # property (sample moments, cdf/ppf roundtrips, pmf/cdf consistency,
    # misc methods, skew/kurtosis and a chisquare goodness-of-fit test).
    for distname, arg in distdiscrete:
        distfn = getattr(stats,distname)
        #assert stats.dlaplace.rvs(0.8) is not None
        np.random.seed(9765456)
        rvs = distfn.rvs(size=2000,*arg)
        supp = np.unique(rvs)
        m,v = distfn.stats(*arg)
        #yield npt.assert_almost_equal(rvs.mean(), m, decimal=4,err_msg='mean')
        #yield npt.assert_almost_equal, rvs.mean(), m, 2, 'mean' # does not work
        yield check_sample_meanvar, rvs.mean(), m, distname + ' sample mean test'
        yield check_sample_meanvar, rvs.var(), v, distname + ' sample var test'
        yield check_cdf_ppf, distfn, arg, distname + ' cdf_ppf'
        yield check_cdf_ppf2, distfn, arg, supp, distname + ' cdf_ppf'
        yield check_pmf_cdf, distfn, arg, distname + ' pmf_cdf'
        # zipf doesn't fail, but generates floating point warnings.
        # Should be checked.
        if not distname in ['zipf']:
            yield check_oth, distfn, arg, distname + ' oth'
            skurt = stats.kurtosis(rvs)
            sskew = stats.skew(rvs)
            yield check_sample_skew_kurt, distfn, arg, skurt, sskew, \
                  distname + ' skew_kurt'
        # dlaplace doesn't fail, but generates lots of floating point warnings.
        # Should be checked.
        if not distname in ['dlaplace']: #['logser']: #known failure, fixed
            alpha = 0.01
            yield check_discrete_chisquare, distfn, arg, rvs, alpha, \
                distname + ' chisquare'
@npt.dec.slow
def test_discrete_extra():
    """Generator test: ppf/isf boundary and entropy sanity checks."""
    extra_checks = [(check_ppf_limits, ' ppf limit test'),
                    (check_isf_limits, ' isf limit test'),
                    (check_entropy, ' entropy nan test')]
    for distname, arg in distdiscrete:
        distfn = getattr(stats, distname)
        for func, suffix in extra_checks:
            yield func, distfn, arg, distname + suffix
@npt.dec.skipif(True)
def test_discrete_private():
    #testing private methods mostly for debugging
    # some tests might fail by design,
    # e.g. incorrect definition of distfn.a and distfn.b
    # Verifies E[X] = m and E[X**2] = v + m**2 through both the generic
    # moment machinery and the frozen-distribution interface.
    for distname, arg in distdiscrete:
        distfn = getattr(stats,distname)
        rvs = distfn.rvs(size=10000,*arg)
        m,v = distfn.stats(*arg)
        yield check_ppf_ppf, distfn, arg
        yield check_cdf_ppf_private, distfn, arg, distname
        yield check_generic_moment, distfn, arg, m, 1, 3 # last is decimal
        yield check_generic_moment, distfn, arg, v+m*m, 2, 3 # last is decimal
        yield check_moment_frozen, distfn, arg, m, 1, 3 # last is decimal
        yield check_moment_frozen, distfn, arg, v+m*m, 2, 3 # last is decimal
def check_sample_meanvar(sm, m, msg):
    """Compare a sample moment ``sm`` against the theoretical moment ``m``.

    Finite moments must agree to ``DECIMAL_meanvar`` places; an infinite
    theoretical moment is accepted when the sample value is very large.
    """
    if np.isinf(m):
        npt.assert_(sm > 10000, msg='infinite moment, sm = ' + str(sm))
    else:
        npt.assert_almost_equal(sm, m, decimal=DECIMAL_meanvar,
                                err_msg=msg + ' - finite moment')
def check_sample_var(sm, m, msg):
    """Assert a sample variance matches the theoretical variance."""
    npt.assert_almost_equal(sm, m, decimal=DECIMAL_meanvar,
                            err_msg=msg + 'var')
def check_cdf_ppf(distfn, arg, msg):
    """Check that ppf is the left-continuous inverse of cdf at the median."""
    median = distfn.ppf(0.5, *arg)
    prob = distfn.cdf(median, *arg)
    # Probing just below cdf(median) must map back onto the median...
    npt.assert_almost_equal(distfn.ppf(prob - 1e-6, *arg), median,
                            err_msg=msg + 'ppf-cdf-median')
    # ...while probing above it must move strictly past the median.
    npt.assert_(distfn.ppf(prob + 1e-4, *arg) > median, msg + 'ppf-cdf-next')
def check_cdf_ppf2(distfn, arg, supp, msg):
    """Round-trip every support point through cdf then ppf."""
    cdf_vals = distfn.cdf(supp, *arg)
    npt.assert_array_equal(distfn.ppf(cdf_vals, *arg),
                           supp, msg + '-roundtrip')
    # A small downward nudge must still land on the same support points
    # (this could fail if some pmf value is below 1e-8).
    npt.assert_array_equal(distfn.ppf(cdf_vals - 1e-8, *arg),
                           supp, msg + '-roundtrip')
def check_cdf_ppf_private(distfn,arg,msg):
    # Same inverse-roundtrip check as check_cdf_ppf, but exercising the
    # private _ppf implementation directly (bypassing public argument
    # handling).
    ppf05 = distfn._ppf(0.5,*arg)
    cdf05 = distfn.cdf(ppf05,*arg)
    npt.assert_almost_equal(distfn._ppf(cdf05-1e-6,*arg),ppf05,
                            err_msg=msg + '_ppf-cdf-median ')
    npt.assert_((distfn._ppf(cdf05+1e-4,*arg)>ppf05), msg + '_ppf-cdf-next')
def check_ppf_ppf(distfn, arg):
    """Public ppf must be finite and agree with the private _ppf."""
    npt.assert_(distfn.ppf(0.5, *arg) < np.inf)
    public = distfn.ppf([0.5, 0.9], *arg)
    private = [distfn._ppf(0.5, *arg), distfn._ppf(0.9, *arg)]
    npt.assert_(np.all(public < np.inf))
    npt.assert_(private[0] == distfn.ppf(0.5, *arg))
    npt.assert_(private[1] == distfn.ppf(0.9, *arg))
    npt.assert_(private[0] == public[0])
    npt.assert_(private[1] == public[1])
def check_pmf_cdf(distfn, arg, msg):
    """Check that the cdf equals the cumulative sum of the pmf.

    Starts one point below the 1% quantile and compares ten consecutive
    support points, anchoring the cumsum at cdf(start).
    """
    # ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
    # documented replacement and behaves identically here.
    startind = int(distfn._ppf(0.01, *arg) - 1)
    index = range(startind, startind + 10)
    cdfs = distfn.cdf(index, *arg)
    npt.assert_almost_equal(cdfs,
                            distfn.pmf(index, *arg).cumsum() +
                            cdfs[0] - distfn.pmf(index[0], *arg),
                            decimal=4, err_msg=msg + 'pmf-cdf')
def check_generic_moment(distfn, arg, m, k, decim):
    """Compare the k-th generic moment with the expected value ``m``."""
    actual = distfn.generic_moment(k, *arg)
    npt.assert_almost_equal(actual, m, decimal=decim,
                            err_msg=str(distfn) + ' generic moment test')
def check_moment_frozen(distfn, arg, m, k, decim):
    """Compare the k-th moment of the frozen distribution with ``m``."""
    frozen = distfn(*arg)
    npt.assert_almost_equal(frozen.moment(k), m, decimal=decim,
                            err_msg=str(distfn) + ' frozen moment test')
def check_oth(distfn, arg, msg):
    #checking other methods of distfn
    # sf must complement cdf near the mean, isf(0.5) must bracket the
    # median, and isf(0.5) must coincide exactly with ppf(0.5).
    meanint = round(float(distfn.stats(*arg)[0])) # closest integer to mean
    npt.assert_almost_equal(distfn.sf(meanint, *arg), 1 - \
                            distfn.cdf(meanint, *arg), decimal=8)
    median_sf = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
    npt.assert_equal(distfn.isf(0.5, *arg), distfn.ppf(0.5, *arg))
# The next three helpers were adapted from test_continuous_extra.
def check_ppf_limits(distfn, arg, msg):
    """ppf must hit the support bounds at 0/1 and be nan outside [0, 1]."""
    below, low, upp, above = distfn.ppf([-1, 0, 1, 2], *arg)
    assert_equal_inf_nan(distfn.a - 1, low, msg + 'ppf lower bound')
    assert_equal_inf_nan(distfn.b, upp, msg + 'ppf upper bound')
    npt.assert_(np.isnan(below), msg + 'ppf out of bounds - below')
    npt.assert_(np.isnan(above), msg + 'ppf out of bounds - above')
def check_isf_limits(distfn, arg, msg):
    """isf must hit the support bounds at 1/0 and be nan outside [0, 1]."""
    below, low, upp, above = distfn.isf([-1, 0, 1, 2], *arg)
    assert_equal_inf_nan(distfn.a - 1, upp, msg + 'isf lower bound')
    assert_equal_inf_nan(distfn.b, low, msg + 'isf upper bound')
    npt.assert_(np.isnan(below), msg + 'isf out of bounds - below')
    npt.assert_(np.isnan(above), msg + 'isf out of bounds - above')
def assert_equal_inf_nan(v1, v2, msg):
    """Assert v2 matches v1; an infinite v1 accepts inf or nan for v2."""
    npt.assert_(not np.isnan(v1))
    if np.isinf(v1):
        npt.assert_(np.isinf(v2) or np.isnan(v2),
                    msg + ' - infinite, v2=%s' % str(v2))
    else:
        npt.assert_almost_equal(v1, v2, decimal=10,
                                err_msg=msg + ' - finite')
def check_sample_skew_kurt(distfn, arg, sk, ss, msg):
    """Compare sample kurtosis ``sk`` and skew ``ss`` with theory.

    ``stats(moments='ks')`` returns the moments in the order requested:
    kurtosis first, then skew.
    """
    # The original passed moment='ks' (wrong keyword, silently ignored so
    # mean/variance came back) and then built tuples like
    # ``check_sample_meanvar, sk, k, ...`` -- no-op expressions, so the
    # comparisons were never executed. Actually call the checks.
    k, s = distfn.stats(*arg, moments='ks')
    check_sample_meanvar(sk, k, msg + 'sample kurtosis test')
    check_sample_meanvar(ss, s, msg + 'sample skew test')
def check_entropy(distfn, arg, msg):
    """The distribution's entropy must be a real number (not nan)."""
    ent = distfn.entropy(*arg)
    npt.assert_(not np.isnan(ent), msg + 'test Entropy is nan')
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    '''perform chisquare test for random sample of a discrete distribution

    Parameters
    ----------
    distfn : scipy.stats discrete distribution
        distribution to test against
    arg : sequence
        parameters of distribution
    rvs : array_like
        random sample to test
    alpha : float
        significance level, threshold for p-value
    msg : string
        label used in the failure message

    Raises an assertion error when the p-value falls below ``alpha``.
    '''
    n = len(rvs)
    nsupp = 20
    wsupp = 1.0/nsupp
    # construct intervals with minimum mass 1/nsupp
    # intervalls are left-half-open as in a cdf difference
    # ``range`` with explicit int bounds replaces the Python-2-only
    # ``xrange`` and tolerates float (or infinite) support endpoints.
    lo = int(max(distfn.a, -1000))
    hi = int(min(distfn.b, 1000))
    distsupport = range(lo, hi + 1)
    last = 0
    distsupp = [lo]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii,*arg)
        if current - last >= wsupp-1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            if current > (1-wsupp):
                break
    # the remaining tail mass goes into one final (possibly infinite) bin
    if distsupp[-1] < distfn.b:
        distsupp.append(distfn.b)
        distmass.append(1-last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)
    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp+1e-8
    histsupp[0] = distfn.a
    # find sample frequencies and perform chisquare test
    freq, hsupp = np.histogram(rvs, histsupp)
    (chis, pval) = stats.chisquare(np.array(freq), n*distmass)
    npt.assert_(pval > alpha, 'chisquare - test for %s'
                ' at arg = %s with pval = %s' % (msg, str(arg), str(pval)))
if __name__ == "__main__":
    # Run this module's generator tests under nose; ``-s`` disables
    # stdout capture so debugging output remains visible.
    #nose.run(argv=['', __file__])
    nose.runmodule(argv=[__file__,'-s'], exit=False)
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"bufio"
"errors"
"os"
)
// errDirectIOUnsupported signals that direct IO cannot be used; presumably
// returned by the direct-IO code paths elsewhere in this file -- confirm
// against the directIOWriter implementation.
var errDirectIOUnsupported = errors.New("direct IO is unsupported")

// BufWriter is a buffered writer over an *os.File: buffered Write/Flush
// plus the ability to Reset the destination file. It is implemented by
// the plain bufio-backed writer below (and, per the comments there, by a
// directIOWriter when direct IO is enabled).
type BufWriter interface {
	Write([]byte) (int, error)
	Flush() error
	// Reset redirects subsequent writes to f.
	Reset(f *os.File) error
}
// writer is a specialized wrapper around bufio.Writer.
// It is used when Direct IO isn't enabled, as using directIOWriter in such cases is impractical.
type writer struct {
*bufio.Writer
}
func (b *writer) Reset(f *os.File) error {
b.Writer.Reset(f)
return nil
} | go | github | https://github.com/prometheus/prometheus | tsdb/fileutil/direct_io.go |
# Functions that should behave the same as Numeric and need changing
import numpy as np
import numpy.core.multiarray as mu
import numpy.core.numeric as nn
from typeconv import convtypecode, convtypecode2
__all__ = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue',
'cumsum', 'cumproduct', 'compress', 'fromfunction',
'ones', 'empty', 'identity', 'zeros', 'array', 'asarray',
'nonzero', 'reshape', 'arange', 'fromstring', 'ravel', 'trace',
'indices', 'where','sarray','cross_product', 'argmax', 'argmin',
'average']
def take(a, indicies, axis=0):
return np.take(a, indicies, axis)
def repeat(a, repeats, axis=0):
return np.repeat(a, repeats, axis)
def sum(x, axis=0):
return np.sum(x, axis)
def product(x, axis=0):
return np.product(x, axis)
def sometrue(x, axis=0):
return np.sometrue(x, axis)
def alltrue(x, axis=0):
return np.alltrue(x, axis)
def cumsum(x, axis=0):
return np.cumsum(x, axis)
def cumproduct(x, axis=0):
return np.cumproduct(x, axis)
def argmax(x, axis=-1):
return np.argmax(x, axis)
def argmin(x, axis=-1):
return np.argmin(x, axis)
def compress(condition, m, axis=-1):
return np.compress(condition, m, axis)
def fromfunction(args, dimensions):
return np.fromfunction(args, dimensions, dtype=int)
def ones(shape, typecode='l', savespace=0, dtype=None):
    """ones(shape, dtype=int) returns an array of the given
    dimensions which is initialized to all ones.
    """
    resolved = convtypecode(typecode, dtype)
    result = mu.empty(shape, resolved)
    result.fill(1)
    return result
def zeros(shape, typecode='l', savespace=0, dtype=None):
    """zeros(shape, dtype=int) returns an array of the given
    dimensions which is initialized to all zeros
    """
    return mu.zeros(shape, convtypecode(typecode, dtype))
def identity(n,typecode='l', dtype=None):
"""identity(n) returns the identity 2-d array of shape n x n.
"""
dtype = convtypecode(typecode, dtype)
return nn.identity(n, dtype)
def empty(shape, typecode='l', dtype=None):
dtype = convtypecode(typecode, dtype)
return mu.empty(shape, dtype)
def array(sequence, typecode=None, copy=1, savespace=0, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.array(sequence, dtype, copy=copy)
def sarray(a, typecode=None, copy=False, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.array(a, dtype, copy)
def asarray(a, typecode=None, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.array(a, dtype, copy=0)
def nonzero(a):
    # Return the indices of the nonzero elements of the 1-d array ``a``.
    # Unlike np.nonzero (a tuple of index arrays, one per dimension),
    # this oldnumeric wrapper supports only 1-d input and returns the
    # single index array directly.
    res = np.nonzero(a)
    if len(res) == 1:
        return res[0]
    else:
        # Python 2 raise syntax, kept for this module's Python 2 target.
        raise ValueError, "Input argument must be 1d"
def reshape(a, shape):
return np.reshape(a, shape)
def arange(start, stop=None, step=1, typecode=None, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.arange(start, stop, step, dtype)
def fromstring(string, typecode='l', count=-1, dtype=None):
dtype = convtypecode(typecode, dtype)
return mu.fromstring(string, dtype, count=count)
def ravel(m):
return np.ravel(m)
def trace(a, offset=0, axis1=0, axis2=1):
    """Return the sum along the ``offset``-th diagonal of ``a``.

    Thin oldnumeric-compatibility wrapper around ``np.trace``.
    """
    # The original forwarded hard-coded defaults (offset=0, axis1=0,
    # axis2=1) instead of the caller's arguments, silently ignoring them.
    return np.trace(a, offset=offset, axis1=axis1, axis2=axis2)
def indices(dimensions, typecode=None, dtype=None):
dtype = convtypecode(typecode, dtype)
return np.indices(dimensions, dtype)
def where(condition, x, y):
return np.where(condition, x, y)
def cross_product(a, b, axis1=-1, axis2=-1):
return np.cross(a, b, axis1, axis2)
def average(a, axis=0, weights=None, returned=False):
return np.average(a, axis, weights, returned) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate java DescriptorProto file.
Usage:
protobuf_lite_java_descriptor_proto.py {protoc} {java_out} {include} {proto_files}
This is a helper file for the protobuf_lite_java_gen_descriptor_proto action in
protobuf.gyp.
It performs the following steps:
1. Recursively deletes old java_out directory.
2. Creates java_out directory.
3. Generates Java descriptor proto file using protoc.
"""
import os
import shutil
import subprocess
import sys
def main(argv):
    """Regenerate the Java descriptor proto sources with protoc.

    Expects argv = [prog, protoc, java_out, include, proto...]; returns
    protoc's exit status, or 1 on bad usage.
    """
    if len(argv) < 4:
        usage()
        return 1
    protoc_path, java_out, include = argv[1:4]
    proto_files = argv[4:]

    # Start from a clean output directory.
    if os.path.exists(java_out):
        shutil.rmtree(java_out)
    os.makedirs(java_out)

    # Generate the Java sources using protoc.
    command = [protoc_path, '--java_out', java_out, '-I' + include]
    command.extend(proto_files)
    return subprocess.call(command)
def usage():
    """Print the module docstring describing command-line usage."""
    # The original line carried a stray trailing semicolon -- harmless
    # but un-Pythonic.
    print(__doc__)
if __name__ == '__main__':
sys.exit(main(sys.argv)) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
    """Test fixture: unconditionally abort the opening handshake."""
    message = "Aborted in web_socket_do_extra_handshake"
    raise handshake.AbortedByUserException(message)
def web_socket_transfer_data(request):
    """No-op: the handshake handler in this module always aborts, so no
    connection ever reaches the data-transfer phase."""
    pass
# vi:sts=4 sw=4 et | unknown | codeparrot/codeparrot-clean | ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/brcm,bcm6358-gpio-sysctl.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom BCM6358 GPIO System Controller
maintainers:
- Álvaro Fernández Rojas <noltari@gmail.com>
- Jonas Gorski <jonas.gorski@gmail.com>
description:
Broadcom BCM6358 SoC GPIO system controller which provides a register map
for controlling the GPIO and pins of the SoC.
properties:
"#address-cells": true
"#size-cells": true
compatible:
items:
- const: brcm,bcm6358-gpio-sysctl
- const: syscon
- const: simple-mfd
ranges:
maxItems: 1
reg:
maxItems: 1
patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
$ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
Documentation/devicetree/bindings/gpio/brcm,bcm63xx-gpio.yaml.
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
$ref: /schemas/pinctrl/brcm,bcm6358-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml.
required:
- "#address-cells"
- compatible
- ranges
- reg
- "#size-cells"
additionalProperties: false
examples:
- |
syscon@fffe0080 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "brcm,bcm6358-gpio-sysctl", "syscon", "simple-mfd";
reg = <0xfffe0080 0x80>;
ranges = <0 0xfffe0080 0x80>;
gpio@0 {
compatible = "brcm,bcm6358-gpio";
reg-names = "dirout", "dat";
reg = <0x0 0x8>, <0x8 0x8>;
gpio-controller;
gpio-ranges = <&pinctrl 0 0 40>;
#gpio-cells = <2>;
};
pinctrl: pinctrl@18 {
compatible = "brcm,bcm6358-pinctrl";
reg = <0x18 0x4>;
pinctrl_ebi_cs: ebi_cs-pins {
function = "ebi_cs";
groups = "ebi_cs_grp";
};
pinctrl_uart1: uart1-pins {
function = "uart1";
groups = "uart1_grp";
};
pinctrl_serial_led: serial_led-pins {
function = "serial_led";
groups = "serial_led_grp";
};
pinctrl_legacy_led: legacy_led-pins {
function = "legacy_led";
groups = "legacy_led_grp";
};
pinctrl_led: led-pins {
function = "led";
groups = "led_grp";
};
pinctrl_spi_cs_23: spi_cs-pins {
function = "spi_cs";
groups = "spi_cs_grp";
};
pinctrl_utopia: utopia-pins {
function = "utopia";
groups = "utopia_grp";
};
pinctrl_pwm_syn_clk: pwm_syn_clk-pins {
function = "pwm_syn_clk";
groups = "pwm_syn_clk_grp";
};
pinctrl_sys_irq: sys_irq-pins {
function = "sys_irq";
groups = "sys_irq_grp";
};
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mfd/brcm,bcm6358-gpio-sysctl.yaml |
name: Filter test configs matrix
description: |
Apply filter to the test configs matrix to keep only entries specified
by the PR test-config labels. If no test-config label is set, the same
test configs matrix is returned untouched.
inputs:
github-token:
description: GITHUB_TOKEN
required: true
test-matrix:
required: true
type: string
description: JSON description of what test configs to run.
selected-test-configs:
required: false
type: string
description: |
A comma-separated list of test configurations from the test matrix to keep,
The empty list means we are going to keep every configurations by defaults
default: ""
job-name:
type: string
required: false
default: ""
outputs:
test-matrix:
description: The filtered test configs matrix.
value: ${{ steps.filter.outputs.test-matrix }}
is-test-matrix-empty:
description: True if the filtered test configs matrix is empty. False otherwise.
value: ${{ steps.filter.outputs.is-test-matrix-empty }}
keep-going:
description: True if keep-going label was on PR or [keep-going] in PR body.
value: ${{ steps.filter.outputs.keep-going }}
reenabled-issues:
description: Comma separated list of issue numbers that should correspond to disable test issues that the PR fixes
value: ${{ steps.filter.outputs.reenabled-issues }}
ci-verbose-test-logs:
description: True if ci-verbose-test-logs label was on PR or [ci-verbose-test-logs] in PR body.
value: ${{ steps.filter.outputs.ci-verbose-test-logs }}
ci-test-showlocals:
description: True if ci-test-showlocals label was on PR or [ci-test-showlocals] in PR body.
value: ${{ steps.filter.outputs.ci-test-showlocals }}
ci-no-test-timeout:
description: True if ci-no-test-timeout label was on PR or [ci-no-test-timeout] in PR body.
value: ${{ steps.filter.outputs.ci-no-test-timeout }}
ci-no-td:
description: True if ci-no-td label was on PR or [ci-no-td] in PR body.
value: ${{ steps.filter.outputs.ci-no-td }}
ci-td-distributed:
description: True if ci-td-distributed label was on PR or [ci-td-distributed] in PR body.
value: ${{ steps.filter.outputs.ci-td-distributed }}
labels:
description: The list of labels from the PR
value: ${{ steps.filter.outputs.labels }}
runs:
using: composite
steps:
- name: Setup uv
uses: pytorch/test-infra/.github/actions/setup-uv@main
with:
python-version: "3.12"
- uses: nick-fields/retry@v3.0.0
name: Setup dependencies
env:
GITHUB_TOKEN: ${{ inputs.github-token }}
with:
shell: bash
timeout_minutes: 10
max_attempts: 5
retry_wait_seconds: 30
command: |
set -eux
# Pre-fetch dependencies to cache them for later uv run calls
uv run --no-project --with requests==2.27.1 --with pyyaml==6.0.2 python -c "import requests, yaml; print('Dependencies ready')"
- name: Parse ref
id: parse-ref
shell: bash
run: |
set -x
# Use relative path here as this could be checked out anywhere, not necessarily
# in runner workspace
uv run --no-project --with requests==2.27.1 --with pyyaml==6.0.2 python "${GITHUB_ACTION_PATH}/../../scripts/parse_ref.py"
- name: Get the job name
id: get-job-name
if: inputs.job-name == ''
continue-on-error: true
shell: bash
run: |
set -x
# TODO: This is a very hacky way to get the job name. GitHub runner has the info
# but doesn't expose it in anyway. The job name is part of the job message the
# runner receives, so it's there and printed out to the diag log. Below is the
# code responsible for printing it. Need to check with GitHub to see if they can
# expose this variable as part of GitHub context.
# https://github.com/actions/runner/blob/main/src/Runner.Worker/JobExtension.cs#L345
pushd "${{ runner.workspace }}/../../_diag"
pwd
LOG_FILE=$(grep -l -r "${{ github.sha }}" *.log | tail -n 1)
if [ -n "${LOG_FILE}" ]; then
# For some reasons, awk {print $2} on Linux and Windows (bash) work correctly while it
# needs to be awk {print $3} on MacOS
case ${RUNNER_OS} in
macOS)
JOB_NAME=$(grep -r "\"jobDisplayName\"" "${LOG_FILE}" | awk -F '[:]' '{print $3}' | sed 's/"//g' | xargs)
;;
*)
JOB_NAME=$(grep -r "\"jobDisplayName\"" "${LOG_FILE}" | awk -F '[:]' '{print $2}' | sed 's/"//g' | xargs)
;;
esac
echo "job-name=${JOB_NAME}" >> "${GITHUB_OUTPUT}"
fi
popd
- name: Select all requested test configurations
shell: bash
env:
GITHUB_TOKEN: ${{ inputs.github-token }}
JOB_NAME: ${{ inputs.job-name == '' && steps.get-job-name.outputs.job-name || inputs.job-name }}
PR_NUMBER: ${{ github.event.pull_request.number }}
TAG: ${{ steps.parse-ref.outputs.tag }}
EVENT_NAME: ${{ github.event_name }}
SCHEDULE: ${{ github.event.schedule }}
HEAD_BRANCH: ${{ steps.parse-ref.outputs.branch }}
id: filter
run: |
echo "Workflow: ${GITHUB_WORKFLOW}"
echo "Job name: ${JOB_NAME}"
# Use relative path here as this could be checked out anywhere, not necessarily
# in runner workspace
uv run --no-project --with requests==2.27.1 --with pyyaml==6.0.2 python "${GITHUB_ACTION_PATH}/../../scripts/filter_test_configs.py" \
--workflow "${GITHUB_WORKFLOW}" \
--job-name "${JOB_NAME}" \
--test-matrix "${{ inputs.test-matrix }}" \
--selected-test-configs "${{ inputs.selected-test-configs }}" \
--pr-number "${PR_NUMBER}" \
--tag "${TAG}" \
--event-name "${EVENT_NAME}" \
--schedule "${SCHEDULE}" \
--branch "${HEAD_BRANCH}"
- name: Print the filtered test matrix
shell: bash
run: |
echo "Filtered matrix:"
echo "${{ steps.filter.outputs.test-matrix }}"
echo
echo "Is the current job unstable? ${{ steps.filter.outputs.is-unstable }}"
echo
echo "Is keep-going label set? ${{ steps.filter.outputs.keep-going }}"
echo
echo "Is ci-no-td label set? ${{ steps.filter.outputs.ci-no-td }}"
echo
echo "Reenabled issues? ${{ steps.filter.outputs.reenabled-issues }}" | unknown | github | https://github.com/pytorch/pytorch | .github/actions/filter-test-configs/action.yml |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_OPS_CANONICALIZATION_HELPER_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_OPS_CANONICALIZATION_HELPER_H_
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
namespace mlir {
namespace TF {
// Eliminate attributes that are not needed, but can get attached to Ops
// during import.
template <typename Op>
struct DropAttributes : public OpRewritePattern<Op> {
using OpRewritePattern<Op>::OpRewritePattern;
// Drop the "output_shapes" attribute.
LogicalResult matchAndRewrite(Op op,
PatternRewriter &rewriter) const override {
bool found = !!op->removeAttr("output_shapes");
return success(found);
}
};
// Helper function to create TF op while copying all underscore attributes from
// another TF op.
// TODO(jpienaar): This is a workaround until behavior is established.
template <typename OpTy, typename... Args>
OpTy CreateTfOp(RewriterBase &b, Operation *op, Args &&...args) {
auto ret = OpTy::create(b, op->getLoc(), std::forward<Args>(args)...);
CopyDeviceAndUnderscoredAttributes(op, ret.getOperation());
return ret;
}
// Helper function to replace TF op with another op while copying all underscore
// attributes from the TF op.
// TODO(jpienaar): This is a workaround until behavior is established.
template <typename OpTy, typename... Args>
OpTy ReplaceTfOpWithNewOp(RewriterBase &b, Operation *op, Args &&...args) {
auto ret = CreateTfOp<OpTy>(b, op, std::forward<Args>(args)...);
b.replaceOp(op, ret.getOperation()->getResults());
return ret;
}
} // namespace TF
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_OPS_CANONICALIZATION_HELPER_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/tensorflow/ir/tf_ops_canonicalization_helper.h |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
from frappe.modules import make_boilerplate
class Report(Document):
def validate(self):
"""only administrator can save standard report"""
if not self.module:
self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")
if not self.is_standard:
self.is_standard = "No"
if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
self.is_standard = "Yes"
if self.is_standard == "Yes" and frappe.session.user!="Administrator":
frappe.msgprint(_("Only Administrator can save a standard report. Please rename and save."),
raise_exception=True)
if self.report_type in ("Query Report", "Script Report") \
and frappe.session.user!="Administrator":
frappe.msgprint(_("Only Administrator allowed to create Query / Script Reports"),
raise_exception=True)
def on_update(self):
self.export_doc()
def export_doc(self):
if frappe.flags.in_import:
return
if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
export_to_files(record_list=[['Report', self.name]],
record_module=self.module)
self.create_report_py()
def create_report_py(self):
if self.report_type == "Script Report":
make_boilerplate("controller.py", self, {"name": self.name})
make_boilerplate("controller.js", self, {"name": self.name})
@Document.whitelist
def toggle_disable(self, disable):
self.db_set("disabled", cint(disable)) | unknown | codeparrot/codeparrot-clean | ||
urlpatterns = []
handler404 = "csrf_tests.views.csrf_token_error_handler" | python | github | https://github.com/django/django | tests/csrf_tests/csrf_token_error_handler_urls.py |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from threading import Lock
import codecs
from ..structuredlog import log_levels
class BaseHandler(object):
"""A base handler providing message handling facilities to
derived classes.
:param inner: A handler-like callable that may receive messages
from a log user.
"""
def __init__(self, inner):
self.wrapped = []
if hasattr(inner, "handle_message"):
self.wrapped.append(inner)
self.message_handlers = {}
def register_message_handlers(self, topic, handlers):
self.message_handlers[topic] = handlers
def handle_message(self, topic, cmd, *args):
"""Handles a message for the given topic by calling a subclass-defined
callback for the command.
:param topic: The topic of the broadcasted message. Handlers opt-in to
receiving messages by identifying a topic when calling
register_message_handlers.
:param command: The command to issue. This is a string that corresponds
to a callback provided by the target.
:param arg: Arguments to pass to the identified message callback, if any.
"""
rv = []
if topic in self.message_handlers and cmd in self.message_handlers[topic]:
rv.append(self.message_handlers[topic][cmd](*args))
for inner in self.wrapped:
rv.extend(inner.handle_message(topic, cmd, *args))
return rv
class LogLevelFilter(BaseHandler):
"""Handler that filters out messages with action of log and a level
lower than some specified level.
:param inner: Handler to use for messages that pass this filter
:param level: Minimum log level to process
"""
def __init__(self, inner, level):
BaseHandler.__init__(self, inner)
self.inner = inner
self.level = log_levels[level.upper()]
def __call__(self, item):
if (item["action"] != "log" or
log_levels[item["level"].upper()] <= self.level):
return self.inner(item)
class StreamHandler(BaseHandler):
"""Handler for writing to a file-like object
:param stream: File-like object to write log messages to
:param formatter: formatter to convert messages to string format
"""
_lock = Lock()
def __init__(self, stream, formatter):
BaseHandler.__init__(self, formatter)
assert stream is not None
# This is a hack to deal with the case where we are passed a
# StreamWriter (e.g. by mach for stdout). A StreamWriter requires
# the code to handle unicode in exactly the opposite way compared
# to a normal stream i.e. you always have to pass in a Unicode
# object rather than a string object. Cope with that by extracting
# the underlying raw stream.
if isinstance(stream, codecs.StreamWriter):
stream = stream.stream
self.formatter = formatter
self.stream = stream
def __call__(self, data):
"""Write a log message.
:param data: Structured log message dictionary."""
formatted = self.formatter(data)
if not formatted:
return
with self._lock:
if isinstance(formatted, unicode):
self.stream.write(formatted.encode("utf-8", "replace"))
elif isinstance(formatted, str):
self.stream.write(formatted)
else:
assert False, "Got output from the formatter of an unexpected type"
self.stream.flush() | unknown | codeparrot/codeparrot-clean | ||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_HAL_MSA_MACROS_H
#define OPENCV_CORE_HAL_MSA_MACROS_H
#ifdef __mips_msa
#include "msa.h"
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Define 64 bits vector types */
typedef signed char v8i8 __attribute__ ((vector_size(8), aligned(8)));
typedef unsigned char v8u8 __attribute__ ((vector_size(8), aligned(8)));
typedef short v4i16 __attribute__ ((vector_size(8), aligned(8)));
typedef unsigned short v4u16 __attribute__ ((vector_size(8), aligned(8)));
typedef int v2i32 __attribute__ ((vector_size(8), aligned(8)));
typedef unsigned int v2u32 __attribute__ ((vector_size(8), aligned(8)));
typedef long long v1i64 __attribute__ ((vector_size(8), aligned(8)));
typedef unsigned long long v1u64 __attribute__ ((vector_size(8), aligned(8)));
typedef float v2f32 __attribute__ ((vector_size(8), aligned(8)));
typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
/* Load values from the given memory a 64-bit vector. */
#define msa_ld1_s8(__a) (*((v8i8*)(__a)))
#define msa_ld1_s16(__a) (*((v4i16*)(__a)))
#define msa_ld1_s32(__a) (*((v2i32*)(__a)))
#define msa_ld1_s64(__a) (*((v1i64*)(__a)))
#define msa_ld1_u8(__a) (*((v8u8*)(__a)))
#define msa_ld1_u16(__a) (*((v4u16*)(__a)))
#define msa_ld1_u32(__a) (*((v2u32*)(__a)))
#define msa_ld1_u64(__a) (*((v1u64*)(__a)))
#define msa_ld1_f32(__a) (*((v2f32*)(__a)))
#define msa_ld1_f64(__a) (*((v1f64*)(__a)))
/* Load values from the given memory address to a 128-bit vector */
#define msa_ld1q_s8(__a) ((v16i8)__builtin_msa_ld_b(__a, 0))
#define msa_ld1q_s16(__a) ((v8i16)__builtin_msa_ld_h(__a, 0))
#define msa_ld1q_s32(__a) ((v4i32)__builtin_msa_ld_w(__a, 0))
#define msa_ld1q_s64(__a) ((v2i64)__builtin_msa_ld_d(__a, 0))
#define msa_ld1q_u8(__a) ((v16u8)__builtin_msa_ld_b(__a, 0))
#define msa_ld1q_u16(__a) ((v8u16)__builtin_msa_ld_h(__a, 0))
#define msa_ld1q_u32(__a) ((v4u32)__builtin_msa_ld_w(__a, 0))
#define msa_ld1q_u64(__a) ((v2u64)__builtin_msa_ld_d(__a, 0))
#define msa_ld1q_f32(__a) ((v4f32)__builtin_msa_ld_w(__a, 0))
#define msa_ld1q_f64(__a) ((v2f64)__builtin_msa_ld_d(__a, 0))
/* Store 64bits vector elements values to the given memory address. */
#define msa_st1_s8(__a, __b) (*((v8i8*)(__a)) = __b)
#define msa_st1_s16(__a, __b) (*((v4i16*)(__a)) = __b)
#define msa_st1_s32(__a, __b) (*((v2i32*)(__a)) = __b)
#define msa_st1_s64(__a, __b) (*((v1i64*)(__a)) = __b)
#define msa_st1_u8(__a, __b) (*((v8u8*)(__a)) = __b)
#define msa_st1_u16(__a, __b) (*((v4u16*)(__a)) = __b)
#define msa_st1_u32(__a, __b) (*((v2u32*)(__a)) = __b)
#define msa_st1_u64(__a, __b) (*((v1u64*)(__a)) = __b)
#define msa_st1_f32(__a, __b) (*((v2f32*)(__a)) = __b)
#define msa_st1_f64(__a, __b) (*((v1f64*)(__a)) = __b)
/* Store the values of elements in the 128 bits vector __a to the given memory address __a. */
#define msa_st1q_s8(__a, __b) (__builtin_msa_st_b((v16i8)(__b), __a, 0))
#define msa_st1q_s16(__a, __b) (__builtin_msa_st_h((v8i16)(__b), __a, 0))
#define msa_st1q_s32(__a, __b) (__builtin_msa_st_w((v4i32)(__b), __a, 0))
#define msa_st1q_s64(__a, __b) (__builtin_msa_st_d((v2i64)(__b), __a, 0))
#define msa_st1q_u8(__a, __b) (__builtin_msa_st_b((v16i8)(__b), __a, 0))
#define msa_st1q_u16(__a, __b) (__builtin_msa_st_h((v8i16)(__b), __a, 0))
#define msa_st1q_u32(__a, __b) (__builtin_msa_st_w((v4i32)(__b), __a, 0))
#define msa_st1q_u64(__a, __b) (__builtin_msa_st_d((v2i64)(__b), __a, 0))
#define msa_st1q_f32(__a, __b) (__builtin_msa_st_w((v4i32)(__b), __a, 0))
#define msa_st1q_f64(__a, __b) (__builtin_msa_st_d((v2i64)(__b), __a, 0))
/* Store the value of the element with the index __c in vector __a to the given memory address __a. */
#define msa_st1_lane_s8(__a, __b, __c) (*((int8_t*)(__a)) = __b[__c])
#define msa_st1_lane_s16(__a, __b, __c) (*((int16_t*)(__a)) = __b[__c])
#define msa_st1_lane_s32(__a, __b, __c) (*((int32_t*)(__a)) = __b[__c])
#define msa_st1_lane_s64(__a, __b, __c) (*((int64_t*)(__a)) = __b[__c])
#define msa_st1_lane_u8(__a, __b, __c) (*((uint8_t*)(__a)) = __b[__c])
#define msa_st1_lane_u16(__a, __b, __c) (*((uint16_t*)(__a)) = __b[__c])
#define msa_st1_lane_u32(__a, __b, __c) (*((uint32_t*)(__a)) = __b[__c])
#define msa_st1_lane_u64(__a, __b, __c) (*((uint64_t*)(__a)) = __b[__c])
#define msa_st1_lane_f32(__a, __b, __c) (*((float*)(__a)) = __b[__c])
#define msa_st1_lane_f64(__a, __b, __c) (*((double*)(__a)) = __b[__c])
#define msa_st1q_lane_s8(__a, __b, __c) (*((int8_t*)(__a)) = (int8_t)__builtin_msa_copy_s_b(__b, __c))
#define msa_st1q_lane_s16(__a, __b, __c) (*((int16_t*)(__a)) = (int16_t)__builtin_msa_copy_s_h(__b, __c))
#define msa_st1q_lane_s32(__a, __b, __c) (*((int32_t*)(__a)) = __builtin_msa_copy_s_w(__b, __c))
#define msa_st1q_lane_s64(__a, __b, __c) (*((int64_t*)(__a)) = __builtin_msa_copy_s_d(__b, __c))
#define msa_st1q_lane_u8(__a, __b, __c) (*((uint8_t*)(__a)) = (uint8_t)__builtin_msa_copy_u_b((v16i8)(__b), __c))
#define msa_st1q_lane_u16(__a, __b, __c) (*((uint16_t*)(__a)) = (uint16_t)__builtin_msa_copy_u_h((v8i16)(__b), __c))
#define msa_st1q_lane_u32(__a, __b, __c) (*((uint32_t*)(__a)) = __builtin_msa_copy_u_w((v4i32)(__b), __c))
#define msa_st1q_lane_u64(__a, __b, __c) (*((uint64_t*)(__a)) = __builtin_msa_copy_u_d((v2i64)(__b), __c))
#define msa_st1q_lane_f32(__a, __b, __c) (*((float*)(__a)) = __b[__c])
#define msa_st1q_lane_f64(__a, __b, __c) (*((double*)(__a)) = __b[__c])
/* Duplicate elements for 64-bit doubleword vectors */
#define msa_dup_n_s8(__a) ((v8i8)__builtin_msa_copy_s_d((v2i64)__builtin_msa_fill_b((int32_t)(__a)), 0))
#define msa_dup_n_s16(__a) ((v4i16)__builtin_msa_copy_s_d((v2i64)__builtin_msa_fill_h((int32_t)(__a)), 0))
#define msa_dup_n_s32(__a) ((v2i32){__a, __a})
#define msa_dup_n_s64(__a) ((v1i64){__a})
#define msa_dup_n_u8(__a) ((v8u8)__builtin_msa_copy_u_d((v2i64)__builtin_msa_fill_b((int32_t)(__a)), 0))
#define msa_dup_n_u16(__a) ((v4u16)__builtin_msa_copy_u_d((v2i64)__builtin_msa_fill_h((int32_t)(__a)), 0))
#define msa_dup_n_u32(__a) ((v2u32){__a, __a})
#define msa_dup_n_u64(__a) ((v1u64){__a})
#define msa_dup_n_f32(__a) ((v2f32){__a, __a})
#define msa_dup_n_f64(__a) ((v1f64){__a})
/* Duplicate elements for 128-bit quadword vectors */
#define msa_dupq_n_s8(__a) (__builtin_msa_fill_b((int32_t)(__a)))
#define msa_dupq_n_s16(__a) (__builtin_msa_fill_h((int32_t)(__a)))
#define msa_dupq_n_s32(__a) (__builtin_msa_fill_w((int32_t)(__a)))
#define msa_dupq_n_s64(__a) (__builtin_msa_fill_d((int64_t)(__a)))
#define msa_dupq_n_u8(__a) ((v16u8)__builtin_msa_fill_b((int32_t)(__a)))
#define msa_dupq_n_u16(__a) ((v8u16)__builtin_msa_fill_h((int32_t)(__a)))
#define msa_dupq_n_u32(__a) ((v4u32)__builtin_msa_fill_w((int32_t)(__a)))
#define msa_dupq_n_u64(__a) ((v2u64)__builtin_msa_fill_d((int64_t)(__a)))
#define msa_dupq_n_f32(__a) ((v4f32){__a, __a, __a, __a})
#define msa_dupq_n_f64(__a) ((v2f64){__a, __a})
#define msa_dupq_lane_s8(__a, __b) (__builtin_msa_splat_b(__a, __b))
#define msa_dupq_lane_s16(__a, __b) (__builtin_msa_splat_h(__a, __b))
#define msa_dupq_lane_s32(__a, __b) (__builtin_msa_splat_w(__a, __b))
#define msa_dupq_lane_s64(__a, __b) (__builtin_msa_splat_d(__a, __b))
#define msa_dupq_lane_u8(__a, __b) ((v16u8)__builtin_msa_splat_b((v16i8)(__a), __b))
#define msa_dupq_lane_u16(__a, __b) ((v8u16)__builtin_msa_splat_h((v8i16)(__a), __b))
#define msa_dupq_lane_u32(__a, __b) ((v4u32)__builtin_msa_splat_w((v4i32)(__a), __b))
#define msa_dupq_lane_u64(__a, __b) ((v2u64)__builtin_msa_splat_d((v2i64)(__a), __b))
/* Create a 64 bits vector */
#define msa_create_s8(__a) ((v8i8)((uint64_t)(__a)))
#define msa_create_s16(__a) ((v4i16)((uint64_t)(__a)))
#define msa_create_s32(__a) ((v2i32)((uint64_t)(__a)))
#define msa_create_s64(__a) ((v1i64)((uint64_t)(__a)))
#define msa_create_u8(__a) ((v8u8)((uint64_t)(__a)))
#define msa_create_u16(__a) ((v4u16)((uint64_t)(__a)))
#define msa_create_u32(__a) ((v2u32)((uint64_t)(__a)))
#define msa_create_u64(__a) ((v1u64)((uint64_t)(__a)))
#define msa_create_f32(__a) ((v2f32)((uint64_t)(__a)))
#define msa_create_f64(__a) ((v1f64)((uint64_t)(__a)))
/* Sign extends or zero extends each element in a 64 bits vector to twice its original length, and places the results in a 128 bits vector. */
/*Transform v8i8 to v8i16*/
#define msa_movl_s8(__a) \
((v8i16){(__a)[0], (__a)[1], (__a)[2], (__a)[3], \
(__a)[4], (__a)[5], (__a)[6], (__a)[7]})
/*Transform v8u8 to v8u16*/
#define msa_movl_u8(__a) \
((v8u16){(__a)[0], (__a)[1], (__a)[2], (__a)[3], \
(__a)[4], (__a)[5], (__a)[6], (__a)[7]})
/*Transform v4i16 to v8i16*/
#define msa_movl_s16(__a) ((v4i32){(__a)[0], (__a)[1], (__a)[2], (__a)[3]})
/*Transform v2i32 to v4i32*/
#define msa_movl_s32(__a) ((v2i64){(__a)[0], (__a)[1]})
/*Transform v4u16 to v8u16*/
#define msa_movl_u16(__a) ((v4u32){(__a)[0], (__a)[1], (__a)[2], (__a)[3]})
/*Transform v2u32 to v4u32*/
#define msa_movl_u32(__a) ((v2u64){(__a)[0], (__a)[1]})
/* Copies the least significant half of each element of a 128 bits vector into the corresponding elements of a 64 bits vector. */
#define msa_movn_s16(__a) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)(__a)); \
(v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_movn_s32(__a) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)(__a)); \
(v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_movn_s64(__a) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)(__a)); \
(v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_movn_u16(__a) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)(__a)); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_movn_u32(__a) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)(__a)); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_movn_u64(__a) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)(__a)); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
/* qmovn */
#define msa_qmovn_s16(__a) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_s_h((v8i16)(__a), 7)); \
(v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_qmovn_s32(__a) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_s_w((v4i32)(__a), 15)); \
(v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_qmovn_s64(__a) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_s_d((v2i64)(__a), 31)); \
(v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_qmovn_u16(__a) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_u_h((v8u16)(__a), 7)); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_qmovn_u32(__a) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_u_w((v4u32)(__a), 15)); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_qmovn_u64(__a) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_u_d((v2u64)(__a), 31)); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
/* qmovun */
#define msa_qmovun_s16(__a) \
({ \
v8i16 __d = __builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)); \
v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_u_h((v8u16)__d, 7)); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
#define msa_qmovun_s32(__a) \
({ \
v4i32 __d = __builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a)); \
v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_u_w((v4u32)__d, 15)); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
#define msa_qmovun_s64(__a) \
({ \
v2i64 __d = __builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a)); \
v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
/* Right shift elements in a 128 bits vector by an immediate value, and places the results in a 64 bits vector. */
#define msa_shrn_n_s16(__a, __b) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srai_h((v8i16)(__a), (int)(__b))); \
(v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_shrn_n_s32(__a, __b) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srai_w((v4i32)(__a), (int)(__b))); \
(v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_shrn_n_s64(__a, __b) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srai_d((v2i64)(__a), (int)(__b))); \
(v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_shrn_n_u16(__a, __b) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srli_h((v8i16)(__a), (int)(__b))); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_shrn_n_u32(__a, __b) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srli_w((v4i32)(__a), (int)(__b))); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_shrn_n_u64(__a, __b) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srli_d((v2i64)(__a), (int)(__b))); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
/* Right shift elements in a 128 bits vector by an immediate value, and places the results in a 64 bits vector. */
#define msa_rshrn_n_s16(__a, __b) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srari_h((v8i16)(__a), (int)__b)); \
(v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_rshrn_n_s32(__a, __b) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srari_w((v4i32)(__a), (int)__b)); \
(v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_rshrn_n_s64(__a, __b) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srari_d((v2i64)(__a), (int)__b)); \
(v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \
})
#define msa_rshrn_n_u16(__a, __b) \
({ \
v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srlri_h((v8i16)(__a), (int)__b)); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_rshrn_n_u32(__a, __b) \
({ \
v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srlri_w((v4i32)(__a), (int)__b)); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
#define msa_rshrn_n_u64(__a, __b) \
({ \
v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srlri_d((v2i64)(__a), (int)__b)); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \
})
/* Right shift elements in a 128 bits vector by an immediate value, saturate the results and them in a 64 bits vector. */
#define msa_qrshrn_n_s16(__a, __b) \
({ \
v8i16 __d = __builtin_msa_sat_s_h(__builtin_msa_srari_h((v8i16)(__a), (int)(__b)), 7); \
v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__d); \
(v8i8)__builtin_msa_copy_s_d((v2i64)__e, 0); \
})
#define msa_qrshrn_n_s32(__a, __b) \
({ \
v4i32 __d = __builtin_msa_sat_s_w(__builtin_msa_srari_w((v4i32)(__a), (int)(__b)), 15); \
v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__d); \
(v4i16)__builtin_msa_copy_s_d((v2i64)__e, 0); \
})
#define msa_qrshrn_n_s64(__a, __b) \
({ \
v2i64 __d = __builtin_msa_sat_s_d(__builtin_msa_srari_d((v2i64)(__a), (int)(__b)), 31); \
v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__d); \
(v2i32)__builtin_msa_copy_s_d((v2i64)__e, 0); \
})
#define msa_qrshrn_n_u16(__a, __b) \
({ \
v8u16 __d = __builtin_msa_sat_u_h((v8u16)__builtin_msa_srlri_h((v8i16)(__a), (int)(__b)), 7); \
v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__d); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
#define msa_qrshrn_n_u32(__a, __b) \
({ \
v4u32 __d = __builtin_msa_sat_u_w((v4u32)__builtin_msa_srlri_w((v4i32)(__a), (int)(__b)), 15); \
v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__d); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
#define msa_qrshrn_n_u64(__a, __b) \
({ \
v2u64 __d = __builtin_msa_sat_u_d((v2u64)__builtin_msa_srlri_d((v2i64)(__a), (int)(__b)), 31); \
v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__d); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
/* Right shift elements in a 128 bits vector by an immediate value, saturate the results and them in a 64 bits vector.
Input is signed and output is unsigned. */
#define msa_qrshrun_n_s16(__a, __b) \
({ \
v8i16 __d = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)), (int)(__b)); \
v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_u_h((v8u16)__d, 7)); \
(v8u8)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
#define msa_qrshrun_n_s32(__a, __b) \
({ \
v4i32 __d = __builtin_msa_srlri_w(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a)), (int)(__b)); \
v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_u_w((v4u32)__d, 15)); \
(v4u16)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
#define msa_qrshrun_n_s64(__a, __b) \
({ \
v2i64 __d = __builtin_msa_srlri_d(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a)), (int)(__b)); \
v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \
(v2u32)__builtin_msa_copy_u_d((v2i64)__e, 0); \
})
/* pack */
#define msa_pack_s16(__a, __b) (__builtin_msa_pckev_b((v16i8)(__b), (v16i8)(__a)))
#define msa_pack_s32(__a, __b) (__builtin_msa_pckev_h((v8i16)(__b), (v8i16)(__a)))
#define msa_pack_s64(__a, __b) (__builtin_msa_pckev_w((v4i32)(__b), (v4i32)(__a)))
#define msa_pack_u16(__a, __b) ((v16u8)__builtin_msa_pckev_b((v16i8)(__b), (v16i8)(__a)))
#define msa_pack_u32(__a, __b) ((v8u16)__builtin_msa_pckev_h((v8i16)(__b), (v8i16)(__a)))
#define msa_pack_u64(__a, __b) ((v4u32)__builtin_msa_pckev_w((v4i32)(__b), (v4i32)(__a)))
/* qpack */
#define msa_qpack_s16(__a, __b) \
(__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_s_h((v8i16)(__b), 7), (v16i8)__builtin_msa_sat_s_h((v8i16)(__a), 7)))
#define msa_qpack_s32(__a, __b) \
(__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_s_w((v4i32)(__b), 15), (v8i16)__builtin_msa_sat_s_w((v4i32)(__a), 15)))
#define msa_qpack_s64(__a, __b) \
(__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_s_d((v2i64)(__b), 31), (v4i32)__builtin_msa_sat_s_d((v2i64)(__a), 31)))
#define msa_qpack_u16(__a, __b) \
((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)(__b), 7), (v16i8)__builtin_msa_sat_u_h((v8u16)(__a), 7)))
#define msa_qpack_u32(__a, __b) \
((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)(__b), 15), (v8i16)__builtin_msa_sat_u_w((v4u32)(__a), 15)))
#define msa_qpack_u64(__a, __b) \
((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)(__b), 31), (v4i32)__builtin_msa_sat_u_d((v2u64)(__a), 31)))
/* qpacku */
#define msa_qpacku_s16(__a, __b) \
((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__b))), 7), \
(v16i8)__builtin_msa_sat_u_h((v8u16)(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a))), 7)))
#define msa_qpacku_s32(__a, __b) \
((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__b))), 15), \
(v8i16)__builtin_msa_sat_u_w((v4u32)(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a))), 15)))
#define msa_qpacku_s64(__a, __b) \
((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__b))), 31), \
(v4i32)__builtin_msa_sat_u_d((v2u64)(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a))), 31)))
/* packr */
#define msa_packr_s16(__a, __b, __c) \
(__builtin_msa_pckev_b((v16i8)__builtin_msa_srai_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srai_h((v8i16)(__a), (int)(__c))))
#define msa_packr_s32(__a, __b, __c) \
(__builtin_msa_pckev_h((v8i16)__builtin_msa_srai_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srai_w((v4i32)(__a), (int)(__c))))
#define msa_packr_s64(__a, __b, __c) \
(__builtin_msa_pckev_w((v4i32)__builtin_msa_srai_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srai_d((v2i64)(__a), (int)(__c))))
#define msa_packr_u16(__a, __b, __c) \
((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_srli_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srli_h((v8i16)(__a), (int)(__c))))
#define msa_packr_u32(__a, __b, __c) \
((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_srli_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srli_w((v4i32)(__a), (int)(__c))))
#define msa_packr_u64(__a, __b, __c) \
((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_srli_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srli_d((v2i64)(__a), (int)(__c))))
/* rpackr */
#define msa_rpackr_s16(__a, __b, __c) \
(__builtin_msa_pckev_b((v16i8)__builtin_msa_srari_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srari_h((v8i16)(__a), (int)(__c))))
#define msa_rpackr_s32(__a, __b, __c) \
(__builtin_msa_pckev_h((v8i16)__builtin_msa_srari_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srari_w((v4i32)(__a), (int)(__c))))
#define msa_rpackr_s64(__a, __b, __c) \
(__builtin_msa_pckev_w((v4i32)__builtin_msa_srari_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srari_d((v2i64)(__a), (int)(__c))))
#define msa_rpackr_u16(__a, __b, __c) \
((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_srlri_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srlri_h((v8i16)(__a), (int)(__c))))
#define msa_rpackr_u32(__a, __b, __c) \
((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_srlri_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srlri_w((v4i32)(__a), (int)(__c))))
#define msa_rpackr_u64(__a, __b, __c) \
((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_srlri_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srlri_d((v2i64)(__a), (int)(__c))))
/* qrpackr */
#define msa_qrpackr_s16(__a, __b, __c) \
(__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_s_h(__builtin_msa_srari_h((v8i16)(__b), (int)(__c)), 7), \
(v16i8)__builtin_msa_sat_s_h(__builtin_msa_srari_h((v8i16)(__a), (int)(__c)), 7)))
#define msa_qrpackr_s32(__a, __b, __c) \
(__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_s_w(__builtin_msa_srari_w((v4i32)(__b), (int)(__c)), 15), \
(v8i16)__builtin_msa_sat_s_w(__builtin_msa_srari_w((v4i32)(__a), (int)(__c)), 15)))
#define msa_qrpackr_s64(__a, __b, __c) \
(__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_s_d(__builtin_msa_srari_d((v2i64)(__b), (int)(__c)), 31), \
(v4i32)__builtin_msa_sat_s_d(__builtin_msa_srari_d((v2i64)(__a), (int)(__c)), 31)))
#define msa_qrpackr_u16(__a, __b, __c) \
((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)__builtin_msa_srlri_h((v8i16)(__b), (int)(__c)), 7), \
(v16i8)__builtin_msa_sat_u_h((v8u16)__builtin_msa_srlri_h((v8i16)(__a), (int)(__c)), 7)))
#define msa_qrpackr_u32(__a, __b, __c) \
((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)__builtin_msa_srlri_w((v4i32)(__b), (int)(__c)), 15), \
(v8i16)__builtin_msa_sat_u_w((v4u32)__builtin_msa_srlri_w((v4i32)(__a), (int)(__c)), 15)))
#define msa_qrpackr_u64(__a, __b, __c) \
((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)__builtin_msa_srlri_d((v2i64)(__b), (int)(__c)), 31), \
(v4i32)__builtin_msa_sat_u_d((v2u64)__builtin_msa_srlri_d((v2i64)(__a), (int)(__c)), 31)))
/* qrpackru */
#define msa_qrpackru_s16(__a, __b, __c) \
({ \
v8i16 __d = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)), (int)(__c)); \
v8i16 __e = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__b)), (int)(__c)); \
(v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)__e, 7), (v16i8)__builtin_msa_sat_u_h((v8u16)__d, 7)); \
})
#define msa_qrpackru_s32(__a, __b, __c) \
({ \
v4i32 __d = __builtin_msa_srlri_w(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a)), (int)(__c)); \
v4i32 __e = __builtin_msa_srlri_w(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__b)), (int)(__c)); \
(v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)__e, 15), (v8i16)__builtin_msa_sat_u_w((v4u32)__d, 15)); \
})
#define msa_qrpackru_s64(__a, __b, __c) \
({ \
v2i64 __d = __builtin_msa_srlri_d(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a)), (int)(__c)); \
v2i64 __e = __builtin_msa_srlri_d(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__b)), (int)(__c)); \
(v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)__e, 31), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \
})
/* Minimum values between corresponding elements in the two vectors are written to the returned vector. */
/* Unsigned variants cast the builtin's signed return type back to the
   unsigned vector type; arguments are passed straight through, so callers
   must supply the correct vector type. */
#define msa_minq_s8(__a, __b) (__builtin_msa_min_s_b(__a, __b))
#define msa_minq_s16(__a, __b) (__builtin_msa_min_s_h(__a, __b))
#define msa_minq_s32(__a, __b) (__builtin_msa_min_s_w(__a, __b))
#define msa_minq_s64(__a, __b) (__builtin_msa_min_s_d(__a, __b))
#define msa_minq_u8(__a, __b) ((v16u8)__builtin_msa_min_u_b(__a, __b))
#define msa_minq_u16(__a, __b) ((v8u16)__builtin_msa_min_u_h(__a, __b))
#define msa_minq_u32(__a, __b) ((v4u32)__builtin_msa_min_u_w(__a, __b))
#define msa_minq_u64(__a, __b) ((v2u64)__builtin_msa_min_u_d(__a, __b))
#define msa_minq_f32(__a, __b) (__builtin_msa_fmin_w(__a, __b))
#define msa_minq_f64(__a, __b) (__builtin_msa_fmin_d(__a, __b))
/* Maximum values between corresponding elements in the two vectors are written to the returned vector. */
#define msa_maxq_s8(__a, __b) (__builtin_msa_max_s_b(__a, __b))
#define msa_maxq_s16(__a, __b) (__builtin_msa_max_s_h(__a, __b))
#define msa_maxq_s32(__a, __b) (__builtin_msa_max_s_w(__a, __b))
#define msa_maxq_s64(__a, __b) (__builtin_msa_max_s_d(__a, __b))
#define msa_maxq_u8(__a, __b) ((v16u8)__builtin_msa_max_u_b(__a, __b))
#define msa_maxq_u16(__a, __b) ((v8u16)__builtin_msa_max_u_h(__a, __b))
#define msa_maxq_u32(__a, __b) ((v4u32)__builtin_msa_max_u_w(__a, __b))
#define msa_maxq_u64(__a, __b) ((v2u64)__builtin_msa_max_u_d(__a, __b))
#define msa_maxq_f32(__a, __b) (__builtin_msa_fmax_w(__a, __b))
#define msa_maxq_f64(__a, __b) (__builtin_msa_fmax_d(__a, __b))
/* Vector type reinterpretation: a plain bit-preserving cast between GCC
   vector types (no data conversion). */
#define MSA_TPV_REINTERPRET(_Tpv, Vec) ((_Tpv)(Vec))
/* Add the odd elements in vector __a with the even elements in vector __b to double width elements in the returned vector. */
/* v8i16 msa_hadd_s16 ((v16i8)__a, (v16i8)__b) */
#define msa_hadd_s16(__a, __b) (__builtin_msa_hadd_s_h((v16i8)(__a), (v16i8)(__b)))
/* v4i32 msa_hadd_s32 ((v8i16)__a, (v8i16)__b) */
#define msa_hadd_s32(__a, __b) (__builtin_msa_hadd_s_w((v8i16)(__a), (v8i16)(__b)))
/* v2i64 msa_hadd_s64 ((v4i32)__a, (v4i32)__b) */
#define msa_hadd_s64(__a, __b) (__builtin_msa_hadd_s_d((v4i32)(__a), (v4i32)(__b)))
/* Copy even elements in __a to the left half and even elements in __b to the right half and return the result vector. */
#define msa_pckev_s8(__a, __b) (__builtin_msa_pckev_b((v16i8)(__a), (v16i8)(__b)))
#define msa_pckev_s16(__a, __b) (__builtin_msa_pckev_h((v8i16)(__a), (v8i16)(__b)))
#define msa_pckev_s32(__a, __b) (__builtin_msa_pckev_w((v4i32)(__a), (v4i32)(__b)))
#define msa_pckev_s64(__a, __b) (__builtin_msa_pckev_d((v2i64)(__a), (v2i64)(__b)))
/* Copy odd elements in __a to the left half and odd elements in __b to the right half and return the result vector. */
#define msa_pckod_s8(__a, __b) (__builtin_msa_pckod_b((v16i8)(__a), (v16i8)(__b)))
#define msa_pckod_s16(__a, __b) (__builtin_msa_pckod_h((v8i16)(__a), (v8i16)(__b)))
#define msa_pckod_s32(__a, __b) (__builtin_msa_pckod_w((v4i32)(__a), (v4i32)(__b)))
#define msa_pckod_s64(__a, __b) (__builtin_msa_pckod_d((v2i64)(__a), (v2i64)(__b)))
/* Lane-index helpers: clamp the requested lane into range with a bit mask
   and, on big-endian targets (_MIPSEB), mirror the index so that lane
   numbering matches the little-endian view. */
#ifdef _MIPSEB
#define LANE_IMM0_1(x) (0b1 - ((x) & 0b1))
#define LANE_IMM0_3(x) (0b11 - ((x) & 0b11))
#define LANE_IMM0_7(x) (0b111 - ((x) & 0b111))
#define LANE_IMM0_15(x) (0b1111 - ((x) & 0b1111))
#else
#define LANE_IMM0_1(x) ((x) & 0b1)
#define LANE_IMM0_3(x) ((x) & 0b11)
#define LANE_IMM0_7(x) ((x) & 0b111)
#define LANE_IMM0_15(x) ((x) & 0b1111)
#endif
/* Extract one scalar lane.  msa_get_lane_* index the vector directly via
   subscripting; msa_getq_lane_* use the copy_u/copy_s builtins where one
   exists for the element size.
   NOTE(review): msa_get_lane_f32 uses LANE_IMM0_3 while the other 32-bit
   get_lane macros use LANE_IMM0_1 — confirm the intended operand width
   against the callers before changing anything. */
#define msa_get_lane_u8(__a, __b) ((uint8_t)(__a)[LANE_IMM0_7(__b)])
#define msa_get_lane_s8(__a, __b) ((int8_t)(__a)[LANE_IMM0_7(__b)])
#define msa_get_lane_u16(__a, __b) ((uint16_t)(__a)[LANE_IMM0_3(__b)])
#define msa_get_lane_s16(__a, __b) ((int16_t)(__a)[LANE_IMM0_3(__b)])
#define msa_get_lane_u32(__a, __b) ((uint32_t)(__a)[LANE_IMM0_1(__b)])
#define msa_get_lane_s32(__a, __b) ((int32_t)(__a)[LANE_IMM0_1(__b)])
#define msa_get_lane_f32(__a, __b) ((float)(__a)[LANE_IMM0_3(__b)])
#define msa_get_lane_s64(__a, __b) ((int64_t)(__a)[LANE_IMM0_1(__b)])
#define msa_get_lane_u64(__a, __b) ((uint64_t)(__a)[LANE_IMM0_1(__b)])
#define msa_get_lane_f64(__a, __b) ((double)(__a)[LANE_IMM0_1(__b)])
/* Full-width (128-bit) lane extraction; float/double lanes are read by
   subscript because MSA has no float copy builtin. */
#define msa_getq_lane_u8(__a, imm0_15) ((uint8_t)__builtin_msa_copy_u_b((v16i8)(__a), imm0_15))
#define msa_getq_lane_s8(__a, imm0_15) ((int8_t)__builtin_msa_copy_s_b(__a, imm0_15))
#define msa_getq_lane_u16(__a, imm0_7) ((uint16_t)__builtin_msa_copy_u_h((v8i16)(__a), imm0_7))
#define msa_getq_lane_s16(__a, imm0_7) ((int16_t)__builtin_msa_copy_s_h(__a, imm0_7))
#define msa_getq_lane_u32(__a, imm0_3) __builtin_msa_copy_u_w((v4i32)(__a), imm0_3)
#define msa_getq_lane_s32 __builtin_msa_copy_s_w
#define msa_getq_lane_f32(__a, __b) ((float)(__a)[LANE_IMM0_3(__b)])
#define msa_getq_lane_f64(__a, __b) ((double)(__a)[LANE_IMM0_1(__b)])
/* copy_u_d/copy_s_d only exist on 64-bit MIPS; fall back to subscripting
   on 32-bit targets. */
#if (__mips == 64)
#define msa_getq_lane_u64(__a, imm0_1) __builtin_msa_copy_u_d((v2i64)(__a), imm0_1)
#define msa_getq_lane_s64 __builtin_msa_copy_s_d
#else
#define msa_getq_lane_u64(__a, imm0_1) ((uint64_t)(__a)[LANE_IMM0_1(imm0_1)])
#define msa_getq_lane_s64(__a, imm0_1) ((int64_t)(__a)[LANE_IMM0_1(imm0_1)])
#endif
/* combine: concatenate two 64-bit (half) vectors into one 128-bit vector,
   __a in the low half and __b in the high half.  On 64-bit MIPS the halves
   are moved as single u64 lanes; on 32-bit targets as pairs of u32 lanes. */
#if (__mips == 64)
#define __COMBINE_64_64(__TYPE, a, b) ((__TYPE)((v2u64){((v1u64)(a))[0], ((v1u64)(b))[0]}))
#else
#define __COMBINE_64_64(__TYPE, a, b) ((__TYPE)((v4u32){((v2u32)(a))[0], ((v2u32)(a))[1], \
((v2u32)(b))[0], ((v2u32)(b))[1]}))
#endif
/* v16i8 msa_combine_s8 (v8i8 __a, v8i8 __b) */
#define msa_combine_s8(__a, __b) __COMBINE_64_64(v16i8, __a, __b)
/* v8i16 msa_combine_s16(v4i16 __a, v4i16 __b) */
#define msa_combine_s16(__a, __b) __COMBINE_64_64(v8i16, __a, __b)
/* v4i32 msa_combine_s32(v2i32 __a, v2i32 __b) */
#define msa_combine_s32(__a, __b) __COMBINE_64_64(v4i32, __a, __b)
/* v2i64 msa_combine_s64(v1i64 __a, v1i64 __b) */
#define msa_combine_s64(__a, __b) __COMBINE_64_64(v2i64, __a, __b)
/* v4f32 msa_combine_f32(v2f32 __a, v2f32 __b) */
#define msa_combine_f32(__a, __b) __COMBINE_64_64(v4f32, __a, __b)
/* v16u8 msa_combine_u8(v8u8 __a, v8u8 __b) */
#define msa_combine_u8(__a, __b) __COMBINE_64_64(v16u8, __a, __b)
/* v8u16 msa_combine_u16(v4u16 __a, v4u16 __b) */
#define msa_combine_u16(__a, __b) __COMBINE_64_64(v8u16, __a, __b)
/* v4u32 msa_combine_u32(v2u32 __a, v2u32 __b) */
#define msa_combine_u32(__a, __b) __COMBINE_64_64(v4u32, __a, __b)
/* v2u64 msa_combine_u64(v1u64 __a, v1u64 __b) */
#define msa_combine_u64(__a, __b) __COMBINE_64_64(v2u64, __a, __b)
/* v2f64 msa_combine_f64(v1f64 __a, v1f64 __b) */
#define msa_combine_f64(__a, __b) __COMBINE_64_64(v2f64, __a, __b)
/* get_low, get_high: extract the low/high 64-bit half of a 128-bit vector
   as a half-width vector.  On 64-bit MIPS the half is pulled out with
   copy_u_d; on 32-bit targets by subscripting the v2u64 view. */
#if (__mips == 64)
#define __GET_LOW(__TYPE, a) ((__TYPE)((v1u64)(__builtin_msa_copy_u_d((v2i64)(a), 0))))
#define __GET_HIGH(__TYPE, a) ((__TYPE)((v1u64)(__builtin_msa_copy_u_d((v2i64)(a), 1))))
#else
#define __GET_LOW(__TYPE, a) ((__TYPE)(((v2u64)(a))[0]))
#define __GET_HIGH(__TYPE, a) ((__TYPE)(((v2u64)(a))[1]))
#endif
/* v8i8 msa_get_low_s8(v16i8 __a) */
#define msa_get_low_s8(__a) __GET_LOW(v8i8, __a)
/* v4i16 msa_get_low_s16(v8i16 __a) */
#define msa_get_low_s16(__a) __GET_LOW(v4i16, __a)
/* v2i32 msa_get_low_s32(v4i32 __a) */
#define msa_get_low_s32(__a) __GET_LOW(v2i32, __a)
/* v1i64 msa_get_low_s64(v2i64 __a) */
#define msa_get_low_s64(__a) __GET_LOW(v1i64, __a)
/* v8u8 msa_get_low_u8(v16u8 __a) */
#define msa_get_low_u8(__a) __GET_LOW(v8u8, __a)
/* v4u16 msa_get_low_u16(v8u16 __a) */
#define msa_get_low_u16(__a) __GET_LOW(v4u16, __a)
/* v2u32 msa_get_low_u32(v4u32 __a) */
#define msa_get_low_u32(__a) __GET_LOW(v2u32, __a)
/* v1u64 msa_get_low_u64(v2u64 __a) */
#define msa_get_low_u64(__a) __GET_LOW(v1u64, __a)
/* v2f32 msa_get_low_f32(v4f32 __a) */
#define msa_get_low_f32(__a) __GET_LOW(v2f32, __a)
/* v1f64 msa_get_low_f64(v2f64 __a) */
#define msa_get_low_f64(__a) __GET_LOW(v1f64, __a)
/* v8i8 msa_get_high_s8(v16i8 __a) */
#define msa_get_high_s8(__a) __GET_HIGH(v8i8, __a)
/* v4i16 msa_get_high_s16(v8i16 __a) */
#define msa_get_high_s16(__a) __GET_HIGH(v4i16, __a)
/* v2i32 msa_get_high_s32(v4i32 __a) */
#define msa_get_high_s32(__a) __GET_HIGH(v2i32, __a)
/* v1i64 msa_get_high_s64(v2i64 __a) */
#define msa_get_high_s64(__a) __GET_HIGH(v1i64, __a)
/* v8u8 msa_get_high_u8(v16u8 __a) */
#define msa_get_high_u8(__a) __GET_HIGH(v8u8, __a)
/* v4u16 msa_get_high_u16(v8u16 __a) */
#define msa_get_high_u16(__a) __GET_HIGH(v4u16, __a)
/* v2u32 msa_get_high_u32(v4u32 __a) */
#define msa_get_high_u32(__a) __GET_HIGH(v2u32, __a)
/* v1u64 msa_get_high_u64(v2u64 __a) */
#define msa_get_high_u64(__a) __GET_HIGH(v1u64, __a)
/* v2f32 msa_get_high_f32(v4f32 __a) */
#define msa_get_high_f32(__a) __GET_HIGH(v2f32, __a)
/* v1f64 msa_get_high_f64(v2f64 __a) */
#define msa_get_high_f64(__a) __GET_HIGH(v1f64, __a)
/* ri = ai * b[lane] */
/* v4f32 msa_mulq_lane_f32(v4f32 __a, v4f32 __b, const int __lane) */
/* Scalar-broadcast multiply: the extracted lane is implicitly splat across
   __a by the vector-scalar '*' operator. */
#define msa_mulq_lane_f32(__a, __b, __lane) ((__a) * msa_getq_lane_f32(__b, __lane))
/* ri = ai + bi * c[lane] */
/* v4f32 msa_mlaq_lane_f32(v4f32 __a, v4f32 __b, v4f32 __c, const int __lane) */
/* Multiply-accumulate by one lane of __c; __lane is evaluated once per use
   of msa_getq_lane_f32. */
#define msa_mlaq_lane_f32(__a, __b, __c, __lane) ((__a) + ((__b) * msa_getq_lane_f32(__c, __lane)))
/* Horizontal sums: repeatedly widen with pairwise hadd, then add the two
   final 64-bit lanes.  NOTE(review): several variants cast the result to a
   type wider than the comment suggests (msa_sum_s16 -> int32_t,
   msa_sum_s8 -> int16_t), presumably to avoid truncating the accumulated
   sum — confirm against the callers before "fixing" the casts. */
/* uint16_t msa_sum_u16(v8u16 __a)*/
#define msa_sum_u16(__a) \
({ \
v4u32 _b; \
v2u64 _c; \
_b = __builtin_msa_hadd_u_w(__a, __a); \
_c = __builtin_msa_hadd_u_d(_b, _b); \
(uint16_t)(_c[0] + _c[1]); \
})
/* int16_t msa_sum_s16(v8i16 __a) */
#define msa_sum_s16(__a) \
({ \
v4i32 _b; \
v2i64 _c; \
_b = __builtin_msa_hadd_s_w(__a, __a); \
_c = __builtin_msa_hadd_s_d(_b, _b); \
(int32_t)(_c[0] + _c[1]); \
})
/* uint32_t msa_sum_u32(v4u32 __a)*/
#define msa_sum_u32(__a) \
({ \
v2u64 _b; \
_b = __builtin_msa_hadd_u_d(__a, __a); \
(uint32_t)(_b[0] + _b[1]); \
})
/* int32_t msa_sum_s32(v4i32 __a)*/
#define msa_sum_s32(__a) \
({ \
v2i64 _b; \
_b = __builtin_msa_hadd_s_d(__a, __a); \
(int64_t)(_b[0] + _b[1]); \
})
/* uint8_t msa_sum_u8(v16u8 __a)*/
#define msa_sum_u8(__a) \
({ \
v8u16 _b16; \
v4u32 _c32; \
_b16 = __builtin_msa_hadd_u_h(__a, __a); \
_c32 = __builtin_msa_hadd_u_w(_b16, _b16); \
(uint8_t)msa_sum_u32(_c32); \
})
/* int8_t msa_sum_s8(v16i8 __a)*/
#define msa_sum_s8(__a) \
({ \
v8i16 _b16; \
v4i32 _c32; \
_b16 = __builtin_msa_hadd_s_h(__a, __a); \
_c32 = __builtin_msa_hadd_s_w(_b16, _b16); \
(int16_t)msa_sum_s32(_c32); \
})
/* float msa_sum_f32(v4f32 __a): plain left-to-right scalar addition of the
   four lanes (evaluation order matters for float rounding). */
#define msa_sum_f32(__a) ((__a)[0] + (__a)[1] + (__a)[2] + (__a)[3])
/* paddlq: pairwise add adjacent elements into double-width lanes
   (NEON vpaddl equivalent), implemented as hadd of the vector with itself. */
/* v8u16 msa_paddlq_u8(v16u8 __a) */
#define msa_paddlq_u8(__a) (__builtin_msa_hadd_u_h(__a, __a))
/* v8i16 msa_paddlq_s8(v16i8 __a) */
#define msa_paddlq_s8(__a) (__builtin_msa_hadd_s_h(__a, __a))
/* v4u32 msa_paddlq_u16 (v8u16 __a)*/
#define msa_paddlq_u16(__a) (__builtin_msa_hadd_u_w(__a, __a))
/* v4i32 msa_paddlq_s16 (v8i16 __a)*/
#define msa_paddlq_s16(__a) (__builtin_msa_hadd_s_w(__a, __a))
/* v2u64 msa_paddlq_u32(v4u32 __a) */
#define msa_paddlq_u32(__a) (__builtin_msa_hadd_u_d(__a, __a))
/* v2i64 msa_paddlq_s32(v4i32 __a) */
#define msa_paddlq_s32(__a) (__builtin_msa_hadd_s_d(__a, __a))
/* Element-wise widening initializer-list helpers: expand a half-width
   vector x into a brace initializer of double-width elements.  Note x is
   subscripted multiple times, so the argument must be a plain lvalue with
   no side effects. */
#define V8U8_2_V8U16(x) {(uint16_t)x[0], (uint16_t)x[1], (uint16_t)x[2], (uint16_t)x[3], \
(uint16_t)x[4], (uint16_t)x[5], (uint16_t)x[6], (uint16_t)x[7]}
#define V8U8_2_V8I16(x) {(int16_t)x[0], (int16_t)x[1], (int16_t)x[2], (int16_t)x[3], \
(int16_t)x[4], (int16_t)x[5], (int16_t)x[6], (int16_t)x[7]}
#define V8I8_2_V8I16(x) {(int16_t)x[0], (int16_t)x[1], (int16_t)x[2], (int16_t)x[3], \
(int16_t)x[4], (int16_t)x[5], (int16_t)x[6], (int16_t)x[7]}
#define V4U16_2_V4U32(x) {(uint32_t)x[0], (uint32_t)x[1], (uint32_t)x[2], (uint32_t)x[3]}
#define V4U16_2_V4I32(x) {(int32_t)x[0], (int32_t)x[1], (int32_t)x[2], (int32_t)x[3]}
#define V4I16_2_V4I32(x) {(int32_t)x[0], (int32_t)x[1], (int32_t)x[2], (int32_t)x[3]}
#define V2U32_2_V2U64(x) {(uint64_t)x[0], (uint64_t)x[1]}
#define V2U32_2_V2I64(x) {(int64_t)x[0], (int64_t)x[1]}
/* mull: widening multiply (NEON vmull equivalent) — widen both half-width
   inputs, then do a full-width element multiply. */
/* v8u16 msa_mull_u8(v8u8 __a, v8u8 __b) */
#define msa_mull_u8(__a, __b) ((v8u16)__builtin_msa_mulv_h((v8i16)V8U8_2_V8I16(__a), (v8i16)V8U8_2_V8I16(__b)))
/* v8i16 msa_mull_s8(v8i8 __a, v8i8 __b)*/
#define msa_mull_s8(__a, __b) (__builtin_msa_mulv_h((v8i16)V8I8_2_V8I16(__a), (v8i16)V8I8_2_V8I16(__b)))
/* v4u32 msa_mull_u16(v4u16 __a, v4u16 __b) */
#define msa_mull_u16(__a, __b) ((v4u32)__builtin_msa_mulv_w((v4i32)V4U16_2_V4I32(__a), (v4i32)V4U16_2_V4I32(__b)))
/* v4i32 msa_mull_s16(v4i16 __a, v4i16 __b) */
#define msa_mull_s16(__a, __b) (__builtin_msa_mulv_w((v4i32)V4I16_2_V4I32(__a), (v4i32)V4I16_2_V4I32(__b)))
/* v2u64 msa_mull_u32(v2u32 __a, v2u32 __b) */
#define msa_mull_u32(__a, __b) ((v2u64)__builtin_msa_mulv_d((v2i64)V2U32_2_V2I64(__a), (v2i64)V2U32_2_V2I64(__b)))
/* Bitwise ops operate on the raw 128 bits, so every element width maps to
   the same v16u8 builtin; only the result cast differs. */
/* bitwise and: __builtin_msa_and_v */
#define msa_andq_u8(__a, __b) ((v16u8)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_s8(__a, __b) ((v16i8)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_u16(__a, __b) ((v8u16)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_s16(__a, __b) ((v8i16)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_u32(__a, __b) ((v4u32)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_s32(__a, __b) ((v4i32)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_u64(__a, __b) ((v2u64)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
#define msa_andq_s64(__a, __b) ((v2i64)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b)))
/* bitwise or: __builtin_msa_or_v */
#define msa_orrq_u8(__a, __b) ((v16u8)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_s8(__a, __b) ((v16i8)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_u16(__a, __b) ((v8u16)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_s16(__a, __b) ((v8i16)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_u32(__a, __b) ((v4u32)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_s32(__a, __b) ((v4i32)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_u64(__a, __b) ((v2u64)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
#define msa_orrq_s64(__a, __b) ((v2i64)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b)))
/* bitwise xor: __builtin_msa_xor_v */
#define msa_eorq_u8(__a, __b) ((v16u8)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_s8(__a, __b) ((v16i8)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_u16(__a, __b) ((v8u16)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_s16(__a, __b) ((v8i16)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_u32(__a, __b) ((v4u32)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_s32(__a, __b) ((v4i32)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_u64(__a, __b) ((v2u64)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
#define msa_eorq_s64(__a, __b) ((v2i64)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b)))
/* bitwise not: v16u8 __builtin_msa_xori_b (v16u8, 0xff) — xor every byte
   with 0xFF. */
#define msa_mvnq_u8(__a) ((v16u8)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_s8(__a) ((v16i8)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_u16(__a) ((v8u16)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_s16(__a) ((v8i16)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_u32(__a) ((v4u32)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_s32(__a) ((v4i32)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_u64(__a) ((v2u64)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
#define msa_mvnq_s64(__a) ((v2i64)__builtin_msa_xori_b((v16u8)(__a), 0xFF))
/* Comparisons return all-ones per true lane, all-zeros per false lane.
   MSA has no cgt/cge instructions, so greater-than and greater-equal are
   synthesized by swapping the operands of clt/cle. */
/* compare equal: ceq -> ri = ai == bi ? 1...1:0...0 */
#define msa_ceqq_u8(__a, __b) ((v16u8)__builtin_msa_ceq_b((v16i8)(__a), (v16i8)(__b)))
#define msa_ceqq_s8(__a, __b) ((v16u8)__builtin_msa_ceq_b((v16i8)(__a), (v16i8)(__b)))
#define msa_ceqq_u16(__a, __b) ((v8u16)__builtin_msa_ceq_h((v8i16)(__a), (v8i16)(__b)))
#define msa_ceqq_s16(__a, __b) ((v8u16)__builtin_msa_ceq_h((v8i16)(__a), (v8i16)(__b)))
#define msa_ceqq_u32(__a, __b) ((v4u32)__builtin_msa_ceq_w((v4i32)(__a), (v4i32)(__b)))
#define msa_ceqq_s32(__a, __b) ((v4u32)__builtin_msa_ceq_w((v4i32)(__a), (v4i32)(__b)))
#define msa_ceqq_f32(__a, __b) ((v4u32)__builtin_msa_fceq_w((v4f32)(__a), (v4f32)(__b)))
#define msa_ceqq_u64(__a, __b) ((v2u64)__builtin_msa_ceq_d((v2i64)(__a), (v2i64)(__b)))
#define msa_ceqq_s64(__a, __b) ((v2u64)__builtin_msa_ceq_d((v2i64)(__a), (v2i64)(__b)))
#define msa_ceqq_f64(__a, __b) ((v2u64)__builtin_msa_fceq_d((v2f64)(__a), (v2f64)(__b)))
/* Compare less-than: clt -> ri = ai < bi ? 1...1:0...0 */
#define msa_cltq_u8(__a, __b) ((v16u8)__builtin_msa_clt_u_b((v16u8)(__a), (v16u8)(__b)))
#define msa_cltq_s8(__a, __b) ((v16u8)__builtin_msa_clt_s_b((v16i8)(__a), (v16i8)(__b)))
#define msa_cltq_u16(__a, __b) ((v8u16)__builtin_msa_clt_u_h((v8u16)(__a), (v8u16)(__b)))
#define msa_cltq_s16(__a, __b) ((v8u16)__builtin_msa_clt_s_h((v8i16)(__a), (v8i16)(__b)))
#define msa_cltq_u32(__a, __b) ((v4u32)__builtin_msa_clt_u_w((v4u32)(__a), (v4u32)(__b)))
#define msa_cltq_s32(__a, __b) ((v4u32)__builtin_msa_clt_s_w((v4i32)(__a), (v4i32)(__b)))
#define msa_cltq_f32(__a, __b) ((v4u32)__builtin_msa_fclt_w((v4f32)(__a), (v4f32)(__b)))
#define msa_cltq_u64(__a, __b) ((v2u64)__builtin_msa_clt_u_d((v2u64)(__a), (v2u64)(__b)))
#define msa_cltq_s64(__a, __b) ((v2u64)__builtin_msa_clt_s_d((v2i64)(__a), (v2i64)(__b)))
#define msa_cltq_f64(__a, __b) ((v2u64)__builtin_msa_fclt_d((v2f64)(__a), (v2f64)(__b)))
/* compare greater-than: cgt -> ri = ai > bi ? 1...1:0...0
   (clt with swapped operands) */
#define msa_cgtq_u8(__a, __b) ((v16u8)__builtin_msa_clt_u_b((v16u8)(__b), (v16u8)(__a)))
#define msa_cgtq_s8(__a, __b) ((v16u8)__builtin_msa_clt_s_b((v16i8)(__b), (v16i8)(__a)))
#define msa_cgtq_u16(__a, __b) ((v8u16)__builtin_msa_clt_u_h((v8u16)(__b), (v8u16)(__a)))
#define msa_cgtq_s16(__a, __b) ((v8u16)__builtin_msa_clt_s_h((v8i16)(__b), (v8i16)(__a)))
#define msa_cgtq_u32(__a, __b) ((v4u32)__builtin_msa_clt_u_w((v4u32)(__b), (v4u32)(__a)))
#define msa_cgtq_s32(__a, __b) ((v4u32)__builtin_msa_clt_s_w((v4i32)(__b), (v4i32)(__a)))
#define msa_cgtq_f32(__a, __b) ((v4u32)__builtin_msa_fclt_w((v4f32)(__b), (v4f32)(__a)))
#define msa_cgtq_u64(__a, __b) ((v2u64)__builtin_msa_clt_u_d((v2u64)(__b), (v2u64)(__a)))
#define msa_cgtq_s64(__a, __b) ((v2u64)__builtin_msa_clt_s_d((v2i64)(__b), (v2i64)(__a)))
#define msa_cgtq_f64(__a, __b) ((v2u64)__builtin_msa_fclt_d((v2f64)(__b), (v2f64)(__a)))
/* compare less-equal: cle -> ri = ai <= bi ? 1...1:0...0 */
#define msa_cleq_u8(__a, __b) ((v16u8)__builtin_msa_cle_u_b((v16u8)(__a), (v16u8)(__b)))
#define msa_cleq_s8(__a, __b) ((v16u8)__builtin_msa_cle_s_b((v16i8)(__a), (v16i8)(__b)))
#define msa_cleq_u16(__a, __b) ((v8u16)__builtin_msa_cle_u_h((v8u16)(__a), (v8u16)(__b)))
#define msa_cleq_s16(__a, __b) ((v8u16)__builtin_msa_cle_s_h((v8i16)(__a), (v8i16)(__b)))
#define msa_cleq_u32(__a, __b) ((v4u32)__builtin_msa_cle_u_w((v4u32)(__a), (v4u32)(__b)))
#define msa_cleq_s32(__a, __b) ((v4u32)__builtin_msa_cle_s_w((v4i32)(__a), (v4i32)(__b)))
#define msa_cleq_f32(__a, __b) ((v4u32)__builtin_msa_fcle_w((v4f32)(__a), (v4f32)(__b)))
#define msa_cleq_u64(__a, __b) ((v2u64)__builtin_msa_cle_u_d((v2u64)(__a), (v2u64)(__b)))
#define msa_cleq_s64(__a, __b) ((v2u64)__builtin_msa_cle_s_d((v2i64)(__a), (v2i64)(__b)))
#define msa_cleq_f64(__a, __b) ((v2u64)__builtin_msa_fcle_d((v2f64)(__a), (v2f64)(__b)))
/* compare greater-equal: cge -> ri = ai >= bi ? 1...1:0...0
   (cle with swapped operands) */
#define msa_cgeq_u8(__a, __b) ((v16u8)__builtin_msa_cle_u_b((v16u8)(__b), (v16u8)(__a)))
#define msa_cgeq_s8(__a, __b) ((v16u8)__builtin_msa_cle_s_b((v16i8)(__b), (v16i8)(__a)))
#define msa_cgeq_u16(__a, __b) ((v8u16)__builtin_msa_cle_u_h((v8u16)(__b), (v8u16)(__a)))
#define msa_cgeq_s16(__a, __b) ((v8u16)__builtin_msa_cle_s_h((v8i16)(__b), (v8i16)(__a)))
#define msa_cgeq_u32(__a, __b) ((v4u32)__builtin_msa_cle_u_w((v4u32)(__b), (v4u32)(__a)))
#define msa_cgeq_s32(__a, __b) ((v4u32)__builtin_msa_cle_s_w((v4i32)(__b), (v4i32)(__a)))
#define msa_cgeq_f32(__a, __b) ((v4u32)__builtin_msa_fcle_w((v4f32)(__b), (v4f32)(__a)))
#define msa_cgeq_u64(__a, __b) ((v2u64)__builtin_msa_cle_u_d((v2u64)(__b), (v2u64)(__a)))
#define msa_cgeq_s64(__a, __b) ((v2u64)__builtin_msa_cle_s_d((v2i64)(__b), (v2i64)(__a)))
#define msa_cgeq_f64(__a, __b) ((v2u64)__builtin_msa_fcle_d((v2f64)(__b), (v2f64)(__a)))
/* Shifts: sll/srl/sra take a per-element shift-amount vector; the *_n_*
   immediate forms use slli/srli/srai/srlri/srari.  Signed right shifts are
   arithmetic (sra*), unsigned are logical (srl*). */
/* Shift Left Logical: shl -> ri = ai << bi; */
#define msa_shlq_u8(__a, __b) ((v16u8)__builtin_msa_sll_b((v16i8)(__a), (v16i8)(__b)))
#define msa_shlq_s8(__a, __b) ((v16i8)__builtin_msa_sll_b((v16i8)(__a), (v16i8)(__b)))
#define msa_shlq_u16(__a, __b) ((v8u16)__builtin_msa_sll_h((v8i16)(__a), (v8i16)(__b)))
#define msa_shlq_s16(__a, __b) ((v8i16)__builtin_msa_sll_h((v8i16)(__a), (v8i16)(__b)))
#define msa_shlq_u32(__a, __b) ((v4u32)__builtin_msa_sll_w((v4i32)(__a), (v4i32)(__b)))
#define msa_shlq_s32(__a, __b) ((v4i32)__builtin_msa_sll_w((v4i32)(__a), (v4i32)(__b)))
#define msa_shlq_u64(__a, __b) ((v2u64)__builtin_msa_sll_d((v2i64)(__a), (v2i64)(__b)))
#define msa_shlq_s64(__a, __b) ((v2i64)__builtin_msa_sll_d((v2i64)(__a), (v2i64)(__b)))
/* Immediate Shift Left Logical: shl -> ri = ai << imm;
   __imm must be a compile-time constant in range for the element width. */
#define msa_shlq_n_u8(__a, __imm) ((v16u8)__builtin_msa_slli_b((v16i8)(__a), __imm))
#define msa_shlq_n_s8(__a, __imm) ((v16i8)__builtin_msa_slli_b((v16i8)(__a), __imm))
#define msa_shlq_n_u16(__a, __imm) ((v8u16)__builtin_msa_slli_h((v8i16)(__a), __imm))
#define msa_shlq_n_s16(__a, __imm) ((v8i16)__builtin_msa_slli_h((v8i16)(__a), __imm))
#define msa_shlq_n_u32(__a, __imm) ((v4u32)__builtin_msa_slli_w((v4i32)(__a), __imm))
#define msa_shlq_n_s32(__a, __imm) ((v4i32)__builtin_msa_slli_w((v4i32)(__a), __imm))
#define msa_shlq_n_u64(__a, __imm) ((v2u64)__builtin_msa_slli_d((v2i64)(__a), __imm))
#define msa_shlq_n_s64(__a, __imm) ((v2i64)__builtin_msa_slli_d((v2i64)(__a), __imm))
/* shift right: shrq -> ri = ai >> bi; */
#define msa_shrq_u8(__a, __b) ((v16u8)__builtin_msa_srl_b((v16i8)(__a), (v16i8)(__b)))
#define msa_shrq_s8(__a, __b) ((v16i8)__builtin_msa_sra_b((v16i8)(__a), (v16i8)(__b)))
#define msa_shrq_u16(__a, __b) ((v8u16)__builtin_msa_srl_h((v8i16)(__a), (v8i16)(__b)))
#define msa_shrq_s16(__a, __b) ((v8i16)__builtin_msa_sra_h((v8i16)(__a), (v8i16)(__b)))
#define msa_shrq_u32(__a, __b) ((v4u32)__builtin_msa_srl_w((v4i32)(__a), (v4i32)(__b)))
#define msa_shrq_s32(__a, __b) ((v4i32)__builtin_msa_sra_w((v4i32)(__a), (v4i32)(__b)))
#define msa_shrq_u64(__a, __b) ((v2u64)__builtin_msa_srl_d((v2i64)(__a), (v2i64)(__b)))
#define msa_shrq_s64(__a, __b) ((v2i64)__builtin_msa_sra_d((v2i64)(__a), (v2i64)(__b)))
/* Immediate Shift Right: shr -> ri = ai >> imm; */
#define msa_shrq_n_u8(__a, __imm) ((v16u8)__builtin_msa_srli_b((v16i8)(__a), __imm))
#define msa_shrq_n_s8(__a, __imm) ((v16i8)__builtin_msa_srai_b((v16i8)(__a), __imm))
#define msa_shrq_n_u16(__a, __imm) ((v8u16)__builtin_msa_srli_h((v8i16)(__a), __imm))
#define msa_shrq_n_s16(__a, __imm) ((v8i16)__builtin_msa_srai_h((v8i16)(__a), __imm))
#define msa_shrq_n_u32(__a, __imm) ((v4u32)__builtin_msa_srli_w((v4i32)(__a), __imm))
#define msa_shrq_n_s32(__a, __imm) ((v4i32)__builtin_msa_srai_w((v4i32)(__a), __imm))
#define msa_shrq_n_u64(__a, __imm) ((v2u64)__builtin_msa_srli_d((v2i64)(__a), __imm))
#define msa_shrq_n_s64(__a, __imm) ((v2i64)__builtin_msa_srai_d((v2i64)(__a), __imm))
/* Immediate Shift Right Rounded: shr -> ri = ai >> (rounded)imm; */
#define msa_rshrq_n_u8(__a, __imm) ((v16u8)__builtin_msa_srlri_b((v16i8)(__a), __imm))
#define msa_rshrq_n_s8(__a, __imm) ((v16i8)__builtin_msa_srari_b((v16i8)(__a), __imm))
#define msa_rshrq_n_u16(__a, __imm) ((v8u16)__builtin_msa_srlri_h((v8i16)(__a), __imm))
#define msa_rshrq_n_s16(__a, __imm) ((v8i16)__builtin_msa_srari_h((v8i16)(__a), __imm))
#define msa_rshrq_n_u32(__a, __imm) ((v4u32)__builtin_msa_srlri_w((v4i32)(__a), __imm))
#define msa_rshrq_n_s32(__a, __imm) ((v4i32)__builtin_msa_srari_w((v4i32)(__a), __imm))
#define msa_rshrq_n_u64(__a, __imm) ((v2u64)__builtin_msa_srlri_d((v2i64)(__a), __imm))
#define msa_rshrq_n_s64(__a, __imm) ((v2i64)__builtin_msa_srari_d((v2i64)(__a), __imm))
/* Vector rounding shift right: qrshr -> ri = ai >> (rounded)bi;
   srar.w is an arithmetic shift right with rounding (the previous comment
   describing a left shift was incorrect).  Uses the __builtin_ spelling for
   consistency with every other macro in this file; the bare __msa_srar_w
   alias only exists when <msa.h> is included. */
#define msa_qrshrq_s32(a, b) ((v4i32)__builtin_msa_srar_w((v4i32)(a), (v4i32)(b)))
/* Rename the msa builtin func to unify the name style for intrin_msa.hpp */
/* Saturating add (adds_*) and saturating subtract (subs_*): results clamp
   at the type's min/max instead of wrapping. */
#define msa_qaddq_u8 __builtin_msa_adds_u_b
#define msa_qaddq_s8 __builtin_msa_adds_s_b
#define msa_qaddq_u16 __builtin_msa_adds_u_h
#define msa_qaddq_s16 __builtin_msa_adds_s_h
#define msa_qaddq_u32 __builtin_msa_adds_u_w
#define msa_qaddq_s32 __builtin_msa_adds_s_w
#define msa_qaddq_u64 __builtin_msa_adds_u_d
#define msa_qaddq_s64 __builtin_msa_adds_s_d
/* Wrapping add/sub/mul: unsigned variants cast through the signed builtin
   (bit pattern is identical for two's-complement wrap-around). */
#define msa_addq_u8(a, b) ((v16u8)__builtin_msa_addv_b((v16i8)(a), (v16i8)(b)))
#define msa_addq_s8 __builtin_msa_addv_b
#define msa_addq_u16(a, b) ((v8u16)__builtin_msa_addv_h((v8i16)(a), (v8i16)(b)))
#define msa_addq_s16 __builtin_msa_addv_h
#define msa_addq_u32(a, b) ((v4u32)__builtin_msa_addv_w((v4i32)(a), (v4i32)(b)))
#define msa_addq_s32 __builtin_msa_addv_w
#define msa_addq_f32 __builtin_msa_fadd_w
#define msa_addq_u64(a, b) ((v2u64)__builtin_msa_addv_d((v2i64)(a), (v2i64)(b)))
#define msa_addq_s64 __builtin_msa_addv_d
#define msa_addq_f64 __builtin_msa_fadd_d
#define msa_qsubq_u8 __builtin_msa_subs_u_b
#define msa_qsubq_s8 __builtin_msa_subs_s_b
#define msa_qsubq_u16 __builtin_msa_subs_u_h
#define msa_qsubq_s16 __builtin_msa_subs_s_h
#define msa_subq_u8(a, b) ((v16u8)__builtin_msa_subv_b((v16i8)(a), (v16i8)(b)))
#define msa_subq_s8 __builtin_msa_subv_b
#define msa_subq_u16(a, b) ((v8u16)__builtin_msa_subv_h((v8i16)(a), (v8i16)(b)))
#define msa_subq_s16 __builtin_msa_subv_h
#define msa_subq_u32(a, b) ((v4u32)__builtin_msa_subv_w((v4i32)(a), (v4i32)(b)))
#define msa_subq_s32 __builtin_msa_subv_w
#define msa_subq_f32 __builtin_msa_fsub_w
#define msa_subq_u64(a, b) ((v2u64)__builtin_msa_subv_d((v2i64)(a), (v2i64)(b)))
#define msa_subq_s64 __builtin_msa_subv_d
#define msa_subq_f64 __builtin_msa_fsub_d
#define msa_mulq_u8(a, b) ((v16u8)__builtin_msa_mulv_b((v16i8)(a), (v16i8)(b)))
#define msa_mulq_s8(a, b) ((v16i8)__builtin_msa_mulv_b((v16i8)(a), (v16i8)(b)))
#define msa_mulq_u16(a, b) ((v8u16)__builtin_msa_mulv_h((v8i16)(a), (v8i16)(b)))
#define msa_mulq_s16(a, b) ((v8i16)__builtin_msa_mulv_h((v8i16)(a), (v8i16)(b)))
#define msa_mulq_u32(a, b) ((v4u32)__builtin_msa_mulv_w((v4i32)(a), (v4i32)(b)))
#define msa_mulq_s32(a, b) ((v4i32)__builtin_msa_mulv_w((v4i32)(a), (v4i32)(b)))
#define msa_mulq_u64(a, b) ((v2u64)__builtin_msa_mulv_d((v2i64)(a), (v2i64)(b)))
#define msa_mulq_s64(a, b) ((v2i64)__builtin_msa_mulv_d((v2i64)(a), (v2i64)(b)))
#define msa_mulq_f32 __builtin_msa_fmul_w
#define msa_mulq_f64 __builtin_msa_fmul_d
#define msa_divq_f32 __builtin_msa_fdiv_w
#define msa_divq_f64 __builtin_msa_fdiv_d
/* Dot product (dotp) and dot-product-accumulate (dpadd) pass through the
   builtins unchanged. */
#define msa_dotp_s_h __builtin_msa_dotp_s_h
#define msa_dotp_s_w __builtin_msa_dotp_s_w
#define msa_dotp_s_d __builtin_msa_dotp_s_d
#define msa_dotp_u_h __builtin_msa_dotp_u_h
#define msa_dotp_u_w __builtin_msa_dotp_u_w
#define msa_dotp_u_d __builtin_msa_dotp_u_d
#define msa_dpadd_s_h __builtin_msa_dpadd_s_h
#define msa_dpadd_s_w __builtin_msa_dpadd_s_w
#define msa_dpadd_s_d __builtin_msa_dpadd_s_d
#define msa_dpadd_u_h __builtin_msa_dpadd_u_h
#define msa_dpadd_u_w __builtin_msa_dpadd_u_w
#define msa_dpadd_u_d __builtin_msa_dpadd_u_d
/* ILVRL_*2: interleave the right (low) and left (high) halves of in0/in1
   into the output lvalues `low` and `hi`.  The typed wrappers select the
   result cast via RTYPE and forward the remaining four arguments. */
#define ILVRL_B2(RTYPE, in0, in1, low, hi) do { \
low = (RTYPE)__builtin_msa_ilvr_b((v16i8)(in0), (v16i8)(in1)); \
hi = (RTYPE)__builtin_msa_ilvl_b((v16i8)(in0), (v16i8)(in1)); \
} while (0)
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)
#define ILVRL_H2(RTYPE, in0, in1, low, hi) do { \
low = (RTYPE)__builtin_msa_ilvr_h((v8i16)(in0), (v8i16)(in1)); \
hi = (RTYPE)__builtin_msa_ilvl_h((v8i16)(in0), (v8i16)(in1)); \
} while (0)
#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
#define ILVRL_H2_UH(...) ILVRL_H2(v8u16, __VA_ARGS__)
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
#define ILVRL_H2_UW(...) ILVRL_H2(v4u32, __VA_ARGS__)
#define ILVRL_W2(RTYPE, in0, in1, low, hi) do { \
low = (RTYPE)__builtin_msa_ilvr_w((v4i32)(in0), (v4i32)(in1)); \
hi = (RTYPE)__builtin_msa_ilvl_w((v4i32)(in0), (v4i32)(in1)); \
} while (0)
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
#define ILVRL_W2_UW(...) ILVRL_W2(v4u32, __VA_ARGS__)
/* absq, qabsq (r = |a|;)
   Integer abs is built from add_a/adds_a with a zero vector (|a| + |0|);
   the saturating q-form keeps INT_MIN from overflowing.  Float abs just
   clears the sign bit with bclri. */
#define msa_absq_s8(a) __builtin_msa_add_a_b(a, __builtin_msa_fill_b(0))
#define msa_absq_s16(a) __builtin_msa_add_a_h(a, __builtin_msa_fill_h(0))
#define msa_absq_s32(a) __builtin_msa_add_a_w(a, __builtin_msa_fill_w(0))
#define msa_absq_s64(a) __builtin_msa_add_a_d(a, __builtin_msa_fill_d(0))
#define msa_absq_f32(a) ((v4f32)__builtin_msa_bclri_w((v4u32)(a), 31))
#define msa_absq_f64(a) ((v2f64)__builtin_msa_bclri_d((v2u64)(a), 63))
#define msa_qabsq_s8(a) __builtin_msa_adds_a_b(a, __builtin_msa_fill_b(0))
#define msa_qabsq_s16(a) __builtin_msa_adds_a_h(a, __builtin_msa_fill_h(0))
#define msa_qabsq_s32(a) __builtin_msa_adds_a_w(a, __builtin_msa_fill_w(0))
#define msa_qabsq_s64(a) __builtin_msa_adds_a_d(a, __builtin_msa_fill_d(0))
/* abdq, qabdq (r = |a - b|;) — integer forms map to asub; float and
   saturating forms compose subtract with the abs macros above. */
#define msa_abdq_u8 __builtin_msa_asub_u_b
#define msa_abdq_s8 __builtin_msa_asub_s_b
#define msa_abdq_u16 __builtin_msa_asub_u_h
#define msa_abdq_s16 __builtin_msa_asub_s_h
#define msa_abdq_u32 __builtin_msa_asub_u_w
#define msa_abdq_s32 __builtin_msa_asub_s_w
#define msa_abdq_u64 __builtin_msa_asub_u_d
#define msa_abdq_s64 __builtin_msa_asub_s_d
#define msa_abdq_f32(a, b) msa_absq_f32(__builtin_msa_fsub_w(a, b))
#define msa_abdq_f64(a, b) msa_absq_f64(__builtin_msa_fsub_d(a, b))
#define msa_qabdq_s8(a, b) msa_qabsq_s8(__builtin_msa_subs_s_b(a, b))
#define msa_qabdq_s16(a, b) msa_qabsq_s16(__builtin_msa_subs_s_h(a, b))
#define msa_qabdq_s32(a, b) msa_qabsq_s32(__builtin_msa_subs_s_w(a, b))
#define msa_qabdq_s64(a, b) msa_qabsq_s64(__builtin_msa_subs_s_d(a, b))
/* sqrtq, rsqrtq: square root and approximate reciprocal square root. */
#define msa_sqrtq_f32 __builtin_msa_fsqrt_w
#define msa_sqrtq_f64 __builtin_msa_fsqrt_d
#define msa_rsqrtq_f32 __builtin_msa_frsqrt_w
#define msa_rsqrtq_f64 __builtin_msa_frsqrt_d
/* mlaq: r = a + b * c; */
/* Integer multiply-accumulate via inline asm: maddv.w accumulates into its
   destination register, so __a is both input and output ("+f"). */
__extension__ extern __inline v4i32
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
msa_mlaq_s32(v4i32 __a, v4i32 __b, v4i32 __c)
{
  /* __a += __b * __c element-wise. */
  __asm__ volatile("maddv.w %w[__a], %w[__b], %w[__c]\n"
                   // Outputs
                   : [__a] "+f"(__a)
                   // Inputs
                   : [__b] "f"(__b), [__c] "f"(__c));
  return __a;
}
/* 64-bit integer multiply-accumulate: __a += __b * __c via maddv.d. */
__extension__ extern __inline v2i64
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
msa_mlaq_s64(v2i64 __a, v2i64 __b, v2i64 __c)
{
  __asm__ volatile("maddv.d %w[__a], %w[__b], %w[__c]\n"
                   // Outputs
                   : [__a] "+f"(__a)
                   // Inputs
                   : [__b] "f"(__b), [__c] "f"(__c));
  return __a;
}
/* Single-precision fused multiply-add: __a += __b * __c via fmadd.w. */
__extension__ extern __inline v4f32
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
msa_mlaq_f32(v4f32 __a, v4f32 __b, v4f32 __c)
{
  __asm__ volatile("fmadd.w %w[__a], %w[__b], %w[__c]\n"
                   // Outputs
                   : [__a] "+f"(__a)
                   // Inputs
                   : [__b] "f"(__b), [__c] "f"(__c));
  return __a;
}
/* Double-precision fused multiply-add: __a += __b * __c via fmadd.d. */
__extension__ extern __inline v2f64
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
msa_mlaq_f64(v2f64 __a, v2f64 __b, v2f64 __c)
{
  __asm__ volatile("fmadd.d %w[__a], %w[__b], %w[__c]\n"
                   // Outputs
                   : [__a] "+f"(__a)
                   // Inputs
                   : [__b] "f"(__b), [__c] "f"(__c));
  return __a;
}
/* cntq: population count per element. */
#define msa_cntq_s8 __builtin_msa_pcnt_b
#define msa_cntq_s16 __builtin_msa_pcnt_h
#define msa_cntq_s32 __builtin_msa_pcnt_w
#define msa_cntq_s64 __builtin_msa_pcnt_d
/* bslq (a: mask; r = b(if a == 0); r = c(if a == 1);) — bitwise select. */
#define msa_bslq_u8 __builtin_msa_bsel_v
/* ilvrq, ilvlq (For EL only, ilvrq: b0, a0, b1, a1; ilvlq: b2, a2, b3, a3;) */
#define msa_ilvrq_s8 __builtin_msa_ilvr_b
#define msa_ilvrq_s16 __builtin_msa_ilvr_h
#define msa_ilvrq_s32 __builtin_msa_ilvr_w
#define msa_ilvrq_s64 __builtin_msa_ilvr_d
#define msa_ilvlq_s8 __builtin_msa_ilvl_b
#define msa_ilvlq_s16 __builtin_msa_ilvl_h
#define msa_ilvlq_s32 __builtin_msa_ilvl_w
#define msa_ilvlq_s64 __builtin_msa_ilvl_d
/* ilvevq, ilvodq (ilvevq: b0, a0, b2, a2; ilvodq: b1, a1, b3, a3; ) */
#define msa_ilvevq_s8 __builtin_msa_ilvev_b
#define msa_ilvevq_s16 __builtin_msa_ilvev_h
#define msa_ilvevq_s32 __builtin_msa_ilvev_w
#define msa_ilvevq_s64 __builtin_msa_ilvev_d
#define msa_ilvodq_s8 __builtin_msa_ilvod_b
#define msa_ilvodq_s16 __builtin_msa_ilvod_h
#define msa_ilvodq_s32 __builtin_msa_ilvod_w
#define msa_ilvodq_s64 __builtin_msa_ilvod_d
/* extq (r = (a || b); a concatenation b and get elements from index c) */
#ifdef _MIPSEB
#define msa_extq_s8(a, b, c) \
(__builtin_msa_vshf_b(__builtin_msa_subv_b((v16i8)((v2i64){0x1716151413121110, 0x1F1E1D1C1B1A1918}), __builtin_msa_fill_b(c)), a, b))
#define msa_extq_s16(a, b, c) \
(__builtin_msa_vshf_h(__builtin_msa_subv_h((v8i16)((v2i64){0x000B000A00090008, 0x000F000E000D000C}), __builtin_msa_fill_h(c)), a, b))
#define msa_extq_s32(a, b, c) \
(__builtin_msa_vshf_w(__builtin_msa_subv_w((v4i32)((v2i64){0x0000000500000004, 0x0000000700000006}), __builtin_msa_fill_w(c)), a, b))
#define msa_extq_s64(a, b, c) \
(__builtin_msa_vshf_d(__builtin_msa_subv_d((v2i64){0x0000000000000002, 0x0000000000000003}, __builtin_msa_fill_d(c)), a, b))
#else
#define msa_extq_s8(a, b, c) \
(__builtin_msa_vshf_b(__builtin_msa_addv_b((v16i8)((v2i64){0x0706050403020100, 0x0F0E0D0C0B0A0908}), __builtin_msa_fill_b(c)), b, a))
#define msa_extq_s16(a, b, c) \
(__builtin_msa_vshf_h(__builtin_msa_addv_h((v8i16)((v2i64){0x0003000200010000, 0x0007000600050004}), __builtin_msa_fill_h(c)), b, a))
#define msa_extq_s32(a, b, c) \
(__builtin_msa_vshf_w(__builtin_msa_addv_w((v4i32)((v2i64){0x0000000100000000, 0x0000000300000002}), __builtin_msa_fill_w(c)), b, a))
#define msa_extq_s64(a, b, c) \
(__builtin_msa_vshf_d(__builtin_msa_addv_d((v2i64){0x0000000000000000, 0x0000000000000001}, __builtin_msa_fill_d(c)), b, a))
#endif /* _MIPSEB */
/* cvttruncq, cvttintq, cvtrintq */
#define msa_cvttruncq_u32_f32 __builtin_msa_ftrunc_u_w
#define msa_cvttruncq_s32_f32 __builtin_msa_ftrunc_s_w
#define msa_cvttruncq_u64_f64 __builtin_msa_ftrunc_u_d
#define msa_cvttruncq_s64_f64 __builtin_msa_ftrunc_s_d
#define msa_cvttintq_u32_f32 __builtin_msa_ftint_u_w
#define msa_cvttintq_s32_f32 __builtin_msa_ftint_s_w
#define msa_cvttintq_u64_f64 __builtin_msa_ftint_u_d
#define msa_cvttintq_s64_f64 __builtin_msa_ftint_s_d
#define msa_cvtrintq_f32 __builtin_msa_frint_w
#define msa_cvtrintq_f64 __builtin_msa_frint_d
/* cvtfintq, cvtfq */
#define msa_cvtfintq_f32_u32 __builtin_msa_ffint_u_w
#define msa_cvtfintq_f32_s32 __builtin_msa_ffint_s_w
#define msa_cvtfintq_f64_u64 __builtin_msa_ffint_u_d
#define msa_cvtfintq_f64_s64 __builtin_msa_ffint_s_d
#define msa_cvtfq_f32_f64 __builtin_msa_fexdo_w
#define msa_cvtflq_f64_f32 __builtin_msa_fexupr_d
#define msa_cvtfhq_f64_f32 __builtin_msa_fexupl_d
#define msa_addl_u8(a, b) ((v8u16)__builtin_msa_addv_h((v8i16)V8U8_2_V8I16(a), (v8i16)V8U8_2_V8I16(b)))
#define msa_addl_s8(a, b) (__builtin_msa_addv_h((v8i16)V8I8_2_V8I16(a), (v8i16)V8I8_2_V8I16(b)))
#define msa_addl_u16(a, b) ((v4u32)__builtin_msa_addv_w((v4i32)V4U16_2_V4I32(a), (v4i32)V4U16_2_V4I32(b)))
#define msa_addl_s16(a, b) (__builtin_msa_addv_w((v4i32)V4I16_2_V4I32(a), (v4i32)V4I16_2_V4I32(b)))
#define msa_subl_s16(a, b) (__builtin_msa_subv_w((v4i32)V4I16_2_V4I32(a), (v4i32)V4I16_2_V4I32(b)))
#define msa_recpeq_f32 __builtin_msa_frcp_w
#define msa_recpsq_f32(a, b) (__builtin_msa_fsub_w(msa_dupq_n_f32(2.0f), __builtin_msa_fmul_w(a, b)))
/* 2-channel de-interleave / interleave.
   ld2q: loads 2*nlanes contiguous elements and splits even-indexed
   elements into *a (pckev) and odd-indexed ones into *b (pckod).
   st2q: the inverse - interleaves a and b lane-wise (ilvr for the low
   half, ilvl for the high half) and stores 2*nlanes elements. */
#define MSA_INTERLEAVED_IMPL_LOAD2_STORE2(_Tp, _Tpv, _Tpvs, suffix, df, nlanes) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld2q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + nlanes); \
  *a = (_Tpv)__builtin_msa_pckev_##df((_Tpvs)v1, (_Tpvs)v0); \
  *b = (_Tpv)__builtin_msa_pckod_##df((_Tpvs)v1, (_Tpvs)v0); \
} \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st2q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b) \
{ \
  msa_st1q_##suffix(ptr, (_Tpv)__builtin_msa_ilvr_##df((_Tpvs)b, (_Tpvs)a)); \
  msa_st1q_##suffix(ptr + nlanes, (_Tpv)__builtin_msa_ilvl_##df((_Tpvs)b, (_Tpvs)a)); \
}
/* Instantiations for every 128-bit element type. */
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint8_t, v16u8, v16i8, u8, b, 16)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int8_t, v16i8, v16i8, s8, b, 16)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint16_t, v8u16, v8i16, u16, h, 8)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int16_t, v8i16, v8i16, s16, h, 8)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint32_t, v4u32, v4i32, u32, w, 4)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int32_t, v4i32, v4i32, s32, w, 4)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(float, v4f32, v4i32, f32, w, 4)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint64_t, v2u64, v2i64, u64, d, 2)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int64_t, v2i64, v2i64, s64, d, 2)
MSA_INTERLEAVED_IMPL_LOAD2_STORE2(double, v2f64, v2i64, f64, d, 2)
/* 3-channel de-interleave for 8-bit lanes: ld3q loads 48 contiguous
   bytes and gathers every 3rd one into *a, *b, *c via two vshf passes
   per channel. The shuffle-control constants are hand-computed lane
   indices; a separate constant set is needed for big endian (_MIPSEB)
   because v2i64 literals map to byte lanes differently. */
#ifdef _MIPSEB
#define MSA_INTERLEAVED_IMPL_LOAD3_8(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + 16); \
  _Tpv v2 = msa_ld1q_##suffix(ptr + 32); \
  _Tpvs v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0704011F1F1F1F1F, 0x1F1C191613100D0A}), (_Tpvs)v0, (_Tpvs)v1); \
  *a = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x1716150E0B080502, 0x1F1E1D1C1B1A1918}), v3, (_Tpvs)v2); \
  v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0603001F1F1F1F1F, 0x1E1B1815120F0C09}), (_Tpvs)v0, (_Tpvs)v1); \
  *b = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x1716150D0A070401, 0x1F1E1D1C1B1A1918}), v3, (_Tpvs)v2); \
  v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x05021F1F1F1F1F1F, 0x1D1A1714110E0B08}), (_Tpvs)v0, (_Tpvs)v1); \
  *c = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x17160F0C09060300, 0x1F1E1D1C1B1A1918}), v3, (_Tpvs)v2); \
}
#else
#define MSA_INTERLEAVED_IMPL_LOAD3_8(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + 16); \
  _Tpv v2 = msa_ld1q_##suffix(ptr + 32); \
  _Tpvs v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x15120F0C09060300, 0x00000000001E1B18}), (_Tpvs)v1, (_Tpvs)v0); \
  *a = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x0706050403020100, 0x1D1A1714110A0908}), (_Tpvs)v2, v3); \
  v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x1613100D0A070401, 0x00000000001F1C19}), (_Tpvs)v1, (_Tpvs)v0); \
  *b = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x0706050403020100, 0x1E1B1815120A0908}), (_Tpvs)v2, v3); \
  v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x1714110E0B080502, 0x0000000000001D1A}), (_Tpvs)v1, (_Tpvs)v0); \
  *c = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x0706050403020100, 0x1F1C191613100908}), (_Tpvs)v2, v3); \
}
#endif
MSA_INTERLEAVED_IMPL_LOAD3_8(uint8_t, v16u8, v16i8, u8)
MSA_INTERLEAVED_IMPL_LOAD3_8(int8_t, v16i8, v16i8, s8)
/* 3-channel de-interleave for 16-bit lanes: ld3q loads 24 contiguous
   halfwords and gathers every 3rd one into *a, *b, *c (two vshf passes
   per channel); endian-specific shuffle-control constants as above. */
#ifdef _MIPSEB
#define MSA_INTERLEAVED_IMPL_LOAD3_16(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + 8); \
  _Tpv v2 = msa_ld1q_##suffix(ptr + 16); \
  _Tpvs v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x00030000000F000F, 0x000F000C00090006}), (_Tpvs)v1, (_Tpvs)v0); \
  *a = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000A00050002, 0x000F000E000D000C}), (_Tpvs)v2, v3); \
  v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0002000F000F000F, 0x000E000B00080005}), (_Tpvs)v1, (_Tpvs)v0); \
  *b = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000700040001, 0x000F000E000D000C}), (_Tpvs)v2, v3); \
  v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0001000F000F000F, 0x000D000A00070004}), (_Tpvs)v1, (_Tpvs)v0); \
  *c = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000600030000, 0x000F000E000D000C}), (_Tpvs)v2, v3); \
}
#else
#define MSA_INTERLEAVED_IMPL_LOAD3_16(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + 8); \
  _Tpv v2 = msa_ld1q_##suffix(ptr + 16); \
  _Tpvs v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0009000600030000, 0x00000000000F000C}), (_Tpvs)v1, (_Tpvs)v0); \
  *a = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x0003000200010000, 0x000D000A00050004}), (_Tpvs)v2, v3); \
  v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000A000700040001, 0x000000000000000D}), (_Tpvs)v1, (_Tpvs)v0); \
  *b = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x0003000200010000, 0x000E000B00080004}), (_Tpvs)v2, v3); \
  v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000800050002, 0x000000000000000E}), (_Tpvs)v1, (_Tpvs)v0); \
  *c = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x0003000200010000, 0x000F000C00090004}), (_Tpvs)v2, v3); \
}
#endif
MSA_INTERLEAVED_IMPL_LOAD3_16(uint16_t, v8u16, v8i16, u16)
MSA_INTERLEAVED_IMPL_LOAD3_16(int16_t, v8i16, v8i16, s16)
/* 3-channel de-interleave for 32-bit lanes: loads 12 contiguous words
   and redistributes them with ilvr_w/ilvl_d transposition steps (no
   vshf constants needed, so one endian-neutral definition suffices). */
#define MSA_INTERLEAVED_IMPL_LOAD3_32(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \
{ \
  _Tpv v00 = msa_ld1q_##suffix(ptr); \
  _Tpv v01 = msa_ld1q_##suffix(ptr + 4); \
  _Tpv v02 = msa_ld1q_##suffix(ptr + 8); \
  _Tpvs v10 = __builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v01, (v2i64)v01), (_Tpvs)v00); \
  _Tpvs v11 = __builtin_msa_ilvr_w((_Tpvs)v02, (_Tpvs)__builtin_msa_ilvl_d((v2i64)v00, (v2i64)v00)); \
  _Tpvs v12 = __builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v02, (v2i64)v02), (_Tpvs)v01); \
  *a = (_Tpv)__builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v11, (v2i64)v11), v10); \
  *b = (_Tpv)__builtin_msa_ilvr_w(v12, (_Tpvs)__builtin_msa_ilvl_d((v2i64)v10, (v2i64)v10)); \
  *c = (_Tpv)__builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v12, (v2i64)v12), v11); \
}
MSA_INTERLEAVED_IMPL_LOAD3_32(uint32_t, v4u32, v4i32, u32)
MSA_INTERLEAVED_IMPL_LOAD3_32(int32_t, v4i32, v4i32, s32)
MSA_INTERLEAVED_IMPL_LOAD3_32(float, v4f32, v4i32, f32)
/* 3-channel de-interleave for 64-bit lanes: only two lanes per vector,
   so plain scalar copies are simplest (and a/b/c are written through
   element pointers rather than with vector shuffles). */
#define MSA_INTERLEAVED_IMPL_LOAD3_64(_Tp, _Tpv, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \
{ \
  *((_Tp*)a) = *ptr;          *((_Tp*)b) = *(ptr + 1);      *((_Tp*)c) = *(ptr + 2); \
  *((_Tp*)a + 1) = *(ptr + 3); *((_Tp*)b + 1) = *(ptr + 4); *((_Tp*)c + 1) = *(ptr + 5); \
}
MSA_INTERLEAVED_IMPL_LOAD3_64(uint64_t, v2u64, u64)
MSA_INTERLEAVED_IMPL_LOAD3_64(int64_t, v2i64, s64)
MSA_INTERLEAVED_IMPL_LOAD3_64(double, v2f64, f64)
/* 3-channel interleave for 8-bit lanes: st3q writes 48 bytes in
   a0,b0,c0,a1,b1,c1,... order, assembling each 16-byte output vector
   with two vshf passes; endian-specific shuffle constants. */
#ifdef _MIPSEB
#define MSA_INTERLEAVED_IMPL_STORE3_8(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  _Tpvs v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0F0E0D0C0B1F1F1F, 0x1F1E1D1C1B1A1F1F}), (_Tpvs)b, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0D1C140C1B130B1A, 0x1F170F1E160E1D15}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0A09080706051F1F, 0x19181716151F1F1F}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x1D14071C13061B12, 0x170A1F16091E1508}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x04030201001F1F1F, 0x14131211101F1F1F}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x15021C14011B1300, 0x051F17041E16031D}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 32, (_Tpv)v1); \
}
#else
#define MSA_INTERLEAVED_IMPL_STORE3_8(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  _Tpvs v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0000050403020100, 0x0000001413121110}), (_Tpvs)b, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0A02110901100800, 0x05140C04130B0312}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0000000A09080706, 0x00001A1918171615}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x170A011609001508, 0x0D04190C03180B02}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0000000F0E0D0C0B, 0x0000001F1E1D1C1B}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x021C09011B08001A, 0x1F0C041E0B031D0A}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 32, (_Tpv)v1); \
}
#endif
MSA_INTERLEAVED_IMPL_STORE3_8(uint8_t, v16u8, v16i8, u8)
MSA_INTERLEAVED_IMPL_STORE3_8(int8_t, v16i8, v16i8, s8)
/* 3-channel interleave for 16-bit lanes: st3q writes 24 halfwords in
   a0,b0,c0,... order via two vshf passes per output vector;
   endian-specific shuffle constants. */
#ifdef _MIPSEB
#define MSA_INTERLEAVED_IMPL_STORE3_16(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  _Tpvs v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000700060005000F, 0x000F000E000D000F}), (_Tpvs)b, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000A0006000D0009, 0x000F000B0007000E}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x00040003000F000F, 0x000C000B000A000F}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000E000A0003000D, 0x0005000F000B0004}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000200010000000F, 0x00090008000F000F}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0001000E00090000, 0x000B0002000F000A}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \
}
#else
#define MSA_INTERLEAVED_IMPL_STORE3_16(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  _Tpvs v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0000000200010000, 0x0000000A00090008}), (_Tpvs)b, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0001000800040000, 0x0006000200090005}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0000000500040003, 0x00000000000C000B}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B00040000000A, 0x0002000C00050001}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0000000000070006, 0x0000000F000E000D}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x00050000000D0004, 0x000F00060001000E}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \
}
#endif
MSA_INTERLEAVED_IMPL_STORE3_16(uint16_t, v8u16, v8i16, u16)
MSA_INTERLEAVED_IMPL_STORE3_16(int16_t, v8i16, v8i16, s16)
/* 3-channel interleave for 32-bit lanes: st3q writes 12 words in
   a0,b0,c0,... order via two vshf passes per output vector;
   endian-specific shuffle constants. */
#ifdef _MIPSEB
#define MSA_INTERLEAVED_IMPL_STORE3_32(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  _Tpvs v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000300000007, 0x0000000700000006}), (_Tpvs)b, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000300000006, 0x0000000700000005}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000200000001, 0x0000000500000007}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000700000004, 0x0000000500000002}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 4, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000007, 0x0000000400000007}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000500000000, 0x0000000100000007}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \
}
#else
#define MSA_INTERLEAVED_IMPL_STORE3_32(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  _Tpvs v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000100000000, 0x0000000000000004}), (_Tpvs)b, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000200000000, 0x0000000100000004}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000002, 0x0000000600000005}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000500000002, 0x0000000300000000}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 4, (_Tpv)v1); \
  v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000003, 0x0000000000000007}), (_Tpvs)b, (_Tpvs)a); \
  v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000006, 0x0000000700000002}), (_Tpvs)c, (_Tpvs)v0); \
  msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \
}
#endif
MSA_INTERLEAVED_IMPL_STORE3_32(uint32_t, v4u32, v4i32, u32)
MSA_INTERLEAVED_IMPL_STORE3_32(int32_t, v4i32, v4i32, s32)
MSA_INTERLEAVED_IMPL_STORE3_32(float, v4f32, v4i32, f32)
/* 3-channel interleave for 64-bit lanes: only two lanes per vector, so
   six scalar stores (a0,b0,c0,a1,b1,c1) are simplest. */
#define MSA_INTERLEAVED_IMPL_STORE3_64(_Tp, _Tpv, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \
{ \
  *ptr = a[0];       *(ptr + 1) = b[0]; *(ptr + 2) = c[0]; \
  *(ptr + 3) = a[1]; *(ptr + 4) = b[1]; *(ptr + 5) = c[1]; \
}
MSA_INTERLEAVED_IMPL_STORE3_64(uint64_t, v2u64, u64)
MSA_INTERLEAVED_IMPL_STORE3_64(int64_t, v2i64, s64)
MSA_INTERLEAVED_IMPL_STORE3_64(double, v2f64, f64)
/* 4-channel de-interleave / interleave.
   ld4q: loads 4*nlanes contiguous elements and separates the four
   channels with two rounds of even/odd packing (pckev/pckod).
   st4q: the inverse - two rounds of interleaving (ilvr/ilvl) rebuild
   the a0,b0,c0,d0,... memory order. */
#define MSA_INTERLEAVED_IMPL_LOAD4_STORE4(_Tp, _Tpv, _Tpvs, suffix, df, nlanes) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld4q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c, _Tpv* d) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + nlanes); \
  _Tpv v2 = msa_ld1q_##suffix(ptr + nlanes * 2); \
  _Tpv v3 = msa_ld1q_##suffix(ptr + nlanes * 3); \
  _Tpvs t0 = __builtin_msa_pckev_##df((_Tpvs)v1, (_Tpvs)v0); \
  _Tpvs t1 = __builtin_msa_pckev_##df((_Tpvs)v3, (_Tpvs)v2); \
  _Tpvs t2 = __builtin_msa_pckod_##df((_Tpvs)v1, (_Tpvs)v0); \
  _Tpvs t3 = __builtin_msa_pckod_##df((_Tpvs)v3, (_Tpvs)v2); \
  *a = (_Tpv)__builtin_msa_pckev_##df(t1, t0); \
  *b = (_Tpv)__builtin_msa_pckev_##df(t3, t2); \
  *c = (_Tpv)__builtin_msa_pckod_##df(t1, t0); \
  *d = (_Tpv)__builtin_msa_pckod_##df(t3, t2); \
} \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st4q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c, const _Tpv d) \
{ \
  _Tpvs v0 = __builtin_msa_ilvr_##df((_Tpvs)c, (_Tpvs)a); \
  _Tpvs v1 = __builtin_msa_ilvr_##df((_Tpvs)d, (_Tpvs)b); \
  _Tpvs v2 = __builtin_msa_ilvl_##df((_Tpvs)c, (_Tpvs)a); \
  _Tpvs v3 = __builtin_msa_ilvl_##df((_Tpvs)d, (_Tpvs)b); \
  msa_st1q_##suffix(ptr, (_Tpv)__builtin_msa_ilvr_##df(v1, v0)); \
  msa_st1q_##suffix(ptr + nlanes, (_Tpv)__builtin_msa_ilvl_##df(v1, v0)); \
  msa_st1q_##suffix(ptr + 2 * nlanes, (_Tpv)__builtin_msa_ilvr_##df(v3, v2)); \
  msa_st1q_##suffix(ptr + 3 * nlanes, (_Tpv)__builtin_msa_ilvl_##df(v3, v2)); \
}
/* Instantiations for 8/16/32-bit element types (64-bit handled below). */
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(uint8_t, v16u8, v16i8, u8, b, 16)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(int8_t, v16i8, v16i8, s8, b, 16)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(uint16_t, v8u16, v8i16, u16, h, 8)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(int16_t, v8i16, v8i16, s16, h, 8)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(uint32_t, v4u32, v4i32, u32, w, 4)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(int32_t, v4i32, v4i32, s32, w, 4)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4(float, v4f32, v4i32, f32, w, 4)
/* 4-channel de-interleave / interleave for 64-bit lanes: with only two
   lanes per vector, a single level of ilvr_d/ilvl_d suffices instead
   of the generic two-round pck/ilv scheme. */
#define MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(_Tp, _Tpv, _Tpvs, suffix) \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_ld4q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c, _Tpv* d) \
{ \
  _Tpv v0 = msa_ld1q_##suffix(ptr); \
  _Tpv v1 = msa_ld1q_##suffix(ptr + 2); \
  _Tpv v2 = msa_ld1q_##suffix(ptr + 4); \
  _Tpv v3 = msa_ld1q_##suffix(ptr + 6); \
  *a = (_Tpv)__builtin_msa_ilvr_d((_Tpvs)v2, (_Tpvs)v0); \
  *b = (_Tpv)__builtin_msa_ilvl_d((_Tpvs)v2, (_Tpvs)v0); \
  *c = (_Tpv)__builtin_msa_ilvr_d((_Tpvs)v3, (_Tpvs)v1); \
  *d = (_Tpv)__builtin_msa_ilvl_d((_Tpvs)v3, (_Tpvs)v1); \
} \
__extension__ extern __inline void \
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
msa_st4q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c, const _Tpv d) \
{ \
  msa_st1q_##suffix(ptr, (_Tpv)__builtin_msa_ilvr_d((_Tpvs)b, (_Tpvs)a)); \
  msa_st1q_##suffix(ptr + 2, (_Tpv)__builtin_msa_ilvr_d((_Tpvs)d, (_Tpvs)c)); \
  msa_st1q_##suffix(ptr + 4, (_Tpv)__builtin_msa_ilvl_d((_Tpvs)b, (_Tpvs)a)); \
  msa_st1q_##suffix(ptr + 6, (_Tpv)__builtin_msa_ilvl_d((_Tpvs)d, (_Tpvs)c)); \
}
MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(uint64_t, v2u64, v2i64, u64)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(int64_t, v2i64, v2i64, s64)
MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(double, v2f64, v2i64, f64)
/* Saturating doubling multiply returning the high half, by a scalar:
   each int16 lane of a is widened to 32 bits (ILVRL_H2_SH with a zero
   vector, then paddl), multiplied by b, doubled with a left shift by 1,
   and narrowed back to 16 bits taking the upper half of each product
   (packr with shift 16). ILVRL_H2_SH is defined elsewhere in this
   header. */
__extension__ extern __inline v8i16
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
msa_qdmulhq_n_s16(v8i16 a, int16_t b)
{
    v8i16 a_lo, a_hi;
    /* Split a into low/high groups of lanes for widening. */
    ILVRL_H2_SH(a, msa_dupq_n_s16(0), a_lo, a_hi);
    return msa_packr_s32(msa_shlq_n_s32(msa_mulq_s32(msa_paddlq_s16(a_lo), msa_dupq_n_s32(b)), 1),
                         msa_shlq_n_s32(msa_mulq_s32(msa_paddlq_s16(a_hi), msa_dupq_n_s32(b)), 1), 16);
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif /*__mips_msa*/
#endif /* OPENCV_CORE_MSA_MACROS_H */ | c | github | https://github.com/opencv/opencv | modules/core/include/opencv2/core/hal/msa_macros.h |
"""
Tests of the maxout functionality.
So far these don't test correctness, just that you can
run the objects.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import numpy as np
import unittest
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
from theano import config
from theano import function
from theano.sandbox import cuda
from theano import tensor as T
from pylearn2.config import yaml_parse
from pylearn2.datasets.exc import NoDataPathError
from pylearn2.models.mlp import MLP
from pylearn2.models.maxout import Maxout
from pylearn2.space import VectorSpace
def test_min_zero():
    """
    Regression test for a bug where the zero buffer used with the
    ``min_zero`` flag was given the wrong size. The bug only appeared
    with optimizations disabled, because the optimizations discard the
    buffer-size information, so the function is compiled in DEBUG_MODE.
    """
    # Smallest possible maxout network with min_zero enabled.
    maxout_layer = Maxout(layer_name="test_layer", num_units=1,
                          num_pieces=2, irange=.05, min_zero=True)
    mlp = MLP(input_space=VectorSpace(1), layers=[maxout_layer])

    inputs = T.matrix()
    outputs = mlp.fprop(inputs)

    # DEBUG_MODE keeps the zero buffer at its declared size, which is
    # what triggers the original bug if the size is wrong.
    fprop_fn = function([inputs], outputs, mode="DEBUG_MODE")
    fprop_fn(np.zeros((1, 1)).astype(inputs.dtype))
def test_maxout_basic():
    """
    Smoke test: build a tiny densely-connected maxout MLP from an
    inline YAML description and train it for 3 epochs on a random
    dummy dataset. Nothing is saved; only checks that training runs.
    """
    # Tests that we can load a densely connected maxout model
    # and train it for a few epochs (without saving) on a dummy
    # dataset-- tiny model and dataset
    yaml_string = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.testing.datasets.random_one_hot_dense_d\
esign_matrix {
rng: !obj:numpy.random.RandomState { seed: [2013, 3, 16] },
num_examples: 12,
dim: 2,
num_classes: 10
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.maxout.Maxout {
layer_name: 'h0',
num_units: 3,
num_pieces: 2,
irange: .005,
max_col_norm: 1.9365,
},
!obj:pylearn2.models.maxout.Maxout {
layer_name: 'h1',
num_units: 2,
num_pieces: 3,
irange: .005,
max_col_norm: 1.9365,
},
!obj:pylearn2.models.mlp.Softmax {
max_col_norm: 1.9365,
layer_name: 'y',
n_classes: 10,
irange: .005
}
],
nvis: 2,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.5 },
batch_size: 6,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train
},
cost: !obj:pylearn2.costs.mlp.dropout.Dropout {
input_include_probs: { 'h0' : .8 },
input_scales: { 'h0': 1. }
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 3,
},
update_callbacks: !obj:pylearn2.training_algorithms.sgd.Exponenti\
alDecay {
decay_factor: 1.000004,
min_lr: .000001
}
},
extensions: [
!obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {
start: 1,
saturate: 250,
final_momentum: .7
}
],
}
"""
    train = yaml_parse.load(yaml_string)
    train.main_loop()
# YAML description of a tiny MaxoutConvC01B model trained on a random
# topological dummy dataset. Loading it requires CUDA (MaxoutConvC01B
# wraps cuda-convnet), which TestMaxout exploits both for the
# expected-failure test (no GPU) and the GPU smoke test.
yaml_string_maxout_conv_c01b_basic = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.testing.datasets.random_one_hot_topolog\
ical_dense_design_matrix {
rng: !obj:numpy.random.RandomState { seed: [2013, 3, 16] },
shape: &input_shape [10, 10],
channels: 1,
axes: ['c', 0, 1, 'b'],
num_examples: 12,
num_classes: 10
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 2,
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'h0',
pad: 0,
num_channels: 8,
num_pieces: 2,
kernel_shape: [2, 2],
pool_shape: [2, 2],
pool_stride: [2, 2],
irange: .005,
max_kernel_norm: .9,
},
# The following layers are commented out to make this
# test pass on a GTX 285.
# cuda-convnet isn't really meant to run on such an old
# graphics card but that's what we use for the buildbot.
# In the long run, we should move the buildbot to a newer
# graphics card and uncomment the remaining layers.
# !obj:pylearn2.models.maxout.MaxoutConvC01B {
# layer_name: 'h1',
# pad: 3,
# num_channels: 4,
# num_pieces: 4,
# kernel_shape: [3, 3],
# pool_shape: [2, 2],
# pool_stride: [2, 2],
# irange: .005,
# max_kernel_norm: 1.9365,
# },
#!obj:pylearn2.models.maxout.MaxoutConvC01B {
# pad: 3,
# layer_name: 'h2',
# num_channels: 16,
# num_pieces: 2,
# kernel_shape: [2, 2],
# pool_shape: [2, 2],
# pool_stride: [2, 2],
# irange: .005,
# max_kernel_norm: 1.9365,
# },
!obj:pylearn2.models.mlp.Softmax {
max_col_norm: 1.9365,
layer_name: 'y',
n_classes: 10,
irange: .005
}
],
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: *input_shape,
num_channels: 1,
axes: ['c', 0, 1, 'b'],
},
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rate: .05,
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.9 },
monitoring_dataset:
{
'train': *train
},
cost: !obj:pylearn2.costs.mlp.dropout.Dropout {
input_include_probs: { 'h0' : .8 },
input_scales: { 'h0': 1. }
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 3
},
update_callbacks: !obj:pylearn2.training_algorithms.sgd.Exponenti\
alDecay {
decay_factor: 1.00004,
min_lr: .000001
}
},
extensions: [
!obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {
start: 1,
saturate: 250,
final_momentum: .7
}
]
}
"""
# Full-size CIFAR-10 configuration for the GPU convolutional test:
# trains on 50000 examples for 5 epochs and monitors valid/test
# channels, whose final values test_maxout_conv_c01b_cifar10 checks.
# Requires PYLEARN2_DATA_PATH to point at the CIFAR-10 data.
yaml_string_maxout_conv_c01b_cifar10 = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
which_set: 'train',
axes: ['c', 0, 1, 'b'],
start: 0,
stop: 50000
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 100,
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: [32, 32],
num_channels: 3,
axes: ['c', 0, 1, 'b'],
},
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'conv1',
pad: 0,
num_channels: 32,
num_pieces: 1,
kernel_shape: [5, 5],
pool_shape: [3, 3],
pool_stride: [2, 2],
irange: .01,
min_zero: True,
W_lr_scale: 1.,
b_lr_scale: 2.,
tied_b: True,
max_kernel_norm: 9.9,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 10,
istdev: .01,
W_lr_scale: 1.,
b_lr_scale: 2.,
max_col_norm: 9.9365
}
],
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.9 },
batch_size: 100,
learning_rate: .01,
monitoring_dataset:
{
'valid' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'train',
start: 40000,
stop: 50000
},
'test' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'test',
}
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 5
}
}
}
"""
# Reduced CIFAR-10 configuration used when running under DEBUG_MODE
# (100 training examples, smaller conv layer) so the GPU test finishes
# in reasonable time.
#
# Fix: the backslash line-continuation for the learning_rule class was
# broken — the line after "...learning_rule.Mo\" read
# "momentum: { ... }", which joins to the bogus tag
# "...learning_rule.Momomentum:" instead of the class
# "...learning_rule.Momentum { ... }" used by every sibling config.
yaml_string_maxout_conv_c01b_cifar10_fast = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
which_set: 'train',
axes: ['c', 0, 1, 'b'],
start: 0,
stop: 100
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 100,
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: [32, 32],
num_channels: 3,
axes: ['c', 0, 1, 'b'],
},
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'conv1',
pad: 0,
num_channels: 16,
num_pieces: 1,
kernel_shape: [5, 5],
pool_shape: [3, 3],
pool_stride: [2, 2],
irange: .01,
min_zero: False,
W_lr_scale: 1.,
b_lr_scale: 2.,
tied_b: True,
max_kernel_norm: 9.9,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 10,
istdev: .03,
W_lr_scale: 1.,
b_lr_scale: 2.,
max_col_norm: 8.5
}
],
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.9 },
batch_size: 100,
learning_rate: .01,
monitoring_dataset:
{
'valid' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'train',
start: 40000,
stop: 40100
},
'test' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'test',
}
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 5
}
}
}
"""
class TestMaxout(unittest.TestCase):
    """
    GPU-dependent tests for MaxoutConvC01B. Each test asserts CUDA is
    disabled on entry and exit, explicitly enabling it only inside a
    try/finally, so the tests run even when device=gpu was not set.
    """

    def test_maxout_conv_c01b_basic_err(self):
        """
        Without CUDA enabled, loading the MaxoutConvC01B YAML must fail
        with RuntimeError.
        """
        assert cuda.cuda_enabled is False
        self.assertRaises(RuntimeError,
                          yaml_parse.load,
                          yaml_string_maxout_conv_c01b_basic)

    def test_maxout_conv_c01b_basic(self):
        """
        Smoke test: train a small MaxoutConvC01B model on GPU for a few
        epochs. Skipped when CUDA is unavailable.
        """
        if cuda.cuda_available is False:
            raise SkipTest('Optional package cuda disabled')
        if not hasattr(cuda, 'unuse'):
            raise Exception("Theano version too old to run this test!")
        # Tests that we can run a small convolutional model on GPU,
        assert cuda.cuda_enabled is False
        # Even if there is a GPU, but the user didn't specify device=gpu
        # we want to run this test.
        try:
            old_floatX = config.floatX
            cuda.use('gpu')
            # cuda-convnet requires float32.
            config.floatX = 'float32'
            train = yaml_parse.load(yaml_string_maxout_conv_c01b_basic)
            train.main_loop()
        finally:
            # Restore the CPU configuration regardless of outcome.
            config.floatX = old_floatX
            cuda.unuse()
        assert cuda.cuda_enabled is False

    def test_maxout_conv_c01b_cifar10(self):
        """
        Train MaxoutConvC01B on CIFAR-10 on GPU and check final
        misclassification and NLL against recorded reference values
        (checks are skipped in DEBUG_MODE, which uses a reduced config).
        Skipped when CUDA or the CIFAR-10 data path is unavailable.
        """
        if cuda.cuda_available is False:
            raise SkipTest('Optional package cuda disabled')
        if not hasattr(cuda, 'unuse'):
            raise Exception("Theano version too old to run this test!")
        # Tests that we can run a small convolutional model on GPU,
        assert cuda.cuda_enabled is False
        # Even if there is a GPU, but the user didn't specify device=gpu
        # we want to run this test.
        try:
            old_floatX = config.floatX
            cuda.use('gpu')
            config.floatX = 'float32'
            try:
                # DEBUG_MODE is far slower, so use the reduced config there.
                if config.mode in ['DEBUG_MODE', 'DebugMode']:
                    train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10_fast)
                else:
                    train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10)
            except NoDataPathError:
                raise SkipTest("PYLEARN2_DATA_PATH environment variable "
                               "not defined")
            train.main_loop()
            # Check that the performance is close to the expected one:
            # test_y_misclass: 0.3777000308036804
            misclass_chan = train.algorithm.monitor.channels['test_y_misclass']
            if not config.mode in ['DEBUG_MODE', 'DebugMode']:
                assert misclass_chan.val_record[-1] < 0.38, \
                    ("misclass_chan.val_record[-1] = %g" %
                     misclass_chan.val_record[-1])
            # test_y_nll: 1.0978516340255737
            nll_chan = train.algorithm.monitor.channels['test_y_nll']
            if not config.mode in ['DEBUG_MODE', 'DebugMode']:
                assert nll_chan.val_record[-1] < 1.1
        finally:
            config.floatX = old_floatX
            cuda.unuse()
        assert cuda.cuda_enabled is False
if __name__ == '__main__':
    # When run as a script, execute only the GPU smoke test directly.
    # (The TestCase is constructed with 'setUp' as the method name; the
    # target test is then invoked by hand.) The full unittest runner is
    # deliberately disabled via `if 0`.
    t = TestMaxout('setUp')
    t.setUp()
    t.test_maxout_conv_c01b_basic()
    if 0:
        unittest.main()
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.docker;
import org.elasticsearch.gradle.Architecture;
import org.elasticsearch.gradle.LoggedExec;
import org.gradle.api.DefaultTask;
import org.gradle.api.GradleException;
import org.gradle.api.file.DirectoryProperty;
import org.gradle.api.file.ProjectLayout;
import org.gradle.api.file.RegularFileProperty;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.model.ObjectFactory;
import org.gradle.api.provider.ListProperty;
import org.gradle.api.provider.MapProperty;
import org.gradle.api.provider.Property;
import org.gradle.api.provider.SetProperty;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputDirectory;
import org.gradle.api.tasks.Optional;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.PathSensitive;
import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.TaskAction;
import org.gradle.process.ExecOperations;
import org.gradle.process.ExecSpec;
import org.gradle.workers.WorkAction;
import org.gradle.workers.WorkParameters;
import org.gradle.workers.WorkerExecutor;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import javax.inject.Inject;
/**
* This task wraps up the details of building a Docker image, including adding a pull
* mechanism that can retry, and emitting the image SHA as a task output.
*/
public abstract class DockerBuildTask extends DefaultTask {
private static final Logger LOGGER = Logging.getLogger(DockerBuildTask.class);
private final WorkerExecutor workerExecutor;
private final RegularFileProperty markerFile;
private final DirectoryProperty dockerContext;
private String[] tags;
private boolean pull = true;
private boolean noCache = true;
private String[] baseImages;
private MapProperty<String, String> buildArgs;
@Inject
public DockerBuildTask(WorkerExecutor workerExecutor, ObjectFactory objectFactory, ProjectLayout projectLayout) {
this.workerExecutor = workerExecutor;
this.markerFile = objectFactory.fileProperty();
this.dockerContext = objectFactory.directoryProperty();
this.buildArgs = objectFactory.mapProperty(String.class, String.class);
this.markerFile.set(projectLayout.getBuildDirectory().file("markers/" + this.getName() + ".marker"));
}
@TaskAction
public void build() {
workerExecutor.noIsolation().submit(DockerBuildAction.class, params -> {
params.getDockerContext().set(dockerContext);
params.getMarkerFile().set(markerFile);
params.getTags().set(Arrays.asList(tags));
params.getPull().set(pull);
params.getNoCache().set(noCache);
params.getPush().set(getPush().getOrElse(false));
params.getBaseImages().set(Arrays.asList(baseImages));
params.getBuildArgs().set(buildArgs);
params.getPlatforms().set(getPlatforms());
});
}
@InputDirectory
@PathSensitive(PathSensitivity.RELATIVE)
public DirectoryProperty getDockerContext() {
return dockerContext;
}
@Input
public String[] getTags() {
return tags;
}
public void setTags(String[] tags) {
this.tags = tags;
}
@Input
public boolean isPull() {
return pull;
}
public void setPull(boolean pull) {
this.pull = pull;
}
@Input
public boolean isNoCache() {
return noCache;
}
public void setNoCache(boolean noCache) {
this.noCache = noCache;
}
@Input
public String[] getBaseImages() {
return baseImages;
}
public void setBaseImages(String[] baseImages) {
this.baseImages = baseImages;
}
@Input
public MapProperty<String, String> getBuildArgs() {
return buildArgs;
}
@Input
public abstract SetProperty<String> getPlatforms();
public void setPlatform(String platform) {
getPlatforms().set(Arrays.asList(platform));
}
@Input
@Optional
public abstract Property<Boolean> getPush();
@OutputFile
public RegularFileProperty getMarkerFile() {
return markerFile;
}
public abstract static class DockerBuildAction implements WorkAction<Parameters> {
private final ExecOperations execOperations;
@Inject
public DockerBuildAction(ExecOperations execOperations) {
this.execOperations = execOperations;
}
/**
* Wraps `docker pull` in a retry loop, to try and provide some resilience against
* transient errors
* @param baseImage the image to pull.
*/
private void pullBaseImage(String baseImage) {
final int maxAttempts = 10;
for (int attempt = 1; attempt <= maxAttempts; attempt++) {
try {
LoggedExec.exec(execOperations, spec -> {
maybeConfigureDockerConfig(spec);
spec.executable("docker");
spec.args("pull");
spec.environment("DOCKER_BUILDKIT", "1");
spec.args(baseImage);
});
return;
} catch (Exception e) {
LOGGER.warn("Attempt {}/{} to pull Docker base image {} failed", attempt, maxAttempts, baseImage);
}
}
// If we successfully ran `docker pull` above, we would have returned before this point.
throw new GradleException("Failed to pull Docker base image [" + baseImage + "], all attempts failed");
}
private void maybeConfigureDockerConfig(ExecSpec spec) {
String dockerConfig = System.getenv("DOCKER_CONFIG");
if (dockerConfig != null) {
spec.environment("DOCKER_CONFIG", dockerConfig);
}
}
@Override
public void execute() {
final Parameters parameters = getParameters();
if (parameters.getPull().get()) {
parameters.getBaseImages().get().forEach(this::pullBaseImage);
}
final List<String> tags = parameters.getTags().get();
final boolean isCrossPlatform = isCrossPlatform();
LoggedExec.exec(execOperations, spec -> {
maybeConfigureDockerConfig(spec);
spec.executable("docker");
spec.environment("DOCKER_BUILDKIT", "1");
if (isCrossPlatform) {
spec.args("buildx");
}
spec.args("build", parameters.getDockerContext().get().getAsFile().getAbsolutePath());
if (isCrossPlatform) {
spec.args("--platform", parameters.getPlatforms().get().stream().collect(Collectors.joining(",")));
}
if (parameters.getNoCache().get()) {
spec.args("--no-cache");
}
tags.forEach(tag -> spec.args("--tag", tag));
parameters.getBuildArgs().get().forEach((k, v) -> spec.args("--build-arg", k + "=" + v));
if (parameters.getPush().getOrElse(false)) {
spec.args("--push");
} else if (!isCrossPlatform) {
// For single-platform builds, add --load to ensure the image is loaded into
// the local Docker daemon as a regular image, not a manifest list.
// This prevents issues with newer Docker versions (23.0+) that may create
// manifest lists even for single-platform builds when BuildKit is enabled.
spec.args("--load");
}
});
// Fetch the Docker image's hash, and write it to desk as the task's output. Doing this allows us
// to do proper up-to-date checks in Gradle.
try {
// multi-platform image builds do not end up in local registry, so we need to pull the just build image
// first to get the checksum and also serves as a test for the image being pushed correctly
if (parameters.getPlatforms().get().size() > 1 && parameters.getPush().getOrElse(false)) {
pullBaseImage(tags.get(0));
}
final String checksum = getImageChecksum(tags.get(0));
Files.writeString(parameters.getMarkerFile().getAsFile().get().toPath(), checksum + "\n");
} catch (IOException e) {
throw new RuntimeException("Failed to write marker file", e);
}
}
private boolean isCrossPlatform() {
return getParameters().getPlatforms()
.get()
.stream()
.anyMatch(any -> any.equals(Architecture.current().dockerPlatform) == false);
}
private String getImageChecksum(String imageTag) {
final ByteArrayOutputStream stdout = new ByteArrayOutputStream();
execOperations.exec(spec -> {
spec.setCommandLine("docker", "inspect", "--format", "{{ .Id }}", imageTag);
spec.setStandardOutput(stdout);
spec.setIgnoreExitValue(false);
});
return stdout.toString().trim();
}
}
interface Parameters extends WorkParameters {
DirectoryProperty getDockerContext();
RegularFileProperty getMarkerFile();
ListProperty<String> getTags();
Property<Boolean> getPull();
Property<Boolean> getNoCache();
ListProperty<String> getBaseImages();
MapProperty<String, String> getBuildArgs();
SetProperty<String> getPlatforms();
Property<Boolean> getPush();
}
} | java | github | https://github.com/elastic/elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerBuildTask.java |
#!/usr/bin/python
import unittest
import json
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
from nysa.cbuilder import sdb_component
# Canned SDB component description in the "KEY:VALUE" text format consumed by
# SDBComponent.parse_buffer() (see setUp below).  The values here are exactly
# what test_parse_buffer asserts, so do not edit one without the other.
SDB_DATA = \
" Set the Vendor ID (Hexidecimal 64-bit Number)\n" \
" SDB_VENDOR_ID:800000000000C594\n" \
"\n" \
" Set the Product ID\n" \
" SDB_DEVICE_ID:0001\n" \
"\n" \
" Set the Version of the core\n" \
" SDB_CORE_VERSION:00.000.001\n" \
"\n" \
" Set the name of the core\n" \
" SDB_NAME:sdb_module\n" \
"\n" \
" Set ABI Class\n" \
" SDB_ABI_CLASS:0000\n" \
" Undocumented Device\n" \
"\n" \
" Set API Version Major\n" \
" SDB_ABI_VERSION_MAJOR:01\n" \
"\n" \
" Set ABI Version Minor\n" \
" SDB_ABI_VERSION_MINOR:00\n" \
"\n" \
" Set Endian BIG, LITTLE\n" \
" SDB_ABI_ENDIAN:BIG\n" \
"\n" \
" Set Device Width (8, 16, 32, 64)\n" \
" SDB_ABI_DEVICE_WIDTH:32\n" \
"\n" \
" Set the Modules URL\n" \
" SDB_MODULE_URL:http://www.example.com\n" \
"\n" \
" Date\n" \
" SDB_DATE:2015/01/05\n" \
"\n" \
" Device is executable\n" \
" SDB_EXECUTABLE:True\n" \
"\n" \
" Device is writeable\n" \
" SDB_WRITEABLE:True\n" \
"\n" \
" Device is readable\n" \
" SDB_READABLE:True\n" \
"\n"
# Expected interconnect-record ROM image: 16 newline-separated 32-bit words in
# hex (first word 5344422D == ASCII "SDB-", the SDB magic number).
TEST_INTERCONNECT_ROM = ""\
"5344422D\n" \
"00100100\n" \
"00000000\n" \
"01000000\n" \
"00000000\n" \
"01000005\n" \
"80000000\n" \
"0000C594\n" \
"00000001\n" \
"00000001\n" \
"140F0105\n" \
"7364625F\n" \
"6D6F6475\n" \
"6C650000\n" \
"00000000\n" \
"00000000"
# Expected bridge-record ROM image (last word 2 marks the record type).
TEST_BRIDGE_ROM = ""\
"10000000\n" \
"00000000\n" \
"00000000\n" \
"01000000\n" \
"00000000\n" \
"01000005\n" \
"80000000\n" \
"0000C594\n" \
"00000001\n" \
"00000001\n" \
"140F0105\n" \
"7364625F\n" \
"6D6F6475\n" \
"6C650000\n" \
"00000000\n" \
"00000002"
# Expected device-record ROM image (last word 1 marks the record type).
TEST_DEVICE_ROM = ""\
"00000100\n" \
"00000207\n" \
"00000000\n" \
"01000000\n" \
"00000000\n" \
"01000005\n" \
"80000000\n" \
"0000C594\n" \
"00000001\n" \
"00000001\n" \
"140F0105\n" \
"7364625F\n" \
"6D6F6475\n" \
"6C650000\n" \
"00000000\n" \
"00000001"
class Test (unittest.TestCase):
    """Unit test for SDB Component: buffer parsing and record factories."""

    def setUp(self):
        self.dbg = False
        # Component under test, populated from the canned SDB_DATA text plus
        # explicit address/size/record overrides.
        self.sdbc = sdb_component.SDBComponent()
        self.sdbc.parse_buffer(SDB_DATA)
        self.sdbc.set_start_address(0x01000000)
        self.sdbc.set_size(5)
        self.sdbc.set_number_of_records(10)
        self.sdbc.set_bridge_child_addr(0x1000000000000000)

    def test_parse_buffer(self):
        """Every key from SDB_DATA and every setUp override round-trips."""
        od = self.sdbc.generated_ordered_dict()
        self.assertEqual(od["SDB_VENDOR_ID"], "800000000000C594")
        self.assertEqual(od["SDB_DEVICE_ID"], "0001")
        self.assertEqual(od["SDB_CORE_VERSION"], "00.000.001")
        self.assertEqual(od["SDB_NAME"], "sdb_module")
        self.assertEqual(od["SDB_ABI_CLASS"], "0000")
        self.assertEqual(od["SDB_ABI_VERSION_MAJOR"], "01")
        self.assertEqual(od["SDB_ABI_VERSION_MINOR"], "00")
        self.assertEqual(od["SDB_ABI_ENDIAN"], "BIG")
        self.assertEqual(od["SDB_ABI_DEVICE_WIDTH"], "32")
        self.assertEqual(od["SDB_MODULE_URL"], "http://www.example.com")
        self.assertEqual(od["SDB_DATE"], "2015/01/05")
        self.assertEqual(od["SDB_EXECUTABLE"], "True")
        self.assertEqual(od["SDB_WRITEABLE"], "True")
        self.assertEqual(od["SDB_READABLE"], "True")
        self.assertEqual(od["SDB_START_ADDRESS"], "0x1000000")
        self.assertEqual(od["SDB_LAST_ADDRESS"], "0x1000005L")
        self.assertEqual(od["SDB_NRECS"], "10")
        self.assertEqual(od["SDB_BRIDGE_CHILD_ADDR"], "0x1000000000000000")
        for e in od:
            # print "%s:%s" % (e, od[e])
            pass

    def test_create_device_record(self):
        """create_device_record yields a component that reports is_device()."""
        device = sdb_component.create_device_record(name = "test device",
                                                    vendor_id = 0x1000000000000000,
                                                    device_id = 0x00000000,
                                                    core_version = "1.0",
                                                    abi_class = 0,
                                                    version_major = 1,
                                                    version_minor = 0)
        self.assertTrue(device.is_device())

    def test_create_interconnect_record(self):
        """create_interconnect_record yields an is_interconnect() component."""
        interconnect = sdb_component.create_interconnect_record(
                                                    name = "peripherals",
                                                    vendor_id = 0x1000000000000000,
                                                    device_id = 0x00000000,
                                                    start_address = 0x01,
                                                    size = 10)
        self.assertTrue(interconnect.is_interconnect())

    def test_create_bridge_record(self):
        """create_bridge_record yields an is_bridge() component."""
        bridge = sdb_component.create_bridge_record(name = "bridge",
                                                    vendor_id = 0x1000000000000000,
                                                    device_id = 0x00000000,
                                                    start_address = 0x01,
                                                    size = 10)
        self.assertTrue(bridge.is_bridge())

    def test_create_integration_record(self):
        """create_integration_record yields an is_integration_record() component."""
        integration = sdb_component.create_integration_record(
                                                    information = "test integration",
                                                    vendor_id = 0x1000000000000000,
                                                    device_id = 0x00000000)
        self.assertTrue(integration.is_integration_record())

    def test_create_synthesis_record(self):
        """create_synthesis_record yields an is_synthesis_record() component."""
        synth_record = sdb_component.create_synthesis_record(
                                                    synthesis_name = "name of names",
                                                    commit_id = 0000,
                                                    tool_name = "xilinx",
                                                    tool_version = 14.1,
                                                    user_name = "Dave McCoy")
        self.assertTrue(synth_record.is_synthesis_record())

    def test_create_repo_url_record(self):
        """create_repo_url_record yields an is_url_record() component."""
        url = sdb_component.create_repo_url_record("wwww.cospandesign.com")
        self.assertTrue(url.is_url_record())
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from altair import api
from .. import api, spec
# All mark types permitted by the Vega-Lite schema bundled with the package.
VALID_MARKTYPES = spec.SPEC['properties']['marktype']['enum']


def test_empty_data():
    """A bare Data object defaults to the json format and has no source yet."""
    d = api.Data()
    assert d.formatType == 'json'
    assert 'formatType' in d
    assert 'url' not in d
    assert 'values' not in d


def test_dict_data():
    """Viz accepts a plain dict of columns and stores it as a DataFrame."""
    data = dict(x=[1, 2, 3],
                y=[4, 5, 6])
    spec = api.Viz(data)
    assert np.all(spec.data == pd.DataFrame(data))


def test_dataframe_data():
    """Viz stores a DataFrame argument unchanged."""
    datadict = dict(x=[1, 2, 3],
                    y=[4, 5, 6])
    data = pd.DataFrame(datadict)
    spec = api.Viz(data)
    assert np.all(spec.data == data)
def test_to_dict():
    """to_dict() serializes data values, encoding channels and the marktype."""
    data = pd.DataFrame({'x': [1, 2, 3],
                         'y': [4, 5, 6]})
    spec = api.Viz(data).encode(x='x', y='y')
    D = spec.to_dict()
    assert D == {'data': {'formatType': 'json',
                          'values': [{'x': 1, 'y': 4},
                                     {'x': 2, 'y': 5},
                                     {'x': 3, 'y': 6}]},
                 'encoding': {'x': {'bin': False, 'name': 'x', 'type': 'Q'},
                              'y': {'bin': False, 'name': 'y', 'type': 'Q'}},
                 'marktype': 'point'}


def test_markers():
    """Both mark(name) and the name() shortcut methods set the marktype."""
    data = dict(x=[1, 2, 3],
                y=[4, 5, 6])
    spec = api.Viz(data)

    # call, e.g. spec.mark('point')
    for marktype in VALID_MARKTYPES:
        spec.mark(marktype)
        assert spec.marktype == marktype

    # call, e.g. spec.point()
    for marktype in VALID_MARKTYPES:
        method = marktype
        getattr(spec, method)()
        assert spec.marktype == marktype
def test_encode():
    """Each encoding channel records the column name passed as shorthand."""
    data = dict(col1=[1.0, 2.0, 3.0],
                col2=[0.1, 0.2, 0.3],
                col3=['A', 'B', 'C'],
                col4=[True, False, True],
                col5=[0.1, 0.2, 0.3],
                col6=pd.date_range('2012', periods=3, freq='A'),
                col7=np.arange(3))
    kwargs = dict(x='col1', y='col2', row='col3', col='col4',
                  size='col5', color='col6', shape='col7')
    spec = api.Viz(data).encode(**kwargs)
    for key, name in kwargs.items():
        assert getattr(spec.encoding, key).name == name


def test_encode_aggregates():
    """The 'agg(column)' shorthand splits into aggregate and column name."""
    data = dict(col1=[1.0, 2.0, 3.0],
                col2=[0.1, 0.2, 0.3],
                col3=['A', 'B', 'C'],
                col4=[True, False, True],
                col5=[0.1, 0.2, 0.3],
                col6=pd.date_range('2012', periods=3, freq='A'),
                col7=np.arange(3))
    kwargs = dict(x=('count', 'col1'), y=('count', 'col2'),
                  row=('count', 'col3'), col=('count', 'col4'),
                  size=('avg', 'col5'), color=('max', 'col6'),
                  shape=('count', 'col7'))
    spec = api.Viz(data).encode(**{key: "{0}({1})".format(*val)
                                   for key, val in kwargs.items()})
    for key, val in kwargs.items():
        agg, name = val
        assert getattr(spec.encoding, key).name == name
        assert getattr(spec.encoding, key).aggregate == agg
def test_encode_types():
    """The 'column:T' shorthand splits into column name and explicit type."""
    data = dict(col1=[1.0, 2.0, 3.0],
                col2=[0.1, 0.2, 0.3],
                col3=['A', 'B', 'C'],
                col4=[True, False, True],
                col5=[0.1, 0.2, 0.3],
                col6=pd.date_range('2012', periods=3, freq='A'),
                col7=np.arange(3))
    kwargs = dict(x=('col1', 'Q'), y=('col2', 'Q'),
                  row=('col3', 'O'), col=('col4', 'N'),
                  size=('col5', 'Q'), color=('col6', 'T'),
                  shape=('col7', 'O'))
    spec = api.Viz(data).encode(**{key: "{0}:{1}".format(*val)
                                   for key, val in kwargs.items()})
    for key, val in kwargs.items():
        name, typ = val
        assert getattr(spec.encoding, key).name == name
        assert getattr(spec.encoding, key).type == typ


def test_infer_types():
    """Without an explicit type, the channel type is inferred from the dtype."""
    data = dict(col1=[1.0, 2.0, 3.0],
                col2=[0.1, 0.2, 0.3],
                col3=['A', 'B', 'C'],
                col4=[True, False, True],
                col5=[0.1, 0.2, 0.3],
                col6=pd.date_range('2012', periods=3, freq='A'),
                col7=np.arange(3))
    kwargs = dict(x=('col1', 'Q'), y=('col2', 'Q'),
                  row=('col3', 'N'), col=('col4', 'N'),
                  size=('col5', 'Q'), color=('col6', 'T'),
                  shape=('col7', 'Q'))
    spec = api.Viz(data).encode(**{key: val[0]
                                   for key, val in kwargs.items()})
    for key, val in kwargs.items():
        name, typ = val
        assert getattr(spec.encoding, key).name == name
        assert getattr(spec.encoding, key).type == typ
def test_hist():
    """hist() builds a binned bar spec; bins, type and color options propagate."""
    data = dict(x=[1, 2, 3],
                y=[4, 5, 6])

    # Defaults: x binned with 10 maxbins, y is a count aggregate of x.
    viz1 = api.Viz(data).hist(x='x')
    assert viz1.encoding.x.name == "x"
    assert viz1.encoding.x.bin.maxbins == 10
    assert viz1.encoding.y.name == "x"
    assert viz1.encoding.y.type == "Q"
    assert viz1.encoding.y.aggregate == "count"

    viz2 = api.Viz(data).hist(x="x", bins=30)
    assert viz2.encoding.x.bin.maxbins == 30
    # NOTE(review): this `expected` dict is built but never compared against
    # viz2.to_dict() — confirm whether an assertion was dropped here.
    expected = {'data': {'formatType': 'json',
                         'values': [{'x': 1, 'y': 4}, {'x': 2, 'y': 5},
                                    {'x': 3, 'y': 6}]},
                'encoding': {'x': {'bin': {'maxbins': 30}, 'name': 'x'},
                             'y': {'aggregate': 'count',
                                   'bin': False,
                                   'name': 'x',
                                   'type': 'Q'}},
                'marktype': 'bar'}

    viz3 = api.Viz(data).hist(x="x:O",
                              color=api.Color(shorthand="bar", type="N")
                              )
    assert viz3.encoding.x.name == "x"
    assert viz3.encoding.x.type == "O"
    expected = {'data': {'formatType': 'json',
                         'values': [{'x': 1, 'y': 4}, {'x': 2, 'y': 5},
                                    {'x': 3, 'y': 6}]},
                'encoding': {'x': {'bin': {'maxbins': 10},
                                   'name': 'x', 'type': 'O'},
                             'y': {'aggregate': 'count',
                                   'bin': False,
                                   'name': 'x',
                                   'type': 'Q'},
                             'color': {'bin': False,
                                       'name': 'bar',
                                       'opacity': 1.0,
                                       'type': 'N',
                                       'value': '#4682b4'}},
                'marktype': 'bar'}
    assert viz3.to_dict() == expected

    viz4 = api.Viz(data).hist(
        x=api.X(shorthand="x", bin=api.Bin(maxbins=40)))
    assert viz4.encoding.x.name == "x"
    assert viz4.encoding.x.bin.maxbins == 40
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSparkSQLTool,
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "BaseSparkSQLTool": "langchain_community.tools",
    "QuerySparkSQLTool": "langchain_community.tools",
    "InfoSparkSQLTool": "langchain_community.tools",
    "ListSparkSQLTool": "langchain_community.tools",
    "QueryCheckerTool": "langchain_community.tools",
}

# Importer that emits a deprecation warning and forwards to the new location.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "BaseSparkSQLTool",
    "InfoSparkSQLTool",
    "ListSparkSQLTool",
    "QueryCheckerTool",
    "QuerySparkSQLTool",
]
# stdlib
from collections import defaultdict, namedtuple
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
class NodeNotFound(Exception):
    """Raised when a requested Elasticsearch node cannot be located."""
    pass
# Immutable bundle of per-instance settings derived from one YAML instance.
ESInstanceConfig = namedtuple(
    'ESInstanceConfig',
    ['pshard_stats', 'cluster_stats', 'password', 'service_check_tags',
     'tags', 'timeout', 'url', 'username'],
)
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
# Clusterwise metrics, pre aggregated on ES, compatible with all ES versions
PRIMARY_SHARD_METRICS = {
"elasticsearch.primaries.docs.count": ("gauge", "_all.primaries.docs.count"),
"elasticsearch.primaries.docs.deleted": ("gauge", "_all.primaries.docs.deleted"),
"elasticsearch.primaries.store.size": ("gauge", "_all.primaries.store.size_in_bytes"),
"elasticsearch.primaries.indexing.index.total": ("gauge", "_all.primaries.indexing.index_total"),
"elasticsearch.primaries.indexing.index.time": ("gauge", "_all.primaries.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.index.current": ("gauge", "_all.primaries.indexing.index_current"),
"elasticsearch.primaries.indexing.delete.total": ("gauge", "_all.primaries.indexing.delete_total"),
"elasticsearch.primaries.indexing.delete.time": ("gauge", "_all.primaries.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.delete.current": ("gauge", "_all.primaries.indexing.delete_current"),
"elasticsearch.primaries.get.total": ("gauge", "_all.primaries.get.total"),
"elasticsearch.primaries.get.time": ("gauge", "_all.primaries.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.current": ("gauge", "_all.primaries.get.current"),
"elasticsearch.primaries.get.exists.total": ("gauge", "_all.primaries.get.exists_total"),
"elasticsearch.primaries.get.exists.time": ("gauge", "_all.primaries.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.missing.total": ("gauge", "_all.primaries.get.missing_total"),
"elasticsearch.primaries.get.missing.time": ("gauge", "_all.primaries.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.total": ("gauge", "_all.primaries.search.query_total"),
"elasticsearch.primaries.search.query.time": ("gauge", "_all.primaries.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.current": ("gauge", "_all.primaries.search.query_current"),
"elasticsearch.primaries.search.fetch.total": ("gauge", "_all.primaries.search.fetch_total"),
"elasticsearch.primaries.search.fetch.time": ("gauge", "_all.primaries.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.fetch.current": ("gauge", "_all.primaries.search.fetch_current")
}
PRIMARY_SHARD_METRICS_POST_1_0 = {
"elasticsearch.primaries.merges.current": ("gauge", "_all.primaries.merges.current"),
"elasticsearch.primaries.merges.current.docs": ("gauge", "_all.primaries.merges.current_docs"),
"elasticsearch.primaries.merges.current.size": ("gauge", "_all.primaries.merges.current_size_in_bytes"),
"elasticsearch.primaries.merges.total": ("gauge", "_all.primaries.merges.total"),
"elasticsearch.primaries.merges.total.time": ("gauge", "_all.primaries.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.merges.total.docs": ("gauge", "_all.primaries.merges.total_docs"),
"elasticsearch.primaries.merges.total.size": ("gauge", "_all.primaries.merges.total_size_in_bytes"),
"elasticsearch.primaries.refresh.total": ("gauge", "_all.primaries.refresh.total"),
"elasticsearch.primaries.refresh.total.time": ("gauge", "_all.primaries.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.flush.total": ("gauge", "_all.primaries.flush.total"),
"elasticsearch.primaries.flush.total.time": ("gauge", "_all.primaries.flush.total_time_in_millis", lambda v: float(v)/1000)
}
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.indices.segments.count": ("gauge", "indices.segments.count"),
"elasticsearch.indices.segments.memory_in_bytes": ("gauge", "indices.segments.memory_in_bytes"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.heap_in_use": ("gauge", "jvm.mem.heap_used_percent"),
"jvm.mem.heap_max": ("gauge", "jvm.mem.heap_max_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
"elasticsearch.fs.total.total_in_bytes": ("gauge", "fs.total.total_in_bytes"),
"elasticsearch.fs.total.free_in_bytes": ("gauge", "fs.total.free_in_bytes"),
"elasticsearch.fs.total.available_in_bytes": ("gauge", "fs.total.available_in_bytes"),
}
JVM_METRICS_POST_0_90_10 = {
"jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
"jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
"jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
JVM_METRICS_PRE_0_90_10 = {
"jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
"jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
"jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_0_90_5 = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
"elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions"),
}
ADDITIONAL_METRICS_PRE_0_90_5 = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_0_0 = {
"elasticsearch.indices.translog.size_in_bytes": ("gauge", "indices.translog.size_in_bytes"),
"elasticsearch.indices.translog.operations": ("gauge", "indices.translog.operations"),
"elasticsearch.fs.total.disk_reads": ("rate", "fs.total.disk_reads"),
"elasticsearch.fs.total.disk_writes": ("rate", "fs.total.disk_writes"),
"elasticsearch.fs.total.disk_io_op": ("rate", "fs.total.disk_io_op"),
"elasticsearch.fs.total.disk_read_size_in_bytes": ("gauge", "fs.total.disk_read_size_in_bytes"),
"elasticsearch.fs.total.disk_write_size_in_bytes": ("gauge", "fs.total.disk_write_size_in_bytes"),
"elasticsearch.fs.total.disk_io_size_in_bytes": ("gauge", "fs.total.disk_io_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_3_0 = {
"elasticsearch.indices.segments.index_writer_memory_in_bytes": ("gauge", "indices.segments.index_writer_memory_in_bytes"),
"elasticsearch.indices.segments.version_map_memory_in_bytes": ("gauge", "indices.segments.version_map_memory_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_4_0 = {
"elasticsearch.indices.segments.index_writer_max_memory_in_bytes": ("gauge", "indices.segments.index_writer_max_memory_in_bytes"),
"elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes": ("gauge", "indices.segments.fixed_bit_set_memory_in_bytes"),
}
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
"elasticsearch.active_shards": ("gauge", "active_shards"),
"elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
"elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
"elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
"elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red": 0, "yellow": 1, "green": 2}.get(v, -1)),
}
CLUSTER_PENDING_TASKS = {
"elasticsearch.pending_tasks_total": ("gauge", "pending_task_total"),
"elasticsearch.pending_tasks_priority_high": ("gauge", "pending_tasks_priority_high"),
"elasticsearch.pending_tasks_priority_urgent": ("gauge", "pending_tasks_priority_urgent")
}
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, agentConfig, instances=None):
    """Initialize the check and the per-URL cluster status cache."""
    AgentCheck.__init__(self, name, init_config, agentConfig, instances)

    # Host status needs to persist across all checks: maps instance URL ->
    # last observed cluster status so we only emit events on transitions.
    self.cluster_status = {}
def get_instance_config(self, instance):
    """Build an ESInstanceConfig from one `instance` entry of the config.

    Raises if no `url` is given. `pshard_stats` and `cluster_stats` are
    parsed as booleans; the legacy `is_external` flag, when present,
    overrides `cluster_stats` for backwards-compatibility.
    """
    url = instance.get('url')

    if url is None:
        raise Exception("An url must be specified in the instance")

    pshard_stats = _is_affirmative(instance.get('pshard_stats', False))

    cluster_stats = _is_affirmative(instance.get('cluster_stats', False))
    if 'is_external' in instance:
        cluster_stats = _is_affirmative(instance.get('is_external', False))

    # Support URLs that have a path in them from the config, for
    # backwards-compatibility.
    parsed = urlparse.urlparse(url)
    if parsed[2] != "":
        # Strip the path: keep only scheme://netloc.
        url = "%s://%s" % (parsed[0], parsed[1])
    port = parsed.port
    host = parsed.hostname

    custom_tags = instance.get('tags', [])
    service_check_tags = [
        'host:%s' % host,
        'port:%s' % port
    ]
    service_check_tags.extend(custom_tags)

    # Tag by URL so we can differentiate the metrics
    # from multiple instances
    tags = ['url:%s' % url]
    tags.extend(custom_tags)

    timeout = instance.get('timeout') or self.DEFAULT_TIMEOUT

    config = ESInstanceConfig(
        pshard_stats=pshard_stats,
        cluster_stats=cluster_stats,
        password=instance.get('password'),
        service_check_tags=service_check_tags,
        tags=tags,
        timeout=timeout,
        url=url,
        username=instance.get('username')
    )
    return config
def check(self, instance):
    """Run one collection cycle for a single configured instance.

    Fetches (optionally) primary-shard stats, node stats, cluster health
    and pending tasks, submits the corresponding metrics/events, and
    finally reports the connectivity service check as OK.
    """
    config = self.get_instance_config(instance)

    # Check ES version for this instance and define parameters
    # (URLs and metrics) accordingly
    version = self._get_es_version(config)

    health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, stats_metrics, \
        pshard_stats_metrics = self._define_params(version, config.cluster_stats)

    # Load clusterwise data
    if config.pshard_stats:
        pshard_stats_url = urlparse.urljoin(config.url, pshard_stats_url)
        pshard_stats_data = self._get_data(pshard_stats_url, config)
        self._process_pshard_stats_data(pshard_stats_data, config, pshard_stats_metrics)

    # Load stats data.
    stats_url = urlparse.urljoin(config.url, stats_url)
    stats_data = self._get_data(stats_url, config)
    self._process_stats_data(nodes_url, stats_data, stats_metrics, config)

    # Load the health data.
    health_url = urlparse.urljoin(config.url, health_url)
    health_data = self._get_data(health_url, config)
    self._process_health_data(health_data, config)

    # Load the pending_tasks data. Elasticsearch < 0.90.10 has no
    # pending-tasks API and _define_params returns None for the URL;
    # previously urljoin(base, None) silently fell back to the cluster
    # root and zero pending-task metrics were reported. Skip instead.
    if pending_tasks_url is not None:
        pending_tasks_url = urlparse.urljoin(config.url, pending_tasks_url)
        pending_tasks_data = self._get_data(pending_tasks_url, config)
        self._process_pending_tasks_data(pending_tasks_data, config)

    # If we're here we did not have any ES conn issues
    self.service_check(
        self.SERVICE_CHECK_CONNECT_NAME,
        AgentCheck.OK,
        tags=config.service_check_tags
    )
def _get_es_version(self, config):
    """ Get the running version of elasticsearch.

    Returns [major, minor, patch] as a list of ints so it compares
    element-wise against version thresholds in _define_params. Falls
    back to [1, 0, 0] when the cluster root endpoint cannot be read.
    """
    try:
        data = self._get_data(config.url, config, send_sc=False)
        # Python 2: map() returns a list here.
        version = map(int, data['version']['number'].split('.')[0:3])
    except Exception, e:
        self.warning(
            "Error while trying to get Elasticsearch version "
            "from %s %s"
            % (config.url, str(e))
        )
        version = [1, 0, 0]

    self.service_metadata('version', version)
    self.log.debug("Elasticsearch version is %s" % version)
    return version
def _define_params(self, version, cluster_stats):
    """ Define the set of URLs and METRICS to use depending on the
    running ES version.

    `version` is the [major, minor, patch] list from _get_es_version.
    `cluster_stats` selects stats collection from all nodes instead of
    only the local one. Returns (health_url, nodes_url, stats_url,
    pshard_stats_url, pending_tasks_url, stats_metrics,
    pshard_stats_metrics); pending_tasks_url is None on ES < 0.90.10.
    """
    pshard_stats_url = "/_stats"

    if version >= [0, 90, 10]:
        # ES versions 0.90.10 and above
        health_url = "/_cluster/health?pretty=true"
        nodes_url = "/_nodes?network=true"
        pending_tasks_url = "/_cluster/pending_tasks?pretty=true"

        # For "external" clusters, we want to collect from all nodes.
        if cluster_stats:
            stats_url = "/_nodes/stats?all=true"
        else:
            stats_url = "/_nodes/_local/stats?all=true"

        additional_metrics = self.JVM_METRICS_POST_0_90_10
    else:
        # Pre-0.90.10 endpoints live under /_cluster/nodes and there is
        # no pending-tasks API.
        health_url = "/_cluster/health?pretty=true"
        nodes_url = "/_cluster/nodes?network=true"
        pending_tasks_url = None
        if cluster_stats:
            stats_url = "/_cluster/nodes/stats?all=true"
        else:
            stats_url = "/_cluster/nodes/_local/stats?all=true"

        additional_metrics = self.JVM_METRICS_PRE_0_90_10

    stats_metrics = dict(self.STATS_METRICS)
    stats_metrics.update(additional_metrics)

    ### Additional Stats metrics ###
    if version >= [0, 90, 5]:
        # ES versions 0.90.5 and above
        additional_metrics = self.ADDITIONAL_METRICS_POST_0_90_5
    else:
        # ES version 0.90.4 and below
        additional_metrics = self.ADDITIONAL_METRICS_PRE_0_90_5

    stats_metrics.update(additional_metrics)

    # Version-gated metric sets accumulate: each threshold's metrics are
    # layered on top of the previous ones.
    if version >= [1, 0, 0]:
        stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_0_0)

    if version >= [1, 3, 0]:
        stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_3_0)

    if version >= [1, 4, 0]:
        # ES versions 1.4 and above
        stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_4_0)

    # Version specific stats metrics about the primary shards
    pshard_stats_metrics = dict(self.PRIMARY_SHARD_METRICS)

    if version >= [1, 0, 0]:
        additional_metrics = self.PRIMARY_SHARD_METRICS_POST_1_0
        pshard_stats_metrics.update(additional_metrics)

    return health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, \
        stats_metrics, pshard_stats_metrics
def _get_data(self, url, config, send_sc=True):
    """ Hit a given URL and return the parsed json

    Uses HTTP basic auth when both username and password are configured.
    On any request or HTTP-status error, emits a CRITICAL connectivity
    service check (unless send_sc is False) and re-raises.
    """
    # Load basic authentication configuration, if available.
    if config.username and config.password:
        auth = (config.username, config.password)
    else:
        auth = None

    try:
        resp = requests.get(
            url,
            timeout=config.timeout,
            headers=headers(self.agentConfig),
            auth=auth
        )
        resp.raise_for_status()
    except Exception as e:
        if send_sc:
            self.service_check(
                self.SERVICE_CHECK_CONNECT_NAME,
                AgentCheck.CRITICAL,
                message="Error {0} when hitting {1}".format(e, url),
                tags=config.service_check_tags
            )
        raise

    return resp.json()
def _process_pending_tasks_data(self, data, config):
    """Aggregate /_cluster/pending_tasks by priority and submit metrics."""
    # Count pending tasks per priority; unseen priorities default to 0.
    p_tasks = defaultdict(int)

    for task in data.get('tasks', []):
        p_tasks[task.get('priority')] += 1

    # These keys must match the paths declared in CLUSTER_PENDING_TASKS.
    node_data = {
        'pending_task_total': sum(p_tasks.values()),
        'pending_tasks_priority_high': p_tasks['high'],
        'pending_tasks_priority_urgent': p_tasks['urgent'],
    }

    for metric in self.CLUSTER_PENDING_TASKS:
        # metric description
        desc = self.CLUSTER_PENDING_TASKS[metric]
        self._process_metric(node_data, metric, *desc, tags=config.tags)
def _process_stats_data(self, nodes_url, data, stats_metrics, config):
    """Submit every metric in `stats_metrics` for each node in the
    nodes-stats payload."""
    cluster_stats = config.cluster_stats
    for node_name in data['nodes']:
        node_data = data['nodes'][node_name]
        # On newer version of ES it's "host" not "hostname"
        node_hostname = node_data.get(
            'hostname', node_data.get('host', None))
        # Override the metric hostname if we're hitting an external cluster
        metric_hostname = node_hostname if cluster_stats else None

        for metric, desc in stats_metrics.iteritems():
            self._process_metric(
                node_data, metric, *desc, tags=config.tags,
                hostname=metric_hostname)
def _process_pshard_stats_data(self, data, config, pshard_stats_metrics):
    """Submit primary-shard metrics read from the /_stats payload."""
    for metric, desc in pshard_stats_metrics.iteritems():
        self._process_metric(data, metric, *desc, tags=config.tags)
def _process_metric(self, data, metric, xtype, path, xform=None,
                    tags=None, hostname=None):
    """Extract one value from a nested stats payload and submit it.

    data: dictionary containing all the stats
    metric: datadog metric name
    xtype: "gauge" submits a gauge; any other value submits a rate
    path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
    xform: a lambda to apply to the numerical value before submission
    """
    value = data
    # Traverse the nested dictionaries; stop as soon as a key is missing.
    for key in path.split('.'):
        if value is None:
            break
        value = value.get(key, None)

    if value is not None:
        if xform:
            value = xform(value)
        if xtype == "gauge":
            self.gauge(metric, value, tags=tags, hostname=hostname)
        else:
            self.rate(metric, value, tags=tags, hostname=hostname)
    else:
        # Missing paths are expected across ES versions; just log it.
        self._metric_not_found(metric, path)
def _process_health_data(self, data, config):
    """Submit cluster-health metrics, emit an event on status changes and
    report the cluster-status service check."""
    if self.cluster_status.get(config.url) is None:
        # First run for this URL: remember the status, and alert right
        # away if the cluster is already degraded.
        self.cluster_status[config.url] = data['status']
        if data['status'] in ["yellow", "red"]:
            event = self._create_event(data['status'], tags=config.tags)
            self.event(event)

    if data['status'] != self.cluster_status.get(config.url):
        # Status changed since the previous run.
        self.cluster_status[config.url] = data['status']
        event = self._create_event(data['status'], tags=config.tags)
        self.event(event)

    for metric, desc in self.CLUSTER_HEALTH_METRICS.iteritems():
        self._process_metric(data, metric, *desc, tags=config.tags)

    # Process the service check
    cluster_status = data['status']
    if cluster_status == 'green':
        status = AgentCheck.OK
        data['tag'] = "OK"
    elif cluster_status == 'yellow':
        status = AgentCheck.WARNING
        data['tag'] = "WARN"
    else:
        status = AgentCheck.CRITICAL
        data['tag'] = "ALERT"

    # 'tag' was injected into the payload above so the summary line can be
    # built with a single format(**data) call.
    msg = "{tag} on cluster \"{cluster_name}\" "\
          "| active_shards={active_shards} "\
          "| initializing_shards={initializing_shards} "\
          "| relocating_shards={relocating_shards} "\
          "| unassigned_shards={unassigned_shards} "\
          "| timed_out={timed_out}" \
          .format(**data)

    self.service_check(
        self.SERVICE_CHECK_CLUSTER_STATUS,
        status,
        message=msg,
        tags=config.service_check_tags
    )
def _metric_not_found(self, metric, path):
    # Debug level only: absent paths are normal across ES versions.
    self.log.debug("Metric not found: %s -> %s", path, metric)
def _create_event(self, status, tags=None):
    """Build the event payload emitted when the cluster status changes.

    "red" maps to an error alert, "yellow" to a warning; any other
    status (expected to be green) is reported as a recovery.
    """
    hostname = self.hostname.decode('utf-8')

    degraded_alert_types = {"red": "error", "yellow": "warning"}
    alert_type = degraded_alert_types.get(status, "success")

    if status in degraded_alert_types:
        msg_title = "%s is %s" % (hostname, status)
    else:
        # then it should be green
        msg_title = "%s recovered as %s" % (hostname, status)

    msg = "ElasticSearch: %s just reported as %s" % (hostname, status)

    return {
        'timestamp': int(time.time()),
        'event_type': 'elasticsearch',
        'host': hostname,
        'msg_text': msg,
        'msg_title': msg_title,
        'alert_type': alert_type,
        'source_type_name': "elasticsearch",
        'event_object': hostname,
        'tags': tags
    }
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class YinYueTaiIE(InfoExtractor):
    """Extractor for music videos hosted on YinYueTai (音悦Tai)."""
    IE_NAME = 'yinyuetai:video'
    IE_DESC = '音悦Tai'
    _VALID_URL = r'https?://v\.yinyuetai\.com/video(?:/h5)?/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://v.yinyuetai.com/video/2322376',
        'md5': '6e3abe28d38e3a54b591f9f040595ce0',
        'info_dict': {
            'id': '2322376',
            'ext': 'mp4',
            'title': '少女时代_PARTY_Music Video Teaser',
            'creator': '少女时代',
            'duration': 25,
            # Raw string: '\.' in a plain literal is an invalid escape
            # sequence on modern Python (same pattern bytes either way).
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://v.yinyuetai.com/video/h5/2322376',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch video metadata from the mobile API and build the info dict."""
        video_id = self._match_id(url)

        info = self._download_json(
            'http://ext.yinyuetai.com/main/get-h-mv-info?json=true&videoId=%s' % video_id, video_id,
            'Downloading mv info')['videoInfo']['coreVideoInfo']

        # The API reports errors in-band rather than via HTTP status.
        if info['error']:
            raise ExtractorError(info['errorMsg'], expected=True)

        formats = [{
            'url': format_info['videoUrl'],
            'format_id': format_info['qualityLevel'],
            'format': format_info.get('qualityLevelName'),
            'filesize': format_info.get('fileSize'),
            # though URLs ends with .flv, the downloaded files are in fact mp4
            'ext': 'mp4',
            'tbr': format_info.get('bitrate'),
        } for format_info in info['videoUrlModels']]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': info['videoName'],
            'thumbnail': info.get('bigHeadImage'),
            'creator': info.get('artistNames'),
            'duration': info.get('duration'),
            'formats': formats,
        }
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import json
from base64 import b64encode
import sickbeard
from sickbeard import logger
from sickbeard.clients.generic import GenericClient
class DelugeAPI(GenericClient):
    """Torrent client adapter that talks to the Deluge WebUI JSON-RPC API."""

    def __init__(self, host=None, username=None, password=None):
        super(DelugeAPI, self).__init__('Deluge', host, username, password)

        self.url = self.host + 'json'

    def _get_auth(self):
        """Log in to the WebUI and ensure it is connected to a daemon.

        Returns the session auth token, or None on any failure. All
        network errors were previously swallowed by bare `except:`
        clauses, which also caught SystemExit/KeyboardInterrupt; these
        are narrowed to `except Exception`.
        """
        post_data = json.dumps({"method": "auth.login",
                                "params": [self.password],
                                "id": 1
                                })
        try:
            self.response = self.session.post(self.url, data=post_data.encode('utf-8'))
        except Exception:
            return None

        self.auth = self.response.json()["result"]

        post_data = json.dumps({"method": "web.connected",
                                "params": [],
                                "id": 10
                                })
        try:
            self.response = self.session.post(self.url, data=post_data.encode('utf-8'))
        except Exception:
            return None

        connected = self.response.json()['result']

        if not connected:
            # The WebUI is not attached to a daemon: pick the first known
            # host and try to connect to it.
            post_data = json.dumps({"method": "web.get_hosts",
                                    "params": [],
                                    "id": 11
                                    })
            try:
                self.response = self.session.post(self.url, data=post_data.encode('utf-8'))
            except Exception:
                return None

            hosts = self.response.json()['result']
            if len(hosts) == 0:
                logger.log(self.name + u': WebUI does not contain daemons', logger.ERROR)
                return None

            post_data = json.dumps({"method": "web.connect",
                                    "params": [hosts[0][0]],
                                    "id": 11
                                    })
            try:
                self.response = self.session.post(self.url, data=post_data.encode('utf-8'))
            except Exception:
                return None

            post_data = json.dumps({"method": "web.connected",
                                    "params": [],
                                    "id": 10
                                    })
            try:
                self.response = self.session.post(self.url, data=post_data.encode('utf-8'))
            except Exception:
                return None

            connected = self.response.json()['result']
            if not connected:
                logger.log(self.name + u': WebUI could not connect to daemon', logger.ERROR)
                return None

        return self.auth

    def _add_torrent_uri(self, result):
        """Add a torrent by magnet URI; stores the returned hash on `result`."""
        post_data = json.dumps({"method": "core.add_torrent_magnet",
                                "params": [result.url, {"move_completed": "true", "move_completed_path": sickbeard.TV_DOWNLOAD_DIR}],
                                "id": 2
                                })
        self._request(method='post', data=post_data)

        result.hash = self.response.json()['result']

        return self.response.json()['result']

    def _add_torrent_file(self, result):
        """Add a torrent by uploading its base64-encoded .torrent content."""
        post_data = json.dumps({"method": "core.add_torrent_file",
                                "params": [result.name + '.torrent', b64encode(result.content), {"move_completed": "true", "move_completed_path": sickbeard.TV_DOWNLOAD_DIR}],
                                "id": 2
                                })
        self._request(method='post', data=post_data)

        result.hash = self.response.json()['result']

        return self.response.json()['result']

    def _set_torrent_label(self, result):
        """Apply the configured label, creating it first when necessary.

        Requires the Deluge Label plugin; returns False when it is not
        available.
        """
        label = sickbeard.TORRENT_LABEL.lower()
        if label:
            # check if label already exists and create it if not
            post_data = json.dumps({"method": 'label.get_labels',
                                    "params": [],
                                    "id": 3
                                    })
            self._request(method='post', data=post_data)
            labels = self.response.json()['result']

            # `is not None` instead of `!= None`: identity test is the
            # idiomatic (and PEP 8 mandated) None comparison.
            if labels is not None:
                if label not in labels:
                    logger.log(self.name + ': ' + label + u" label does not exist in Deluge we must add it", logger.DEBUG)
                    post_data = json.dumps({"method": 'label.add',
                                            "params": [label],
                                            "id": 4
                                            })
                    self._request(method='post', data=post_data)
                    logger.log(self.name + ': ' + label + u" label added to Deluge", logger.DEBUG)

                # add label to torrent
                post_data = json.dumps({"method": 'label.set_torrent',
                                        "params": [result.hash, label],
                                        "id": 5
                                        })
                self._request(method='post', data=post_data)
                logger.log(self.name + ': ' + label + u" label added to torrent", logger.DEBUG)
            else:
                logger.log(self.name + ': ' + u"label plugin not detected", logger.DEBUG)
                return False

        return not self.response.json()['error']

    def _set_torrent_ratio(self, result):
        """Configure the stop-at-ratio limit when one is configured."""
        if sickbeard.TORRENT_RATIO:
            post_data = json.dumps({"method": "core.set_torrent_stop_at_ratio",
                                    "params": [result.hash, True],
                                    "id": 5
                                    })
            self._request(method='post', data=post_data)

            post_data = json.dumps({"method": "core.set_torrent_stop_ratio",
                                    "params": [result.hash, float(sickbeard.TORRENT_RATIO)],
                                    "id": 6
                                    })
            self._request(method='post', data=post_data)

            return not self.response.json()['error']

        return True

    def _set_torrent_path(self, result):
        """Set the move-on-completion path when one is configured."""
        if sickbeard.TORRENT_PATH:
            post_data = json.dumps({"method": "core.set_torrent_move_completed",
                                    "params": [result.hash, True],
                                    "id": 7
                                    })
            self._request(method='post', data=post_data)

            post_data = json.dumps({"method": "core.set_torrent_move_completed_path",
                                    "params": [result.hash, sickbeard.TORRENT_PATH],
                                    "id": 8
                                    })
            self._request(method='post', data=post_data)

            return not self.response.json()['error']

        return True

    def _set_torrent_pause(self, result):
        """Pause the torrent immediately when configured to add paused."""
        if sickbeard.TORRENT_PAUSED:
            post_data = json.dumps({"method": "core.pause_torrent",
                                    "params": [[result.hash]],
                                    "id": 9
                                    })
            self._request(method='post', data=post_data)

            return not self.response.json()['error']

        return True
api = DelugeAPI()
// This file was automatically generated from exception-handling.md by Knit tool. Do not edit.
package kotlinx.coroutines.guide.test
import kotlinx.coroutines.knit.*
import org.junit.Test
// Knit-generated tests pinning the console output of the exception-handling
// guide examples: each case runs one example `main()` and verifies the lines
// it prints. Regenerate from the guide document instead of editing by hand.
class ExceptionsGuideTest {
    @Test
    fun testExampleExceptions01() {
        // verifyExceptions (unlike verifyLines) tolerates stack-trace output.
        test("ExampleExceptions01") { kotlinx.coroutines.guide.exampleExceptions01.main() }.verifyExceptions(
            "Throwing exception from launch",
            "Exception in thread \"DefaultDispatcher-worker-1 @coroutine#2\" java.lang.IndexOutOfBoundsException",
            "Joined failed job",
            "Throwing exception from async",
            "Caught ArithmeticException"
        )
    }

    @Test
    fun testExampleExceptions02() {
        test("ExampleExceptions02") { kotlinx.coroutines.guide.exampleExceptions02.main() }.verifyLines(
            "CoroutineExceptionHandler got java.lang.AssertionError"
        )
    }

    @Test
    fun testExampleExceptions03() {
        test("ExampleExceptions03") { kotlinx.coroutines.guide.exampleExceptions03.main() }.verifyLines(
            "Cancelling child",
            "Child is cancelled",
            "Parent is not cancelled"
        )
    }

    @Test
    fun testExampleExceptions04() {
        test("ExampleExceptions04") { kotlinx.coroutines.guide.exampleExceptions04.main() }.verifyLines(
            "Second child throws an exception",
            "Children are cancelled, but exception is not handled until all children terminate",
            "The first child finished its non cancellable block",
            "CoroutineExceptionHandler got java.lang.ArithmeticException"
        )
    }

    @Test
    fun testExampleExceptions05() {
        test("ExampleExceptions05") { kotlinx.coroutines.guide.exampleExceptions05.main() }.verifyLines(
            "CoroutineExceptionHandler got java.io.IOException with suppressed [java.lang.ArithmeticException]"
        )
    }

    @Test
    fun testExampleExceptions06() {
        test("ExampleExceptions06") { kotlinx.coroutines.guide.exampleExceptions06.main() }.verifyLines(
            "Rethrowing CancellationException with original cause",
            "CoroutineExceptionHandler got java.io.IOException"
        )
    }

    @Test
    fun testExampleSupervision01() {
        test("ExampleSupervision01") { kotlinx.coroutines.guide.exampleSupervision01.main() }.verifyLines(
            "The first child is failing",
            "The first child is cancelled: true, but the second one is still active",
            "Cancelling the supervisor",
            "The second child is cancelled because the supervisor was cancelled"
        )
    }

    @Test
    fun testExampleSupervision02() {
        test("ExampleSupervision02") { kotlinx.coroutines.guide.exampleSupervision02.main() }.verifyLines(
            "The child is sleeping",
            "Throwing an exception from the scope",
            "The child is cancelled",
            "Caught an assertion error"
        )
    }

    @Test
    fun testExampleSupervision03() {
        test("ExampleSupervision03") { kotlinx.coroutines.guide.exampleSupervision03.main() }.verifyLines(
            "The scope is completing",
            "The child throws an exception",
            "CoroutineExceptionHandler got java.lang.AssertionError",
            "The scope is completed"
        )
    }
}
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class SimpleDisk(SimObject):
    # gem5 SimObject declaration: `type` and `cxx_header` bind this Python
    # config class to its C++ implementation; Param entries become
    # configurable simulation parameters.
    type = 'SimpleDisk'
    cxx_header = "dev/simple_disk.hh"
    disk = Param.DiskImage("Disk Image")
    # Defaults to the nearest enclosing System in the config hierarchy.
    system = Param.System(Parent.any, "System Pointer")
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
import re
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode
log = logging.getLogger('publichd')
# PublicHD category IDs keyed by human-readable name; 0 searches everything.
CATEGORIES = {
    'all': 0,

    # Movies
    'BluRay 720p': 2,
    'BluRay 1080p': 5,
    'XviD': 15,
    'BRRip': 16,

    # TV
    'HDTV': 7,
    'SDTV': 24,
    'TV WEB-DL': 14
}
class SearchPublicHD(object):
    """
    PublicHD search plugin.

    To perform search against single category:

    publichd:
        category: BluRay 720p

    To perform search against multiple categories:

    publichd:
        category:
            - BluRay 720p
            - BluRay 1080p

    Movie categories accepted: BluRay 720p, BluRay 1080p, XviD, BRRip
    TV categories accepted: HDTV, SDTV, TV WEB-DL

    You can use also use category ID manually if you so desire (eg. BluRay 720p is actually category id '2')
    """

    schema = {
        'type': 'object',
        'properties': {
            'category': one_or_more({
                'oneOf': [
                    {'type': 'integer'},
                    {'type': 'string', 'enum': list(CATEGORIES)},
                ]})
        },
        "additionalProperties": False
    }

    @plugin.internet(log)
    def search(self, task, entry, config=None):
        """
        Search for entries on PublicHD

        Scrapes the torrent listing page for each search string and
        returns a set of Entry objects with seed/leech counts and the
        content size normalized to MiB.
        """
        categories = config.get('category', 'all')
        # Ensure categories a list
        if not isinstance(categories, list):
            categories = [categories]

        # Convert named category to its respective category id number
        categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
        category_url_fragment = '&category=%s' % urllib.quote(';'.join(str(c) for c in categories))

        base_url = 'http://publichd.se/index.php?page=torrents&active=0'

        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            query_url_fragment = '&search=' + urllib.quote(query.encode('utf8'))

            # http://publichd.se/index.php?page=torrents&active=0&category=5;15&search=QUERY
            url = (base_url + category_url_fragment + query_url_fragment)
            log.debug('PublicHD search url: %s' % url)

            page = requests.get(url).content
            soup = get_soup(page)

            for result in soup.findAll('a', href=re.compile('page=torrent-details')):
                entry = Entry()
                entry['title'] = result.text

                # Expand the selection to whole row
                result = result.findPrevious('tr')

                # Regexes are raw strings: '\.' and '\d' in plain literals
                # are invalid escape sequences on modern Python (the
                # compiled patterns are byte-identical).
                download_url = result.find('a', href=re.compile(r'\.torrent$'))['href']
                torrent_hash = re.search(r'/([0-9a-fA-F]{5,40})/', download_url).group(1)

                entry['url'] = 'http://publichd.se/download.php?id=%s' % torrent_hash

                seeds, leeches = result.findAll('td', text=re.compile(r'^\d+$'))
                entry['torrent_seeds'] = int(seeds.text)
                entry['torrent_leeches'] = int(leeches.text)

                entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])

                size = result.find("td", text=re.compile(r'(\d+(?:[.,]\d+)*)\s?([KMG]B)')).text
                size = re.search(r'(\d+(?:[.,]\d+)*)\s?([KMG]B)', size)

                if size:
                    # Site reports decimal units; normalize to MiB.
                    if size.group(2) == 'GB':
                        entry['content_size'] = int(float(size.group(1).replace(',', '')) * 1000 ** 3 / 1024 ** 2)
                    elif size.group(2) == 'MB':
                        entry['content_size'] = int(float(size.group(1).replace(',', '')) * 1000 ** 2 / 1024 ** 2)
                    elif size.group(2) == 'KB':
                        entry['content_size'] = int(float(size.group(1).replace(',', '')) * 1000 / 1024 ** 2)
                    else:
                        entry['content_size'] = int(float(size.group(1).replace(',', '')) / 1024 ** 2)

                entries.add(entry)

        return entries
@event('plugin.register')
def register_plugin():
    # Registered as a v2 search-group plugin under the config name 'publichd'.
    plugin.register(SearchPublicHD, 'publichd', groups=['search'], api_ver=2)
#!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import unittest
from cherrymusicserver import service
class TestService(unittest.TestCase):

    def test_mutual_dependency(self):
        # A provider that registers itself as a user of the very service it
        # provides must be rejected by the service registry.
        @service.user(myfoo='fooservice')
        class Reflecto(object):
            def __init__(self):
                service.provide('fooservice', self.__class__)
                assert self.myfoo

        self.assertRaises(service.MutualDependencyBreak, Reflecto)
<?php
namespace Illuminate\Http\Resources\Json;
use Illuminate\Support\Arr;
/**
 * Builds JSON responses for paginated resource collections, merging the
 * paginator's links and meta data into the wrapped payload.
 */
class PaginatedResourceResponse extends ResourceResponse
{
    /**
     * Create an HTTP response that represents the object.
     *
     * @param  \Illuminate\Http\Request  $request
     * @return \Illuminate\Http\JsonResponse
     */
    public function toResponse($request)
    {
        return tap(response()->json(
            $this->wrap(
                $this->resource->resolve($request),
                array_merge_recursive(
                    $this->paginationInformation($request),
                    $this->resource->with($request),
                    $this->resource->additional
                )
            ),
            $this->calculateStatus(),
            [],
            $this->resource->jsonOptions()
        ), function ($response) use ($request) {
            // Expose the underlying models (not their resource wrappers)
            // as the response's "original" collection.
            $response->original = $this->resource->resource->map(function ($item) {
                if (is_array($item)) {
                    return Arr::get($item, 'resource');
                } elseif (is_object($item)) {
                    return $item->resource ?? null;
                }

                return null;
            });

            $this->resource->withResponse($request, $response);
        });
    }

    /**
     * Add the pagination information to the response.
     *
     * Delegates to a `paginationInformation` method/macro on the resource
     * when one exists, so applications can customize the structure.
     *
     * @param  \Illuminate\Http\Request  $request
     * @return array
     */
    protected function paginationInformation($request)
    {
        $paginated = $this->resource->resource->toArray();

        $default = [
            'links' => $this->paginationLinks($paginated),
            'meta' => $this->meta($paginated),
        ];

        if (method_exists($this->resource, 'paginationInformation') ||
            $this->resource->hasMacro('paginationInformation')) {
            return $this->resource->paginationInformation($request, $paginated, $default);
        }

        return $default;
    }

    /**
     * Get the pagination links for the response.
     *
     * @param  array  $paginated
     * @return array
     */
    protected function paginationLinks($paginated)
    {
        return [
            'first' => $paginated['first_page_url'] ?? null,
            'last' => $paginated['last_page_url'] ?? null,
            'prev' => $paginated['prev_page_url'] ?? null,
            'next' => $paginated['next_page_url'] ?? null,
        ];
    }

    /**
     * Gather the metadata for the response.
     *
     * Everything from the paginator's array form except the item data and
     * the URLs already surfaced under "links".
     *
     * @param  array  $paginated
     * @return array
     */
    protected function meta($paginated)
    {
        return Arr::except($paginated, [
            'data',
            'first_page_url',
            'last_page_url',
            'prev_page_url',
            'next_page_url',
        ]);
    }
}
import calendar
import time
from email.utils import formatdate, parsedate, parsedate_tz
from datetime import datetime, timedelta
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
def expire_after(delta, date=None):
    """Return the instant `delta` after `date` (default: current UTC time)."""
    base = date if date else datetime.utcnow()
    return base + delta
def datetime_to_header(dt):
    """Serialize a (UTC) datetime as an RFC 2822 date header string."""
    epoch_seconds = calendar.timegm(dt.timetuple())
    return formatdate(epoch_seconds)
class BaseHeuristic(object):
    """Base class for cache freshness heuristics applied to responses."""

    def warning(self, response):
        """
        Return a valid 1xx warning header value describing the cache
        adjustments.

        The response is provided to allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say response is over 24 hours old.
        """
        return '110 - "Response is Stale"'

    def update_headers(self, response):
        """Update the response headers with any new headers.

        NOTE: This SHOULD always include some Warning header to
              signify that the response was cached by the client, not
              by way of the provided headers.
        """
        return {}

    def apply(self, response):
        # Merge heuristic headers into the response, then attach the
        # Warning header (subclasses may return None to omit it).
        updated_headers = self.update_headers(response)

        if updated_headers:
            response.headers.update(updated_headers)
            warning_header_value = self.warning(response)
            if warning_header_value is not None:
                response.headers.update({"Warning": warning_header_value})

        return response
class OneDayCache(BaseHeuristic):
    """
    Cache the response by providing an expires 1 day in the
    future.
    """

    def update_headers(self, response):
        headers = {}

        if "expires" not in response.headers:
            # Derive Expires from the response's own Date header.
            # NOTE(review): parsedate returning None (malformed Date)
            # would raise here -- confirm upstream callers guarantee it.
            date = parsedate(response.headers["date"])
            expires = expire_after(timedelta(days=1), date=datetime(*date[:6]))
            headers["expires"] = datetime_to_header(expires)
            headers["cache-control"] = "public"
        return headers
class ExpiresAfter(BaseHeuristic):
    """
    Cache **all** requests for a defined time period.
    """

    def __init__(self, **kw):
        # Keyword arguments are passed straight to timedelta
        # (e.g. ExpiresAfter(days=1, hours=2)).
        self.delta = timedelta(**kw)

    def update_headers(self, response):
        when = expire_after(self.delta)
        return {"expires": datetime_to_header(when), "cache-control": "public"}

    def warning(self, response):
        return ("110 - Automatically cached for %s. "
                "Response might be stale") % self.delta
class LastModified(BaseHeuristic):
    """
    If there is no Expires header already, fall back on Last-Modified
    using the heuristic from
    http://tools.ietf.org/html/rfc7234#section-4.2.2
    to calculate a reasonable value.

    Firefox also does something like this per
    https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
    http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
    Unlike mozilla we limit this to 24-hr.
    """
    # Status codes RFC 7234 section 6.1 allows to be cached without
    # explicit freshness information.
    cacheable_by_default_statuses = {
        200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
    }

    def update_headers(self, resp):
        headers = resp.headers

        # Bail out whenever an explicit or conflicting policy exists.
        if "expires" in headers:
            return {}

        if "cache-control" in headers and headers["cache-control"] != "public":
            return {}

        if resp.status not in self.cacheable_by_default_statuses:
            return {}

        if "date" not in headers or "last-modified" not in headers:
            return {}

        date = calendar.timegm(parsedate_tz(headers["date"]))
        last_modified = parsedate(headers["last-modified"])
        if date is None or last_modified is None:
            return {}

        # Freshness lifetime = 10% of (Date - Last-Modified), capped at
        # 24 hours; only emit Expires while the response is still fresh.
        now = time.time()
        current_age = max(0, now - date)
        delta = date - calendar.timegm(last_modified)
        freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
        if freshness_lifetime <= current_age:
            return {}

        expires = date + freshness_lifetime
        return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}

    def warning(self, resp):
        # Heuristically-derived Expires is silent: no Warning header.
        return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.