code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.doc;
import org.gradle.api.InvalidUserDataException;
import java.io.File;
/**
 * Thrown when parsing a documentation snippet fails. Carries the source file
 * and line number of the failure when they are known; otherwise the file is
 * null and the line number is -1.
 */
public class SnippetParserException extends RuntimeException {
// Source file the snippet came from; null when unknown.
private final File file;
// Line of the failure inside the file; -1 when unknown.
private final int lineNumber;
/** Wraps a generic parse failure that has no file/line context. */
public SnippetParserException(String message, Throwable cause) {
super(message, cause);
this.file = null;
this.lineNumber = -1;
}
/** Wraps a parse failure of {@code file} at {@code lineNumber} with a descriptive message. */
public SnippetParserException(File file, int lineNumber, InvalidUserDataException e) {
super("Error parsing snippet in " + file.getName() + " at line " + lineNumber, e);
this.file = file;
this.lineNumber = lineNumber;
}
/** @return the file being parsed, or null if unknown */
public File getFile() {
return file;
}
/** @return the failing line number, or -1 if unknown */
public int getLineNumber() {
return lineNumber;
}
} | java | github | https://github.com/elastic/elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/SnippetParserException.java |
# Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import absolute_import
import inspect
# Introspection shims: choose implementations of get_arity/has_kwargs based on
# whether this interpreter provides inspect.signature (Python 3.3+) or only the
# legacy inspect.getargspec (Python 2).
try:
# Check if we have the newer inspect.signature available.
# Otherwise fallback to the legacy getargspec.
inspect.signature # noqa
except AttributeError:
# Legacy path: no inspect.signature on this interpreter.
def get_arity(fn):
return len(inspect.getargspec(fn)[0])
def has_kwargs(fn):
argspec = inspect.getargspec(fn)
# getargspec's `keywords` field names the **kwargs parameter, if any.
return argspec.keywords is not None
def format_args(fn):
argspec = inspect.getargspec(fn)
return inspect.formatargspec(*argspec)
else:
# Modern path: inspect.signature exposes structured parameter metadata.
def get_arity(fn):
parameters = inspect.signature(fn).parameters
# Count only plain positional-or-keyword parameters.
return sum(1 for param in parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD)
def has_kwargs(fn):
parameters = inspect.signature(fn).parameters
return any(param.kind == param.VAR_KEYWORD
for param in parameters.values())
def format_args(fn):
return str(inspect.signature(fn)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
from bs4 import BeautifulSoup
import os
import re
# Base of the public Spring javadoc site; prepended to relative class URLs.
BASE_JAVADOC_URL = "http://docs.spring.io/spring/docs/current/javadoc-api/"
# Local mirror of the javadoc tree that the script reads files from.
BASE_LOCAL_JAVADOC_DIR = "./docs/javadoc-api"
# Frameless index page listing every documented class.
BASE_JAVADOC_FILE = BASE_LOCAL_JAVADOC_DIR + "/allclasses-noframe.html"
def readRootFile():
    """Return the full text of the local javadoc all-classes index file."""
    with open(BASE_JAVADOC_FILE, 'r') as root_file:
        return root_file.read()
def collectDocFilesFrom(dir):
    """Walk *dir* and return paths of javadoc class pages.

    Skips anything under 'class-use' or 'index-files' directories, and
    filters out non-HTML files as well as package/doc-files pages.
    """
    doc_files = []
    for path, _subdirs, names in os.walk(dir):
        # Whole directories of generated cross-reference pages are ignored.
        if 'class-use' in path or 'index-files' in path:
            continue
        doc_files.extend(
            "%s/%s" % (path, name)
            for name in names
            if name.endswith('.html')
            and 'package-' not in name
            and 'doc-files' not in name
        )
    return doc_files
# Extract (classname, description, url) from a single javadoc HTML page using
# BeautifulSoup. Implicitly returns None for index/package/doc-files pages —
# callers must tolerate that. The description is truncated via cutlength(),
# and the url is absolute only when classUrl is non-empty.
# NOTE(review): the first <h2> is taken as the class title — presumably text
# like "Class Foo" that remove_keywords() later strips; verify against pages.
def getDocs(filename, classUrl):
if filename.endswith('.html') and 'package-' not in filename and 'doc-files' not in filename:
content = BeautifulSoup(getcontent(filename), 'html.parser')
classname = content.find_all('h2')[0].string
block = content.find_all('div', 'block', limit=1)
description = ""
if len(block) > 0:
description = block[0].get_text()
description = cutlength(description)
url = ""
if len(classUrl) != 0:
url = BASE_JAVADOC_URL + classUrl
return classname, description, url
def cutlength(description):
    """Truncate *description* at the last '.' within its first 300 characters
    and drop newlines.

    When no '.' occurs in that window the result is the empty string
    (rfind returns -1, so the slice is empty).
    """
    cut_at = description.rfind('.', 0, 300) + 1
    return description[:cut_at].replace("\n", "")
# Strip javadoc heading keywords ("Class ", "Enum ", "Interface ",
# "Annotation Type ") and short generic markers such as <T,K> from a title.
# Non-string input collapses to ''.
# NOTE(review): uses Python 2 `basestring` — this function raises NameError on
# Python 3; port to `str` if the script is ever migrated.
def remove_keywords(line):
if isinstance(line, basestring):
line = re.sub(r'<\w,?\w?>', '', line)
return line.replace('Class ', '').replace('Enum ', '').replace('Interface ', '').replace('Annotation Type ', '')
else:
return ''
def getcontent(filename):
    """Read *filename* and return its entire contents as a single string."""
    with open(filename, 'r') as handle:
        return handle.read()
def concat_list(data_list=('', '', '')):
    """Format a (classname, description, url) triple via concat().

    Returns the tab-separated output row, or "" when data_list is None
    (e.g. when getDocs() skipped an index page and returned nothing).
    """
    # Fixes: identity check (`is not None`) instead of `!= None`, and an
    # immutable tuple default instead of a shared mutable list.
    if data_list is not None:
        return concat(data_list[0], data_list[1], data_list[2])
    return ""
def concat(clazz, description, url):
    """Build one newline-terminated output row of 13 tab-separated fields.

    Fields: title, type ('A'), redirect, '', categories, '', related topics,
    '', external links, '', image, abstract (wrapped in a prog__container
    section), url. Missing pieces fall back to sentinel strings that the
    caller filters on.
    """
    title = remove_keywords(clazz) or 'No class found'
    escaped = description.replace("\n", "\\n").replace("\t", "\\t") or "No abstract found"
    abstract = '<section class="prog__container">' + escaped + '</section>'
    fields = [
        title,
        'A',   # article type
        '',    # redirect
        '',
        '',    # categories
        '',
        '',    # related topics, e.g. [[Perl Data Language|PDL]]
        '',
        '',    # external links: [$url title text]\n entries
        '',
        '',    # image
        abstract,
        url or "No URL found",
    ]
    return "\t".join(fields) + "\n"
# Append one formatted row for *data_list* to *filename*, skipping rows with
# no class name, no abstract, or an empty line. NOTE(review): opens the file
# in append mode on every call, and `line.encode('utf')` is a Python 2 idiom —
# on Python 3 this would write bytes to a text-mode handle and fail.
def output(filename, data_list):
line = concat_list(data_list)
if not line.startswith("No class found") and line != "" and not ("No abstract found" in line):
f = open(filename, 'a')
f.write(line.encode('utf'))
f.close() | unknown | codeparrot/codeparrot-clean | ||
# dialog.py -- Tkinter interface to the tk_dialog script.
from Tkinter import *
from Tkinter import _cnfmerge
# Pick the dialog's icon bitmap by Tk version: 'warning' for old Tk (<= 3.6),
# 'questhead' for newer releases.
if TkVersion <= 3.6:
DIALOG_ICON = 'warning'
else:
DIALOG_ICON = 'questhead'
# Thin wrapper over the Tcl `tk_dialog` command: constructing a Dialog shows
# the dialog immediately and stores the index of the pressed button in .num.
class Dialog(Widget):
def __init__(self, master=None, cnf={}, **kw):
# Merge the positional cnf dict with keyword options (Tkinter convention).
cnf = _cnfmerge((cnf, kw))
self.widgetName = '__dialog__'
Widget._setup(self, master, cnf)
# tk_dialog returns the index of the chosen button as a string; getint
# converts it. Required cnf keys: title, text, bitmap, default, strings.
self.num = self.tk.getint(
self.tk.call(
'tk_dialog', self._w,
cnf['title'], cnf['text'],
cnf['bitmap'], cnf['default'],
*cnf['strings']))
# The Tcl-side dialog is already dismissed; clean up the Python widget
# record, tolerating the window being gone.
try: Widget.destroy(self)
except TclError: pass
# Destruction already happened in __init__; later destroy() calls are no-ops.
def destroy(self): pass
# Manual smoke test: pop up a sample three-button dialog and report the
# chosen button index. Python 2 code (`print` statement below).
def _test():
d = Dialog(None, {'title': 'File Modified',
'text':
'File "Python.h" has been modified'
' since the last time it was saved.'
' Do you want to save it before'
' exiting the application.',
'bitmap': DIALOG_ICON,
'default': 0,
'strings': ('Save File',
'Discard Changes',
'Return to Editor')})
# Index of the button the user pressed (0-based, per tk_dialog).
print d.num
# Demo wiring: a Test button that opens the sample dialog and a Quit button.
# Uses the old cnf-dict style where the Pack class itself is a key holding
# pack options.
if __name__ == '__main__':
t = Button(None, {'text': 'Test',
'command': _test,
Pack: {}})
q = Button(None, {'text': 'Quit',
'command': t.quit,
Pack: {}})
t.mainloop() | unknown | codeparrot/codeparrot-clean | ||
// Page module for the hybrid-routing example: exports a function returning
// the static body text for page B.
module.exports = function() {
return "This is page B.";
}; | javascript | github | https://github.com/webpack/webpack | examples/hybrid-routing/bPage.js |
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"crypto/tls"
"fmt"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/alecthomas/units"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/grafana/regexp"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
"go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/aws"
"github.com/prometheus/prometheus/discovery/azure"
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/digitalocean"
"github.com/prometheus/prometheus/discovery/dns"
"github.com/prometheus/prometheus/discovery/eureka"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/hetzner"
"github.com/prometheus/prometheus/discovery/http"
"github.com/prometheus/prometheus/discovery/ionos"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/linode"
"github.com/prometheus/prometheus/discovery/marathon"
"github.com/prometheus/prometheus/discovery/moby"
"github.com/prometheus/prometheus/discovery/nomad"
"github.com/prometheus/prometheus/discovery/openstack"
"github.com/prometheus/prometheus/discovery/ovhcloud"
"github.com/prometheus/prometheus/discovery/puppetdb"
"github.com/prometheus/prometheus/discovery/scaleway"
"github.com/prometheus/prometheus/discovery/stackit"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/discovery/triton"
"github.com/prometheus/prometheus/discovery/uyuni"
"github.com/prometheus/prometheus/discovery/vultr"
"github.com/prometheus/prometheus/discovery/xds"
"github.com/prometheus/prometheus/discovery/zookeeper"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/util/testutil"
)
// mustParseURL parses u and wraps it in a *config.URL, panicking on any parse
// error — a helper for building expected test fixtures inline.
func mustParseURL(u string) *config.URL {
parsed, err := url.Parse(u)
if err != nil {
panic(err)
}
return &config.URL{URL: parsed}
}
// Global limit values that the test configuration is expected to carry;
// they mirror the values in the testdata config file.
const (
globBodySizeLimit = 15 * units.MiB
globSampleLimit = 1500
globTargetLimit = 30
globLabelLimit = 30
globLabelNameLengthLimit = 200
globLabelValueLengthLimit = 200
globalGoGC = 42
globScrapeFailureLogFile = "testdata/fail.log"
)
var expectedConf = &Config{
loaded: true,
GlobalConfig: GlobalConfig{
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EvaluationInterval: model.Duration(30 * time.Second),
QueryLogFile: "testdata/query.log",
ScrapeFailureLogFile: globScrapeFailureLogFile,
ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistogramsToNHCB: false,
ExtraScrapeMetrics: boolPtr(false),
MetricNameValidationScheme: model.UTF8Validation,
},
Runtime: RuntimeConfig{
GoGC: globalGoGC,
},
RuleFiles: []string{
filepath.FromSlash("testdata/first.rules"),
filepath.FromSlash("testdata/my/*.rules"),
},
RemoteWriteConfigs: []*RemoteWriteConfig{
{
URL: mustParseURL("http://remote1/push"),
ProtobufMessage: remoteapi.WriteV1MessageType,
RemoteTimeout: model.Duration(30 * time.Second),
Name: "drop_expensive",
WriteRelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__name__"},
Separator: ";",
Regex: relabel.MustNewRegexp("expensive.*"),
Replacement: "$1",
Action: relabel.Drop,
NameValidationScheme: model.UTF8Validation,
},
},
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.HTTPClientConfig{
OAuth2: &config.OAuth2{
ClientID: "123",
ClientSecret: "456",
TokenURL: "http://remote1/auth",
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
},
FollowRedirects: true,
EnableHTTP2: false,
},
},
{
URL: mustParseURL("http://remote2/push"),
ProtobufMessage: remoteapi.WriteV2MessageType,
RemoteTimeout: model.Duration(30 * time.Second),
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
Name: "rw_tls",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: false,
},
Headers: map[string]string{"name": "value"},
},
},
OTLPConfig: OTLPConfig{
PromoteResourceAttributes: []string{
"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
},
TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
LabelNameUnderscoreSanitization: true,
LabelNamePreserveMultipleUnderscores: true,
},
RemoteReadConfigs: []*RemoteReadConfig{
{
URL: mustParseURL("http://remote1/read"),
RemoteTimeout: model.Duration(1 * time.Minute),
ChunkedReadLimit: DefaultChunkedReadLimit,
ReadRecent: true,
Name: "default",
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: false,
},
FilterExternalLabels: true,
},
{
URL: mustParseURL("http://remote3/read"),
RemoteTimeout: model.Duration(1 * time.Minute),
ChunkedReadLimit: DefaultChunkedReadLimit,
ReadRecent: false,
Name: "read_special",
RequiredMatchers: model.LabelSet{"job": "special"},
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
FilterExternalLabels: true,
},
},
ScrapeConfigs: []*ScrapeConfig{
{
JobName: "prometheus",
HonorLabels: true,
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFallbackProtocol: PrometheusText0_0_4,
ScrapeFailureLogFile: "testdata/fail_prom.log",
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
CredentialsFile: filepath.FromSlash("testdata/valid_token_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
TLSConfig: config.TLSConfig{
MinVersion: config.TLSVersion(tls.VersionTLS10),
},
HTTPHeaders: &config.Headers{
Headers: map[string]config.Header{
"foo": {
Values: []string{"foobar"},
Secrets: []config.Secret{"bar", "foo"},
Files: []string{filepath.FromSlash("testdata/valid_password_file")},
},
},
},
},
ServiceDiscoveryConfigs: discovery.Configs{
&file.SDConfig{
Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"},
RefreshInterval: model.Duration(10 * time.Minute),
},
&file.SDConfig{
Files: []string{"testdata/bar/*.yaml"},
RefreshInterval: model.Duration(5 * time.Minute),
},
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
{model.AddressLabel: "localhost:9191"},
},
Labels: model.LabelSet{
"my": "label",
"your": "label",
},
Source: "0",
},
},
},
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"job", "__meta_dns_name"},
TargetLabel: "job",
Separator: ";",
Regex: relabel.MustNewRegexp("(.*)some-[regex]"),
Replacement: "foo-${1}",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"abc"},
TargetLabel: "cde",
Separator: ";",
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
TargetLabel: "abc",
Separator: ";",
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: "static",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
TargetLabel: "abc",
Separator: ";",
Regex: relabel.MustNewRegexp(""),
Replacement: "static",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"foo"},
TargetLabel: "abc",
Action: relabel.KeepEqual,
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Separator: relabel.DefaultRelabelConfig.Separator,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"foo"},
TargetLabel: "abc",
Action: relabel.DropEqual,
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Separator: relabel.DefaultRelabelConfig.Separator,
NameValidationScheme: model.UTF8Validation,
},
},
},
{
JobName: "service-x",
HonorTimestamps: true,
ScrapeInterval: model.Duration(50 * time.Second),
ScrapeTimeout: model.Duration(5 * time.Second),
EnableCompression: true,
BodySizeLimit: 10 * units.MiB,
SampleLimit: 1000,
TargetLimit: 35,
LabelLimit: 35,
LabelNameLengthLimit: 210,
LabelValueLengthLimit: 210,
ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4},
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "admin_name",
Password: "multiline\nmysecret\ntest",
},
FollowRedirects: true,
EnableHTTP2: true,
},
MetricsPath: "/my_path",
Scheme: "https",
ServiceDiscoveryConfigs: discovery.Configs{
&dns.SDConfig{
Names: []string{
"first.dns.address.domain.com",
"second.dns.address.domain.com",
},
RefreshInterval: model.Duration(15 * time.Second),
Type: "SRV",
},
&dns.SDConfig{
Names: []string{
"first.dns.address.domain.com",
},
RefreshInterval: model.Duration(30 * time.Second),
Type: "SRV",
},
},
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"job"},
Regex: relabel.MustNewRegexp("(.*)some-[regex]"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Drop,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"__address__"},
TargetLabel: "__tmp_hash",
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Modulus: 8,
Separator: ";",
Action: relabel.HashMod,
NameValidationScheme: model.UTF8Validation,
},
{
SourceLabels: model.LabelNames{"__tmp_hash"},
Regex: relabel.MustNewRegexp("1"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Keep,
NameValidationScheme: model.UTF8Validation,
},
{
Regex: relabel.MustNewRegexp("1"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.LabelMap,
NameValidationScheme: model.UTF8Validation,
},
{
Regex: relabel.MustNewRegexp("d"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.LabelDrop,
NameValidationScheme: model.UTF8Validation,
},
{
Regex: relabel.MustNewRegexp("k"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.LabelKeep,
NameValidationScheme: model.UTF8Validation,
},
},
MetricRelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__name__"},
Regex: relabel.MustNewRegexp("expensive_metric.*"),
Separator: ";",
Replacement: relabel.DefaultRelabelConfig.Replacement,
Action: relabel.Drop,
NameValidationScheme: model.UTF8Validation,
},
},
},
{
JobName: "service-y",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&consul.SDConfig{
Server: "localhost:1234",
PathPrefix: "/consul",
Token: "mysecret",
Services: []string{"nginx", "cache", "mysql"},
ServiceTags: []string{"canary", "v1"},
NodeMeta: map[string]string{"rack": "123"},
TagSeparator: consul.DefaultSDConfig.TagSeparator,
Scheme: "https",
RefreshInterval: consul.DefaultSDConfig.RefreshInterval,
AllowStale: true,
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
CAFile: filepath.FromSlash("testdata/valid_ca_file"),
InsecureSkipVerify: false,
},
FollowRedirects: true,
EnableHTTP2: true,
},
},
},
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__meta_sd_consul_tags"},
Regex: relabel.MustNewRegexp("label:([^=]+)=([^,]+)"),
Separator: ",",
TargetLabel: "${1}",
Replacement: "${2}",
Action: relabel.Replace,
NameValidationScheme: model.UTF8Validation,
},
},
},
{
JobName: "service-z",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "mysecret",
},
FollowRedirects: true,
EnableHTTP2: true,
},
},
{
JobName: "service-kubernetes",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&kubernetes.SDConfig{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "myusername",
Password: "mysecret",
},
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
NamespaceDiscovery: kubernetes.NamespaceDiscovery{},
},
},
},
{
JobName: "service-kubernetes-namespaces",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "myusername",
PasswordFile: filepath.FromSlash("testdata/valid_password_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: discovery.Configs{
&kubernetes.SDConfig{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
NamespaceDiscovery: kubernetes.NamespaceDiscovery{
Names: []string{
"default",
},
},
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-kuma",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&xds.KumaSDConfig{
Server: "http://kuma-control-plane.kuma-system.svc:5676",
ClientID: "main-prometheus",
HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(15 * time.Second),
FetchTimeout: model.Duration(2 * time.Minute),
},
},
},
{
JobName: "service-marathon",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&marathon.SDConfig{
Servers: []string{
"https://marathon.example.com:443",
},
RefreshInterval: model.Duration(30 * time.Second),
AuthToken: "mysecret",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
},
},
},
{
JobName: "service-nomad",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&nomad.SDConfig{
AllowStale: true,
Namespace: "default",
RefreshInterval: model.Duration(60 * time.Second),
Region: "global",
Server: "http://localhost:4646",
TagSeparator: ",",
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
},
},
},
{
JobName: "service-ec2",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&aws.EC2SDConfig{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
Filters: []*aws.EC2Filter{
{
Name: "tag:environment",
Values: []string{"prod"},
},
{
Name: "tag:service",
Values: []string{"web", "db"},
},
},
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-lightsail",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&aws.LightsailSDConfig{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-azure",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&azure.SDConfig{
Environment: "AzurePublicCloud",
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
ResourceGroup: "my-resource-group",
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
ClientSecret: "mysecret",
AuthenticationMethod: "OAuth",
RefreshInterval: model.Duration(5 * time.Minute),
Port: 9100,
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-nerve",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&zookeeper.NerveSDConfig{
Servers: []string{"localhost"},
Paths: []string{"/monitoring"},
Timeout: model.Duration(10 * time.Second),
},
},
},
{
JobName: "0123service-xxx",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
},
Source: "0",
},
},
},
},
{
JobName: "badfederation",
HonorTimestamps: false,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: "/federate",
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
},
Source: "0",
},
},
},
},
{
JobName: "測試",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
},
Source: "0",
},
},
},
},
{
JobName: "httpsd",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&http.SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
URL: "http://example.com/prometheus",
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
{
JobName: "service-triton",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&triton.SDConfig{
Account: "testAccount",
Role: "container",
DNSSuffix: "triton.example.com",
Endpoint: "triton.example.com",
Port: 9163,
RefreshInterval: model.Duration(60 * time.Second),
Version: 1,
TLSConfig: config.TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
{
JobName: "digitalocean-droplets",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&digitalocean.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "abcdef",
},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
{
JobName: "docker",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&moby.DockerSDConfig{
Filters: []moby.Filter{},
Host: "unix:///var/run/docker.sock",
Port: 80,
HostNetworkingHost: "localhost",
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MatchFirstNetwork: true,
},
},
},
{
JobName: "dockerswarm",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&moby.DockerSwarmSDConfig{
Filters: []moby.Filter{},
Host: "http://127.0.0.1:2375",
Role: "nodes",
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "service-openstack",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&openstack.SDConfig{
Role: "instance",
Region: "RegionOne",
Port: 80,
Availability: "public",
RefreshInterval: model.Duration(60 * time.Second),
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
{
JobName: "service-puppetdb",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&puppetdb.SDConfig{
URL: "https://puppetserver/",
Query: "resources { type = \"Package\" and title = \"httpd\" }",
IncludeParameters: true,
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
},
{
JobName: "hetzner",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultProtoFirstScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(true),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
RelabelConfigs: []*relabel.Config{
{
Action: relabel.Uppercase,
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Separator: relabel.DefaultRelabelConfig.Separator,
SourceLabels: model.LabelNames{"instance"},
TargetLabel: "instance",
NameValidationScheme: model.UTF8Validation,
},
},
ServiceDiscoveryConfigs: discovery.Configs{
&hetzner.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "abcdef",
},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
Role: "hcloud",
},
&hetzner.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{Username: "abcdef", Password: "abcdef"},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
Role: "robot",
},
},
},
{
JobName: "service-eureka",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&eureka.SDConfig{
Server: "http://eureka.example.com:8761/eureka",
RefreshInterval: model.Duration(30 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "ovhcloud",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfigs: discovery.Configs{
&ovhcloud.SDConfig{
Endpoint: "ovh-eu",
ApplicationKey: "testAppKey",
ApplicationSecret: "testAppSecret",
ConsumerKey: "testConsumerKey",
RefreshInterval: model.Duration(60 * time.Second),
Service: "vps",
},
&ovhcloud.SDConfig{
Endpoint: "ovh-eu",
ApplicationKey: "testAppKey",
ApplicationSecret: "testAppSecret",
ConsumerKey: "testConsumerKey",
RefreshInterval: model.Duration(60 * time.Second),
Service: "dedicated_server",
},
},
},
{
JobName: "scaleway",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfigs: discovery.Configs{
&scaleway.SDConfig{
APIURL: "https://api.scaleway.com",
AccessKey: "SCWXXXXXXXXXXXXXXXXX",
HTTPClientConfig: config.DefaultHTTPClientConfig,
Port: 80,
Project: "11111111-1111-1111-1111-111111111112",
RefreshInterval: model.Duration(60 * time.Second),
Role: "instance",
SecretKey: "11111111-1111-1111-1111-111111111111",
Zone: "fr-par-1",
},
&scaleway.SDConfig{
APIURL: "https://api.scaleway.com",
AccessKey: "SCWXXXXXXXXXXXXXXXXX",
HTTPClientConfig: config.DefaultHTTPClientConfig,
Port: 80,
Project: "11111111-1111-1111-1111-111111111112",
RefreshInterval: model.Duration(60 * time.Second),
Role: "baremetal",
SecretKey: "11111111-1111-1111-1111-111111111111",
Zone: "fr-par-1",
},
},
},
{
JobName: "linode-instances",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&linode.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "abcdef",
},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
TagSeparator: linode.DefaultSDConfig.TagSeparator,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
{
JobName: "stackit-servers",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&stackit.SDConfig{
Project: "11111111-1111-1111-1111-111111111111",
Region: "eu01",
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "abcdef",
},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
{
JobName: "uyuni",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfigs: discovery.Configs{
&uyuni.SDConfig{
Server: "https://localhost:1234",
Username: "gopher",
Password: "hole",
Entitlement: "monitoring_entitled",
Separator: ",",
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "ionos",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&ionos.SDConfig{
DatacenterID: "8feda53f-15f0-447f-badf-ebe32dad2fc0",
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{Type: "Bearer", Credentials: "abcdef"},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
{
JobName: "vultr",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultScrapeProtocols,
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&vultr.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "abcdef",
},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
},
AlertingConfig: AlertingConfig{
AlertmanagerConfigs: []*AlertmanagerConfig{
{
Scheme: "https",
Timeout: model.Duration(10 * time.Second),
APIVersion: AlertmanagerAPIVersionV2,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "1.2.3.4:9093"},
{model.AddressLabel: "1.2.3.5:9093"},
{model.AddressLabel: "1.2.3.6:9093"},
},
Source: "0",
},
},
},
},
},
},
StorageConfig: StorageConfig{
TSDBConfig: &TSDBConfig{
OutOfOrderTimeWindow: 30 * time.Minute.Milliseconds(),
OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
StaleSeriesCompactionThreshold: 0.5,
Retention: &TSDBRetentionConfig{
Time: model.Duration(24 * time.Hour),
Size: 1 * units.GiB,
},
},
},
TracingConfig: TracingConfig{
Endpoint: "localhost:4317",
ClientType: TracingClientGRPC,
Insecure: false,
Compression: "gzip",
Timeout: model.Duration(5 * time.Second),
Headers: map[string]string{"foo": "bar"},
TLSConfig: config.TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
InsecureSkipVerify: true,
},
},
}
// TestYAMLNotLongerSupportedAMApi verifies that a configuration file using a
// no-longer-supported Alertmanager API configuration fails to load.
func TestYAMLNotLongerSupportedAMApi(t *testing.T) {
	_, loadErr := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger())
	require.Error(t, loadErr)
}
// TestYAMLRoundtrip checks that a loaded configuration survives a YAML
// round trip unchanged: load -> marshal -> load again -> deep-compare.
func TestYAMLRoundtrip(t *testing.T) {
	original, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)

	serialized, err := yaml.Marshal(original)
	require.NoError(t, err)

	reloaded, err := Load(string(serialized), promslog.NewNopLogger())
	require.NoError(t, err)

	require.Equal(t, original, reloaded)
}
// TestRemoteWriteRetryOnRateLimit ensures that the per-endpoint
// RetryOnRateLimit queue setting survives a YAML marshal/parse round trip.
func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
	original, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)

	serialized, err := yaml.Marshal(original)
	require.NoError(t, err)

	reloaded, err := Load(string(serialized), promslog.NewNopLogger())
	require.NoError(t, err)

	// The fixture declares two remote-write endpoints with opposite settings.
	require.True(t, reloaded.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
	require.False(t, reloaded.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
}
func TestOTLPSanitizeResourceAttributes(t *testing.T) {
t.Run("good config - default resource attributes", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_default_resource_attributes.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.False(t, got.OTLPConfig.PromoteAllResourceAttributes)
require.Empty(t, got.OTLPConfig.IgnoreResourceAttributes)
require.Empty(t, got.OTLPConfig.PromoteResourceAttributes)
})
t.Run("good config - promote resource attributes", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_promote_resource_attributes.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.False(t, got.OTLPConfig.PromoteAllResourceAttributes)
require.Empty(t, got.OTLPConfig.IgnoreResourceAttributes)
require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes)
})
t.Run("bad config - promote resource attributes", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_promote_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
require.ErrorContains(t, err, `invalid 'promote_resource_attributes'`)
require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
})
t.Run("good config - promote all resource attributes", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes_promote_all.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.True(t, got.OTLPConfig.PromoteAllResourceAttributes)
require.Empty(t, got.OTLPConfig.PromoteResourceAttributes)
require.Empty(t, got.OTLPConfig.IgnoreResourceAttributes)
})
t.Run("good config - ignore resource attributes", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_ignore_resource_attributes.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.True(t, got.OTLPConfig.PromoteAllResourceAttributes)
require.Empty(t, got.OTLPConfig.PromoteResourceAttributes)
require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.IgnoreResourceAttributes)
})
t.Run("bad config - ignore resource attributes", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_ignore_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
require.ErrorContains(t, err, `invalid 'ignore_resource_attributes'`)
require.ErrorContains(t, err, `duplicated ignored OTel resource attribute "k8s.job.name"`)
require.ErrorContains(t, err, `empty ignored OTel resource attribute`)
})
t.Run("bad config - conflict between promote all and promote specific resource attributes", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_promote_all_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
require.ErrorContains(t, err, `'promote_all_resource_attributes' and 'promote_resource_attributes' cannot be configured simultaneously`)
})
t.Run("bad config - configuring ignoring of resource attributes without also enabling promotion of all resource attributes", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_ignore_resource_attributes_without_promote_all.bad.yml"), false, promslog.NewNopLogger())
require.ErrorContains(t, err, `'ignore_resource_attributes' cannot be configured unless 'promote_all_resource_attributes' is true`)
})
}
// TestOTLPAllowServiceNameInTargetInfo checks that the
// keep_identifying_resource_attributes OTLP option is loaded from file and
// survives a YAML marshal/unmarshal round trip.
func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) {
	t.Run("good config", func(t *testing.T) {
		loaded, err := LoadFile(filepath.Join("testdata", "otlp_allow_keep_identifying_resource_attributes.good.yml"), false, promslog.NewNopLogger())
		require.NoError(t, err)
		// Round-trip through YAML to make sure the option is (un)marshaled faithfully.
		raw, err := yaml.Marshal(loaded)
		require.NoError(t, err)
		var roundTripped Config
		require.NoError(t, yaml.UnmarshalStrict(raw, &roundTripped))
		require.True(t, roundTripped.OTLPConfig.KeepIdentifyingResourceAttributes)
	})
}
// TestOTLPConvertHistogramsToNHCB checks that the convert_histograms_to_nhcb
// OTLP option is loaded from file and survives a YAML round trip.
func TestOTLPConvertHistogramsToNHCB(t *testing.T) {
	t.Run("good config", func(t *testing.T) {
		loaded, err := LoadFile(filepath.Join("testdata", "otlp_convert_histograms_to_nhcb.good.yml"), false, promslog.NewNopLogger())
		require.NoError(t, err)
		// Round-trip through YAML to make sure the option is (un)marshaled faithfully.
		raw, err := yaml.Marshal(loaded)
		require.NoError(t, err)
		var roundTripped Config
		require.NoError(t, yaml.UnmarshalStrict(raw, &roundTripped))
		require.True(t, roundTripped.OTLPConfig.ConvertHistogramsToNHCB)
	})
}
// TestOTLPPromoteScopeMetadata checks that the promote_scope_metadata OTLP
// option is loaded from file and survives a YAML round trip.
func TestOTLPPromoteScopeMetadata(t *testing.T) {
	t.Run("good config", func(t *testing.T) {
		loaded, err := LoadFile(filepath.Join("testdata", "otlp_promote_scope_metadata.good.yml"), false, promslog.NewNopLogger())
		require.NoError(t, err)
		// Round-trip through YAML to make sure the option is (un)marshaled faithfully.
		raw, err := yaml.Marshal(loaded)
		require.NoError(t, err)
		var roundTripped Config
		require.NoError(t, yaml.UnmarshalStrict(raw, &roundTripped))
		require.True(t, roundTripped.OTLPConfig.PromoteScopeMetadata)
	})
}
func TestOTLPLabelUnderscoreSanitization(t *testing.T) {
t.Run("defaults to true", func(t *testing.T) {
conf, err := LoadFile(filepath.Join("testdata", "otlp_label_underscore_sanitization_defaults.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
// Test that default values are true
require.True(t, conf.OTLPConfig.LabelNameUnderscoreSanitization)
require.True(t, conf.OTLPConfig.LabelNamePreserveMultipleUnderscores)
})
t.Run("explicitly enabled", func(t *testing.T) {
conf, err := LoadFile(filepath.Join("testdata", "otlp_label_underscore_sanitization_enabled.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(conf)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.True(t, got.OTLPConfig.LabelNameUnderscoreSanitization)
require.True(t, got.OTLPConfig.LabelNamePreserveMultipleUnderscores)
})
t.Run("explicitly disabled", func(t *testing.T) {
conf, err := LoadFile(filepath.Join("testdata", "otlp_label_underscore_sanitization_disabled.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
// When explicitly set to false, they should be false
require.False(t, conf.OTLPConfig.LabelNameUnderscoreSanitization)
require.False(t, conf.OTLPConfig.LabelNamePreserveMultipleUnderscores)
})
t.Run("empty config uses defaults", func(t *testing.T) {
conf, err := LoadFile(filepath.Join("testdata", "otlp_empty.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
// Empty config should use default values (true)
require.True(t, conf.OTLPConfig.LabelNameUnderscoreSanitization)
require.True(t, conf.OTLPConfig.LabelNamePreserveMultipleUnderscores)
})
}
func TestOTLPAllowUTF8(t *testing.T) {
t.Run("good config - NoUTF8EscapingWithSuffixes", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
verify := func(t *testing.T, conf *Config, err error) {
t.Helper()
require.NoError(t, err)
require.Equal(t, otlptranslator.NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
}
t.Run("LoadFile", func(t *testing.T) {
conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, conf, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
conf, err := Load(string(content), promslog.NewNopLogger())
verify(t, conf, err)
})
})
t.Run("incompatible config - NoUTF8EscapingWithSuffixes", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `OTLP translation strategy "NoUTF8EscapingWithSuffixes" is not allowed when UTF8 is disabled`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
t.Log("err", err)
verify(t, err)
})
})
t.Run("good config - NoTranslation", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_no_translation.good.yml")
verify := func(t *testing.T, conf *Config, err error) {
t.Helper()
require.NoError(t, err)
require.Equal(t, otlptranslator.NoTranslation, conf.OTLPConfig.TranslationStrategy)
}
t.Run("LoadFile", func(t *testing.T) {
conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, conf, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
conf, err := Load(string(content), promslog.NewNopLogger())
verify(t, conf, err)
})
})
t.Run("incompatible config - NoTranslation", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_no_translation.incompatible.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `OTLP translation strategy "NoTranslation" is not allowed when UTF8 is disabled`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
t.Log("err", err)
verify(t, err)
})
})
t.Run("bad config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
verify(t, err)
})
})
t.Run("good config - missing otlp config uses default", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_empty.yml")
verify := func(t *testing.T, conf *Config, err error) {
t.Helper()
require.NoError(t, err)
require.Equal(t, otlptranslator.UnderscoreEscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
}
t.Run("LoadFile", func(t *testing.T) {
conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, conf, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
conf, err := Load(string(content), promslog.NewNopLogger())
verify(t, conf, err)
})
})
}
// TestLoadConfig parses the main good-config fixture and compares it against
// the expected in-memory configuration.
func TestLoadConfig(t *testing.T) {
	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
	// an overwritten default field in the global config permanently changes the default.
	_, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
	c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
	// Unexported fields of the listed types cannot be compared by cmp and are
	// excluded from the equality check.
	testutil.RequireEqualWithOptions(t, expectedConf, c, []cmp.Option{
		cmpopts.IgnoreUnexported(config.ProxyConfig{}),
		cmpopts.IgnoreUnexported(ionos.SDConfig{}),
		cmpopts.IgnoreUnexported(stackit.SDConfig{}),
		cmpopts.IgnoreUnexported(regexp.Regexp{}),
		cmpopts.IgnoreUnexported(hetzner.SDConfig{}),
		cmpopts.IgnoreUnexported(Config{}),
	})
}
// TestScrapeIntervalLarger checks that a config whose scrape interval is at
// least as large as its scrape timeout loads without error.
func TestScrapeIntervalLarger(t *testing.T) {
	cfg, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
	require.Len(t, cfg.ScrapeConfigs, 1)
	for _, scrapeCfg := range cfg.ScrapeConfigs {
		require.GreaterOrEqual(t, scrapeCfg.ScrapeInterval, scrapeCfg.ScrapeTimeout)
	}
}
// YAML marshaling must not reveal authentication credentials.
func TestElideSecrets(t *testing.T) {
	c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
	// Matches the elided placeholder both in its raw form and in its
	// JSON-escaped form (\u003csecret\u003e).
	secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
	config, err := yaml.Marshal(c)
	require.NoError(t, err)
	yamlConfig := string(config)
	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
	// 25 is the number of secret-valued fields in testdata/conf.good.yml;
	// update this count when secrets are added to or removed from the fixture.
	require.Len(t, matches, 25, "wrong number of secret matches found")
	require.NotContains(t, yamlConfig, "mysecret",
		"yaml marshal reveals authentication credentials.")
}
// TestLoadConfigRuleFilesAbsolutePath parses a valid file whose rule_files
// entries use absolute paths and checks the resulting config.
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
	got, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, ruleFilesExpectedConf, got)
}
// TestKubernetesEmptyAPIServer ensures a Kubernetes SD config that omits the
// api_server address loads successfully (in-cluster configuration).
func TestKubernetesEmptyAPIServer(t *testing.T) {
	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
}
// TestKubernetesWithKubeConfig ensures a Kubernetes SD config that supplies a
// kubeconfig_file without an api_server address loads successfully.
func TestKubernetesWithKubeConfig(t *testing.T) {
	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
}
// TestKubernetesSelectors ensures that the per-role Kubernetes selector
// fixtures parse without error for every supported role. The previous version
// repeated the LoadFile/NoError pair five times; the loop removes that
// duplication and reports each file as its own subtest.
func TestKubernetesSelectors(t *testing.T) {
	for _, file := range []string{
		"kubernetes_selectors_endpoints.good.yml",
		"kubernetes_selectors_node.good.yml",
		"kubernetes_selectors_ingress.good.yml",
		"kubernetes_selectors_pod.good.yml",
		"kubernetes_selectors_service.good.yml",
	} {
		t.Run(file, func(t *testing.T) {
			_, err := LoadFile("testdata/"+file, false, promslog.NewNopLogger())
			require.NoError(t, err)
		})
	}
}
// expectedErrors enumerates configuration fixtures under testdata/ that must
// fail to load, paired with a substring the resulting error must contain.
// Consumed by TestBadConfigs; add an entry here for every new *.bad.yml file.
var expectedErrors = []struct {
	filename string
	errMsg   string
}{
	{
		filename: "jobname.bad.yml",
		errMsg:   `job_name is empty`,
	},
	{
		filename: "jobname_dup.bad.yml",
		errMsg:   `found multiple scrape configs with job name "prometheus"`,
	},
	{
		filename: "scrape_interval.bad.yml",
		errMsg:   `scrape timeout greater than scrape interval`,
	},
	{
		filename: "labelname.bad.yml",
		errMsg:   `"\xff" is not a valid label name`,
	},
	{
		filename: "labelvalue.bad.yml",
		errMsg:   `"\xff" is not a valid label value`,
	},
	{
		filename: "regex.bad.yml",
		errMsg:   "error parsing regexp",
	},
	{
		filename: "modulus_missing.bad.yml",
		errMsg:   "relabel configuration for hashmod requires non-zero modulus",
	},
	{
		filename: "labelkeep.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	},
	{
		filename: "labelkeep2.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	},
	{
		filename: "labelkeep3.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	},
	{
		filename: "labelkeep4.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	},
	{
		filename: "labelkeep5.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	},
	{
		filename: "labeldrop.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	},
	{
		filename: "labeldrop2.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	},
	{
		filename: "labeldrop3.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	},
	{
		filename: "labeldrop4.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	},
	{
		filename: "labeldrop5.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	},
	{
		filename: "dropequal.bad.yml",
		errMsg:   "relabel configuration for dropequal action requires 'target_label' value",
	},
	{
		filename: "dropequal1.bad.yml",
		errMsg:   "dropequal action requires only 'source_labels' and `target_label`, and no other fields",
	},
	{
		filename: "keepequal.bad.yml",
		errMsg:   "relabel configuration for keepequal action requires 'target_label' value",
	},
	{
		filename: "keepequal1.bad.yml",
		errMsg:   "keepequal action requires only 'source_labels' and `target_label`, and no other fields",
	},
	{
		filename: "labelmap.bad.yml",
		errMsg:   "!!binary value contains invalid base64 data",
	},
	{
		filename: "lowercase.bad.yml",
		errMsg:   "relabel configuration for lowercase action requires 'target_label' value",
	},
	{
		filename: "lowercase3.bad.yml",
		errMsg:   "'replacement' can not be set for lowercase action",
	},
	{
		filename: "uppercase.bad.yml",
		errMsg:   "relabel configuration for uppercase action requires 'target_label' value",
	},
	{
		filename: "uppercase3.bad.yml",
		errMsg:   "'replacement' can not be set for uppercase action",
	},
	{
		filename: "rules.bad.yml",
		errMsg:   "invalid rule file path",
	},
	{
		filename: "unknown_attr.bad.yml",
		errMsg:   "field consult_sd_configs not found in type",
	},
	{
		filename: "bearertoken.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "bearertoken_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "kubernetes_http_config_without_api_server.bad.yml",
		errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
	},
	{
		filename: "kubernetes_kubeconfig_with_own_namespace.bad.yml",
		errMsg:   "cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously",
	},
	{
		filename: "kubernetes_api_server_with_own_namespace.bad.yml",
		errMsg:   "cannot use 'api_server' and 'namespaces.own_namespace' simultaneously",
	},
	{
		filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
		errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
	},
	{
		filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
		errMsg:   "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
	},
	{
		filename: "kubernetes_bearertoken.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "kubernetes_role.bad.yml",
		errMsg:   "role",
	},
	{
		filename: "kubernetes_selectors_endpoints.bad.yml",
		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
	},
	{
		filename: "kubernetes_selectors_ingress.bad.yml",
		errMsg:   "ingress role supports only ingress selectors",
	},
	{
		filename: "kubernetes_selectors_node.bad.yml",
		errMsg:   "node role supports only node selectors",
	},
	{
		filename: "kubernetes_selectors_pod.bad.yml",
		errMsg:   "pod role supports only pod selectors",
	},
	{
		filename: "kubernetes_selectors_service.bad.yml",
		errMsg:   "service role supports only service selectors",
	},
	{
		filename: "kubernetes_namespace_discovery.bad.yml",
		errMsg:   "field foo not found in type kubernetes.plain",
	},
	{
		filename: "kubernetes_selectors_duplicated_role.bad.yml",
		errMsg:   "duplicated selector role: pod",
	},
	{
		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
	},
	{
		filename: "kubernetes_bearertoken_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "kubernetes_authorization_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
	},
	{
		filename: "marathon_no_servers.bad.yml",
		errMsg:   "marathon_sd: must contain at least one Marathon server",
	},
	{
		filename: "marathon_authtoken_authtokenfile.bad.yml",
		errMsg:   "marathon_sd: at most one of auth_token & auth_token_file must be configured",
	},
	{
		filename: "marathon_authtoken_basicauth.bad.yml",
		errMsg:   "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured",
	},
	{
		filename: "marathon_authtoken_bearertoken.bad.yml",
		errMsg:   "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
	},
	{
		filename: "marathon_authtoken_authorization.bad.yml",
		errMsg:   "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
	},
	{
		filename: "openstack_role.bad.yml",
		errMsg:   "unknown OpenStack SD role",
	},
	{
		filename: "openstack_availability.bad.yml",
		errMsg:   "unknown availability invalid, must be one of admin, internal or public",
	},
	{
		filename: "url_in_targetgroup.bad.yml",
		errMsg:   "\"http://bad\" is not a valid hostname",
	},
	{
		filename: "target_label_missing.bad.yml",
		errMsg:   "relabel configuration for replace action requires 'target_label' value",
	},
	{
		filename: "target_label_hashmod_missing.bad.yml",
		errMsg:   "relabel configuration for hashmod action requires 'target_label' value",
	},
	{
		filename: "unknown_global_attr.bad.yml",
		errMsg:   "field nonexistent_field not found in type config.plain",
	},
	{
		filename: "remote_read_url_missing.bad.yml",
		errMsg:   `url for remote_read is empty`,
	},
	{
		filename: "remote_write_header.bad.yml",
		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
	},
	{
		filename: "remote_read_header.bad.yml",
		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
	},
	{
		filename: "remote_write_authorization_header.bad.yml",
		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`,
	},
	{
		filename: "remote_write_wrong_msg.bad.yml",
		errMsg:   `invalid protobuf_message value: unknown type for remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`,
	},
	{
		filename: "remote_write_url_missing.bad.yml",
		errMsg:   `url for remote_write is empty`,
	},
	{
		filename: "remote_write_dup.bad.yml",
		errMsg:   `found multiple remote write configs with job name "queue1"`,
	},
	{
		filename: "remote_read_dup.bad.yml",
		errMsg:   `found multiple remote read configs with job name "queue1"`,
	},
	{
		filename: "ec2_filters_empty_values.bad.yml",
		errMsg:   `EC2 SD configuration filter values cannot be empty`,
	},
	{
		filename: "ec2_token_file.bad.yml",
		errMsg:   `at most one of bearer_token & bearer_token_file must be configured`,
	},
	{
		filename: "lightsail_token_file.bad.yml",
		errMsg:   `at most one of bearer_token & bearer_token_file must be configured`,
	},
	{
		filename: "section_key_dup.bad.yml",
		errMsg:   "field scrape_configs already set in type config.plain",
	},
	{
		filename: "azure_client_id_missing.bad.yml",
		errMsg:   "azure SD configuration requires a client_id",
	},
	{
		filename: "azure_client_secret_missing.bad.yml",
		errMsg:   "azure SD configuration requires a client_secret",
	},
	{
		filename: "azure_subscription_id_missing.bad.yml",
		errMsg:   "azure SD configuration requires a subscription_id",
	},
	{
		filename: "azure_tenant_id_missing.bad.yml",
		errMsg:   "azure SD configuration requires a tenant_id",
	},
	{
		filename: "azure_authentication_method.bad.yml",
		errMsg:   "unknown authentication_type \"invalid\". Supported types are \"OAuth\", \"ManagedIdentity\" or \"SDK\"",
	},
	{
		filename: "azure_bearertoken_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "empty_scrape_config.bad.yml",
		errMsg:   "empty or null scrape config section",
	},
	{
		filename: "empty_rw_config.bad.yml",
		errMsg:   "empty or null remote write config section",
	},
	{
		filename: "empty_rr_config.bad.yml",
		errMsg:   "empty or null remote read config section",
	},
	{
		filename: "empty_target_relabel_config.bad.yml",
		errMsg:   "empty or null target relabeling rule",
	},
	{
		filename: "empty_metric_relabel_config.bad.yml",
		errMsg:   "empty or null metric relabeling rule",
	},
	{
		filename: "empty_alert_relabel_config.bad.yml",
		errMsg:   "empty or null alert relabeling rule",
	},
	{
		filename: "empty_alertmanager_relabel_config.bad.yml",
		errMsg:   "empty or null Alertmanager target relabeling rule",
	},
	{
		filename: "empty_rw_relabel_config.bad.yml",
		errMsg:   "empty or null relabeling rule in remote write config",
	},
	{
		filename: "empty_static_config.bad.yml",
		errMsg:   "empty or null section in static_configs",
	},
	{
		filename: "puppetdb_no_query.bad.yml",
		errMsg:   "query missing",
	},
	{
		filename: "puppetdb_no_url.bad.yml",
		errMsg:   "URL is missing",
	},
	{
		filename: "puppetdb_bad_url.bad.yml",
		errMsg:   "host is missing in URL",
	},
	{
		filename: "puppetdb_no_scheme.bad.yml",
		errMsg:   "URL scheme must be 'http' or 'https'",
	},
	{
		filename: "puppetdb_token_file.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "hetzner_role.bad.yml",
		errMsg:   "unknown role",
	},
	{
		filename: "eureka_no_server.bad.yml",
		errMsg:   "empty or null eureka server",
	},
	{
		filename: "eureka_invalid_server.bad.yml",
		errMsg:   "invalid eureka server URL",
	},
	{
		filename: "scaleway_role.bad.yml",
		errMsg:   `unknown role "invalid"`,
	},
	{
		filename: "scaleway_no_secret.bad.yml",
		errMsg:   "one of secret_key & secret_key_file must be configured",
	},
	{
		filename: "scaleway_two_secrets.bad.yml",
		errMsg:   "at most one of secret_key & secret_key_file must be configured",
	},
	{
		filename: "scrape_body_size_limit.bad.yml",
		errMsg:   "units: unknown unit in 100",
	},
	{
		filename: "http_url_no_scheme.bad.yml",
		errMsg:   "URL scheme must be 'http' or 'https'",
	},
	{
		filename: "http_url_no_host.bad.yml",
		errMsg:   "host is missing in URL",
	},
	{
		filename: "http_token_file.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "http_url_bad_scheme.bad.yml",
		errMsg:   "URL scheme must be 'http' or 'https'",
	},
	{
		filename: "empty_scrape_config_action.bad.yml",
		errMsg:   "relabel action cannot be empty",
	},
	{
		filename: "tracing_missing_endpoint.bad.yml",
		errMsg:   "tracing endpoint must be set",
	},
	{
		filename: "tracing_invalid_header.bad.yml",
		errMsg:   "x-prometheus-remote-write-version is a reserved header. It must not be changed",
	},
	{
		filename: "tracing_invalid_authorization_header.bad.yml",
		errMsg:   "authorization header configuration is not yet supported",
	},
	{
		filename: "tracing_invalid_compression.bad.yml",
		errMsg:   "invalid compression type foo provided, valid options: gzip",
	},
	{
		filename: "uyuni_no_server.bad.yml",
		errMsg:   "Uyuni SD configuration requires server host",
	},
	{
		filename: "uyuni_token_file.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	},
	{
		filename: "ionos_datacenter.bad.yml",
		errMsg:   "datacenter id can't be empty",
	},
	{
		filename: "ovhcloud_no_secret.bad.yml",
		errMsg:   "application secret can not be empty",
	},
	{
		filename: "ovhcloud_bad_service.bad.yml",
		errMsg:   "unknown service: fakeservice",
	},
	{
		filename: "scrape_config_files_glob.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`,
	},
	{
		filename: "scrape_config_files_scrape_protocols.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`,
	},
	{
		filename: "scrape_config_files_scrape_protocols2.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
	},
	{
		filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`,
	},
	{
		filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml",
		errMsg:   `unmarshal errors`,
	},
	{
		filename: "scrape_config_utf8_conflicting.bad.yml",
		errMsg:   `utf8 metric names requested but validation scheme is not set to UTF8`,
	},
	{
		filename: "stackit_endpoint.bad.yml",
		errMsg:   "invalid endpoint",
	},
}
// TestBadConfigs loads every fixture listed in expectedErrors and asserts
// that loading fails with the expected error message. Each file runs as its
// own subtest so a single failure neither stops the rest nor obscures which
// fixture broke, and individual files can be targeted with -run.
func TestBadConfigs(t *testing.T) {
	for _, ee := range expectedErrors {
		t.Run(ee.filename, func(t *testing.T) {
			_, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger())
			require.ErrorContains(t, err, ee.errMsg,
				"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
		})
	}
}
// TestBadStaticConfigsYML checks that a malformed static_configs YAML file
// fails strict unmarshaling into a target group.
func TestBadStaticConfigsYML(t *testing.T) {
	raw, err := os.ReadFile("testdata/static_config.bad.yml")
	require.NoError(t, err)
	var group targetgroup.Group
	require.Error(t, yaml.UnmarshalStrict(raw, &group))
}
// TestEmptyConfig checks that loading an empty configuration string yields
// exactly the default configuration.
func TestEmptyConfig(t *testing.T) {
	c, err := Load("", promslog.NewNopLogger())
	require.NoError(t, err)
	exp := DefaultConfig
	// loaded is set by Load; mirror it so the equality check below passes.
	exp.loaded = true
	require.Equal(t, exp, *c)
	// The default runtime block must be populated too, including GoGC.
	require.Equal(t, 75, c.Runtime.GoGC)
}
func TestExpandExternalLabels(t *testing.T) {
// Cleanup ant TEST env variable that could exist on the system.
os.Setenv("TEST", "")
c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
os.Setenv("TEST", "TestValue")
c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
}
func TestAgentMode(t *testing.T) {
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger())
require.NoError(t, err)
require.Empty(t, c.RemoteWriteConfigs)
c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger())
require.NoError(t, err)
require.Len(t, c.RemoteWriteConfigs, 1)
require.Equal(
t,
"http://remote1/push",
c.RemoteWriteConfigs[0].URL.String(),
)
}
// TestGlobalConfig covers two aspects of the global configuration block:
// an empty `global:` section must restore all defaults, and
// GlobalConfig.isZero() must detect each individually-set field.
func TestGlobalConfig(t *testing.T) {
	t.Run("empty block restores defaults", func(t *testing.T) {
		c, err := Load("global:\n", promslog.NewNopLogger())
		require.NoError(t, err)
		exp := DefaultConfig
		// loaded is set by Load; mirror it so the equality check passes.
		exp.loaded = true
		require.Equal(t, exp, *c)
	})
	// Verify that isZero() correctly identifies non-zero configurations for all
	// fields in GlobalConfig. This is important because isZero() is used during
	// YAML unmarshaling to detect empty global blocks that should be replaced
	// with defaults.
	t.Run("isZero", func(t *testing.T) {
		for _, tc := range []struct {
			name       string
			config     GlobalConfig
			expectZero bool
		}{
			{
				name:       "empty GlobalConfig",
				config:     GlobalConfig{},
				expectZero: true,
			},
			{
				name:       "ScrapeInterval set",
				config:     GlobalConfig{ScrapeInterval: model.Duration(30 * time.Second)},
				expectZero: false,
			},
			{
				name:       "BodySizeLimit set",
				config:     GlobalConfig{BodySizeLimit: 1 * units.MiB},
				expectZero: false,
			},
			{
				name:       "SampleLimit set",
				config:     GlobalConfig{SampleLimit: 1000},
				expectZero: false,
			},
			{
				name:       "TargetLimit set",
				config:     GlobalConfig{TargetLimit: 500},
				expectZero: false,
			},
			{
				name:       "LabelLimit set",
				config:     GlobalConfig{LabelLimit: 100},
				expectZero: false,
			},
			{
				name:       "LabelNameLengthLimit set",
				config:     GlobalConfig{LabelNameLengthLimit: 50},
				expectZero: false,
			},
			{
				name:       "LabelValueLengthLimit set",
				config:     GlobalConfig{LabelValueLengthLimit: 200},
				expectZero: false,
			},
			{
				name:       "KeepDroppedTargets set",
				config:     GlobalConfig{KeepDroppedTargets: 10},
				expectZero: false,
			},
			{
				name:       "MetricNameValidationScheme set",
				config:     GlobalConfig{MetricNameValidationScheme: model.LegacyValidation},
				expectZero: false,
			},
			{
				name:       "MetricNameEscapingScheme set",
				config:     GlobalConfig{MetricNameEscapingScheme: model.EscapeUnderscores},
				expectZero: false,
			},
		} {
			t.Run(tc.name, func(t *testing.T) {
				result := tc.config.isZero()
				require.Equal(t, tc.expectZero, result)
			})
		}
	})
}
// ScrapeConfigOptions contains options for creating a scrape config.
// It is consumed by the sc helper in TestGetScrapeConfigs to build expected
// ScrapeConfig values.
type ScrapeConfigOptions struct {
	JobName                       string          // Scrape job name.
	ScrapeInterval                model.Duration  // Interval between scrapes.
	ScrapeTimeout                 model.Duration  // Per-scrape timeout.
	ScrapeProtocols               []ScrapeProtocol // Set to DefaultScrapeProtocols by default.
	ScrapeNativeHistograms        bool            // Enable scraping of native histograms.
	AlwaysScrapeClassicHistograms bool            // Scrape classic histograms even when native ones exist.
	ConvertClassicHistToNHCB      bool            // Convert classic histograms to NHCB.
	ExtraScrapeMetrics            bool            // Emit additional scrape metrics.
}
// TestGetScrapeConfigs loads fixture config files (including ones that pull in
// scrape configs via scrape_config_files, possibly with globs) and checks that
// Config.GetScrapeConfigs returns the fully-defaulted scrape configs, or the
// expected error for the .bad.yml fixtures.
func TestGetScrapeConfigs(t *testing.T) {
	// Helper function to create a scrape config with the given options.
	// All fields not covered by ScrapeConfigOptions are fixed to the values
	// that the fixtures below are expected to resolve to.
	sc := func(opts ScrapeConfigOptions) *ScrapeConfig {
		sc := ScrapeConfig{
			JobName:                    opts.JobName,
			HonorTimestamps:            true,
			ScrapeInterval:             opts.ScrapeInterval,
			ScrapeTimeout:              opts.ScrapeTimeout,
			ScrapeProtocols:            opts.ScrapeProtocols,
			MetricNameValidationScheme: model.UTF8Validation,
			MetricNameEscapingScheme:   model.AllowUTF8,
			MetricsPath:                "/metrics",
			Scheme:                     "http",
			EnableCompression:          true,
			HTTPClientConfig:           config.DefaultHTTPClientConfig,
			ServiceDiscoveryConfigs: discovery.Configs{
				discovery.StaticConfig{
					{
						Targets: []model.LabelSet{
							{
								model.AddressLabel: "localhost:8080",
							},
						},
						Source: "0",
					},
				},
			},
			ScrapeNativeHistograms:         boolPtr(opts.ScrapeNativeHistograms),
			AlwaysScrapeClassicHistograms:  boolPtr(opts.AlwaysScrapeClassicHistograms),
			ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB),
			ExtraScrapeMetrics:             boolPtr(opts.ExtraScrapeMetrics),
		}
		// nil means "caller did not care": fall back to the defaults.
		if opts.ScrapeProtocols == nil {
			sc.ScrapeProtocols = DefaultScrapeProtocols
		}
		return &sc
	}
	testCases := []struct {
		name           string
		configFile     string
		expectedResult []*ScrapeConfig
		expectedError  string
	}{
		{
			name:       "An included config file should be a valid global config.",
			configFile: "testdata/scrape_config_files.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{
				JobName:                       "prometheus",
				ScrapeInterval:                model.Duration(60 * time.Second),
				ScrapeTimeout:                 model.Duration(10 * time.Second),
				ScrapeNativeHistograms:        false,
				AlwaysScrapeClassicHistograms: false,
				ConvertClassicHistToNHCB:      false,
			})},
		},
		{
			name:       "A global config that only include a scrape config file.",
			configFile: "testdata/scrape_config_files_only.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{
				JobName:                       "prometheus",
				ScrapeInterval:                model.Duration(60 * time.Second),
				ScrapeTimeout:                 model.Duration(10 * time.Second),
				ScrapeNativeHistograms:        false,
				AlwaysScrapeClassicHistograms: false,
				ConvertClassicHistToNHCB:      false,
			})},
		},
		{
			name:       "A global config that combine scrape config files and scrape configs.",
			configFile: "testdata/scrape_config_files_combined.good.yml",
			expectedResult: []*ScrapeConfig{
				sc(ScrapeConfigOptions{
					JobName:                       "node",
					ScrapeInterval:                model.Duration(60 * time.Second),
					ScrapeTimeout:                 model.Duration(10 * time.Second),
					ScrapeNativeHistograms:        false,
					AlwaysScrapeClassicHistograms: false,
					ConvertClassicHistToNHCB:      false,
				}),
				sc(ScrapeConfigOptions{
					JobName:                       "prometheus",
					ScrapeInterval:                model.Duration(60 * time.Second),
					ScrapeTimeout:                 model.Duration(10 * time.Second),
					ScrapeNativeHistograms:        false,
					AlwaysScrapeClassicHistograms: false,
					ConvertClassicHistToNHCB:      false,
				}),
				sc(ScrapeConfigOptions{
					JobName:                       "alertmanager",
					ScrapeInterval:                model.Duration(60 * time.Second),
					ScrapeTimeout:                 model.Duration(10 * time.Second),
					ScrapeNativeHistograms:        false,
					AlwaysScrapeClassicHistograms: false,
					ConvertClassicHistToNHCB:      false,
				}),
			},
		},
		{
			// These two expected configs are spelled out in full (rather than
			// via sc()) because they diverge from the helper's fixed values,
			// e.g. TLS settings and the vultr SD config.
			name:       "A global config that includes a scrape config file with globs",
			configFile: "testdata/scrape_config_files_glob.good.yml",
			expectedResult: []*ScrapeConfig{
				{
					JobName:                        "prometheus",
					HonorTimestamps:                true,
					ScrapeInterval:                 model.Duration(60 * time.Second),
					ScrapeTimeout:                  DefaultGlobalConfig.ScrapeTimeout,
					ScrapeProtocols:                DefaultScrapeProtocols,
					MetricNameValidationScheme:     model.UTF8Validation,
					MetricNameEscapingScheme:       model.AllowUTF8,
					ScrapeNativeHistograms:         boolPtr(false),
					AlwaysScrapeClassicHistograms:  boolPtr(false),
					ConvertClassicHistogramsToNHCB: boolPtr(false),
					ExtraScrapeMetrics:             boolPtr(false),
					MetricsPath:                    DefaultScrapeConfig.MetricsPath,
					Scheme:                         DefaultScrapeConfig.Scheme,
					EnableCompression:              true,
					HTTPClientConfig: config.HTTPClientConfig{
						TLSConfig: config.TLSConfig{
							CertFile: filepath.FromSlash("testdata/scrape_configs/valid_cert_file"),
							KeyFile:  filepath.FromSlash("testdata/scrape_configs/valid_key_file"),
						},
						FollowRedirects: true,
						EnableHTTP2:     true,
					},
					ServiceDiscoveryConfigs: discovery.Configs{
						discovery.StaticConfig{
							{
								Targets: []model.LabelSet{
									{model.AddressLabel: "localhost:8080"},
								},
								Source: "0",
							},
						},
					},
				},
				{
					JobName:                        "node",
					HonorTimestamps:                true,
					ScrapeInterval:                 model.Duration(15 * time.Second),
					ScrapeTimeout:                  DefaultGlobalConfig.ScrapeTimeout,
					ScrapeProtocols:                DefaultScrapeProtocols,
					MetricNameValidationScheme:     model.UTF8Validation,
					MetricNameEscapingScheme:       model.AllowUTF8,
					ScrapeNativeHistograms:         boolPtr(false),
					AlwaysScrapeClassicHistograms:  boolPtr(false),
					ConvertClassicHistogramsToNHCB: boolPtr(false),
					ExtraScrapeMetrics:             boolPtr(false),
					HTTPClientConfig: config.HTTPClientConfig{
						TLSConfig: config.TLSConfig{
							CertFile: filepath.FromSlash("testdata/valid_cert_file"),
							KeyFile:  filepath.FromSlash("testdata/valid_key_file"),
						},
						FollowRedirects: true,
						EnableHTTP2:     true,
					},
					MetricsPath:       DefaultScrapeConfig.MetricsPath,
					Scheme:            DefaultScrapeConfig.Scheme,
					EnableCompression: true,
					ServiceDiscoveryConfigs: discovery.Configs{
						&vultr.SDConfig{
							HTTPClientConfig: config.HTTPClientConfig{
								Authorization: &config.Authorization{
									Type:        "Bearer",
									Credentials: "abcdef",
								},
								FollowRedirects: true,
								EnableHTTP2:     true,
							},
							Port:            80,
							RefreshInterval: model.Duration(60 * time.Second),
						},
					},
				},
			},
		},
		{
			name:          "A global config that includes twice the same scrape configs.",
			configFile:    "testdata/scrape_config_files_double_import.bad.yml",
			expectedError: `found multiple scrape configs with job name "prometheus"`,
		},
		{
			name:          "A global config that includes a scrape config identical to a scrape config in the main file.",
			configFile:    "testdata/scrape_config_files_duplicate.bad.yml",
			expectedError: `found multiple scrape configs with job name "prometheus"`,
		},
		{
			name:          "A global config that includes a scrape config file with errors.",
			configFile:    "testdata/scrape_config_files_global.bad.yml",
			expectedError: `scrape timeout greater than scrape interval for scrape config with job name "prometheus"`,
		},
		{
			name:           "A global config that enables convert classic histograms to nhcb.",
			configFile:     "testdata/global_convert_classic_hist_to_nhcb.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})},
		},
		{
			name:           "A global config that enables convert classic histograms to nhcb and scrape config that disables the conversion",
			configFile:     "testdata/local_disable_convert_classic_hist_to_nhcb.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
		},
		{
			name:           "A global config that disables convert classic histograms to nhcb and scrape config that enables the conversion",
			configFile:     "testdata/local_convert_classic_hist_to_nhcb.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})},
		},
		{
			name:           "A global config that enables always scrape classic histograms",
			configFile:     "testdata/global_enable_always_scrape_classic_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})},
		},
		{
			name:           "A global config that disables always scrape classic histograms",
			configFile:     "testdata/global_disable_always_scrape_classic_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
		},
		{
			name:           "A global config that disables always scrape classic histograms and scrape config that enables it",
			configFile:     "testdata/local_enable_always_scrape_classic_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})},
		},
		{
			name:           "A global config that enables always scrape classic histograms and scrape config that disables it",
			configFile:     "testdata/local_disable_always_scrape_classic_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
		},
		{
			name:           "A global config that enables scrape native histograms",
			configFile:     "testdata/global_enable_scrape_native_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: DefaultProtoFirstScrapeProtocols})},
		},
		{
			name:           "A global config that enables scrape native histograms and sets scrape protocols explicitly",
			configFile:     "testdata/global_enable_scrape_native_hist_and_scrape_protocols.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
		},
		{
			name:           "A local config that enables scrape native histograms",
			configFile:     "testdata/local_enable_scrape_native_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: DefaultProtoFirstScrapeProtocols})},
		},
		{
			name:           "A local config that enables scrape native histograms and sets scrape protocols explicitly",
			configFile:     "testdata/local_enable_scrape_native_hist_and_scrape_protocols.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
		},
		{
			name:           "A global config that enables scrape native histograms and scrape config that disables it",
			configFile:     "testdata/local_disable_scrape_native_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: DefaultScrapeProtocols})},
		},
		{
			name:           "A global config that enables scrape native histograms and scrape protocols and scrape config that disables scrape native histograms but does not change scrape protocols",
			configFile:     "testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
		},
		{
			name:           "A global config that enables extra scrape metrics",
			configFile:     "testdata/global_enable_extra_scrape_metrics.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
		},
		{
			name:           "A global config that disables extra scrape metrics",
			configFile:     "testdata/global_disable_extra_scrape_metrics.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
		},
		{
			name:           "A global config that disables extra scrape metrics and scrape config that enables it",
			configFile:     "testdata/local_enable_extra_scrape_metrics.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
		},
		{
			name:           "A global config that enables extra scrape metrics and scrape config that disables it",
			configFile:     "testdata/local_disable_extra_scrape_metrics.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger())
			require.NoError(t, err)
			scfgs, err := c.GetScrapeConfigs()
			if len(tc.expectedError) > 0 {
				require.ErrorContains(t, err, tc.expectedError)
			}
			// NOTE(review): when tc.expectedError is empty, the err returned by
			// GetScrapeConfigs is never asserted nil; an unexpected error would
			// only surface indirectly through the Equal check below.
			require.Equal(t, tc.expectedResult, scfgs)
		})
	}
}
// TestExtraScrapeMetrics checks the precedence rules for the
// extra_scrape_metrics setting: the global value fills in unset per-job
// values, and a per-job value overrides the global one. The effective value
// is read through ScrapeConfig.ExtraScrapeMetricsEnabled().
func TestExtraScrapeMetrics(t *testing.T) {
	tests := []struct {
		name          string
		config        string
		expectGlobal  *bool
		expectEnabled bool
	}{
		{
			name: "default values (not set)",
			config: `
scrape_configs:
  - job_name: test
    static_configs:
      - targets: ['localhost:9090']
`,
			expectGlobal:  boolPtr(false), // inherits from DefaultGlobalConfig
			expectEnabled: false,
		},
		{
			name: "global enabled",
			config: `
global:
  extra_scrape_metrics: true
scrape_configs:
  - job_name: test
    static_configs:
      - targets: ['localhost:9090']
`,
			expectGlobal:  boolPtr(true),
			expectEnabled: true,
		},
		{
			name: "global disabled",
			config: `
global:
  extra_scrape_metrics: false
scrape_configs:
  - job_name: test
    static_configs:
      - targets: ['localhost:9090']
`,
			expectGlobal:  boolPtr(false),
			expectEnabled: false,
		},
		{
			name: "scrape override enabled",
			config: `
global:
  extra_scrape_metrics: false
scrape_configs:
  - job_name: test
    extra_scrape_metrics: true
    static_configs:
      - targets: ['localhost:9090']
`,
			expectGlobal:  boolPtr(false),
			expectEnabled: true,
		},
		{
			name: "scrape override disabled",
			config: `
global:
  extra_scrape_metrics: true
scrape_configs:
  - job_name: test
    extra_scrape_metrics: false
    static_configs:
      - targets: ['localhost:9090']
`,
			expectGlobal:  boolPtr(true),
			expectEnabled: false,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			cfg, err := Load(tc.config, promslog.NewNopLogger())
			require.NoError(t, err)
			// Check global config
			require.Equal(t, tc.expectGlobal, cfg.GlobalConfig.ExtraScrapeMetrics)
			// Check scrape config
			scfgs, err := cfg.GetScrapeConfigs()
			require.NoError(t, err)
			require.Len(t, scfgs, 1)
			// Check the effective value via the helper method
			require.Equal(t, tc.expectEnabled, scfgs[0].ExtraScrapeMetricsEnabled())
		})
	}
}
// kubernetesSDHostURL returns the fixed Kubernetes API host URL used by the
// expected-config fixtures in this test file.
func kubernetesSDHostURL() config.URL {
	// The literal is known-valid, so the parse error is deliberately ignored.
	parsed, _ := url.Parse("https://localhost:1234")
	return config.URL{URL: parsed}
}
// TestScrapeConfigDisableCompression ensures that enable_compression: false
// survives a load -> marshal -> strict-unmarshal round trip.
func TestScrapeConfigDisableCompression(t *testing.T) {
	loaded, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)

	serialized, err := yaml.Marshal(loaded)
	require.NoError(t, err)

	roundTripped := &Config{}
	require.NoError(t, yaml.UnmarshalStrict(serialized, roundTripped))

	require.False(t, roundTripped.ScrapeConfigs[0].EnableCompression)
}
// TestScrapeConfigNameValidationSettings checks how global and per-scrape-config
// metric-name validation/escaping settings combine. Each fixture is loaded,
// marshalled back to YAML, and strictly unmarshalled into a fresh Config, so
// the round trip is verified as well.
func TestScrapeConfigNameValidationSettings(t *testing.T) {
	tests := []struct {
		name           string
		inputFile      string
		expectScheme   model.ValidationScheme
		expectEscaping model.EscapingScheme
	}{
		{
			name:           "blank config implies default",
			inputFile:      "scrape_config_default_validation_mode",
			expectScheme:   model.UTF8Validation,
			expectEscaping: model.NoEscaping,
		},
		{
			name:           "global setting implies local settings",
			inputFile:      "scrape_config_global_validation_mode",
			expectScheme:   model.LegacyValidation,
			expectEscaping: model.DotsEscaping,
		},
		{
			name:           "local setting",
			inputFile:      "scrape_config_local_validation_mode",
			expectScheme:   model.LegacyValidation,
			expectEscaping: model.ValueEncodingEscaping,
		},
		{
			name:           "local setting overrides global setting",
			inputFile:      "scrape_config_local_global_validation_mode",
			expectScheme:   model.UTF8Validation,
			expectEscaping: model.DotsEscaping,
		},
		{
			name:           "local validation implies underscores escaping",
			inputFile:      "scrape_config_local_infer_escaping",
			expectScheme:   model.LegacyValidation,
			expectEscaping: model.UnderscoreEscaping,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger())
			require.NoError(t, err)
			out, err := yaml.Marshal(want)
			require.NoError(t, err)
			got := &Config{}
			require.NoError(t, yaml.UnmarshalStrict(out, got))
			require.Equal(t, tc.expectScheme, got.ScrapeConfigs[0].MetricNameValidationScheme)
			// The escaping scheme is stored as a string; convert before comparing.
			escaping, err := model.ToEscapingScheme(got.ScrapeConfigs[0].MetricNameEscapingScheme)
			require.NoError(t, err)
			require.Equal(t, tc.expectEscaping, escaping)
		})
	}
}
// TestScrapeConfigNameEscapingSettings is the string-level companion of
// TestScrapeConfigNameValidationSettings: it compares the raw
// MetricNameEscapingScheme string on the round-tripped config instead of the
// converted model.EscapingScheme value.
func TestScrapeConfigNameEscapingSettings(t *testing.T) {
	tests := []struct {
		name                   string
		inputFile              string
		expectValidationScheme model.ValidationScheme
		expectEscapingScheme   string
	}{
		{
			name:                   "blank config implies default",
			inputFile:              "scrape_config_default_validation_mode",
			expectValidationScheme: model.UTF8Validation,
			expectEscapingScheme:   "allow-utf-8",
		},
		{
			name:                   "global setting implies local settings",
			inputFile:              "scrape_config_global_validation_mode",
			expectValidationScheme: model.LegacyValidation,
			expectEscapingScheme:   "dots",
		},
		{
			name:                   "local setting",
			inputFile:              "scrape_config_local_validation_mode",
			expectValidationScheme: model.LegacyValidation,
			expectEscapingScheme:   "values",
		},
		{
			name:                   "local setting overrides global setting",
			inputFile:              "scrape_config_local_global_validation_mode",
			expectValidationScheme: model.UTF8Validation,
			expectEscapingScheme:   "dots",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger())
			require.NoError(t, err)
			out, err := yaml.Marshal(want)
			require.NoError(t, err)
			got := &Config{}
			require.NoError(t, yaml.UnmarshalStrict(out, got))
			require.Equal(t, tc.expectValidationScheme, got.ScrapeConfigs[0].MetricNameValidationScheme)
			require.Equal(t, tc.expectEscapingScheme, got.ScrapeConfigs[0].MetricNameEscapingScheme)
		})
	}
}
// TestScrapeProtocolHeader verifies the Accept-header media type produced by
// ScrapeProtocol.HeaderMediaType for every known protocol, plus the blank and
// invalid cases (which yield an empty string).
func TestScrapeProtocolHeader(t *testing.T) {
	cases := []struct {
		desc      string
		protocol  ScrapeProtocol
		wantMedia string
	}{
		{desc: "blank", protocol: ScrapeProtocol(""), wantMedia: ""},
		{desc: "invalid", protocol: ScrapeProtocol("invalid"), wantMedia: ""},
		{desc: "prometheus protobuf", protocol: PrometheusProto, wantMedia: "application/vnd.google.protobuf"},
		{desc: "prometheus text 0.0.4", protocol: PrometheusText0_0_4, wantMedia: "text/plain"},
		{desc: "prometheus text 1.0.0", protocol: PrometheusText1_0_0, wantMedia: "text/plain"},
		{desc: "openmetrics 0.0.1", protocol: OpenMetricsText0_0_1, wantMedia: "application/openmetrics-text"},
		{desc: "openmetrics 1.0.0", protocol: OpenMetricsText1_0_0, wantMedia: "application/openmetrics-text"},
	}
	for _, c := range cases {
		t.Run(c.desc, func(t *testing.T) {
			require.Equal(t, c.wantMedia, c.protocol.HeaderMediaType())
		})
	}
}
// Regression test against https://github.com/prometheus/prometheus/issues/15538
func TestGetScrapeConfigs_Loaded(t *testing.T) {
t.Run("without load", func(t *testing.T) {
c := &Config{}
_, err := c.GetScrapeConfigs()
require.EqualError(t, err, "scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
})
t.Run("with load", func(t *testing.T) {
c, err := Load("", promslog.NewNopLogger())
require.NoError(t, err)
_, err = c.GetScrapeConfigs()
require.NoError(t, err)
})
} | go | github | https://github.com/prometheus/prometheus | config/config_test.go |
from __future__ import absolute_import
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
from django.contrib.admin.widgets import (AdminDateWidget, AdminTimeWidget,
AdminSplitDateTime, RelatedFieldWidgetWrapper)
from django.forms import (FileInput, CheckboxInput, RadioSelect, CheckboxSelectMultiple)
from bootstrap3 import renderers
try:
from bootstrap3.utils import add_css_class
except ImportError:
from bootstrap3.html import add_css_class
from bootstrap3.text import text_value
class BootstrapFieldRenderer(renderers.FieldRenderer):
    """
    A django-bootstrap3 field renderer that renders just the field
    """
    def render(self):
        """Render only the widget itself, with Bootstrap attributes applied,
        skipping the label/help-text markup the base renderer would add."""
        # Hidden input requires no special treatment
        if self.field.is_hidden:
            return text_value(self.field)
        # Render the widget
        self.add_widget_attrs()
        html = self.field.as_widget(attrs=self.widget.attrs)
        return html
    def add_class_attrs(self, widget=None):
        """Prepend the appropriate Bootstrap CSS classes to the 'class'
        attribute of *widget* (defaults to this field's widget).

        The isinstance checks below are order-sensitive: more specific widget
        types must be handled before the generic 'form-control' fallback.
        """
        if not widget:
            widget = self.widget
        # for multiwidgets we recursively update classes for each sub-widget
        if isinstance(widget, AdminSplitDateTime):
            for w in widget.widgets:
                self.add_class_attrs(w)
            return
        classes = widget.attrs.get('class', '')
        if isinstance(widget, ReadOnlyPasswordHashWidget):
            # Read-only hash display is static text, not a form control.
            classes = add_css_class(classes, 'form-control-static', prepend=True)
        elif isinstance(widget, (AdminDateWidget,
                                 AdminTimeWidget,
                                 RelatedFieldWidgetWrapper)):
            # for some admin widgets we don't want the input to take full horizontal space
            classes = add_css_class(classes, 'form-control form-control-inline', prepend=True)
        elif not isinstance(widget, (CheckboxInput,
                                     RadioSelect,
                                     CheckboxSelectMultiple,
                                     FileInput)):
            classes = add_css_class(classes, 'form-control', prepend=True)
            # For these widget types, add the size class here
            classes = add_css_class(classes, self.get_size_class())
        widget.attrs['class'] = classes
/* chunkset_avx2.c -- AVX2 inline functions to copy small data chunks.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zbuild.h"
#ifdef X86_AVX2
#include <immintrin.h>
#include "../generic/chunk_permute_table.h"
typedef __m256i chunk_t;
#define CHUNK_SIZE 32
#define HAVE_CHUNKMEMSET_2
#define HAVE_CHUNKMEMSET_4
#define HAVE_CHUNKMEMSET_8
#define HAVE_CHUNK_MAG
/* Populate don't cares so that this is a direct lookup (with some indirection into the permute table), because dist can
 * never be 0 - 2, we'll start with an offset, subtracting 3 from the input.
 * Each entry pairs a byte offset into permute_table (idx) with a leftover count
 * (remval) that GET_CHUNK_MAG stores into *chunk_rem; how remval is consumed is
 * defined by the shared template in chunkset_tpl.h. */
static const lut_rem_pair perm_idx_lut[29] = {
    { 0, 2},                /* 3 */
    { 0, 0},                /* don't care */
    { 1 * 32, 2},           /* 5 */
    { 2 * 32, 2},           /* 6 */
    { 3 * 32, 4},           /* 7 */
    { 0 * 32, 0},           /* don't care */
    { 4 * 32, 5},           /* 9 */
    { 5 * 32, 22},          /* 10 */
    { 6 * 32, 21},          /* 11 */
    { 7 * 32, 20},          /* 12 */
    { 8 * 32, 6},           /* 13 */
    { 9 * 32, 4},           /* 14 */
    {10 * 32, 2},           /* 15 */
    { 0 * 32, 0},           /* don't care */
    {11 * 32, 15},          /* 17 */
    {11 * 32 + 16, 14},     /* 18 */
    {11 * 32 + 16 * 2, 13}, /* 19 */
    {11 * 32 + 16 * 3, 12}, /* 20 */
    {11 * 32 + 16 * 4, 11}, /* 21 */
    {11 * 32 + 16 * 5, 10}, /* 22 */
    {11 * 32 + 16 * 6, 9},  /* 23 */
    {11 * 32 + 16 * 7, 8},  /* 24 */
    {11 * 32 + 16 * 8, 7},  /* 25 */
    {11 * 32 + 16 * 9, 6},  /* 26 */
    {11 * 32 + 16 * 10, 5}, /* 27 */
    {11 * 32 + 16 * 11, 4}, /* 28 */
    {11 * 32 + 16 * 12, 3}, /* 29 */
    {11 * 32 + 16 * 13, 2}, /* 30 */
    {11 * 32 + 16 * 14, 1}  /* 31 */
};
/* Fill the 32-byte chunk by broadcasting the 2-byte pattern at `from`. */
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
    int16_t tmp;
    /* memcpy performs a safe unaligned read without violating strict aliasing. */
    memcpy(&tmp, from, sizeof(tmp));
    *chunk = _mm256_set1_epi16(tmp);
}
/* Fill the 32-byte chunk by broadcasting the 4-byte pattern at `from`. */
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
    int32_t tmp;
    /* memcpy performs a safe unaligned read without violating strict aliasing. */
    memcpy(&tmp, from, sizeof(tmp));
    *chunk = _mm256_set1_epi32(tmp);
}
/* Fill the 32-byte chunk by broadcasting the 8-byte pattern at `from`. */
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
    int64_t tmp;
    /* memcpy performs a safe unaligned read without violating strict aliasing. */
    memcpy(&tmp, from, sizeof(tmp));
    *chunk = _mm256_set1_epi64x(tmp);
}
/* Unaligned 32-byte load from `s` into `*chunk`. */
static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
    *chunk = _mm256_loadu_si256((__m256i *)s);
}
/* Unaligned 32-byte store of `*chunk` to `out`. */
static inline void storechunk(uint8_t *out, chunk_t *chunk) {
    _mm256_storeu_si256((__m256i *)out, *chunk);
}
/* Build a 32-byte chunk that repeats the `dist`-byte pattern starting at `buf`,
 * for the dist values not handled by the chunkmemset_{2,4,8} broadcasts
 * (the lut is indexed by dist - 3, so dist is assumed to be in [3, 31]).
 * The leftover count from the lut is written to *chunk_rem. */
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
    __m256i ret_vec;
    /* While technically we only need to read 4 or 8 bytes into this vector register for a lot of cases, GCC is
     * compiling this to a shared load for all branches, preferring the simpler code. Given that the buf value isn't in
     * GPRs to begin with the 256 bit load is _probably_ just as inexpensive */
    *chunk_rem = lut_rem.remval;
    /* See note in chunkset_ssse3.c for why this is ok */
    __msan_unpoison(buf + dist, 32 - dist);
    if (dist < 16) {
        /* This simpler case still requires us to shuffle in 128 bit lanes, so we must apply a static offset after
         * broadcasting the first vector register to both halves. This is _marginally_ faster than doing two separate
         * shuffles and combining the halves later */
        const __m256i permute_xform =
            _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                             16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16);
        __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table+lut_rem.idx));
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        perm_vec = _mm256_add_epi8(perm_vec, permute_xform);
        /* Duplicate the low 128 bits into both lanes, then shuffle each lane. */
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
        ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
    }  else if (dist == 16) {
        /* A 16-byte pattern is exactly one lane: duplicate it, no shuffle needed. */
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        return _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
    } else {
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        __m128i ret_vec1 = _mm_loadu_si128((__m128i*)(buf + 16));
        /* Take advantage of the fact that only the latter half of the 256 bit vector will actually differ */
        __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
        /* Lanes whose permute index is < 16 must pull bytes from the first 16-byte load. */
        __m128i xlane_permutes = _mm_cmpgt_epi8(_mm_set1_epi8(16), perm_vec1);
        __m128i xlane_res  = _mm_shuffle_epi8(ret_vec0, perm_vec1);
        /* Since we can't wrap twice, we can simply keep the later half exactly how it is instead of having to _also_
         * shuffle those values */
        __m128i latter_half = _mm_blendv_epi8(ret_vec1, xlane_res, xlane_permutes);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
    }

    return ret_vec;
}
#define CHUNKSIZE chunksize_avx2
#define CHUNKCOPY chunkcopy_avx2
#define CHUNKUNROLL chunkunroll_avx2
#define CHUNKMEMSET chunkmemset_avx2
#define CHUNKMEMSET_SAFE chunkmemset_safe_avx2
#include "chunkset_tpl.h"
#define INFLATE_FAST inflate_fast_avx2
#include "inffast_tpl.h"
#endif | c | github | https://github.com/opencv/opencv | 3rdparty/zlib-ng/arch/x86/chunkset_avx2.c |
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
''' A parser for blocks written in C++ '''
import re
import sys
def dummy_translator(the_type, default_v=None):
    """Identity translator: return *the_type* unchanged.

    The optional default value argument is accepted only for interface
    compatibility with real type translators and is ignored.
    """
    return the_type
class ParserCCBlock(object):
    """ Class to read blocks written in C++ """
    def __init__(self, filename_cc, filename_h, blockname, version, type_trans=dummy_translator):
        # Both files are read eagerly; parsing happens on demand in the
        # read_*() methods below.
        self.code_cc = open(filename_cc).read()
        self.code_h = open(filename_h).read()
        self.blockname = blockname
        self.type_trans = type_trans
        self.version = version
    def read_io_signature(self):
        """ Scans a .cc file for an IO signature. """
        def _figure_out_iotype_and_vlen(iosigcall, typestr):
            """ From a type identifier, returns the data type.
            E.g., for sizeof(int), it will return 'int'.
            Returns a list! """
            # makev-style signatures (vector of sizes) are not supported yet
            # ('tbi' = to be implemented); bail out via ValueError.
            if 'gr::io_signature::makev' in iosigcall:
                print 'tbi'
                raise ValueError
            return {'type': [_typestr_to_iotype(x) for x in typestr.split(',')],
                    'vlen': [_typestr_to_vlen(x) for x in typestr.split(',')]
                   }
        def _typestr_to_iotype(typestr):
            """ Convert a type string (e.g. sizeof(int) * vlen) to the type (e.g. 'int'). """
            type_match = re.search('sizeof\s*\(([^)]*)\)', typestr)
            if type_match is None:
                # No sizeof() found: fall back to a byte-sized stream.
                return self.type_trans('char')
            return self.type_trans(type_match.group(1))
        def _typestr_to_vlen(typestr):
            """ From a type identifier, returns the vector length of the block's
            input/out. E.g., for 'sizeof(int) * 10', it returns 10. For
            'sizeof(int)', it returns '1'. For 'sizeof(int) * vlen', it returns
            the string vlen. """
            # Catch fringe case where no sizeof() is given
            if typestr.find('sizeof') == -1:
                return typestr
            if typestr.find('*') == -1:
                return '1'
            # Drop the sizeof(...) factor; whatever remains is the vlen
            # expression (a literal, an identifier, or a product of factors).
            vlen_parts = typestr.split('*')
            for fac in vlen_parts:
                if fac.find('sizeof') != -1:
                    vlen_parts.remove(fac)
            if len(vlen_parts) == 1:
                return vlen_parts[0].strip()
            elif len(vlen_parts) > 1:
                return '*'.join(vlen_parts).strip()
        iosig = {}
        # Matches a pair of gr::io_signature::make[23v]?(min, max, sizes) calls:
        # first the input signature, then the output signature.
        iosig_regex = '(?P<incall>gr::io_signature::make[23v]?)\s*\(\s*(?P<inmin>[^,]+),\s*(?P<inmax>[^,]+),' + \
                      '\s*(?P<intype>(\([^\)]*\)|[^)])+)\),\s*' + \
                      '(?P<outcall>gr::io_signature::make[23v]?)\s*\(\s*(?P<outmin>[^,]+),\s*(?P<outmax>[^,]+),' + \
                      '\s*(?P<outtype>(\([^\)]*\)|[^)])+)\)'
        iosig_match = re.compile(iosig_regex, re.MULTILINE).search(self.code_cc)
        try:
            iosig['in'] = _figure_out_iotype_and_vlen(iosig_match.group('incall'),
                                                      iosig_match.group('intype'))
            iosig['in']['min_ports'] = iosig_match.group('inmin')
            iosig['in']['max_ports'] = iosig_match.group('inmax')
        # NOTE(review): Python 2 syntax equivalent to `except ValueError as
        # Exception` -- it rebinds the builtin name `Exception` and does NOT
        # catch the AttributeError raised when iosig_match is None. Probably a
        # plain `except ValueError:` was intended.
        except ValueError, Exception:
            print "Error: Can't parse input signature."
        try:
            iosig['out'] = _figure_out_iotype_and_vlen(iosig_match.group('outcall'),
                                                       iosig_match.group('outtype'))
            iosig['out']['min_ports'] = iosig_match.group('outmin')
            iosig['out']['max_ports'] = iosig_match.group('outmax')
        # NOTE(review): same Python 2 except-binding quirk as above.
        except ValueError, Exception:
            print "Error: Can't parse output signature."
        return iosig
    def read_params(self):
        """ Read the parameters required to initialize the block """
        def _scan_param_list(start_idx):
            """ Go through a parameter list and return a tuple each:
            (type, name, default_value). Python's re just doesn't cut
            it for C++ code :( """
            # Hand-written character-by-character scanner over self.code_h,
            # implemented as a small state machine with states
            # 'type' -> 'name' -> 'defv' (default value) per parameter.
            i = start_idx
            c = self.code_h
            if c[i] != '(':
                raise ValueError
            i += 1
            param_list = []
            read_state = 'type'
            in_string = False
            parens_count = 0 # Counts ()
            brackets_count = 0 # Counts <>
            end_of_list = False
            this_type = ''
            this_name = ''
            this_defv = ''
            WHITESPACE = ' \t\n\r\f\v'
            while not end_of_list:
                # Keep track of (), stop when reaching final closing parens
                if not in_string:
                    if c[i] == ')':
                        if parens_count == 0:
                            if read_state == 'type' and len(this_type):
                                raise ValueError(
                                        'Found closing parentheses before finishing last argument (this is how far I got: %s)'
                                        % str(param_list)
                                )
                            if len(this_type):
                                param_list.append((this_type, this_name, this_defv))
                            end_of_list = True
                            break
                        else:
                            parens_count -= 1
                    elif c[i] == '(':
                        parens_count += 1
                # Parameter type (int, const std::string, std::vector<gr_complex>, unsigned long ...)
                if read_state == 'type':
                    if c[i] == '<':
                        brackets_count += 1
                    if c[i] == '>':
                        brackets_count -= 1
                    if c[i] == '&':
                        # References are dropped from the recorded type.
                        i += 1
                        continue
                    if c[i] in WHITESPACE and brackets_count == 0:
                        # Whitespace outside template brackets ends a type
                        # token -- unless it was a qualifier we fold away
                        # ('const') or continue ('unsigned').
                        while c[i] in WHITESPACE:
                            i += 1
                            continue
                        if this_type == 'const' or this_type == '': # Ignore this
                            this_type = ''
                        elif this_type == 'unsigned': # Continue
                            this_type += ' '
                            continue
                        else:
                            read_state = 'name'
                        continue
                    this_type += c[i]
                    i += 1
                    continue
                # Parameter name
                if read_state == 'name':
                    if c[i] == '&' or c[i] in WHITESPACE:
                        i += 1
                    elif c[i] == '=':
                        # '=' introduces a default value for this parameter.
                        if parens_count != 0:
                            raise ValueError(
                                    'While parsing argument %d (%s): name finished but no closing parentheses.'
                                    % (len(param_list)+1, this_type + ' ' + this_name)
                            )
                        read_state = 'defv'
                        i += 1
                    elif c[i] == ',':
                        # Parameter ends without a default value.
                        if parens_count:
                            raise ValueError(
                                    'While parsing argument %d (%s): name finished but no closing parentheses.'
                                    % (len(param_list)+1, this_type + ' ' + this_name)
                            )
                        read_state = 'defv'
                    else:
                        this_name += c[i]
                        i += 1
                    continue
                # Default value
                if read_state == 'defv':
                    if in_string:
                        # Inside a string literal: only an unescaped quote ends it.
                        if c[i] == '"' and c[i-1] != '\\':
                            in_string = False
                        else:
                            this_defv += c[i]
                    elif c[i] == ',':
                        if parens_count:
                            raise ValueError(
                                    'While parsing argument %d (%s): default value finished but no closing parentheses.'
                                    % (len(param_list)+1, this_type + ' ' + this_name)
                            )
                        # Comma at paren depth 0 finishes this parameter; reset
                        # the accumulators for the next one.
                        read_state = 'type'
                        param_list.append((this_type, this_name, this_defv))
                        this_type = ''
                        this_name = ''
                        this_defv = ''
                    else:
                        this_defv += c[i]
                    i += 1
                    continue
            return param_list
        # Go, go, go!
        # The factory function's signature differs between GNU Radio 3.7
        # (static sptr make(...)) and earlier versions (..._sptr ..._make_...).
        if self.version == '37':
            make_regex = 'static\s+sptr\s+make\s*'
        else:
            make_regex = '(?<=_API)\s+\w+_sptr\s+\w+_make_\w+\s*'
        make_match = re.compile(make_regex, re.MULTILINE).search(self.code_h)
        try:
            params_list = _scan_param_list(make_match.end(0))
        except ValueError as ve:
            print "Can't parse the argument list: ", ve.args[0]
            sys.exit(0)
        params = []
        for plist in params_list:
            params.append({'type': self.type_trans(plist[0], plist[2]),
                           'key': plist[1],
                           'default': plist[2],
                           'in_constructor': True})
        return params
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, model_helper, brew
class CopyOpsTest(unittest.TestCase):
def tearDown(self):
# Reset workspace after each test
# Otherwise, the multi-GPU test will use previously created tensors,
# which may have been placed on the wrong device
workspace.ResetWorkspace()
def run_test_copy_gradient(self, device_opt):
model = model_helper.ModelHelper(name="copy_test")
with core.DeviceScope(device_opt):
x = model.net.AddExternalInputs("x")
y = model.Copy(x, "y")
loss = model.AveragedLoss(y, "loss")
gradient_map = model.AddGradientOperators([loss])
workspace.FeedBlob(x, np.random.rand(32).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
self.assertTrue(np.array_equal(
workspace.FetchBlob(x),
workspace.FetchBlob(y),
))
self.assertTrue(np.array_equal(
workspace.FetchBlob(gradient_map[x]),
workspace.FetchBlob(gradient_map[y]),
))
def test_copy_gradient_cpu(self):
self.run_test_copy_gradient(core.DeviceOption(caffe2_pb2.CPU, 0))
@unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU.")
def test_copy_gradient_gpu(self):
self.run_test_copy_gradient(core.DeviceOption(caffe2_pb2.CUDA, 0))
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPU.")
def test_copy_gradient_multiple_gpus(self):
model = model_helper.ModelHelper(name="copy_test")
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
x_cpu = model.net.AddExternalInputs("x_cpu")
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
x_gpu_1 = model.CopyCPUToGPU(x_cpu, "x_gpu_1")
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 1)):
x_gpu_2 = model.Copy(x_gpu_1, "x_gpu_2")
loss = model.AveragedLoss(x_gpu_2, "loss")
gradient_map = model.AddGradientOperators([loss])
workspace.FeedBlob("x_cpu", np.random.rand(32).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
self.assertTrue(np.array_equal(
workspace.FetchBlob("x_gpu_1"),
workspace.FetchBlob("x_gpu_2"),
))
self.assertTrue(np.array_equal(
workspace.FetchBlob(gradient_map["x_gpu_1"]),
workspace.FetchBlob(gradient_map["x_gpu_2"]),
))
def get_op_with_output(model, output_blob_name):
for op in model.net.Proto().op:
if len(op.output) == 1 and op.output[0] == output_blob_name:
return op
return None
self.assertEqual(
get_op_with_output(model, "x_gpu_2_grad").device_option,
core.DeviceOption(caffe2_pb2.CUDA, 1),
)
self.assertEqual(
get_op_with_output(model, "x_cpu_grad").device_option,
core.DeviceOption(caffe2_pb2.CUDA, 0),
)
@unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU.")
def test_cpu2gpu_gpu2cpu_sparse_gradients(self):
model = model_helper.ModelHelper(name="copy_test")
v = model.param_init_net.UniformFill([], ["v"], shape=[16, 4])
indices = model.param_init_net.UniformFill([], ["v"], shape=[16, 4])
cpu_opt = core.DeviceOption(caffe2_pb2.CPU, 0)
gpu_opt = core.DeviceOption(caffe2_pb2.CUDA, 0)
with core.DeviceScope(gpu_opt):
vcpu = model.CopyGPUToCPU(v, "vcpu")
with core.DeviceScope(cpu_opt):
g = model.Gather([vcpu, indices], "g")
with core.DeviceScope(gpu_opt):
ggpu = model.CopyCPUToGPU(g, "ggpu")
f = brew.fc(model, ggpu, "out", dim_in=4, dim_out=6)
(softmax, loss) = model.SoftmaxWithLoss(
[f, "label"],
["softmax", "loss"],
)
gradient_map = model.AddGradientOperators([loss])
self.assertTrue("v" in gradient_map)
self.assertTrue(isinstance(gradient_map['v'], core.GradientSlice))
@unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU.")
def test_cpu2gpu_gpu2cpu_gradients(self):
model = model_helper.ModelHelper(name="copy_test")
batch = 32
cpu_opt = core.DeviceOption(caffe2_pb2.CPU, 0)
gpu_opt = core.DeviceOption(caffe2_pb2.CUDA, 0)
with core.NameScope("cpu"):
with core.DeviceScope(cpu_opt):
x_cpu = brew.fc(model, 'data', 'x_cpu', 16, 8)
with core.NameScope("gpu_0"):
with core.DeviceScope(gpu_opt):
x_gpu = model.CopyCPUToGPU(x_cpu, "x_gpu")
pred_gpu = brew.fc(model, x_gpu, "pred_gpu", 8, 4)
pred_cpu = model.CopyGPUToCPU(pred_gpu, "pred_cpu")
with core.DeviceScope(cpu_opt):
with core.NameScope("cpu"):
(softmax, loss) = model.SoftmaxWithLoss(
[pred_cpu, "label"],
["softmax", "loss"],
)
gradient_map = model.AddGradientOperators([loss])
# Add param updates (for cpu and gpu)
init_net = model.param_init_net
with core.DeviceScope(cpu_opt):
with core.NameScope("cpu"):
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
LR = init_net.ConstantFill([], "LR", shape=[1], value=-2.0)
for param in model.GetParams():
model.WeightedSum(
[param, ONE, gradient_map[param], LR],
param,
)
with core.NameScope("gpu_0"):
with core.DeviceScope(gpu_opt):
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
LR = init_net.ConstantFill([], "LR", shape=[1], value=-2.0)
for param in model.GetParams():
model.WeightedSum(
[param, ONE, gradient_map[param], LR],
param,
)
with core.DeviceScope(cpu_opt):
workspace.FeedBlob(
'cpu/data',
np.random.rand(batch, 16).astype(np.float32),
)
workspace.FeedBlob(
'cpu/label',
np.random.randint(4, size=batch).astype(np.int32),
)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
initial_params = {p: workspace.FetchBlob(p) for p in model.GetParams()}
workspace.RunNet(model.net.Proto().name)
updated_params = {p: workspace.FetchBlob(p) for p in model.GetParams()}
for p in model.GetParams():
g = gradient_map[p]
expected = initial_params[p] - 2.0 * workspace.FetchBlob(g)
actual = updated_params[p]
self.assertTrue(
np.array_equal(expected, updated_params[p]),
"Mismatch: {}: {}, {}".format(p, expected, actual),
) | unknown | codeparrot/codeparrot-clean | ||
/**
* Test that a user may only override killOp error code if they have the proper privileges.
*
* @tags: [requires_fcv_83, requires_sharding]
*/
import {configureFailPoint} from "jstests/libs/fail_point_util.js";
import {FixtureHelpers} from "jstests/libs/fixture_helpers.js";
import {ShardingTest} from "jstests/libs/shardingtest.js";
function runTest(m, failPointName) {
const db = m.getDB("foo");
const admin = m.getDB("admin");
admin.createUser({user: "admin", pwd: "password", roles: jsTest.adminUserRoles});
admin.auth("admin", "password");
const logReader = {db: "admin", role: "clusterMonitor"};
db.createUser({user: "reader", pwd: "reader", roles: [{db: "foo", role: "read"}, logReader]});
admin.createRole({
role: "opAdmin",
roles: [],
privileges: [{resource: {cluster: true}, actions: ["inprog", "killop"]}],
});
db.createUser({user: "opAdmin", pwd: "opAdmin", roles: [{role: "opAdmin", db: "admin"}]});
const t = db.killop_error_code;
t.insertOne({x: 1});
if (!FixtureHelpers.isMongos(db)) {
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
}
admin.logout();
// Only used for nice error messages.
function getAllLocalOps() {
return admin.aggregate([{$currentOp: {allUsers: true, localOps: true}}]).toArray();
}
function getExpectedOpIds() {
return admin
.aggregate([{$currentOp: {localOps: true}}])
.toArray()
.filter((op) => op.command.comment === "killop_error_code")
.map((op) => op.opid);
}
let queryAsReader =
'db = db.getSiblingDB("foo"); db.auth("reader", "reader"); assert.commandFailedWithCode(db.runCommand({find: "killop_error_code", comment: "killop_error_code"}), ErrorCodes.InterruptedDueToOverload);';
jsTest.log.info("Starting long-running operation");
db.auth("reader", "reader");
const failpoint = configureFailPoint(m, failPointName);
const query = startParallelShell(queryAsReader, m.port);
jsTest.log.info("Finding ops in $currentOp output");
assert.soon(
() => getExpectedOpIds().length === 1,
() => tojson(getAllLocalOps()),
);
const current_op_id = getExpectedOpIds()[0];
jsTest.log.info("Checking that the user cannot kill the op with a custom error code");
assert.commandFailedWithCode(
db.adminCommand({killOp: 1, op: current_op_id, errorCode: ErrorCodes.InterruptedDueToOverload}),
ErrorCodes.Unauthorized,
);
db.logout();
db.auth("opAdmin", "opAdmin");
jsTest.log.info("Checking that an administrative user can kill the op only with a valid custom error code");
assert.commandFailedWithCode(
db.adminCommand({killOp: 1, op: current_op_id, errorCode: ErrorCodes.DuplicateKey}),
ErrorCodes.Unauthorized,
);
assert.commandWorked(
db.adminCommand({killOp: 1, op: current_op_id, errorCode: ErrorCodes.InterruptedDueToOverload}),
);
db.logout();
failpoint.off();
query();
}
let conn = MongoRunner.runMongod({auth: ""});
runTest(conn, "setYieldAllLocksHang");
MongoRunner.stopMongod(conn);
let st = new ShardingTest({shards: 1, keyFile: "jstests/libs/key1"});
runTest(st.s, "waitInFindBeforeMakingBatch");
st.stop(); | javascript | github | https://github.com/mongodb/mongo | jstests/auth/killop_error_code.js |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class IntegrationAccountCertificate(Resource):
"""The integration account certificate.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: The resource tags.
:type tags: dict
:ivar created_time: The created time.
:vartype created_time: datetime
:ivar changed_time: The changed time.
:vartype changed_time: datetime
:param metadata: The metadata.
:type metadata: object
:param key: The key details in the key vault.
:type key: :class:`KeyVaultKeyReference
<azure.mgmt.logic.models.KeyVaultKeyReference>`
:param public_certificate: The public certificate.
:type public_certificate: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'key': {'key': 'properties.key', 'type': 'KeyVaultKeyReference'},
'public_certificate': {'key': 'properties.publicCertificate', 'type': 'str'},
}
def __init__(self, location=None, tags=None, metadata=None, key=None, public_certificate=None):
super(IntegrationAccountCertificate, self).__init__(location=location, tags=tags)
self.created_time = None
self.changed_time = None
self.metadata = metadata
self.key = key
self.public_certificate = public_certificate | unknown | codeparrot/codeparrot-clean | ||
/*-------------------------------------------------------------------------
*
* nbtpreprocesskeys.c
* Preprocessing for Postgres btree scan keys.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/nbtree/nbtpreprocesskeys.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/nbtree.h"
#include "access/relscan.h"
#include "common/int.h"
#include "lib/qunique.h"
#include "utils/array.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
/*
 * Working state used by _bt_preprocess_keys to track the "currently best"
 * input scan key seen so far for one attribute + strategy combination.
 */
typedef struct BTScanKeyPreproc
{
	ScanKey		inkey;			/* best input scan key so far (NULL if none) */
	int			inkeyi;			/* inkey's offset in the inkeys[] input array */
	int			arrayidx;		/* 1-based index into so->arrayKeys[] for an
								 * equality SK_SEARCHARRAY key (see usage in
								 * _bt_preprocess_keys) */
} BTScanKeyPreproc;
/*
 * Context passed (via the qsort_arg-style void *arg) to the array element
 * comparison/sorting helpers declared below (_bt_compare_array_elements).
 */
typedef struct BTSortArrayContext
{
	FmgrInfo   *sortproc;		/* btree comparison support function */
	Oid			collation;		/* collation to pass to sortproc */
	bool		reverse;		/* sort in descending element order? */
} BTSortArrayContext;
/* Forward declarations of file-local preprocessing routines */
static bool _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption);
static void _bt_mark_scankey_required(ScanKey skey);
static bool _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
									 ScanKey leftarg, ScanKey rightarg,
									 BTArrayKeyInfo *array, FmgrInfo *orderproc,
									 bool *result);
static bool _bt_compare_array_scankey_args(IndexScanDesc scan,
										   ScanKey arraysk, ScanKey skey,
										   FmgrInfo *orderproc, BTArrayKeyInfo *array,
										   bool *qual_ok);
static bool _bt_saoparray_shrink(IndexScanDesc scan, ScanKey arraysk,
								 ScanKey skey, FmgrInfo *orderproc,
								 BTArrayKeyInfo *array, bool *qual_ok);
static bool _bt_skiparray_shrink(IndexScanDesc scan, ScanKey skey,
								 BTArrayKeyInfo *array, bool *qual_ok);
static void _bt_skiparray_strat_adjust(IndexScanDesc scan, ScanKey arraysk,
									   BTArrayKeyInfo *array);
static void _bt_skiparray_strat_decrement(IndexScanDesc scan, ScanKey arraysk,
										  BTArrayKeyInfo *array);
static void _bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk,
										  BTArrayKeyInfo *array);
static void _bt_unmark_keys(IndexScanDesc scan, int *keyDataMap);
static int	_bt_reorder_array_cmp(const void *a, const void *b);
static ScanKey _bt_preprocess_array_keys(IndexScanDesc scan, int *new_numberOfKeys);
static void _bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap);
static int	_bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out,
							   int *numSkipArrayKeys_out);
static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
									  Oid elemtype, StrategyNumber strat,
									  const Datum *elems, int nelems);
static void _bt_setup_array_cmp(IndexScanDesc scan, ScanKey skey, Oid elemtype,
								FmgrInfo *orderproc, FmgrInfo **sortprocp);
static int	_bt_sort_array_elements(ScanKey skey, FmgrInfo *sortproc,
									bool reverse, Datum *elems, int nelems);
static bool _bt_merge_arrays(IndexScanDesc scan, ScanKey skey,
							 FmgrInfo *sortproc, bool reverse,
							 Oid origelemtype, Oid nextelemtype,
							 Datum *elems_orig, int *nelems_orig,
							 Datum *elems_next, int nelems_next);
static int	_bt_compare_array_elements(const void *a, const void *b, void *arg);
/*
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The given search-type keys (taken from scan->keyData[])
* are copied to so->keyData[] with possible transformation.
* scan->numberOfKeys is the number of input keys, so->numberOfKeys gets
* the number of output keys. Calling here a second or subsequent time
* (during the same btrescan) is a no-op.
*
* The output keys are marked with additional sk_flags bits beyond the
* system-standard bits supplied by the caller. The DESC and NULLS_FIRST
* indoption bits for the relevant index attribute are copied into the flags.
* Also, for a DESC column, we commute (flip) all the sk_strategy numbers
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
* satisfied to continue the scan. It also attempts to eliminate redundant
* keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
* Required output keys are sorted by index attribute. Presently we expect
* (but verify) that the input keys are already so sorted --- this is done
* by match_clauses_to_index() in indxpath.c. Some reordering of the keys
* within each attribute may be done as a byproduct of the processing here.
* That process must leave array scan keys (within an attribute) in the same
* order as corresponding entries from the scan's BTArrayKeyInfo array info.
* We might also construct skip array scan keys that weren't present in the
* original input keys; these are also output in standard attribute order.
*
* The output keys are marked with flags SK_BT_REQFWD and/or SK_BT_REQBKWD
* if they must be satisfied in order to continue the scan forward or backward
* respectively. _bt_checkkeys uses these flags. For example, if the quals
* are "x = 1 AND y < 4 AND z < 5", then _bt_checkkeys will reject a tuple
* (1,2,7), but we must continue the scan in case there are tuples (1,3,z).
* But once we reach tuples like (1,4,z) we can stop scanning because no
* later tuples could match. This is reflected by marking the x and y keys,
* but not the z key, with SK_BT_REQFWD. In general, the keys for leading
* attributes with "=" keys are marked both SK_BT_REQFWD and SK_BT_REQBKWD.
* For the first attribute without an "=" key, any "<" and "<=" keys are
* marked SK_BT_REQFWD while any ">" and ">=" keys are marked SK_BT_REQBKWD.
* This can be seen to be correct by considering the above example.
* (Actually, the z key _will_ be marked SK_BT_REQFWD, since preprocessing
* will generate a skip array on y -- except when DEBUG_DISABLE_SKIP_SCAN.
* See below description of how and why we generate skip array = keys in the
* presence of a "contradictory" condition such as "y < 4".)
*
* If we never generated skip array scan keys, it would be possible for "gaps"
* to appear that make it unsafe to mark any subsequent input scan keys
* (copied from scan->keyData[]) as required to continue the scan. Prior to
* Postgres 18, a qual like "WHERE y = 4" always resulted in a full scan.
* This qual now becomes "WHERE x = ANY('{every possible x value}') and y = 4"
* on output. In other words, preprocessing now adds a skip array on "x".
* This has the potential to be much more efficient than a full index scan
* (though it behaves like a full scan when there's many distinct "x" values).
*
* Typically, redundant keys are eliminated: we keep only the tightest
* >/>= bound and the tightest </<= bound, and if there's an = key then
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
* we cannot eliminate either key.
*
* When all redundant keys could not be eliminated, we'll output a key array
* that can more or less be treated as if it had no redundant keys. Suppose
* we have "x > 4::int AND x > 10::bigint AND x < 70", and we are unable to
* determine which > key is more restrictive for lack of a suitable cross-type
* operator. We'll arbitrarily pick one of the > keys; the other > key won't
* be marked required. Obviously, the scan will be less efficient if we
* choose x > 4 over x > 10 -- but it can still largely proceed as if there
* was only a single > condition. "x > 10" will be placed at the end of the
* so->keyData[] output array. It'll always be evaluated last, after the keys
* that could be marked required in the usual way (after "x > 4 AND x < 70").
* This can sometimes result in so->keyData[] keys that aren't even in index
* attribute order (if the qual involves multiple attributes). The scan's
* required keys will still be in attribute order, though, so it can't matter.
*
* This scheme ensures that _bt_first always uses the same set of keys at the
* start of a forwards scan as those _bt_checkkeys uses to determine when to
* end a similar backwards scan (and vice-versa). _bt_advance_array_keys
* depends on this: it expects to be able to reliably predict what the next
* _bt_first call will do by testing whether _bt_checkkeys' routines report
* that the final tuple on the page is past the end of matches for the scan's
* keys with the scan direction flipped. If it is (if continuescan=false),
* then it follows that calling _bt_first will, at a minimum, relocate the
* scan to the very next leaf page (in the current scan direction).
*
* As a byproduct of this work, we can detect contradictory quals such
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
* indicating the scan need not be run at all since no tuples can match.
* (In this case we do not bother completing the output key array!)
* Again, missing cross-type operators might cause us to fail to prove the
* quals contradictory when they really are, but the scan will work correctly.
*
* Skip array = keys will even be generated in the presence of "contradictory"
* inequality quals when it'll enable marking later input quals as required.
* We'll merge any such inequalities into the generated skip array by setting
* its array.low_compare or array.high_compare key field. The resulting skip
* array will generate its array elements from a range that's constrained by
* any merged input inequalities (which won't get output in so->keyData[]).
*
* Row compares are treated as ordinary inequality comparisons on the row's
* first index column whenever possible. We treat their first subkey as if it
* was a simple scalar inequality for the purposes of the logic about required
* keys. This also gives us limited ability to detect contradictory/redundant
* conditions involving a row compare: we can do so whenever it involves an
* SK_ISNULL condition on a row compare's first column (the same rules used
* with simple inequalities work just as well here). We have no ability to
* detect redundant/contradictory conditions in any other row compare case.
* Note in particular that we are unable to merge a row comparison key into a
* skip array (only ordinary inequalities are merged). Any so->keyData[] key
* on a column that comes after a row comparison's first column can therefore
* never be marked as required at present.
*
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan. Therefore we can't
* overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int			numberOfKeys = scan->numberOfKeys;
	int16	   *indoption = scan->indexRelation->rd_indoption;
	int			new_numberOfKeys;
	int			numberOfEqualCols;
	ScanKey		inkeys;
	BTScanKeyPreproc xform[BTMaxStrategyNumber];
	bool		test_result,
				redundant_key_kept = false;
	AttrNumber	attno;
	ScanKey		arrayKeyData;
	int		   *keyDataMap = NULL;
	int			arrayidx = 0;	/* count of array keys seen in inkeys[] */

	if (so->numberOfKeys > 0)
	{
		/*
		 * Only need to do preprocessing once per btrescan, at most.  All
		 * calls after the first are handled as no-ops.
		 */
		return;
	}

	/* initialize result variables */
	so->qual_ok = true;
	so->numberOfKeys = 0;

	if (numberOfKeys < 1)
		return;					/* done if qual-less scan */

	/* If any keys are SK_SEARCHARRAY type, set up array-key info */
	arrayKeyData = _bt_preprocess_array_keys(scan, &numberOfKeys);
	if (!so->qual_ok)
	{
		/* unmatchable array, so give up */
		return;
	}

	/*
	 * Treat arrayKeyData[] (a partially preprocessed copy of scan->keyData[])
	 * as our input if _bt_preprocess_array_keys just allocated it, else just
	 * use scan->keyData[]
	 */
	if (arrayKeyData)
	{
		inkeys = arrayKeyData;

		/* Also maintain keyDataMap for remapping so->orderProcs[] later */
		keyDataMap = MemoryContextAlloc(so->arrayContext,
										numberOfKeys * sizeof(int));

		/*
		 * Also enlarge output array when it might otherwise not have room for
		 * a skip array's scan key
		 */
		if (numberOfKeys > scan->numberOfKeys)
			so->keyData = repalloc(so->keyData,
								   numberOfKeys * sizeof(ScanKeyData));
	}
	else
		inkeys = scan->keyData;

	/* we check that input keys are correctly ordered */
	if (inkeys[0].sk_attno < 1)
		elog(ERROR, "btree index keys must be ordered by attribute");

	/* We can short-circuit most of the work if there's just one key */
	if (numberOfKeys == 1)
	{
		/* Apply indoption to scankey (might change sk_strategy!) */
		if (!_bt_fix_scankey_strategy(&inkeys[0], indoption))
			so->qual_ok = false;
		memcpy(&so->keyData[0], &inkeys[0], sizeof(ScanKeyData));
		so->numberOfKeys = 1;
		/* We can mark the qual as required if it's for first index col */
		if (inkeys[0].sk_attno == 1)
			_bt_mark_scankey_required(&so->keyData[0]);
		if (arrayKeyData)
		{
			/*
			 * Don't call _bt_preprocess_array_keys_final in this fast path
			 * (we'll miss out on the single value array transformation, but
			 * that's not nearly as important when there's only one scan key)
			 */
			Assert(so->keyData[0].sk_flags & SK_SEARCHARRAY);
			Assert(so->keyData[0].sk_strategy != BTEqualStrategyNumber ||
				   (so->arrayKeys[0].scan_key == 0 &&
					!(so->keyData[0].sk_flags & SK_BT_SKIP) &&
					OidIsValid(so->orderProcs[0].fn_oid)));
		}

		return;
	}

	/*
	 * Otherwise, do the full set of pushups.
	 */
	new_numberOfKeys = 0;
	numberOfEqualCols = 0;

	/*
	 * Initialize for processing of keys for attr 1.
	 *
	 * xform[i] points to the currently best scan key of strategy type i+1; it
	 * is NULL if we haven't yet found such a key for this attr.
	 */
	attno = 1;
	memset(xform, 0, sizeof(xform));

	/*
	 * Loop iterates from 0 to numberOfKeys inclusive; we use the last pass to
	 * handle after-last-key processing.  Actual exit from the loop is at the
	 * "break" statement below.
	 */
	for (int i = 0;; i++)
	{
		ScanKey		inkey = inkeys + i;
		int			j;

		if (i < numberOfKeys)
		{
			/* Apply indoption to scankey (might change sk_strategy!) */
			if (!_bt_fix_scankey_strategy(inkey, indoption))
			{
				/* NULL can't be matched, so give up */
				so->qual_ok = false;
				return;
			}
		}

		/*
		 * If we are at the end of the keys for a particular attr, finish up
		 * processing and emit the cleaned-up keys.
		 */
		if (i == numberOfKeys || inkey->sk_attno != attno)
		{
			int			priorNumberOfEqualCols = numberOfEqualCols;

			/* check input keys are correctly ordered */
			if (i < numberOfKeys && inkey->sk_attno < attno)
				elog(ERROR, "btree index keys must be ordered by attribute");

			/*
			 * If = has been specified, all other keys can be eliminated as
			 * redundant.  Note that this is no less true if the = key is
			 * SEARCHARRAY; the only real difference is that the inequality
			 * key _becomes_ redundant by making _bt_compare_scankey_args
			 * eliminate the subset of elements that won't need to be matched
			 * (with SAOP arrays and skip arrays alike).
			 *
			 * If we have a case like "key = 1 AND key > 2", we set qual_ok to
			 * false and abandon further processing.  We'll do the same thing
			 * given a case like "key IN (0, 1) AND key > 2".
			 *
			 * We also have to deal with the case of "key IS NULL", which is
			 * unsatisfiable in combination with any other index condition. By
			 * the time we get here, that's been classified as an equality
			 * check, and we've rejected any combination of it with a regular
			 * equality condition; but not with other types of conditions.
			 */
			if (xform[BTEqualStrategyNumber - 1].inkey)
			{
				ScanKey		eq = xform[BTEqualStrategyNumber - 1].inkey;
				BTArrayKeyInfo *array = NULL;
				FmgrInfo   *orderproc = NULL;

				if (arrayKeyData && (eq->sk_flags & SK_SEARCHARRAY))
				{
					int			eq_in_ikey,
								eq_arrayidx;

					eq_in_ikey = xform[BTEqualStrategyNumber - 1].inkeyi;
					eq_arrayidx = xform[BTEqualStrategyNumber - 1].arrayidx;
					array = &so->arrayKeys[eq_arrayidx - 1];
					orderproc = so->orderProcs + eq_in_ikey;

					Assert(array->scan_key == eq_in_ikey);
					Assert(OidIsValid(orderproc->fn_oid));
				}

				for (j = BTMaxStrategyNumber; --j >= 0;)
				{
					ScanKey		chk = xform[j].inkey;

					if (!chk || j == (BTEqualStrategyNumber - 1))
						continue;

					if (eq->sk_flags & SK_SEARCHNULL)
					{
						/* IS NULL is contradictory to anything else */
						so->qual_ok = false;
						return;
					}

					if (_bt_compare_scankey_args(scan, chk, eq, chk,
												 array, orderproc,
												 &test_result))
					{
						if (!test_result)
						{
							/* keys proven mutually contradictory */
							so->qual_ok = false;
							return;
						}
						/* else discard the redundant non-equality key */
						xform[j].inkey = NULL;
						xform[j].inkeyi = -1;
					}
					else
						redundant_key_kept = true;	/* couldn't compare args */
				}
				/* track number of attrs for which we have "=" keys */
				numberOfEqualCols++;
			}

			/* try to keep only one of <, <= */
			if (xform[BTLessStrategyNumber - 1].inkey &&
				xform[BTLessEqualStrategyNumber - 1].inkey)
			{
				ScanKey		lt = xform[BTLessStrategyNumber - 1].inkey;
				ScanKey		le = xform[BTLessEqualStrategyNumber - 1].inkey;

				if (_bt_compare_scankey_args(scan, le, lt, le, NULL, NULL,
											 &test_result))
				{
					if (test_result)
						xform[BTLessEqualStrategyNumber - 1].inkey = NULL;
					else
						xform[BTLessStrategyNumber - 1].inkey = NULL;
				}
				else
					redundant_key_kept = true;	/* couldn't compare args */
			}

			/* try to keep only one of >, >= */
			if (xform[BTGreaterStrategyNumber - 1].inkey &&
				xform[BTGreaterEqualStrategyNumber - 1].inkey)
			{
				ScanKey		gt = xform[BTGreaterStrategyNumber - 1].inkey;
				ScanKey		ge = xform[BTGreaterEqualStrategyNumber - 1].inkey;

				if (_bt_compare_scankey_args(scan, ge, gt, ge, NULL, NULL,
											 &test_result))
				{
					if (test_result)
						xform[BTGreaterEqualStrategyNumber - 1].inkey = NULL;
					else
						xform[BTGreaterStrategyNumber - 1].inkey = NULL;
				}
				else
					redundant_key_kept = true;	/* couldn't compare args */
			}

			/*
			 * Emit the cleaned-up keys into the so->keyData[] array, and then
			 * mark them if they are required.  They are required (possibly
			 * only in one direction) if all attrs before this one had "=".
			 *
			 * In practice we'll rarely output non-required scan keys here;
			 * typically, _bt_preprocess_array_keys has already added "=" keys
			 * sufficient to form an unbroken series of "=" constraints on all
			 * attrs prior to the attr from the final scan->keyData[] key.
			 */
			for (j = BTMaxStrategyNumber; --j >= 0;)
			{
				if (xform[j].inkey)
				{
					ScanKey		outkey = &so->keyData[new_numberOfKeys++];

					memcpy(outkey, xform[j].inkey, sizeof(ScanKeyData));
					if (arrayKeyData)
						keyDataMap[new_numberOfKeys - 1] = xform[j].inkeyi;
					if (priorNumberOfEqualCols == attno - 1)
						_bt_mark_scankey_required(outkey);
				}
			}

			/*
			 * Exit loop here if done.
			 */
			if (i == numberOfKeys)
				break;

			/* Re-initialize for new attno */
			attno = inkey->sk_attno;
			memset(xform, 0, sizeof(xform));
		}

		/* check strategy this key's operator corresponds to */
		j = inkey->sk_strategy - 1;

		if (inkey->sk_strategy == BTEqualStrategyNumber &&
			(inkey->sk_flags & SK_SEARCHARRAY))
		{
			/* must track how input scan keys map to arrays */
			Assert(arrayKeyData);
			arrayidx++;
		}

		/*
		 * have we seen a scan key for this same attribute and using this same
		 * operator strategy before now?
		 */
		if (xform[j].inkey == NULL)
		{
			/* nope, so this scan key wins by default (at least for now) */
			xform[j].inkey = inkey;
			xform[j].inkeyi = i;
			xform[j].arrayidx = arrayidx;
		}
		else
		{
			FmgrInfo   *orderproc = NULL;
			BTArrayKeyInfo *array = NULL;

			/*
			 * Seen one of these before, so keep only the more restrictive key
			 * if possible
			 */
			if (j == (BTEqualStrategyNumber - 1) && arrayKeyData)
			{
				/*
				 * Have to set up array keys
				 */
				if (inkey->sk_flags & SK_SEARCHARRAY)
				{
					array = &so->arrayKeys[arrayidx - 1];
					orderproc = so->orderProcs + i;

					Assert(array->scan_key == i);
					Assert(OidIsValid(orderproc->fn_oid));
					Assert(!(inkey->sk_flags & SK_BT_SKIP));
				}
				else if (xform[j].inkey->sk_flags & SK_SEARCHARRAY)
				{
					array = &so->arrayKeys[xform[j].arrayidx - 1];
					orderproc = so->orderProcs + xform[j].inkeyi;

					Assert(array->scan_key == xform[j].inkeyi);
					Assert(OidIsValid(orderproc->fn_oid));
					Assert(!(xform[j].inkey->sk_flags & SK_BT_SKIP));
				}

				/*
				 * Both scan keys might have arrays, in which case we'll
				 * arbitrarily pass only one of the arrays.  That won't
				 * matter, since _bt_compare_scankey_args is aware that two
				 * SEARCHARRAY scan keys mean that _bt_preprocess_array_keys
				 * failed to eliminate redundant arrays through array merging.
				 * _bt_compare_scankey_args just returns false when it sees
				 * this; it won't even try to examine either array.
				 */
			}

			if (_bt_compare_scankey_args(scan, inkey, inkey, xform[j].inkey,
										 array, orderproc, &test_result))
			{
				/* Have all we need to determine redundancy */
				if (test_result)
				{
					/*
					 * New key is more restrictive, and so replaces old key...
					 */
					if (j != (BTEqualStrategyNumber - 1) ||
						!(xform[j].inkey->sk_flags & SK_SEARCHARRAY))
					{
						xform[j].inkey = inkey;
						xform[j].inkeyi = i;
						xform[j].arrayidx = arrayidx;
					}
					else
					{
						/*
						 * ...unless we have to keep the old key because it's
						 * an array that rendered the new key redundant.  We
						 * need to make sure that we don't throw away an array
						 * scan key.  _bt_preprocess_array_keys_final expects
						 * us to keep all of the arrays that weren't already
						 * eliminated by _bt_preprocess_array_keys earlier on.
						 */
						Assert(!(inkey->sk_flags & SK_SEARCHARRAY));
					}
				}
				else if (j == (BTEqualStrategyNumber - 1))
				{
					/* key == a && key == b, but a != b */
					so->qual_ok = false;
					return;
				}
				/* else old key is more restrictive, keep it */
			}
			else
			{
				/*
				 * We can't determine which key is more restrictive.  Push
				 * xform[j] directly to the output array, then set xform[j] to
				 * the new scan key.
				 *
				 * Note: We do things this way around so that our arrays are
				 * always in the same order as their corresponding scan keys.
				 * _bt_preprocess_array_keys_final expects this.
				 */
				ScanKey		outkey = &so->keyData[new_numberOfKeys++];

				memcpy(outkey, xform[j].inkey, sizeof(ScanKeyData));
				if (arrayKeyData)
					keyDataMap[new_numberOfKeys - 1] = xform[j].inkeyi;
				if (numberOfEqualCols == attno - 1)
					_bt_mark_scankey_required(outkey);
				xform[j].inkey = inkey;
				xform[j].inkeyi = i;
				xform[j].arrayidx = arrayidx;
				redundant_key_kept = true;
			}
		}
	}

	so->numberOfKeys = new_numberOfKeys;

	/*
	 * Now that we've built a temporary mapping from so->keyData[] (output
	 * scan keys) to arrayKeyData[] (our input scan keys), fix array->scan_key
	 * references.  Also consolidate the so->orderProcs[] array such that it
	 * can be subscripted using so->keyData[]-wise offsets.
	 */
	if (arrayKeyData)
		_bt_preprocess_array_keys_final(scan, keyDataMap);

	/*
	 * If there are remaining redundant inequality keys, we must make sure
	 * that each index attribute has no more than one required >/>= key, and
	 * no more than one required </<= key.  Attributes that have one or more
	 * required = keys now must keep only one required key (the first = key).
	 */
	if (unlikely(redundant_key_kept) && so->qual_ok)
		_bt_unmark_keys(scan, keyDataMap);

	/* Could pfree arrayKeyData/keyDataMap now, but not worth the cycles */
}
/*
* Adjust a scankey's strategy and flags setting as needed for indoptions.
*
* We copy the appropriate indoption value into the scankey sk_flags
* (shifting to avoid clobbering system-defined flag bits). Also, if
* the DESC option is set, commute (flip) the operator strategy number.
*
* A secondary purpose is to check for IS NULL/NOT NULL scankeys and set up
* the strategy field correctly for them.
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
* a NULL means that the qual cannot be satisfied. We return true if the
* comparison value isn't NULL, or false if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
* on a rescan we will be looking at already-processed scankeys. Hence
* we have to be careful not to re-commute the strategy if we already did it.
* It's a bit ugly to modify the caller's copy of the scankey but in practice
* there shouldn't be any problem, since the index's indoptions are certainly
* not going to change while the scankey survives.
*/
static bool
_bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
{
int addflags;
addflags = indoption[skey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
/*
* We treat all btree operators as strict (even if they're not so marked
* in pg_proc). This means that it is impossible for an operator condition
* with a NULL comparison constant to succeed, and we can reject it right
* away.
*
* However, we now also support "x IS NULL" clauses as search conditions,
* so in that case keep going. The planner has not filled in any
* particular strategy in this case, so set it to BTEqualStrategyNumber
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
* Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*
* Note: someday we might have to fill in sk_collation from the index
* column's collation. At the moment this is a non-issue because we'll
* never actually call the comparison operator on a NULL.
*/
if (skey->sk_flags & SK_ISNULL)
{
/* SK_ISNULL shouldn't be set in a row header scankey */
Assert(!(skey->sk_flags & SK_ROW_HEADER));
/* Set indoption flags in scankey (might be done already) */
skey->sk_flags |= addflags;
/* Set correct strategy for IS NULL or NOT NULL search */
if (skey->sk_flags & SK_SEARCHNULL)
{
skey->sk_strategy = BTEqualStrategyNumber;
skey->sk_subtype = InvalidOid;
skey->sk_collation = InvalidOid;
}
else if (skey->sk_flags & SK_SEARCHNOTNULL)
{
if (skey->sk_flags & SK_BT_NULLS_FIRST)
skey->sk_strategy = BTGreaterStrategyNumber;
else
skey->sk_strategy = BTLessStrategyNumber;
skey->sk_subtype = InvalidOid;
skey->sk_collation = InvalidOid;
}
else
{
/* regular qual, so it cannot be satisfied */
return false;
}
/* Needn't do the rest */
return true;
}
/* Adjust strategy for DESC, if we didn't already */
if ((addflags & SK_BT_DESC) && !(skey->sk_flags & SK_BT_DESC))
skey->sk_strategy = BTCommuteStrategyNumber(skey->sk_strategy);
skey->sk_flags |= addflags;
/* If it's a row header, fix row member flags and strategies similarly */
if (skey->sk_flags & SK_ROW_HEADER)
{
ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
if (subkey->sk_flags & SK_ISNULL)
{
/* First row member is NULL, so RowCompare is unsatisfiable */
Assert(subkey->sk_flags & SK_ROW_MEMBER);
return false;
}
for (;;)
{
Assert(subkey->sk_flags & SK_ROW_MEMBER);
addflags = indoption[subkey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
if ((addflags & SK_BT_DESC) && !(subkey->sk_flags & SK_BT_DESC))
subkey->sk_strategy = BTCommuteStrategyNumber(subkey->sk_strategy);
subkey->sk_flags |= addflags;
if (subkey->sk_flags & SK_ROW_END)
break;
subkey++;
}
}
return true;
}
/*
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
* directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In such
* cases, the first subsidiary key is required, but subsequent ones are
* required only as long as they correspond to successive index columns and
* match the leading column as to sort direction. Otherwise the row
* comparison ordering is different from the index ordering and so we can't
* stop the scan on the basis of those lower-order columns.
*
* Note: when we set required-key flag bits in a subsidiary scankey, we are
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
* anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
{
int addflags;
switch (skey->sk_strategy)
{
case BTLessStrategyNumber:
case BTLessEqualStrategyNumber:
addflags = SK_BT_REQFWD;
break;
case BTEqualStrategyNumber:
addflags = SK_BT_REQFWD | SK_BT_REQBKWD;
break;
case BTGreaterEqualStrategyNumber:
case BTGreaterStrategyNumber:
addflags = SK_BT_REQBKWD;
break;
default:
elog(ERROR, "unrecognized StrategyNumber: %d",
(int) skey->sk_strategy);
addflags = 0; /* keep compiler quiet */
break;
}
skey->sk_flags |= addflags;
if (skey->sk_flags & SK_ROW_HEADER)
{
ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
AttrNumber attno = skey->sk_attno;
/* First subkey should be same column/operator as the header */
Assert(subkey->sk_attno == attno);
Assert(subkey->sk_strategy == skey->sk_strategy);
for (;;)
{
Assert(subkey->sk_flags & SK_ROW_MEMBER);
if (subkey->sk_attno != attno)
break; /* non-adjacent key, so not required */
if (subkey->sk_strategy != skey->sk_strategy)
break; /* wrong direction, so not required */
subkey->sk_flags |= addflags;
if (subkey->sk_flags & SK_ROW_END)
break;
subkey++;
attno++;
}
}
}
/*
* Compare two scankey values using a specified operator.
*
* The test we want to perform is logically "leftarg op rightarg", where
* leftarg and rightarg are the sk_argument values in those ScanKeys, and
* the comparison operator is the one in the op ScanKey. However, in
* cross-data-type situations we may need to look up the correct operator in
* the index's opfamily: it is the one having amopstrategy = op->sk_strategy
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
* may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return true. We return false
* if the comparison could not be made.
*
* If either leftarg or rightarg are an array, we'll apply array-specific
* rules to determine which array elements are redundant on behalf of caller.
* It is up to our caller to save whichever of the two scan keys is the array,
* and discard the non-array scan key (the non-array scan key is guaranteed to
* be redundant with any complete opfamily). Caller isn't expected to call
* here with a pair of array scan keys provided we're dealing with a complete
* opfamily (_bt_preprocess_array_keys will merge array keys together to make
* sure of that).
*
* Note: we'll also shrink caller's array as needed to eliminate redundant
* array elements. One reason why caller should prefer to discard non-array
* scan keys is so that we'll have the opportunity to shrink the array
* multiple times, in multiple calls (for each of several other scan keys on
* the same index attribute).
*
* Note: op always points at the same ScanKey as either leftarg or rightarg.
* Since we don't scribble on the scankeys themselves, this aliasing should
* cause no trouble.
*
* Note: this routine needs to be insensitive to any DESC option applied
* to the index column. For example, "x < 4" is a tighter constraint than
* "x < 5" regardless of which way the index is sorted.
*/
static bool
_bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
ScanKey leftarg, ScanKey rightarg,
BTArrayKeyInfo *array, FmgrInfo *orderproc,
bool *result)
{
Relation rel = scan->indexRelation;
Oid lefttype,
righttype,
optype,
opcintype,
cmp_op;
StrategyNumber strat;
Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_ROW_MEMBER));
/*
* First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
{
bool leftnull,
rightnull;
/* Handle skip array comparison with IS NOT NULL scan key */
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_BT_SKIP)
{
/* Shouldn't generate skip array in presence of IS NULL key */
Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_SEARCHNULL));
Assert((leftarg->sk_flags | rightarg->sk_flags) & SK_SEARCHNOTNULL);
/* Skip array will have no NULL element/IS NULL scan key */
Assert(array->num_elems == -1);
array->null_elem = false;
/* IS NOT NULL key (could be leftarg or rightarg) now redundant */
*result = true;
return true;
}
if (leftarg->sk_flags & SK_ISNULL)
{
Assert(leftarg->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL));
leftnull = true;
}
else
leftnull = false;
if (rightarg->sk_flags & SK_ISNULL)
{
Assert(rightarg->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL));
rightnull = true;
}
else
rightnull = false;
/*
* We treat NULL as either greater than or less than all other values.
* Since true > false, the tests below work correctly for NULLS LAST
* logic. If the index is NULLS FIRST, we need to flip the strategy.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_NULLS_FIRST)
strat = BTCommuteStrategyNumber(strat);
switch (strat)
{
case BTLessStrategyNumber:
*result = (leftnull < rightnull);
break;
case BTLessEqualStrategyNumber:
*result = (leftnull <= rightnull);
break;
case BTEqualStrategyNumber:
*result = (leftnull == rightnull);
break;
case BTGreaterEqualStrategyNumber:
*result = (leftnull >= rightnull);
break;
case BTGreaterStrategyNumber:
*result = (leftnull > rightnull);
break;
default:
elog(ERROR, "unrecognized StrategyNumber: %d", (int) strat);
*result = false; /* keep compiler quiet */
break;
}
return true;
}
/*
* We don't yet know how to determine redundancy when it involves a row
* compare key (barring simple cases involving IS NULL/IS NOT NULL)
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ROW_HEADER)
{
Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_BT_SKIP));
return false;
}
/*
* If either leftarg or rightarg are equality-type array scankeys, we need
* specialized handling (since by now we know that IS NULL wasn't used)
*/
if (array)
{
bool leftarray,
rightarray;
leftarray = ((leftarg->sk_flags & SK_SEARCHARRAY) &&
leftarg->sk_strategy == BTEqualStrategyNumber);
rightarray = ((rightarg->sk_flags & SK_SEARCHARRAY) &&
rightarg->sk_strategy == BTEqualStrategyNumber);
/*
* _bt_preprocess_array_keys is responsible for merging together array
* scan keys, and will do so whenever the opfamily has the required
* cross-type support. If it failed to do that, we handle it just
* like the case where we can't make the comparison ourselves.
*/
if (leftarray && rightarray)
{
/* Can't make the comparison */
*result = false; /* suppress compiler warnings */
Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_BT_SKIP));
return false;
}
/*
* Otherwise we need to determine if either one of leftarg or rightarg
* uses an array, then pass this through to a dedicated helper
* function.
*/
if (leftarray)
return _bt_compare_array_scankey_args(scan, leftarg, rightarg,
orderproc, array, result);
else if (rightarray)
return _bt_compare_array_scankey_args(scan, rightarg, leftarg,
orderproc, array, result);
/* FALL THRU */
}
/*
* The opfamily we need to worry about is identified by the index column.
*/
Assert(leftarg->sk_attno == rightarg->sk_attno);
opcintype = rel->rd_opcintype[leftarg->sk_attno - 1];
/*
* Determine the actual datatypes of the ScanKey arguments. We have to
* support the convention that sk_subtype == InvalidOid means the opclass
* input type; this is a hack to simplify life for ScanKeyInit().
*/
lefttype = leftarg->sk_subtype;
if (lefttype == InvalidOid)
lefttype = opcintype;
righttype = rightarg->sk_subtype;
if (righttype == InvalidOid)
righttype = opcintype;
optype = op->sk_subtype;
if (optype == InvalidOid)
optype = opcintype;
/*
* If leftarg and rightarg match the types expected for the "op" scankey,
* we can use its already-looked-up comparison function.
*/
if (lefttype == opcintype && righttype == optype)
{
*result = DatumGetBool(FunctionCall2Coll(&op->sk_func,
op->sk_collation,
leftarg->sk_argument,
rightarg->sk_argument));
return true;
}
/*
* Otherwise, we need to go to the syscache to find the appropriate
* operator. (This cannot result in infinite recursion, since no
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
* If the sk_strategy was flipped by _bt_fix_scankey_strategy, we have to
* un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
strat = BTCommuteStrategyNumber(strat);
cmp_op = get_opfamily_member(rel->rd_opfamily[leftarg->sk_attno - 1],
lefttype,
righttype,
strat);
if (OidIsValid(cmp_op))
{
RegProcedure cmp_proc = get_opcode(cmp_op);
if (RegProcedureIsValid(cmp_proc))
{
*result = DatumGetBool(OidFunctionCall2Coll(cmp_proc,
op->sk_collation,
leftarg->sk_argument,
rightarg->sk_argument));
return true;
}
}
/* Can't make the comparison */
*result = false; /* suppress compiler warnings */
return false;
}
/*
* Compare an array scan key to a scalar scan key, eliminating contradictory
* array elements such that the scalar scan key becomes redundant.
*
* If the opfamily is incomplete we may not be able to determine which
* elements are contradictory. When we return true we'll have validly set
* *qual_ok, guaranteeing that at least the scalar scan key can be considered
* redundant. We return false if the comparison could not be made (caller
* must keep both scan keys when this happens).
*
* Note: it's up to caller to deal with IS [NOT] NULL scan keys, as well as
* row comparison scan keys. We only deal with scalar scan keys.
*/
static bool
_bt_compare_array_scankey_args(IndexScanDesc scan, ScanKey arraysk, ScanKey skey,
FmgrInfo *orderproc, BTArrayKeyInfo *array,
bool *qual_ok)
{
Assert(arraysk->sk_attno == skey->sk_attno);
Assert(!(arraysk->sk_flags & (SK_ISNULL | SK_ROW_HEADER | SK_ROW_MEMBER)));
Assert((arraysk->sk_flags & SK_SEARCHARRAY) &&
arraysk->sk_strategy == BTEqualStrategyNumber);
/* don't expect to have to deal with NULLs/row comparison scan keys */
Assert(!(skey->sk_flags & (SK_ISNULL | SK_ROW_HEADER | SK_ROW_MEMBER)));
Assert(!(skey->sk_flags & SK_SEARCHARRAY) ||
skey->sk_strategy != BTEqualStrategyNumber);
/*
* Just call the appropriate helper function based on whether it's a SAOP
* array or a skip array. Both helpers will set *qual_ok in passing.
*/
if (array->num_elems != -1)
return _bt_saoparray_shrink(scan, arraysk, skey, orderproc, array,
qual_ok);
else
return _bt_skiparray_shrink(scan, skey, array, qual_ok);
}
/*
* Preprocessing of SAOP array scan key, used to determine which array
* elements are eliminated as contradictory by a non-array scalar key.
*
* _bt_compare_array_scankey_args helper function.
*
* Array elements can be eliminated as contradictory when excluded by some
* other operator on the same attribute. For example, with an index scan qual
* "WHERE a IN (1, 2, 3) AND a < 2", all array elements except the value "1"
* are eliminated, and the < scan key is eliminated as redundant. Cases where
* every array element is eliminated by a redundant scalar scan key have an
* unsatisfiable qual, which we handle by setting *qual_ok=false for caller.
*/
static bool
_bt_saoparray_shrink(IndexScanDesc scan, ScanKey arraysk, ScanKey skey,
FmgrInfo *orderproc, BTArrayKeyInfo *array, bool *qual_ok)
{
Relation rel = scan->indexRelation;
Oid opcintype = rel->rd_opcintype[arraysk->sk_attno - 1];
int cmpresult = 0,
cmpexact = 0,
matchelem,
new_nelems = 0;
FmgrInfo crosstypeproc;
FmgrInfo *orderprocp = orderproc;
Assert(array->num_elems > 0);
Assert(!(arraysk->sk_flags & SK_BT_SKIP));
/*
* _bt_binsrch_array_skey searches an array for the entry best matching a
* datum of opclass input type for the index's attribute (on-disk type).
* We can reuse the array's ORDER proc whenever the non-array scan key's
* type is a match for the corresponding attribute's input opclass type.
* Otherwise, we have to do another ORDER proc lookup so that our call to
* _bt_binsrch_array_skey applies the correct comparator.
*
* Note: we have to support the convention that sk_subtype == InvalidOid
* means the opclass input type; this is a hack to simplify life for
* ScanKeyInit().
*/
if (skey->sk_subtype != opcintype && skey->sk_subtype != InvalidOid)
{
RegProcedure cmp_proc;
Oid arraysk_elemtype;
/*
* Need an ORDER proc lookup to detect redundancy/contradictoriness
* with this pair of scankeys.
*
* Scalar scan key's argument will be passed to _bt_compare_array_skey
* as its tupdatum/lefthand argument (rhs arg is for array elements).
*/
arraysk_elemtype = arraysk->sk_subtype;
if (arraysk_elemtype == InvalidOid)
arraysk_elemtype = rel->rd_opcintype[arraysk->sk_attno - 1];
cmp_proc = get_opfamily_proc(rel->rd_opfamily[arraysk->sk_attno - 1],
skey->sk_subtype, arraysk_elemtype,
BTORDER_PROC);
if (!RegProcedureIsValid(cmp_proc))
{
/* Can't make the comparison */
*qual_ok = false; /* suppress compiler warnings */
return false;
}
/* We have all we need to determine redundancy/contradictoriness */
orderprocp = &crosstypeproc;
fmgr_info(cmp_proc, orderprocp);
}
matchelem = _bt_binsrch_array_skey(orderprocp, false,
NoMovementScanDirection,
skey->sk_argument, false, array,
arraysk, &cmpresult);
switch (skey->sk_strategy)
{
case BTLessStrategyNumber:
cmpexact = 1; /* exclude exact match, if any */
/* FALL THRU */
case BTLessEqualStrategyNumber:
if (cmpresult >= cmpexact)
matchelem++;
/* Resize, keeping elements from the start of the array */
new_nelems = matchelem;
break;
case BTEqualStrategyNumber:
if (cmpresult != 0)
{
/* qual is unsatisfiable */
new_nelems = 0;
}
else
{
/* Shift matching element to the start of the array, resize */
array->elem_values[0] = array->elem_values[matchelem];
new_nelems = 1;
}
break;
case BTGreaterEqualStrategyNumber:
cmpexact = 1; /* include exact match, if any */
/* FALL THRU */
case BTGreaterStrategyNumber:
if (cmpresult >= cmpexact)
matchelem++;
/* Shift matching elements to the start of the array, resize */
new_nelems = array->num_elems - matchelem;
memmove(array->elem_values, array->elem_values + matchelem,
sizeof(Datum) * new_nelems);
break;
default:
elog(ERROR, "unrecognized StrategyNumber: %d",
(int) skey->sk_strategy);
break;
}
Assert(new_nelems >= 0);
Assert(new_nelems <= array->num_elems);
array->num_elems = new_nelems;
*qual_ok = new_nelems > 0;
return true;
}
/*
* Preprocessing of skip array scan key, used to determine redundancy against
* a non-array scalar scan key (must be an inequality).
*
* _bt_compare_array_scankey_args helper function.
*
* Skip arrays work by procedurally generating their elements as needed, so we
* just store the inequality as the skip array's low_compare or high_compare
* (except when there's already a more restrictive low_compare/high_compare).
* The array's final elements are the range of values that still satisfy the
* array's final low_compare and high_compare.
*/
static bool
_bt_skiparray_shrink(IndexScanDesc scan, ScanKey skey, BTArrayKeyInfo *array,
bool *qual_ok)
{
bool test_result;
Assert(array->num_elems == -1);
/*
* Array's index attribute will be constrained by a strict operator/key.
* Array must not "contain a NULL element" (i.e. the scan must not apply
* "IS NULL" qual when it reaches the end of the index that stores NULLs).
*/
array->null_elem = false;
*qual_ok = true;
/*
* Consider if we should treat caller's scalar scan key as the skip
* array's high_compare or low_compare.
*
* In general the current array element must either be a copy of a value
* taken from an index tuple, or a derivative value generated by opclass's
* skip support function. That way the scan can always safely assume that
* it's okay to use the only-input-opclass-type proc from so->orderProcs[]
* (they can be cross-type with SAOP arrays, but never with skip arrays).
*
* This approach is enabled by MINVAL/MAXVAL sentinel key markings, which
* can be thought of as representing either the lowest or highest matching
* array element (excluding the NULL element, where applicable, though as
* just discussed it isn't applicable to this range skip array anyway).
* Array keys marked MINVAL/MAXVAL never have a valid datum in their
* sk_argument field. The scan directly applies the array's low_compare
* key when it encounters MINVAL in the array key proper (just as it
* applies high_compare when it sees MAXVAL set in the array key proper).
* The scan must never use the array's so->orderProcs[] proc against
* low_compare's/high_compare's sk_argument, either (so->orderProcs[] is
* only intended to be used with rhs datums from the array proper/index).
*/
switch (skey->sk_strategy)
{
case BTLessStrategyNumber:
case BTLessEqualStrategyNumber:
if (array->high_compare)
{
/* replace existing high_compare with caller's key? */
if (!_bt_compare_scankey_args(scan, array->high_compare, skey,
array->high_compare, NULL, NULL,
&test_result))
return false; /* can't determine more restrictive key */
if (!test_result)
return true; /* no, just discard caller's key */
/* yes, replace existing high_compare with caller's key */
}
/* caller's key becomes skip array's high_compare */
array->high_compare = skey;
break;
case BTGreaterEqualStrategyNumber:
case BTGreaterStrategyNumber:
if (array->low_compare)
{
/* replace existing low_compare with caller's key? */
if (!_bt_compare_scankey_args(scan, array->low_compare, skey,
array->low_compare, NULL, NULL,
&test_result))
return false; /* can't determine more restrictive key */
if (!test_result)
return true; /* no, just discard caller's key */
/* yes, replace existing low_compare with caller's key */
}
/* caller's key becomes skip array's low_compare */
array->low_compare = skey;
break;
case BTEqualStrategyNumber:
default:
elog(ERROR, "unrecognized StrategyNumber: %d",
(int) skey->sk_strategy);
break;
}
return true;
}
/*
* Applies the opfamily's skip support routine to convert the skip array's >
* low_compare key (if any) into a >= key, and to convert its < high_compare
* key (if any) into a <= key. Decrements the high_compare key's sk_argument,
* and/or increments the low_compare key's sk_argument (also adjusts their
* operator strategies, while changing the operator as appropriate).
*
* This optional optimization reduces the number of descents required within
* _bt_first. Whenever _bt_first is called with a skip array whose current
* array element is the sentinel value MINVAL, using a transformed >= key
* instead of using the original > key makes it safe to include lower-order
* scan keys in the insertion scan key (there must be lower-order scan keys
* after the skip array). We will avoid an extra _bt_first to find the first
* value in the index > sk_argument -- at least when the first real matching
* value in the index happens to be an exact match for the sk_argument value
* that we produced here by incrementing the original input key's sk_argument.
* (Backwards scans derive the same benefit when they encounter the sentinel
* value MAXVAL, by converting the high_compare key from < to <=.)
*
* Note: The transformation is only correct when it cannot allow the scan to
* overlook matching tuples, but we don't have enough semantic information to
* safely make sure that can't happen during scans with cross-type operators.
* That's why we'll never apply the transformation in cross-type scenarios.
* For example, if we attempted to convert "sales_ts > '2024-01-01'::date"
* into "sales_ts >= '2024-01-02'::date" given a "sales_ts" attribute whose
* input opclass is timestamp_ops, the scan would overlook almost all (or all)
* tuples for sales that fell on '2024-01-01'.
*
* Note: We can safely modify array->low_compare/array->high_compare in place
* because they just point to copies of our scan->keyData[] input scan keys
* (namely the copies returned by _bt_preprocess_array_keys to be used as
* input into the standard preprocessing steps in _bt_preprocess_keys).
* Everything will be reset if there's a rescan.
*/
static void
_bt_skiparray_strat_adjust(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
MemoryContext oldContext;
/*
* Called last among all preprocessing steps, when the skip array's final
* low_compare and high_compare have both been chosen
*/
Assert(arraysk->sk_flags & SK_BT_SKIP);
Assert(array->num_elems == -1 && !array->null_elem && array->sksup);
oldContext = MemoryContextSwitchTo(so->arrayContext);
if (array->high_compare &&
array->high_compare->sk_strategy == BTLessStrategyNumber)
_bt_skiparray_strat_decrement(scan, arraysk, array);
if (array->low_compare &&
array->low_compare->sk_strategy == BTGreaterStrategyNumber)
_bt_skiparray_strat_increment(scan, arraysk, array);
MemoryContextSwitchTo(oldContext);
}
/*
* Convert skip array's < high_compare key into a <= key
*/
static void
_bt_skiparray_strat_decrement(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array)
{
Relation rel = scan->indexRelation;
Oid opfamily = rel->rd_opfamily[arraysk->sk_attno - 1],
opcintype = rel->rd_opcintype[arraysk->sk_attno - 1],
leop;
RegProcedure cmp_proc;
ScanKey high_compare = array->high_compare;
Datum orig_sk_argument = high_compare->sk_argument,
new_sk_argument;
bool uflow;
int16 lookupstrat;
Assert(high_compare->sk_strategy == BTLessStrategyNumber);
/*
* Only perform the transformation when the operator type matches the
* index attribute's input opclass type
*/
if (high_compare->sk_subtype != opcintype &&
high_compare->sk_subtype != InvalidOid)
return;
/* Decrement, handling underflow by marking the qual unsatisfiable */
new_sk_argument = array->sksup->decrement(rel, orig_sk_argument, &uflow);
if (uflow)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
so->qual_ok = false;
return;
}
/*
* Look up <= operator (might fail), accounting for the fact that a
* high_compare on a DESC column already had its strategy commuted
*/
lookupstrat = BTLessEqualStrategyNumber;
if (high_compare->sk_flags & SK_BT_DESC)
lookupstrat = BTGreaterEqualStrategyNumber; /* commute this too */
leop = get_opfamily_member(opfamily, opcintype, opcintype, lookupstrat);
if (!OidIsValid(leop))
return;
cmp_proc = get_opcode(leop);
if (RegProcedureIsValid(cmp_proc))
{
/* Transform < high_compare key into <= key */
fmgr_info(cmp_proc, &high_compare->sk_func);
high_compare->sk_argument = new_sk_argument;
high_compare->sk_strategy = BTLessEqualStrategyNumber;
}
}
/*
* Convert skip array's > low_compare key into a >= key
*/
static void
_bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array)
{
Relation rel = scan->indexRelation;
Oid opfamily = rel->rd_opfamily[arraysk->sk_attno - 1],
opcintype = rel->rd_opcintype[arraysk->sk_attno - 1],
geop;
RegProcedure cmp_proc;
ScanKey low_compare = array->low_compare;
Datum orig_sk_argument = low_compare->sk_argument,
new_sk_argument;
bool oflow;
int16 lookupstrat;
Assert(low_compare->sk_strategy == BTGreaterStrategyNumber);
/*
* Only perform the transformation when the operator type matches the
* index attribute's input opclass type
*/
if (low_compare->sk_subtype != opcintype &&
low_compare->sk_subtype != InvalidOid)
return;
/* Increment, handling overflow by marking the qual unsatisfiable */
new_sk_argument = array->sksup->increment(rel, orig_sk_argument, &oflow);
if (oflow)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
so->qual_ok = false;
return;
}
/*
* Look up >= operator (might fail), accounting for the fact that a
* low_compare on a DESC column already had its strategy commuted
*/
lookupstrat = BTGreaterEqualStrategyNumber;
if (low_compare->sk_flags & SK_BT_DESC)
lookupstrat = BTLessEqualStrategyNumber; /* commute this too */
geop = get_opfamily_member(opfamily, opcintype, opcintype, lookupstrat);
if (!OidIsValid(geop))
return;
cmp_proc = get_opcode(geop);
if (RegProcedureIsValid(cmp_proc))
{
/* Transform > low_compare key into >= key */
fmgr_info(cmp_proc, &low_compare->sk_func);
low_compare->sk_argument = new_sk_argument;
low_compare->sk_strategy = BTGreaterEqualStrategyNumber;
}
}
/*
* _bt_unmark_keys() -- make superfluous required keys nonrequired after all
*
* When _bt_preprocess_keys fails to eliminate one or more redundant keys, it
* calls here to make sure that no index attribute has more than one > or >=
* key marked required, and no more than one required < or <= key. Attributes
* with = keys will always get one = key as their required key. All other
* keys that were initially marked required get "unmarked" here. That way,
* _bt_first and _bt_checkkeys will reliably agree on which keys to use to
* start and/or to end the scan.
*
* We also relocate keys that become/started out nonrequired to the end of
* so->keyData[]. That way, _bt_first and _bt_checkkeys cannot fail to reach
* a required key due to some earlier nonrequired key getting in the way.
*
* Only call here when _bt_compare_scankey_args returned false at least once
* (otherwise, calling here will just waste cycles).
*/
static void
_bt_unmark_keys(IndexScanDesc scan, int *keyDataMap)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
AttrNumber attno;
bool *unmarkikey;
int nunmark,
nunmarked,
nkept,
firsti;
ScanKey keepKeys,
unmarkKeys;
FmgrInfo *keepOrderProcs = NULL,
*unmarkOrderProcs = NULL;
bool haveReqEquals,
haveReqForward,
haveReqBackward;
/*
* Do an initial pass over so->keyData[] that determines which keys to
* keep as required. We expect so->keyData[] to still be in attribute
* order when we're called (though we don't expect any particular order
* among each attribute's keys).
*
* When both equality and inequality keys remain on a single attribute, we
* *must* make sure that exactly one of the equalities remains required.
* Any requiredness markings that we might leave on later keys/attributes
* are predicated on there being required = keys on all prior columns.
*/
unmarkikey = palloc0(so->numberOfKeys * sizeof(bool));
nunmark = 0;
/* Set things up for first key's attribute */
attno = so->keyData[0].sk_attno;
firsti = 0;
haveReqEquals = false;
haveReqForward = false;
haveReqBackward = false;
for (int i = 0; i < so->numberOfKeys; i++)
{
ScanKey origkey = &so->keyData[i];
if (origkey->sk_attno != attno)
{
/* Reset for next attribute */
attno = origkey->sk_attno;
firsti = i;
haveReqEquals = false;
haveReqForward = false;
haveReqBackward = false;
}
/* Equalities get priority over inequalities */
if (haveReqEquals)
{
/*
* We already found the first "=" key for this attribute. We've
* already decided that all its other keys will be unmarked.
*/
Assert(!(origkey->sk_flags & SK_SEARCHNULL));
unmarkikey[i] = true;
nunmark++;
continue;
}
else if ((origkey->sk_flags & SK_BT_REQFWD) &&
(origkey->sk_flags & SK_BT_REQBKWD))
{
/*
* Found the first "=" key for attno. All other attno keys will
* be unmarked.
*/
Assert(origkey->sk_strategy == BTEqualStrategyNumber);
haveReqEquals = true;
for (int j = firsti; j < i; j++)
{
/* Unmark any prior inequality keys on attno after all */
if (!unmarkikey[j])
{
unmarkikey[j] = true;
nunmark++;
}
}
continue;
}
/* Deal with inequalities next */
if ((origkey->sk_flags & SK_BT_REQFWD) && !haveReqForward)
{
haveReqForward = true;
continue;
}
else if ((origkey->sk_flags & SK_BT_REQBKWD) && !haveReqBackward)
{
haveReqBackward = true;
continue;
}
/*
* We have either a redundant inequality key that will be unmarked, or
* we have a key that wasn't marked required in the first place
*/
unmarkikey[i] = true;
nunmark++;
}
/* Should only be called when _bt_compare_scankey_args reported failure */
Assert(nunmark > 0);
/*
* Next, allocate temp arrays: one for required keys that'll remain
* required, the other for all remaining keys
*/
unmarkKeys = palloc(nunmark * sizeof(ScanKeyData));
keepKeys = palloc((so->numberOfKeys - nunmark) * sizeof(ScanKeyData));
nunmarked = 0;
nkept = 0;
if (so->numArrayKeys)
{
unmarkOrderProcs = palloc(nunmark * sizeof(FmgrInfo));
keepOrderProcs = palloc((so->numberOfKeys - nunmark) * sizeof(FmgrInfo));
}
/*
* Next, copy the contents of so->keyData[] into the appropriate temp
* array.
*
* Scans with = array keys need us to maintain invariants around the order
* of so->orderProcs[] and so->arrayKeys[] relative to so->keyData[]. See
* _bt_preprocess_array_keys_final for a full explanation.
*/
for (int i = 0; i < so->numberOfKeys; i++)
{
ScanKey origkey = &so->keyData[i];
ScanKey unmark;
if (!unmarkikey[i])
{
/*
* Key gets to keep its original requiredness markings.
*
* Key will stay in its original position, unless we're going to
* unmark an earlier key (in which case this key gets moved back).
*/
memcpy(keepKeys + nkept, origkey, sizeof(ScanKeyData));
if (so->numArrayKeys)
{
keyDataMap[i] = nkept;
memcpy(keepOrderProcs + nkept, &so->orderProcs[i],
sizeof(FmgrInfo));
}
nkept++;
continue;
}
/*
* Key will be unmarked as needed, and moved to the end of the array,
* next to other keys that will become (or always were) nonrequired
*/
unmark = unmarkKeys + nunmarked;
memcpy(unmark, origkey, sizeof(ScanKeyData));
if (so->numArrayKeys)
{
keyDataMap[i] = (so->numberOfKeys - nunmark) + nunmarked;
memcpy(&unmarkOrderProcs[nunmarked], &so->orderProcs[i],
sizeof(FmgrInfo));
}
/*
* Preprocessing only generates skip arrays when it knows that they'll
* be the only required = key on the attr. We'll never unmark them.
*/
Assert(!(unmark->sk_flags & SK_BT_SKIP));
/*
* Also shouldn't have to unmark an IS NULL or an IS NOT NULL key.
* They aren't cross-type, so an incomplete opfamily can't matter.
*/
Assert(!(unmark->sk_flags & SK_ISNULL) ||
!(unmark->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)));
/* Clear requiredness flags on redundant key (and on any subkeys) */
unmark->sk_flags &= ~(SK_BT_REQFWD | SK_BT_REQBKWD);
if (unmark->sk_flags & SK_ROW_HEADER)
{
ScanKey subkey = (ScanKey) DatumGetPointer(unmark->sk_argument);
Assert(subkey->sk_strategy == unmark->sk_strategy);
for (;;)
{
Assert(subkey->sk_flags & SK_ROW_MEMBER);
subkey->sk_flags &= ~(SK_BT_REQFWD | SK_BT_REQBKWD);
if (subkey->sk_flags & SK_ROW_END)
break;
subkey++;
}
}
nunmarked++;
}
/* Copy both temp arrays back into so->keyData[] to reorder */
Assert(nkept == so->numberOfKeys - nunmark);
Assert(nunmarked == nunmark);
memcpy(so->keyData, keepKeys, sizeof(ScanKeyData) * nkept);
memcpy(so->keyData + nkept, unmarkKeys, sizeof(ScanKeyData) * nunmarked);
/* Done with temp arrays */
pfree(unmarkikey);
pfree(keepKeys);
pfree(unmarkKeys);
/*
* Now copy so->orderProcs[] temp entries needed by scans with = array
* keys back (just like with the so->keyData[] temp arrays)
*/
if (so->numArrayKeys)
{
memcpy(so->orderProcs, keepOrderProcs, sizeof(FmgrInfo) * nkept);
memcpy(so->orderProcs + nkept, unmarkOrderProcs,
sizeof(FmgrInfo) * nunmarked);
/* Also fix-up array->scan_key references */
for (int arridx = 0; arridx < so->numArrayKeys; arridx++)
{
BTArrayKeyInfo *array = &so->arrayKeys[arridx];
array->scan_key = keyDataMap[array->scan_key];
}
/*
* Sort so->arrayKeys[] based on its new BTArrayKeyInfo.scan_key
* offsets, so that its order matches so->keyData[] order as expected
*/
qsort(so->arrayKeys, so->numArrayKeys, sizeof(BTArrayKeyInfo),
_bt_reorder_array_cmp);
/* Done with temp arrays */
pfree(unmarkOrderProcs);
pfree(keepOrderProcs);
}
}
/*
 * qsort comparator used to reorder so->arrayKeys[] entries by their
 * BTArrayKeyInfo.scan_key offsets
 */
static int
_bt_reorder_array_cmp(const void *a, const void *b)
{
	const BTArrayKeyInfo *lhs = (const BTArrayKeyInfo *) a;
	const BTArrayKeyInfo *rhs = (const BTArrayKeyInfo *) b;

	/* Ascending order of scan key offset */
	return pg_cmp_s32(lhs->scan_key, rhs->scan_key);
}
/*
 * _bt_preprocess_array_keys() -- Preprocess SK_SEARCHARRAY scan keys
 *
 * If there are any SK_SEARCHARRAY scan keys, deconstruct the array(s) and
 * set up BTArrayKeyInfo info for each one that is an equality-type key.
 * Returns modified scan keys as input for further, standard preprocessing.
 *
 * Currently we perform two kinds of preprocessing to deal with redundancies.
 * For inequality array keys, it's sufficient to find the extreme element
 * value and replace the whole array with that scalar value.  This eliminates
 * all but one array element as redundant.  Similarly, we are capable of
 * "merging together" multiple equality array keys (from two or more input
 * scan keys) into a single output scan key containing only the intersecting
 * array elements.  This can eliminate many redundant array elements, as well
 * as eliminating whole array scan keys as redundant.  It can also allow us to
 * detect contradictory quals.
 *
 * Caller must pass *new_numberOfKeys to give us a way to change the number of
 * scan keys that caller treats as input to standard preprocessing steps.  The
 * returned array is smaller than scan->keyData[] when we could eliminate a
 * redundant array scan key (redundant with another array scan key).  It is
 * convenient for _bt_preprocess_keys caller to have to deal with no more than
 * one equality strategy array scan key per index attribute.  We'll always be
 * able to set things up that way when complete opfamilies are used.
 *
 * We're also responsible for generating skip arrays (and their associated
 * scan keys) here.  This enables skip scan.  We do this for index attributes
 * that initially lacked an equality condition within scan->keyData[], iff
 * doing so allows a later scan key (that was passed to us in scan->keyData[])
 * to be marked required by our _bt_preprocess_keys caller.
 *
 * We set the scan key references from the scan's BTArrayKeyInfo info array to
 * offsets into the temp modified input array returned to caller.  Scans that
 * have array keys should call _bt_preprocess_array_keys_final when standard
 * preprocessing steps are complete.  This will convert the scan key offset
 * references into references to the scan's so->keyData[] output scan keys.
 *
 * Note: the reason we need to return a temp scan key array, rather than just
 * modifying scan->keyData[], is that callers are permitted to call btrescan
 * without supplying a new set of scankey data.  Certain other preprocessing
 * routines (e.g., _bt_fix_scankey_strategy) _can_ modify scan->keyData[], but
 * we can't make that work here because our modifications are non-idempotent.
 */
static ScanKey
_bt_preprocess_array_keys(IndexScanDesc scan, int *new_numberOfKeys)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	Relation	rel = scan->indexRelation;
	int16	   *indoption = rel->rd_indoption;
	Oid			skip_eq_ops[INDEX_MAX_KEYS];
	int			numArrayKeys,
				numSkipArrayKeys,
				numArrayKeyData;
	AttrNumber	attno_skip = 1;
	int			origarrayatt = InvalidAttrNumber,
				origarraykey = -1;
	Oid			origelemtype = InvalidOid;
	MemoryContext oldContext;
	ScanKey		arrayKeyData;	/* modified copy of scan->keyData */

	/*
	 * Check the number of input array keys within scan->keyData[] input keys
	 * (also checks if we should add extra skip arrays based on input keys)
	 */
	numArrayKeys = _bt_num_array_keys(scan, skip_eq_ops, &numSkipArrayKeys);

	so->skipScan = (numSkipArrayKeys > 0);

	/* Quit if nothing to do. */
	if (numArrayKeys == 0)
		return NULL;

	/*
	 * Estimated final size of arrayKeyData[] array we'll return to our caller
	 * is the size of the original scan->keyData[] input array, plus space for
	 * any additional skip array scan keys we'll need to generate below
	 */
	numArrayKeyData = scan->numberOfKeys + numSkipArrayKeys;

	/*
	 * Make a scan-lifespan context to hold array-associated data, or reset it
	 * if we already have one from a previous rescan cycle.
	 */
	if (so->arrayContext == NULL)
		so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
												 "BTree array context",
												 ALLOCSET_SMALL_SIZES);
	else
		MemoryContextReset(so->arrayContext);

	oldContext = MemoryContextSwitchTo(so->arrayContext);

	/* Create output scan keys in the workspace context */
	arrayKeyData = (ScanKey) palloc(numArrayKeyData * sizeof(ScanKeyData));

	/* Allocate space for per-array data in the workspace context */
	so->arrayKeys = (BTArrayKeyInfo *) palloc(numArrayKeys * sizeof(BTArrayKeyInfo));

	/* Allocate space for ORDER procs used to help _bt_checkkeys */
	so->orderProcs = (FmgrInfo *) palloc(numArrayKeyData * sizeof(FmgrInfo));

	/*
	 * Main pass: copy/transform each input scan key into arrayKeyData[].
	 * Both counters are reset and re-incremented here so that they end up
	 * reflecting only the arrays/output keys that we actually keep.
	 */
	numArrayKeys = 0;
	numArrayKeyData = 0;
	for (int input_ikey = 0; input_ikey < scan->numberOfKeys; input_ikey++)
	{
		ScanKey		inkey = scan->keyData + input_ikey,
					cur;
		FmgrInfo	sortproc;
		FmgrInfo   *sortprocp = &sortproc;
		Oid			elemtype;
		bool		reverse;
		ArrayType  *arrayval;
		int16		elmlen;
		bool		elmbyval;
		char		elmalign;
		int			num_elems;
		Datum	   *elem_values;
		bool	   *elem_nulls;
		int			num_nonnulls;

		/* set up next output scan key */
		cur = &arrayKeyData[numArrayKeyData];

		/* Backfill skip arrays for attrs < or <= input key's attr? */
		while (numSkipArrayKeys && attno_skip <= inkey->sk_attno)
		{
			Oid			opfamily = rel->rd_opfamily[attno_skip - 1];
			Oid			opcintype = rel->rd_opcintype[attno_skip - 1];
			Oid			collation = rel->rd_indcollation[attno_skip - 1];
			Oid			eq_op = skip_eq_ops[attno_skip - 1];
			CompactAttribute *attr;
			RegProcedure cmp_proc;

			if (!OidIsValid(eq_op))
			{
				/*
				 * Attribute already has an = input key, so don't output a
				 * skip array for attno_skip.  Just copy attribute's = input
				 * key into arrayKeyData[] once outside this inner loop.
				 *
				 * Note: When we get here there must be a later attribute that
				 * lacks an equality input key, and still needs a skip array
				 * (if there wasn't then numSkipArrayKeys would be 0 by now).
				 */
				Assert(attno_skip == inkey->sk_attno);
				/* inkey can't be last input key to be marked required: */
				Assert(input_ikey < scan->numberOfKeys - 1);
#if 0
				/* Could be a redundant input scan key, so can't do this: */
				Assert(inkey->sk_strategy == BTEqualStrategyNumber ||
					   (inkey->sk_flags & SK_SEARCHNULL));
#endif
				attno_skip++;
				break;
			}

			cmp_proc = get_opcode(eq_op);
			if (!RegProcedureIsValid(cmp_proc))
				elog(ERROR, "missing oprcode for skipping equals operator %u", eq_op);

			ScanKeyEntryInitialize(cur,
								   SK_SEARCHARRAY | SK_BT_SKIP,	/* flags */
								   attno_skip,	/* skipped att number */
								   BTEqualStrategyNumber,	/* equality strategy */
								   InvalidOid,	/* opclass input subtype */
								   collation,	/* index column's collation */
								   cmp_proc,	/* equality operator's proc */
								   (Datum) 0);	/* constant */

			/* Initialize generic BTArrayKeyInfo fields */
			so->arrayKeys[numArrayKeys].scan_key = numArrayKeyData;
			so->arrayKeys[numArrayKeys].num_elems = -1;

			/* Initialize skip array specific BTArrayKeyInfo fields */
			attr = TupleDescCompactAttr(RelationGetDescr(rel), attno_skip - 1);
			reverse = (indoption[attno_skip - 1] & INDOPTION_DESC) != 0;
			so->arrayKeys[numArrayKeys].attlen = attr->attlen;
			so->arrayKeys[numArrayKeys].attbyval = attr->attbyval;
			so->arrayKeys[numArrayKeys].null_elem = true;	/* for now */
			so->arrayKeys[numArrayKeys].sksup =
				PrepareSkipSupportFromOpclass(opfamily, opcintype, reverse);
			so->arrayKeys[numArrayKeys].low_compare = NULL; /* for now */
			so->arrayKeys[numArrayKeys].high_compare = NULL;	/* for now */

			/*
			 * We'll need a 3-way ORDER proc.  Set that up now.
			 */
			_bt_setup_array_cmp(scan, cur, opcintype,
								&so->orderProcs[numArrayKeyData], NULL);

			numArrayKeys++;
			numArrayKeyData++;	/* keep this scan key/array */

			/* set up next output scan key */
			cur = &arrayKeyData[numArrayKeyData];

			/* remember having output this skip array and scan key */
			numSkipArrayKeys--;
			attno_skip++;
		}

		/*
		 * Provisionally copy scan key into arrayKeyData[] array we'll return
		 * to _bt_preprocess_keys caller
		 */
		*cur = *inkey;

		if (!(cur->sk_flags & SK_SEARCHARRAY))
		{
			numArrayKeyData++;	/* keep this non-array scan key */
			continue;
		}

		/*
		 * Process SAOP array scan key
		 */
		Assert(!(cur->sk_flags & (SK_ROW_HEADER | SK_SEARCHNULL | SK_SEARCHNOTNULL)));

		/* If array is null as a whole, the scan qual is unsatisfiable */
		if (cur->sk_flags & SK_ISNULL)
		{
			so->qual_ok = false;
			break;
		}

		/*
		 * Deconstruct the array into elements
		 */
		arrayval = DatumGetArrayTypeP(cur->sk_argument);
		/* We could cache this data, but not clear it's worth it */
		get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
							 &elmlen, &elmbyval, &elmalign);
		deconstruct_array(arrayval,
						  ARR_ELEMTYPE(arrayval),
						  elmlen, elmbyval, elmalign,
						  &elem_values, &elem_nulls, &num_elems);

		/*
		 * Compress out any null elements.  We can ignore them since we assume
		 * all btree operators are strict.
		 */
		num_nonnulls = 0;
		for (int j = 0; j < num_elems; j++)
		{
			if (!elem_nulls[j])
				elem_values[num_nonnulls++] = elem_values[j];
		}

		/* We could pfree(elem_nulls) now, but not worth the cycles */

		/* If there's no non-nulls, the scan qual is unsatisfiable */
		if (num_nonnulls == 0)
		{
			so->qual_ok = false;
			break;
		}

		/*
		 * Determine the nominal datatype of the array elements.  We have to
		 * support the convention that sk_subtype == InvalidOid means the
		 * opclass input type; this is a hack to simplify life for
		 * ScanKeyInit().
		 */
		elemtype = cur->sk_subtype;
		if (elemtype == InvalidOid)
			elemtype = rel->rd_opcintype[cur->sk_attno - 1];

		/*
		 * If the comparison operator is not equality, then the array qual
		 * degenerates to a simple comparison against the smallest or largest
		 * non-null array element, as appropriate.
		 */
		switch (cur->sk_strategy)
		{
			case BTLessStrategyNumber:
			case BTLessEqualStrategyNumber:
				cur->sk_argument =
					_bt_find_extreme_element(scan, cur, elemtype,
											 BTGreaterStrategyNumber,
											 elem_values, num_nonnulls);
				numArrayKeyData++;	/* keep this transformed scan key */
				continue;
			case BTEqualStrategyNumber:
				/* proceed with rest of loop */
				break;
			case BTGreaterEqualStrategyNumber:
			case BTGreaterStrategyNumber:
				cur->sk_argument =
					_bt_find_extreme_element(scan, cur, elemtype,
											 BTLessStrategyNumber,
											 elem_values, num_nonnulls);
				numArrayKeyData++;	/* keep this transformed scan key */
				continue;
			default:
				elog(ERROR, "unrecognized StrategyNumber: %d",
					 (int) cur->sk_strategy);
				break;
		}

		/*
		 * We'll need a 3-way ORDER proc to perform binary searches for the
		 * next matching array element.  Set that up now.
		 *
		 * Array scan keys with cross-type equality operators will require a
		 * separate same-type ORDER proc for sorting their array.  Otherwise,
		 * sortproc just points to the same proc used during binary searches.
		 */
		_bt_setup_array_cmp(scan, cur, elemtype,
							&so->orderProcs[numArrayKeyData], &sortprocp);

		/*
		 * Sort the non-null elements and eliminate any duplicates.  We must
		 * sort in the same ordering used by the index column, so that the
		 * arrays can be advanced in lockstep with the scan's progress through
		 * the index's key space.
		 */
		reverse = (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0;
		num_elems = _bt_sort_array_elements(cur, sortprocp, reverse,
											elem_values, num_nonnulls);

		if (origarrayatt == cur->sk_attno)
		{
			BTArrayKeyInfo *orig = &so->arrayKeys[origarraykey];

			/*
			 * This array scan key is redundant with a previous equality
			 * operator array scan key.  Merge the two arrays together to
			 * eliminate contradictory non-intersecting elements (or try to).
			 *
			 * We merge this next array back into attribute's original array.
			 */
			Assert(arrayKeyData[orig->scan_key].sk_attno == cur->sk_attno);
			Assert(arrayKeyData[orig->scan_key].sk_collation ==
				   cur->sk_collation);
			if (_bt_merge_arrays(scan, cur, sortprocp, reverse,
								 origelemtype, elemtype,
								 orig->elem_values, &orig->num_elems,
								 elem_values, num_elems))
			{
				/* Successfully eliminated this array */
				pfree(elem_values);

				/*
				 * If no intersecting elements remain in the original array,
				 * the scan qual is unsatisfiable
				 */
				if (orig->num_elems == 0)
				{
					so->qual_ok = false;
					break;
				}

				/* Throw away this scan key/array */
				continue;
			}

			/*
			 * Unable to merge this array with previous array due to a lack of
			 * suitable cross-type opfamily support.  Will need to keep both
			 * scan keys/arrays.
			 */
		}
		else
		{
			/*
			 * This array is the first for current index attribute.
			 *
			 * If it turns out to not be the last array (that is, if the next
			 * array is redundantly applied to this same index attribute),
			 * we'll then treat this array as the attribute's "original" array
			 * when merging.
			 */
			origarrayatt = cur->sk_attno;
			origarraykey = numArrayKeys;
			origelemtype = elemtype;
		}

		/* Initialize generic BTArrayKeyInfo fields */
		so->arrayKeys[numArrayKeys].scan_key = numArrayKeyData;
		so->arrayKeys[numArrayKeys].num_elems = num_elems;

		/* Initialize SAOP array specific BTArrayKeyInfo fields */
		so->arrayKeys[numArrayKeys].elem_values = elem_values;
		so->arrayKeys[numArrayKeys].cur_elem = -1;	/* i.e. invalid */

		numArrayKeys++;
		numArrayKeyData++;	/* keep this scan key/array */
	}

	/* Must have output every skip array, unless qual proved unsatisfiable */
	Assert(numSkipArrayKeys == 0 || !so->qual_ok);

	/* Set final number of equality-type array keys */
	so->numArrayKeys = numArrayKeys;
	/* Set number of scan keys in arrayKeyData[] */
	*new_numberOfKeys = numArrayKeyData;

	MemoryContextSwitchTo(oldContext);

	return arrayKeyData;
}
/*
 * _bt_preprocess_array_keys_final() -- fix up array scan key references
 *
 * When _bt_preprocess_array_keys performed initial array preprocessing, it
 * set each array's array->scan_key to its scankey's arrayKeyData[] offset.
 * This function handles translation of the scan key references from the
 * BTArrayKeyInfo info array, from input scan key references (to the keys in
 * arrayKeyData[]), into output references (to the keys in so->keyData[]).
 * Caller's keyDataMap[] array tells us how to perform this remapping.
 *
 * Also finalizes so->orderProcs[] for the scan.  Arrays already have an ORDER
 * proc, which might need to be repositioned to its so->keyData[]-wise offset
 * (very much like the remapping that we apply to array->scan_key references).
 * Non-array equality strategy scan keys (that survived preprocessing) don't
 * yet have an so->orderProcs[] entry, so we set one for them here.
 *
 * Also converts single-element array scan keys into equivalent non-array
 * equality scan keys, which decrements so->numArrayKeys.  It's possible that
 * this will leave this new btrescan without any arrays at all.  This isn't
 * necessary for correctness; it's just an optimization.  Non-array equality
 * scan keys are slightly faster than equivalent array scan keys at runtime.
 */
static void
_bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	Relation	rel = scan->indexRelation;
	int			arrayidx = 0;
	int			last_equal_output_ikey PG_USED_FOR_ASSERTS_ONLY = -1;

	Assert(so->qual_ok);

	/*
	 * Nothing for us to do when _bt_preprocess_array_keys only had to deal
	 * with array inequalities
	 */
	if (so->numArrayKeys == 0)
		return;

	for (int output_ikey = 0; output_ikey < so->numberOfKeys; output_ikey++)
	{
		ScanKey		outkey = so->keyData + output_ikey;
		int			input_ikey;
		bool		found PG_USED_FOR_ASSERTS_ONLY = false;

		Assert(outkey->sk_strategy != InvalidStrategy);

		if (outkey->sk_strategy != BTEqualStrategyNumber)
			continue;

		/* keyDataMap[] maps this output key's offset back to its input key */
		input_ikey = keyDataMap[output_ikey];

		Assert(last_equal_output_ikey < output_ikey);
		Assert(last_equal_output_ikey < input_ikey);
		last_equal_output_ikey = output_ikey;

		/*
		 * We're lazy about looking up ORDER procs for non-array keys, since
		 * not all input keys become output keys.  Take care of it now.
		 */
		if (!(outkey->sk_flags & SK_SEARCHARRAY))
		{
			Oid			elemtype;

			/* No need for an ORDER proc given an IS NULL scan key */
			if (outkey->sk_flags & SK_SEARCHNULL)
				continue;

			/*
			 * A non-required scan key doesn't need an ORDER proc, either
			 * (unless it's associated with an array, which this one isn't)
			 */
			if (!(outkey->sk_flags & SK_BT_REQFWD))
				continue;

			elemtype = outkey->sk_subtype;
			if (elemtype == InvalidOid)
				elemtype = rel->rd_opcintype[outkey->sk_attno - 1];

			_bt_setup_array_cmp(scan, outkey, elemtype,
								&so->orderProcs[output_ikey], NULL);
			continue;
		}

		/*
		 * Reorder existing array scan key so->orderProcs[] entries.
		 *
		 * Doing this in-place is safe because preprocessing is required to
		 * output all equality strategy scan keys in original input order
		 * (among each group of entries against the same index attribute).
		 * This is also the order that the arrays themselves appear in.
		 */
		so->orderProcs[output_ikey] = so->orderProcs[input_ikey];

		/* Fix-up array->scan_key references for arrays */
		for (; arrayidx < so->numArrayKeys; arrayidx++)
		{
			BTArrayKeyInfo *array = &so->arrayKeys[arrayidx];

			/*
			 * All skip arrays must be marked required, and final column can
			 * never have a skip array
			 */
			Assert(array->num_elems > 0 || array->num_elems == -1);
			Assert(array->num_elems != -1 || outkey->sk_flags & SK_BT_REQFWD);
			Assert(array->num_elems != -1 ||
				   outkey->sk_attno < IndexRelationGetNumberOfKeyAttributes(rel));

			if (array->scan_key == input_ikey)
			{
				/* found it */
				array->scan_key = output_ikey;
				found = true;

				/*
				 * Transform array scan keys that have exactly 1 element
				 * remaining (following all prior preprocessing) into
				 * equivalent non-array scan keys.
				 */
				if (array->num_elems == 1)
				{
					outkey->sk_flags &= ~SK_SEARCHARRAY;
					outkey->sk_argument = array->elem_values[0];
					so->numArrayKeys--;

					/* If we're out of array keys, we can quit right away */
					if (so->numArrayKeys == 0)
						return;

					/* Shift other arrays forward */
					memmove(array, array + 1,
							sizeof(BTArrayKeyInfo) *
							(so->numArrayKeys - arrayidx));

					/*
					 * Don't increment arrayidx (there was an entry that was
					 * just shifted forward to the offset at arrayidx, which
					 * will still need to be matched)
					 */
				}
				else
				{
					/*
					 * Any skip array low_compare and high_compare scan keys
					 * are now final.  Transform the array's > low_compare key
					 * into a >= key (and < high_compare keys into a <= key).
					 */
					if (array->num_elems == -1 && array->sksup &&
						!array->null_elem)
						_bt_skiparray_strat_adjust(scan, outkey, array);

					/* Match found, so done with this array */
					arrayidx++;
				}
				break;
			}
		}

		Assert(found);
	}

	/*
	 * Parallel index scans require space in shared memory to store the
	 * current array elements (for arrays kept by preprocessing) to schedule
	 * the next primitive index scan.  The underlying structure is protected
	 * using an LWLock, so defensively limit its size.  In practice this can
	 * only affect parallel scans that use an incomplete opfamily.
	 */
	if (scan->parallel_scan && so->numArrayKeys > INDEX_MAX_KEYS)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg_internal("number of array scan keys left by preprocessing (%d) exceeds the maximum allowed by parallel btree index scans (%d)",
								 so->numArrayKeys, INDEX_MAX_KEYS)));
}
/*
 * _bt_num_array_keys() -- determine # of BTArrayKeyInfo entries
 *
 * _bt_preprocess_array_keys helper function.  Returns the estimated size of
 * the scan's BTArrayKeyInfo array, which is guaranteed to be large enough to
 * fit every so->arrayKeys[] entry.
 *
 * Also sets *numSkipArrayKeys_out to the number of skip arrays caller must
 * add to the scan keys it'll output.  Caller must add this many skip arrays:
 * one array for each of the most significant attributes that lack a = input
 * key (IS NULL keys count as = input keys here).  The specific attributes
 * that need skip arrays are indicated by initializing skip_eq_ops_out[] arg
 * 0-based attribute offset to a valid = op strategy Oid.  We'll only ever set
 * skip_eq_ops_out[] entries to InvalidOid for attributes that already have an
 * equality key in scan->keyData[] input keys -- and only when there's some
 * later "attribute gap" for us to "fill-in" with a skip array.
 *
 * We're optimistic about skipping working out: we always add exactly the skip
 * arrays needed to maximize the number of input scan keys that can ultimately
 * be marked as required to continue the scan (but no more).  Given a
 * multi-column index on (a, b, c, d), we add skip arrays as follows:
 *
 * Input keys							Output keys (after all preprocessing)
 * ----------							-------------------------------------
 * a = 1								a = 1 (no skip arrays)
 * b = 42								skip a AND b = 42
 * a = 1 AND b = 42						a = 1 AND b = 42 (no skip arrays)
 * a >= 1 AND b = 42					range skip a AND b = 42
 * a = 1 AND b > 42						a = 1 AND b > 42 (no skip arrays)
 * a >= 1 AND a <= 3 AND b = 42			range skip a AND b = 42
 * a = 1 AND c <= 27					a = 1 AND skip b AND c <= 27
 * a = 1 AND d >= 1						a = 1 AND skip b AND skip c AND d >= 1
 * a = 1 AND b >= 42 AND d > 1			a = 1 AND range skip b AND skip c AND d > 1
 */
static int
_bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out,
				   int *numSkipArrayKeys_out)
{
	Relation	rel = scan->indexRelation;
	AttrNumber	attno_skip = 1,
				attno_inkey = 1;
	bool		attno_has_equal = false,
				attno_has_rowcompare = false;
	int			numSAOPArrayKeys,
				numSkipArrayKeys,
				prev_numSkipArrayKeys;

	Assert(scan->numberOfKeys);

	/* Initial pass over input scan keys counts the number of SAOP arrays */
	numSAOPArrayKeys = 0;
	*numSkipArrayKeys_out = prev_numSkipArrayKeys = numSkipArrayKeys = 0;
	for (int i = 0; i < scan->numberOfKeys; i++)
	{
		ScanKey		inkey = scan->keyData + i;

		if (inkey->sk_flags & SK_SEARCHARRAY)
			numSAOPArrayKeys++;
	}

#ifdef DEBUG_DISABLE_SKIP_SCAN
	/* don't attempt to add skip arrays */
	return numSAOPArrayKeys;
#endif

	/*
	 * Note: this loop deliberately iterates one position past the final
	 * input key; inkey is never dereferenced until after the bounds check
	 */
	for (int i = 0;; i++)
	{
		ScanKey		inkey = scan->keyData + i;

		/*
		 * Backfill skip arrays for any wholly omitted attributes prior to
		 * attno_inkey
		 */
		while (attno_skip < attno_inkey)
		{
			Oid			opfamily = rel->rd_opfamily[attno_skip - 1];
			Oid			opcintype = rel->rd_opcintype[attno_skip - 1];

			/* Look up input opclass's equality operator (might fail) */
			skip_eq_ops_out[attno_skip - 1] =
				get_opfamily_member(opfamily, opcintype, opcintype,
									BTEqualStrategyNumber);
			if (!OidIsValid(skip_eq_ops_out[attno_skip - 1]))
			{
				/*
				 * Cannot generate a skip array for this or later attributes
				 * (input opclass lacks an equality strategy operator)
				 */
				*numSkipArrayKeys_out = prev_numSkipArrayKeys;
				return numSAOPArrayKeys + prev_numSkipArrayKeys;
			}

			/* plan on adding a backfill skip array for this attribute */
			numSkipArrayKeys++;
			attno_skip++;
		}

		/*
		 * Remember how many skip arrays we'd report if we had to stop adding
		 * them right here (used when a later attribute lacks an equality
		 * operator, forcing us to roll back to this count)
		 */
		prev_numSkipArrayKeys = numSkipArrayKeys;

		/*
		 * Stop once past the final input scan key.  We deliberately never add
		 * a skip array for the last input scan key's attribute -- even when
		 * there are only inequality keys on that attribute.
		 */
		if (i == scan->numberOfKeys)
			break;

		/*
		 * Later preprocessing steps cannot merge a RowCompare into a skip
		 * array, so stop adding skip arrays once we see one.  (Note that we
		 * can backfill skip arrays before a RowCompare, which will allow keys
		 * up to and including the RowCompare to be marked required.)
		 *
		 * Skip arrays work by maintaining a current array element value,
		 * which anchors lower-order keys via an implied equality constraint.
		 * This is incompatible with the current nbtree row comparison design,
		 * which compares all columns together, as an indivisible group.
		 * Alternative designs that can be used alongside skip arrays are
		 * possible, but it's not clear that they're really worth pursuing.
		 *
		 * A RowCompare qual "(a, b, c) > (10, 'foo', 42)" is equivalent to
		 * "(a=10 AND b='foo' AND c>42) OR (a=10 AND b>'foo') OR (a>10)".
		 * Decomposing this RowCompare into these 3 disjuncts allows each
		 * disjunct to be executed as a separate "single value" index scan.
		 * That'll give all 3 scans the ability to add skip arrays in the
		 * usual way (when there are any scalar keys after the RowCompare).
		 * Under this scheme, a qual "(a, b, c) > (10, 'foo', 42) AND d = 99"
		 * performs 3 separate scans, each of which can mark keys up to and
		 * including its "d = 99" key as required to continue the scan.
		 */
		if (attno_has_rowcompare)
			break;

		/*
		 * Now consider next attno_inkey (or keep going if this is an
		 * additional scan key against the same attribute)
		 */
		if (attno_inkey < inkey->sk_attno)
		{
			/*
			 * Now add skip array for previous scan key's attribute, though
			 * only if the attribute has no equality strategy scan keys
			 */
			if (attno_has_equal)
			{
				/* Attributes with an = key must have InvalidOid eq_op set */
				skip_eq_ops_out[attno_skip - 1] = InvalidOid;
			}
			else
			{
				Oid			opfamily = rel->rd_opfamily[attno_skip - 1];
				Oid			opcintype = rel->rd_opcintype[attno_skip - 1];

				/* Look up input opclass's equality operator (might fail) */
				skip_eq_ops_out[attno_skip - 1] =
					get_opfamily_member(opfamily, opcintype, opcintype,
										BTEqualStrategyNumber);
				if (!OidIsValid(skip_eq_ops_out[attno_skip - 1]))
				{
					/*
					 * Input opclass lacks an equality strategy operator, so
					 * don't generate a skip array that definitely won't work
					 */
					break;
				}

				/* plan on adding a backfill skip array for this attribute */
				numSkipArrayKeys++;
			}

			/* Set things up for this new attribute */
			attno_skip++;
			attno_inkey = inkey->sk_attno;
			attno_has_equal = false;
		}

		/*
		 * Track if this attribute's scan keys include any equality strategy
		 * scan keys (IS NULL keys count as equality keys here).  Also track
		 * if it has any RowCompare keys.
		 */
		if (inkey->sk_strategy == BTEqualStrategyNumber ||
			(inkey->sk_flags & SK_SEARCHNULL))
			attno_has_equal = true;
		if (inkey->sk_flags & SK_ROW_HEADER)
			attno_has_rowcompare = true;
	}

	*numSkipArrayKeys_out = numSkipArrayKeys;

	return numSAOPArrayKeys + numSkipArrayKeys;
}
/*
 * _bt_find_extreme_element() -- get least or greatest array element
 *
 * The opfamily of the index column identified by scan/skey determines the
 * comparison semantics.  Pass BTLessStrategyNumber in strat to get the least
 * element, or BTGreaterStrategyNumber to get the greatest.
 */
static Datum
_bt_find_extreme_element(IndexScanDesc scan, ScanKey skey, Oid elemtype,
						 StrategyNumber strat,
						 const Datum *elems, int nelems)
{
	Relation	rel = scan->indexRelation;
	Oid			opfamily = rel->rd_opfamily[skey->sk_attno - 1];
	Oid			extreme_op;
	RegProcedure extreme_proc;
	FmgrInfo	procinfo;
	Datum		extreme;

	Assert(skey->sk_strategy != BTEqualStrategyNumber);
	Assert(OidIsValid(elemtype));

	/*
	 * Find the opfamily's non-cross-type comparison operator matching strat.
	 *
	 * In principle this lookup could fail, if the opfamily is incomplete,
	 * but it seems quite unlikely that an opfamily would omit same-type
	 * comparison operators for any datatype that it supports at all.
	 */
	extreme_op = get_opfamily_member(opfamily, elemtype, elemtype, strat);
	if (!OidIsValid(extreme_op))
		elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
			 strat, elemtype, elemtype, opfamily);
	extreme_proc = get_opcode(extreme_op);
	if (!RegProcedureIsValid(extreme_proc))
		elog(ERROR, "missing oprcode for operator %u", extreme_proc);
	fmgr_info(extreme_proc, &procinfo);

	/* Linear scan, keeping whichever element "wins" each comparison */
	Assert(nelems > 0);
	extreme = elems[0];
	for (int next = 1; next < nelems; next++)
	{
		if (DatumGetBool(FunctionCall2Coll(&procinfo,
										   skey->sk_collation,
										   elems[next],
										   extreme)))
			extreme = elems[next];
	}

	return extreme;
}
/*
 * _bt_setup_array_cmp() -- Set up array comparison functions
 *
 * Fills caller's *orderproc with the ORDER proc used during binary searches
 * of arrays as the index scan runs.  When sortprocp isn't NULL, also points
 * *sortprocp at a same-type ORDER proc suitable for sorting/deduplicating
 * the array associated with skey (pass NULL for non-array scan keys).
 *
 * Preprocessing calls here with every equality strategy scan key (whenever
 * the scan uses equality array keys at all), including scan keys with no
 * associated array; _bt_advance_array_keys needs to treat simple scalar
 * equality scan keys as degenerate single element arrays.
 *
 * In the common non-cross-type case a single ORDER proc suffices, so we make
 * *sortprocp point at the same memory that caller's orderproc points to.
 * With a cross-type equality operator, *sortprocp keeps pointing at caller's
 * own separate space.  Either way *sortprocp ends up as a same-type ORDER
 * proc, the only kind that can safely sort/deduplicate the scan key's array.
 */
static void
_bt_setup_array_cmp(IndexScanDesc scan, ScanKey skey, Oid elemtype,
					FmgrInfo *orderproc, FmgrInfo **sortprocp)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	Relation	rel = scan->indexRelation;
	Oid			opfamily = rel->rd_opfamily[skey->sk_attno - 1];
	Oid			opcintype = rel->rd_opcintype[skey->sk_attno - 1];
	RegProcedure btproc;

	Assert(skey->sk_strategy == BTEqualStrategyNumber);
	Assert(OidIsValid(elemtype));

	/*
	 * Non-cross-type case: the relcache already caches the comparison
	 * function we need, and the one same-type proc serves both purposes
	 */
	if (elemtype == opcintype)
	{
		*orderproc = *index_getprocinfo(rel, skey->sk_attno, BTORDER_PROC);
		if (sortprocp)
			*sortprocp = orderproc;
		return;
	}

	/*
	 * Cross-type case: search the opfamily for an ORDER proc whose left hand
	 * arg is the opclass input type and whose right hand arg is the array
	 * element type (binary searches compare an index tuple's attribute value
	 * against array elements, in that argument order).
	 *
	 * This lookup can fail given an incomplete opfamily, but only in cases
	 * where it's quite likely that _bt_first would fail in just the same way
	 * (had we not failed before it could).
	 */
	btproc = get_opfamily_proc(opfamily, opcintype, elemtype, BTORDER_PROC);
	if (!RegProcedureIsValid(btproc))
		elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
			 BTORDER_PROC, opcintype, elemtype, skey->sk_attno,
			 RelationGetRelationName(rel));
	fmgr_info_cxt(btproc, orderproc, so->arrayContext);

	/* Without an array to sort, caller needs nothing further from us */
	if (!sortprocp)
		return;

	/*
	 * Sorting the array requires a same-type proc, with the element type on
	 * both sides.  Again, this lookup could fail with an incomplete
	 * opfamily, though that seems quite unlikely to happen in practice.
	 */
	btproc = get_opfamily_proc(opfamily, elemtype, elemtype, BTORDER_PROC);
	if (!RegProcedureIsValid(btproc))
		elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
			 BTORDER_PROC, elemtype, elemtype,
			 skey->sk_attno, RelationGetRelationName(rel));
	fmgr_info_cxt(btproc, *sortprocp, so->arrayContext);
}
/*
 * _bt_sort_array_elements() -- sort and de-dup array elements
 *
 * Sorts elems[] in-place (in descending order when reverse is true), then
 * removes adjacent duplicates.  skey identifies the index column whose
 * opfamily supplies the comparison semantics; sortproc must be a matching
 * same-type ORDER proc.  Returns the number of distinct elements remaining.
 */
static int
_bt_sort_array_elements(ScanKey skey, FmgrInfo *sortproc, bool reverse,
						Datum *elems, int nelems)
{
	BTSortArrayContext sortcxt;

	/* Arrays with fewer than two elements are trivially sorted and unique */
	if (nelems <= 1)
		return nelems;

	sortcxt.sortproc = sortproc;
	sortcxt.collation = skey->sk_collation;
	sortcxt.reverse = reverse;

	/* Put elements into index column order, then squeeze out duplicates */
	qsort_arg(elems, nelems, sizeof(Datum),
			  _bt_compare_array_elements, &sortcxt);
	return qunique_arg(elems, nelems, sizeof(Datum),
					   _bt_compare_array_elements, &sortcxt);
}
/*
* _bt_merge_arrays() -- merge next array's elements into an original array
*
* Called when preprocessing encounters a pair of array equality scan keys,
* both against the same index attribute (during initial array preprocessing).
* Merging reorganizes caller's original array (the left hand arg) in-place,
* without ever copying elements from one array into the other. (Mixing the
* elements together like this would be wrong, since they don't necessarily
* use the same underlying element type, despite all the other similarities.)
*
* Both arrays must have already been sorted and deduplicated by calling
* _bt_sort_array_elements. sortproc is the same-type ORDER proc that was
* just used to sort and deduplicate caller's "next" array. We'll usually be
* able to reuse that order PROC to merge the arrays together now. If not,
* then we'll perform a separate ORDER proc lookup.
*
* If the opfamily doesn't supply a complete set of cross-type ORDER procs we
* may not be able to determine which elements are contradictory. If we have
* the required ORDER proc then we return true (and validly set *nelems_orig),
* guaranteeing that at least the next array can be considered redundant. We
* return false if the required comparisons cannot be made (caller must keep
* both arrays when this happens).
*/
static bool
_bt_merge_arrays(IndexScanDesc scan, ScanKey skey, FmgrInfo *sortproc,
				 bool reverse, Oid origelemtype, Oid nextelemtype,
				 Datum *elems_orig, int *nelems_orig,
				 Datum *elems_next, int nelems_next)
{
	Relation	rel = scan->indexRelation;
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	BTSortArrayContext cxt;
	int			nelems_orig_start = *nelems_orig,
				nelems_orig_merged = 0;
	FmgrInfo   *mergeproc = sortproc;	/* reuse same-type proc by default */
	FmgrInfo	crosstypeproc;

	Assert(skey->sk_strategy == BTEqualStrategyNumber);
	Assert(OidIsValid(origelemtype) && OidIsValid(nextelemtype));

	if (origelemtype != nextelemtype)
	{
		RegProcedure cmp_proc;

		/*
		 * Cross-array-element-type merging is required, so can't just reuse
		 * sortproc when merging
		 */
		cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
									 origelemtype, nextelemtype, BTORDER_PROC);
		if (!RegProcedureIsValid(cmp_proc))
		{
			/* Can't make the required comparisons */
			return false;
		}

		/* We have all we need to determine redundancy/contradictoriness */
		mergeproc = &crosstypeproc;
		/* Lookup is cached in the scan's array context for its lifetime */
		fmgr_info_cxt(cmp_proc, mergeproc, so->arrayContext);
	}

	cxt.sortproc = mergeproc;
	cxt.collation = skey->sk_collation;
	cxt.reverse = reverse;

	/*
	 * Intersect the two sorted, deduplicated arrays by advancing a cursor
	 * through each in lockstep.  Elements present in both arrays are kept,
	 * compacted in place at the front of elems_orig; everything else is
	 * discarded.
	 */
	for (int i = 0, j = 0; i < nelems_orig_start && j < nelems_next;)
	{
		Datum	   *oelem = elems_orig + i,
				   *nelem = elems_next + j;
		int			res = _bt_compare_array_elements(oelem, nelem, &cxt);

		if (res == 0)
		{
			/* Common element: keep it, advance both cursors */
			elems_orig[nelems_orig_merged++] = *oelem;
			i++;
			j++;
		}
		else if (res < 0)
			i++;
		else					/* res > 0 */
			j++;
	}

	/* Report the size of the merged (intersected) original array */
	*nelems_orig = nelems_orig_merged;

	return true;
}
/*
* qsort_arg comparator for sorting array elements
*/
static int
_bt_compare_array_elements(const void *a, const void *b, void *arg)
{
	BTSortArrayContext *cxt = (BTSortArrayContext *) arg;
	Datum		lhs = *((const Datum *) a);
	Datum		rhs = *((const Datum *) b);
	int32		result;

	/* Delegate the actual comparison to the opfamily's ORDER proc */
	result = DatumGetInt32(FunctionCall2Coll(cxt->sortproc,
											 cxt->collation,
											 lhs, rhs));

	/* Flip the sign when a descending sort was requested */
	if (cxt->reverse)
		INVERT_COMPARE_RESULT(result);

	return result;
}
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new orders. To determine which orders exist, run
get_all_orders.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201206')
# Set advertiser (company), salesperson, and trafficker to assign to each order.
advertiser_id = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
salesperson_id = 'INSERT_SALESPERSON_ID_HERE'
trafficker_id = 'INSERT_TRAFFICKER_ID_HERE'
# Create order objects.
orders = []
for i in xrange(5):
order = {
'name': 'Order #%s' % Utils.GetUniqueName(),
'advertiserId': advertiser_id,
'salespersonId': salesperson_id,
'traffickerId': trafficker_id
}
orders.append(order)
# Add orders.
orders = order_service.CreateOrders(orders)
# Display results.
for order in orders:
print ('Order with id \'%s\' and name \'%s\' was created.'
% (order['id'], order['name'])) | unknown | codeparrot/codeparrot-clean | ||
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
# Each model below has three tables: a *_cls table mapping every possible
# byte value (0x00-0xff) to a character class, a *_st table holding the
# state-machine transition entries, and a *CharLenTable giving the character
# length associated with each byte class.
BIG5_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as legal value
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,1,  # 78 - 7f
    4,4,4,4,4,4,4,4,  # 80 - 87
    4,4,4,4,4,4,4,4,  # 88 - 8f
    4,4,4,4,4,4,4,4,  # 90 - 97
    4,4,4,4,4,4,4,4,  # 98 - 9f
    4,3,3,3,3,3,3,3,  # a0 - a7
    3,3,3,3,3,3,3,3,  # a8 - af
    3,3,3,3,3,3,3,3,  # b0 - b7
    3,3,3,3,3,3,3,3,  # b8 - bf
    3,3,3,3,3,3,3,3,  # c0 - c7
    3,3,3,3,3,3,3,3,  # c8 - cf
    3,3,3,3,3,3,3,3,  # d0 - d7
    3,3,3,3,3,3,3,3,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,3,3,3,  # e8 - ef
    3,3,3,3,3,3,3,3,  # f0 - f7
    3,3,3,3,3,3,3,0   # f8 - ff
)

# State-transition entries for the Big5 detector.
BIG5_st = (
    eError,eStart,eStart,     3,eError,eError,eError,eError,#00-07
    eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)

# Character length (in bytes) for each byte class.
Big5CharLenTable = (0, 1, 1, 2, 0)

Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# CP949
# Korean CP949 model: byte-class table, state-transition table, and
# per-class character lengths.
CP949_cls = (
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0,  # 00 - 0f
    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1,  # 10 - 1f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 20 - 2f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 30 - 3f
    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,  # 40 - 4f
    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 50 - 5f
    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,  # 60 - 6f
    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 70 - 7f
    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 80 - 8f
    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 90 - 9f
    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8,  # a0 - af
    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,  # b0 - bf
    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2,  # c0 - cf
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # d0 - df
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # e0 - ef
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0,  # f0 - ff
)

CP949_st = (
#cls=    0      1      2      3      4      5      6      7      8      9  # previous state =
    eError,eStart,     3,eError,eStart,eStart,     4,     5,eError,     6, # eStart
    eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
    eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
    eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)

# Character length (in bytes) for each byte class.
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)

CP949SMModel = {'classTable': CP949_cls,
                'classFactor': 10,
                'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable,
                'name': 'CP949'}
# EUC-JP
# Japanese EUC-JP model: byte-class table, state-transition table, and
# per-class character lengths.
EUCJP_cls = (
    4,4,4,4,4,4,4,4,  # 00 - 07
    4,4,4,4,4,4,5,5,  # 08 - 0f
    4,4,4,4,4,4,4,4,  # 10 - 17
    4,4,4,5,4,4,4,4,  # 18 - 1f
    4,4,4,4,4,4,4,4,  # 20 - 27
    4,4,4,4,4,4,4,4,  # 28 - 2f
    4,4,4,4,4,4,4,4,  # 30 - 37
    4,4,4,4,4,4,4,4,  # 38 - 3f
    4,4,4,4,4,4,4,4,  # 40 - 47
    4,4,4,4,4,4,4,4,  # 48 - 4f
    4,4,4,4,4,4,4,4,  # 50 - 57
    4,4,4,4,4,4,4,4,  # 58 - 5f
    4,4,4,4,4,4,4,4,  # 60 - 67
    4,4,4,4,4,4,4,4,  # 68 - 6f
    4,4,4,4,4,4,4,4,  # 70 - 77
    4,4,4,4,4,4,4,4,  # 78 - 7f
    5,5,5,5,5,5,5,5,  # 80 - 87
    5,5,5,5,5,5,1,3,  # 88 - 8f
    5,5,5,5,5,5,5,5,  # 90 - 97
    5,5,5,5,5,5,5,5,  # 98 - 9f
    5,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    0,0,0,0,0,0,0,0,  # e0 - e7
    0,0,0,0,0,0,0,0,  # e8 - ef
    0,0,0,0,0,0,0,0,  # f0 - f7
    0,0,0,0,0,0,0,5   # f8 - ff
)

EUCJP_st = (
         3,     4,     3,     5,eStart,eError,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
    eError,eError,eStart,eError,eError,eError,     3,eError,#18-1f
         3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)

# Character length (in bytes) for each byte class.
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)

EUCJPSMModel = {'classTable': EUCJP_cls,
                'classFactor': 6,
                'stateTable': EUCJP_st,
                'charLenTable': EUCJPCharLenTable,
                'name': 'EUC-JP'}
# EUC-KR
# Korean EUC-KR model: byte-class table, state-transition table, and
# per-class character lengths.
EUCKR_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    1,1,1,1,1,1,1,1,  # 40 - 47
    1,1,1,1,1,1,1,1,  # 48 - 4f
    1,1,1,1,1,1,1,1,  # 50 - 57
    1,1,1,1,1,1,1,1,  # 58 - 5f
    1,1,1,1,1,1,1,1,  # 60 - 67
    1,1,1,1,1,1,1,1,  # 68 - 6f
    1,1,1,1,1,1,1,1,  # 70 - 77
    1,1,1,1,1,1,1,1,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,0,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,3,3,3,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,3,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    2,2,2,2,2,2,2,2,  # e0 - e7
    2,2,2,2,2,2,2,2,  # e8 - ef
    2,2,2,2,2,2,2,2,  # f0 - f7
    2,2,2,2,2,2,2,0   # f8 - ff
)

EUCKR_st = (
    eError,eStart,     3,eError,eError,eError,eError,eError,#00-07
    eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)

# Character length (in bytes) for each byte class.
EUCKRCharLenTable = (0, 1, 2, 0)

EUCKRSMModel = {'classTable': EUCKR_cls,
                'classFactor': 4,
                'stateTable': EUCKR_st,
                'charLenTable': EUCKRCharLenTable,
                'name': 'EUC-KR'}
# EUC-TW
# Traditional Chinese EUC-TW model: byte-class table, state-transition
# table, and per-class character lengths.
EUCTW_cls = (
    2,2,2,2,2,2,2,2,  # 00 - 07
    2,2,2,2,2,2,0,0,  # 08 - 0f
    2,2,2,2,2,2,2,2,  # 10 - 17
    2,2,2,0,2,2,2,2,  # 18 - 1f
    2,2,2,2,2,2,2,2,  # 20 - 27
    2,2,2,2,2,2,2,2,  # 28 - 2f
    2,2,2,2,2,2,2,2,  # 30 - 37
    2,2,2,2,2,2,2,2,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,2,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,6,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,3,4,4,4,4,4,4,  # a0 - a7
    5,5,1,1,1,1,1,1,  # a8 - af
    1,1,1,1,1,1,1,1,  # b0 - b7
    1,1,1,1,1,1,1,1,  # b8 - bf
    1,1,3,1,3,3,3,3,  # c0 - c7
    3,3,3,3,3,3,3,3,  # c8 - cf
    3,3,3,3,3,3,3,3,  # d0 - d7
    3,3,3,3,3,3,3,3,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,3,3,3,  # e8 - ef
    3,3,3,3,3,3,3,3,  # f0 - f7
    3,3,3,3,3,3,3,0   # f8 - ff
)

EUCTW_st = (
    eError,eError,eStart,     3,     3,     3,     4,eError,#00-07
    eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
    eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
         5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
    eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)

# Character length (in bytes) for each byte class.
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)

EUCTWSMModel = {'classTable': EUCTW_cls,
                'classFactor': 7,
                'stateTable': EUCTW_st,
                'charLenTable': EUCTWCharLenTable,
                'name': 'x-euc-tw'}
# GB2312
# Simplified Chinese GB2312 model: byte-class table, state-transition
# table, and per-class character lengths.
GB2312_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    3,3,3,3,3,3,3,3,  # 30 - 37
    3,3,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,4,  # 78 - 7f
    5,6,6,6,6,6,6,6,  # 80 - 87
    6,6,6,6,6,6,6,6,  # 88 - 8f
    6,6,6,6,6,6,6,6,  # 90 - 97
    6,6,6,6,6,6,6,6,  # 98 - 9f
    6,6,6,6,6,6,6,6,  # a0 - a7
    6,6,6,6,6,6,6,6,  # a8 - af
    6,6,6,6,6,6,6,6,  # b0 - b7
    6,6,6,6,6,6,6,6,  # b8 - bf
    6,6,6,6,6,6,6,6,  # c0 - c7
    6,6,6,6,6,6,6,6,  # c8 - cf
    6,6,6,6,6,6,6,6,  # d0 - d7
    6,6,6,6,6,6,6,6,  # d8 - df
    6,6,6,6,6,6,6,6,  # e0 - e7
    6,6,6,6,6,6,6,6,  # e8 - ef
    6,6,6,6,6,6,6,6,  # f0 - f7
    6,6,6,6,6,6,6,0   # f8 - ff
)

GB2312_st = (
    eError,eStart,eStart,eStart,eStart,eStart,     3,eError,#00-07
    eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
         4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
    eError,eError,     5,eError,eError,eError,eItsMe,eError,#20-27
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)

# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)

GB2312SMModel = {'classTable': GB2312_cls,
                 'classFactor': 7,
                 'stateTable': GB2312_st,
                 'charLenTable': GB2312CharLenTable,
                 'name': 'GB2312'}
# Shift_JIS
# Japanese Shift_JIS model: byte-class table, state-transition table, and
# per-class character lengths.
SJIS_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,1,  # 78 - 7f
    3,3,3,3,3,3,3,3,  # 80 - 87
    3,3,3,3,3,3,3,3,  # 88 - 8f
    3,3,3,3,3,3,3,3,  # 90 - 97
    3,3,3,3,3,3,3,3,  # 98 - 9f
    # 0xa0 is illegal in sjis encoding, but some pages do contain such
    # bytes. We need to be more forgiving here.
    2,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,4,4,4,  # e8 - ef
    4,4,4,4,4,4,4,4,  # f0 - f7
    4,4,4,4,4,0,0,0   # f8 - ff
)

SJIS_st = (
    eError,eStart,eStart,     3,eError,eError,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)

# Character length (in bytes) for each byte class.
SJISCharLenTable = (0, 1, 1, 2, 0, 0)

SJISSMModel = {'classTable': SJIS_cls,
               'classFactor': 6,
               'stateTable': SJIS_st,
               'charLenTable': SJISCharLenTable,
               'name': 'Shift_JIS'}
# UCS2-BE
# UTF-16 big-endian model: byte-class table, state-transition table, and
# per-class character lengths.
UCS2BE_cls = (
    0,0,0,0,0,0,0,0,  # 00 - 07
    0,0,1,0,0,2,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,3,0,0,0,0,  # 18 - 1f
    0,0,0,0,0,0,0,0,  # 20 - 27
    0,3,3,3,3,3,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,0,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,0,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,0,0,0,0,0,0,0,  # a0 - a7
    0,0,0,0,0,0,0,0,  # a8 - af
    0,0,0,0,0,0,0,0,  # b0 - b7
    0,0,0,0,0,0,0,0,  # b8 - bf
    0,0,0,0,0,0,0,0,  # c0 - c7
    0,0,0,0,0,0,0,0,  # c8 - cf
    0,0,0,0,0,0,0,0,  # d0 - d7
    0,0,0,0,0,0,0,0,  # d8 - df
    0,0,0,0,0,0,0,0,  # e0 - e7
    0,0,0,0,0,0,0,0,  # e8 - ef
    0,0,0,0,0,0,0,0,  # f0 - f7
    0,0,0,0,0,0,4,5   # f8 - ff
)

UCS2BE_st = (
         5,     7,     7,eError,     4,     3,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,     6,     6,     6,     6,eError,eError,#10-17
         6,     6,     6,     6,     6,eItsMe,     6,     6,#18-1f
         6,     6,     6,     6,     5,     7,     7,eError,#20-27
         5,     8,     6,     6,eError,     6,     6,     6,#28-2f
         6,     6,     6,     6,eError,eError,eStart,eStart #30-37
)

# Character length (in bytes) for each byte class.
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)

UCS2BESMModel = {'classTable': UCS2BE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2BE_st,
                 'charLenTable': UCS2BECharLenTable,
                 'name': 'UTF-16BE'}
# UCS2-LE
# UTF-16 little-endian model: byte-class table, state-transition table,
# and per-class character lengths.  The class table is identical to the
# big-endian one; only the state transitions differ.
UCS2LE_cls = (
    0,0,0,0,0,0,0,0,  # 00 - 07
    0,0,1,0,0,2,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,3,0,0,0,0,  # 18 - 1f
    0,0,0,0,0,0,0,0,  # 20 - 27
    0,3,3,3,3,3,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,0,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,0,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,0,0,0,0,0,0,0,  # a0 - a7
    0,0,0,0,0,0,0,0,  # a8 - af
    0,0,0,0,0,0,0,0,  # b0 - b7
    0,0,0,0,0,0,0,0,  # b8 - bf
    0,0,0,0,0,0,0,0,  # c0 - c7
    0,0,0,0,0,0,0,0,  # c8 - cf
    0,0,0,0,0,0,0,0,  # d0 - d7
    0,0,0,0,0,0,0,0,  # d8 - df
    0,0,0,0,0,0,0,0,  # e0 - e7
    0,0,0,0,0,0,0,0,  # e8 - ef
    0,0,0,0,0,0,0,0,  # f0 - f7
    0,0,0,0,0,0,4,5   # f8 - ff
)

UCS2LE_st = (
         6,     6,     7,     6,     4,     3,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,     5,     5,     5,eError,eItsMe,eError,#10-17
         5,     5,     5,eError,     5,eError,     6,     6,#18-1f
         7,     6,     8,     8,     5,     5,     5,eError,#20-27
         5,     5,     5,eError,eError,eError,     5,     5,#28-2f
         5,     5,     5,eError,     5,eError,eStart,eStart #30-37
)

# Character length (in bytes) for each byte class.
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)

UCS2LESMModel = {'classTable': UCS2LE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2LE_st,
                 'charLenTable': UCS2LECharLenTable,
                 'name': 'UTF-16LE'}
# UTF-8
# UTF-8 model: byte-class table, state-transition table, and per-class
# character lengths.
UTF8_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as a legal value
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    1,1,1,1,1,1,1,1,  # 40 - 47
    1,1,1,1,1,1,1,1,  # 48 - 4f
    1,1,1,1,1,1,1,1,  # 50 - 57
    1,1,1,1,1,1,1,1,  # 58 - 5f
    1,1,1,1,1,1,1,1,  # 60 - 67
    1,1,1,1,1,1,1,1,  # 68 - 6f
    1,1,1,1,1,1,1,1,  # 70 - 77
    1,1,1,1,1,1,1,1,  # 78 - 7f
    2,2,2,2,3,3,3,3,  # 80 - 87
    4,4,4,4,4,4,4,4,  # 88 - 8f
    4,4,4,4,4,4,4,4,  # 90 - 97
    4,4,4,4,4,4,4,4,  # 98 - 9f
    5,5,5,5,5,5,5,5,  # a0 - a7
    5,5,5,5,5,5,5,5,  # a8 - af
    5,5,5,5,5,5,5,5,  # b0 - b7
    5,5,5,5,5,5,5,5,  # b8 - bf
    0,0,6,6,6,6,6,6,  # c0 - c7
    6,6,6,6,6,6,6,6,  # c8 - cf
    6,6,6,6,6,6,6,6,  # d0 - d7
    6,6,6,6,6,6,6,6,  # d8 - df
    7,8,8,8,8,8,8,8,  # e0 - e7
    8,8,8,8,8,9,8,8,  # e8 - ef
    10,11,11,11,11,11,11,11,  # f0 - f7
    12,13,13,13,14,15,0,0     # f8 - ff
)

UTF8_st = (
    eError,eStart,eError,eError,eError,eError,    12,    10,#00-07
         9,    11,     8,     7,     6,     5,     4,     3,#08-0f
    eError,eError,eError,eError,eError,eError,eError,eError,#10-17
    eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
    eError,eError,     5,     5,     5,     5,eError,eError,#30-37
    eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
    eError,eError,eError,     5,     5,     5,eError,eError,#40-47
    eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
    eError,eError,     7,     7,     7,     7,eError,eError,#50-57
    eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
    eError,eError,eError,eError,     7,     7,eError,eError,#60-67
    eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
    eError,eError,     9,     9,     9,     9,eError,eError,#70-77
    eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
    eError,eError,eError,eError,eError,     9,eError,eError,#80-87
    eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
    eError,eError,    12,    12,    12,    12,eError,eError,#90-97
    eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
    eError,eError,eError,eError,eError,    12,eError,eError,#a0-a7
    eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
    eError,eError,    12,    12,    12,eError,eError,eError,#b0-b7
    eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
    eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
    eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)

# Character length (in bytes) for each byte class.
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)

UTF8SMModel = {'classTable': UTF8_cls,
               'classFactor': 16,
               'stateTable': UTF8_st,
               'charLenTable': UTF8CharLenTable,
               'name': 'UTF-8'}
# flake8: noqa | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core import mail
from django.core.urlresolvers import reverse
import json
from django.contrib.auth.models import User
class UserRelatedTests(TestCase):
    """Integration tests for user registration/activation, login, password
    change and profile editing views."""

    def create_sample_user(self):
        # Helper: create an active user 'test' with usable password 'fooo'.
        instance = User.objects.create(
            username = 'test',
            first_name = 'Foo',
            last_name = 'Bar',
            email = 'foo@bar.com',
            is_active = True,
            is_staff = False,
            is_superuser = False
        )
        instance.set_password("fooo")
        instance.save()
        return instance

    def tearDown(self):
        # Reset captured outgoing mail, the test-client session, and all
        # users between tests.
        mail.outbox = []
        self.client.logout()
        User.objects.all().delete()

    def test_simple_register_and_activate(self):
        """Register a user, then activate the account via the profile token
        and verify the user becomes active and the token is cleared."""
        register_params = {
            'username': 'test',
            'first_name': 'Foo',
            'last_name': 'Bar',
            'email': 'foo@bar.com',
            'password': '123123',
            'password2': '123123',
        }
        register_url = reverse('register')
        response = self.client.post(register_url, register_params)
        self.assertEqual(response.status_code, 302)
        user = User.objects.get(username='test')
        self.assertTrue(user.get_profile())
        self.assertTrue(user.has_usable_password())
        # Account starts inactive until the activation link is visited.
        self.assertFalse(user.is_active)
        # Registration should have sent exactly one (activation) email.
        self.assertEqual(len(mail.outbox), 1)
        activate_url = reverse('activate', args=[user.get_profile().token])
        response = self.client.get(activate_url, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.redirect_chain, [('http://testserver/login/', 302)])
        user = User.objects.get(username='test')
        self.assertTrue(user.is_active)
        # Activation consumes the one-time token.
        self.assertEqual(user.get_profile().token, None)

    def test_profile_view(self):
        """The profile page is reachable for a logged-in user."""
        user = self.create_sample_user()
        url = reverse("profile")
        ok = self.client.login(username='test', password='fooo')
        self.assertTrue(ok)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_login(self):
        """AJAX login returns a JSON payload with valid=True."""
        instance = self.create_sample_user()
        post_params = {
            'username': instance.username,
            'password': 'fooo',
        }
        login_url = reverse('login')
        response = self.client.post(login_url, post_params,
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)
        jdata = json.loads(response.content)
        self.assertTrue(jdata['valid'])

    def test_password_change(self):
        """Password change redirects anonymous users to login; after login it
        redirects back to the profile page."""
        user = self.create_sample_user()
        change_password_url = reverse('profile-password')
        post_params = {
            'password': 'fofo',
            'password2': 'fofo',
        }
        # Not logged in yet: expect a redirect to the login page.
        response = self.client.post(change_password_url, post_params, follow=True)
        self.assertEqual(response.redirect_chain, [('http://testserver/login/', 302)])
        ok = self.client.login(username='test', password='fooo')
        self.assertTrue(ok)
        response = self.client.post(change_password_url, post_params, follow=True)
        self.assertEqual(response.redirect_chain, [('http://testserver/profile/', 302)])

    def test_profile_form(self):
        """Profile edit requires login and persists the submitted fields."""
        user = self.create_sample_user()
        post_params = {
            'first_name': 'Caco',
        }
        profile_url = reverse('profile')
        # Anonymous POST is redirected to login and must not modify the user.
        response = self.client.post(profile_url, post_params, follow=True)
        self.assertEqual(response.redirect_chain, [('http://testserver/login/', 302)])
        ok = self.client.login(username='test', password='fooo')
        self.assertTrue(ok)
        response = self.client.post(profile_url, post_params, follow=True)
        self.assertEqual(response.redirect_chain, [('http://testserver/profile/', 302)])
        user = User.objects.get(pk=user.id)
        self.assertEqual(user.first_name, 'Caco')
        # NOTE(review): last_name is cleared by the form submission here —
        # presumably the form treats omitted fields as empty; confirm intended.
        self.assertEqual(user.last_name, '')
        self.assertEqual(user.username, 'test')
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRelation.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '07/10/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsRelation,
QgsGeometry,
QgsPoint,
QgsMapLayerRegistry
)
from qgis.testing import start_app, unittest
start_app()
def createReferencingLayer():
    """Create an in-memory point layer with two features whose
    'foreignkey' attribute (123 for both) points at the referenced layer."""
    layer = QgsVectorLayer("Point?field=fldtxt:string&field=foreignkey:integer",
                           "referencinglayer", "memory")
    pr = layer.dataProvider()
    f1 = QgsFeature()
    f1.setFields(layer.pendingFields())
    f1.setAttributes(["test1", 123])
    f1.setGeometry(QgsGeometry.fromPoint(QgsPoint(100, 200)))
    f2 = QgsFeature()
    f2.setFields(layer.pendingFields())
    f2.setAttributes(["test2", 123])
    f2.setGeometry(QgsGeometry.fromPoint(QgsPoint(101, 201)))
    assert pr.addFeatures([f1, f2])
    return layer
def createReferencedLayer():
    """Create an in-memory point layer with three features; the 'y' field
    (123, 456, 789) is the key referenced by the referencing layer."""
    layer = QgsVectorLayer(
        "Point?field=x:string&field=y:integer&field=z:integer",
        "referencedlayer", "memory")
    pr = layer.dataProvider()
    f1 = QgsFeature()
    f1.setFields(layer.pendingFields())
    f1.setAttributes(["foo", 123, 321])
    f1.setGeometry(QgsGeometry.fromPoint(QgsPoint(1, 1)))
    f2 = QgsFeature()
    f2.setFields(layer.pendingFields())
    f2.setAttributes(["bar", 456, 654])
    f2.setGeometry(QgsGeometry.fromPoint(QgsPoint(2, 2)))
    f3 = QgsFeature()
    f3.setFields(layer.pendingFields())
    f3.setAttributes(["foobar", 789, 554])
    f3.setGeometry(QgsGeometry.fromPoint(QgsPoint(2, 3)))
    assert pr.addFeatures([f1, f2, f3])
    return layer
def formatAttributes(attrs):
    """Return a printable repr of *attrs* with every value coerced to str."""
    return repr(list(map(str, attrs)))
class TestQgsRelation(unittest.TestCase):
    """Unit tests for QgsRelation configuration and feature lookups."""

    def setUp(self):
        # Fresh referenced/referencing layer pair registered for each test.
        self.referencedLayer = createReferencedLayer()
        self.referencingLayer = createReferencingLayer()
        QgsMapLayerRegistry.instance().addMapLayers([self.referencedLayer, self.referencingLayer])

    def tearDown(self):
        QgsMapLayerRegistry.instance().removeAllMapLayers()

    def test_isValid(self):
        """A relation only becomes valid once id, name, both layers and at
        least one field pair are all set."""
        rel = QgsRelation()
        assert not rel.isValid()
        rel.setRelationId('rel1')
        assert not rel.isValid()
        rel.setRelationName('Relation Number One')
        assert not rel.isValid()
        rel.setReferencingLayer(self.referencingLayer.id())
        assert not rel.isValid()
        rel.setReferencedLayer(self.referencedLayer.id())
        assert not rel.isValid()
        rel.addFieldPair('foreignkey', 'y')
        assert rel.isValid()

    def test_getRelatedFeatures(self):
        """Fetching children of a referenced feature yields the expected
        filter expression and both matching referencing features."""
        rel = QgsRelation()
        rel.setRelationId('rel1')
        rel.setRelationName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')
        feat = next(self.referencedLayer.getFeatures())
        self.assertEqual(rel.getRelatedFeaturesFilter(feat), '"foreignkey" = 123')
        it = rel.getRelatedFeatures(feat)
        assert [a.attributes() for a in it] == [['test1', 123], ['test2', 123]]

    def test_getReferencedFeature(self):
        """A referencing feature resolves to its (valid) parent feature."""
        rel = QgsRelation()
        rel.setRelationId('rel1')
        rel.setRelationName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')
        feat = next(self.referencingLayer.getFeatures())
        f = rel.getReferencedFeature(feat)
        assert f.isValid()
        assert f[0] == 'foo'

    def test_fieldPairs(self):
        """fieldPairs() reports the configured referencing->referenced map."""
        rel = QgsRelation()
        rel.setRelationId('rel1')
        rel.setRelationName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')
        assert (rel.fieldPairs() == {'foreignkey': 'y'})
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) Stephane Wirtel
# Copyright (C) 2011 Nicolas Vanhoren
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################
from .main import * | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
from django.db.models.signals import post_save, pre_save
from django.test import TestCase
from .models import Account, Employee, Person, Profile, ProxyEmployee
class UpdateOnlyFieldsTests(TestCase):
def test_update_fields_basic(self):
    """save(update_fields=['name']) persists only the name; the modified
    gender must not be written to the database."""
    s = Person.objects.create(name='Sara', gender='F')
    self.assertEqual(s.gender, 'F')
    s.gender = 'M'
    s.name = 'Ian'
    s.save(update_fields=['name'])
    s = Person.objects.get(pk=s.pk)
    # gender keeps its stored value; only name was updated.
    self.assertEqual(s.gender, 'F')
    self.assertEqual(s.name, 'Ian')
def test_update_fields_deferred(self):
    """Saving an instance loaded with defer() writes the assigned fields
    in a single query (deferred fields are not re-fetched)."""
    s = Person.objects.create(name='Sara', gender='F', pid=22)
    self.assertEqual(s.gender, 'F')
    s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
    s1.name = "Emily"
    s1.gender = "M"
    with self.assertNumQueries(1):
        s1.save()
    s2 = Person.objects.get(pk=s1.pk)
    self.assertEqual(s2.name, "Emily")
    self.assertEqual(s2.gender, "M")
def test_update_fields_only_1(self):
    """Saving an instance loaded with only() persists fields assigned
    after loading, using a single query."""
    s = Person.objects.create(name='Sara', gender='F')
    self.assertEqual(s.gender, 'F')
    s1 = Person.objects.only('name').get(pk=s.pk)
    s1.name = "Emily"
    s1.gender = "M"
    with self.assertNumQueries(1):
        s1.save()
    s2 = Person.objects.get(pk=s1.pk)
    self.assertEqual(s2.name, "Emily")
    self.assertEqual(s2.gender, "M")
def test_update_fields_only_2(self):
    """update_fields restricted to a deferred, unmodified field ('pid')
    leaves the locally-changed name/gender unsaved."""
    s = Person.objects.create(name='Sara', gender='F', pid=22)
    self.assertEqual(s.gender, 'F')
    s1 = Person.objects.only('name').get(pk=s.pk)
    s1.name = "Emily"
    s1.gender = "M"
    # NOTE(review): two queries expected here — presumably one to load the
    # deferred 'pid' plus the UPDATE itself; confirm against Django docs.
    with self.assertNumQueries(2):
        s1.save(update_fields=['pid'])
    s2 = Person.objects.get(pk=s1.pk)
    # name/gender keep their original stored values.
    self.assertEqual(s2.name, "Sara")
    self.assertEqual(s2.gender, "F")
def test_update_fields_only_repeated(self):
    """Deferred-field bookkeeping is per-instance, not per deferred
    class: a second instance saves cleanly in one query too."""
    s = Person.objects.create(name='Sara', gender='F')
    self.assertEqual(s.gender, 'F')
    s1 = Person.objects.only('name').get(pk=s.pk)
    s1.gender = 'M'
    with self.assertNumQueries(1):
        s1.save()
    # Test that the deferred class does not remember that gender was
    # set, instead the instance should remember this.
    s1 = Person.objects.only('name').get(pk=s.pk)
    with self.assertNumQueries(1):
        s1.save()
def test_update_fields_inheritance_defer(self):
    """Saving a deferred-loaded inherited model writes the changed
    field in a single query."""
    profile_boss = Profile.objects.create(name='Boss', salary=3000)
    e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
    e1 = Employee.objects.only('name').get(pk=e1.pk)
    e1.name = 'Linda'
    with self.assertNumQueries(1):
        e1.save()
    self.assertEqual(Employee.objects.get(pk=e1.pk).name, 'Linda')
def test_update_fields_fk_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile').get(pk=e1.pk)
e1.profile = profile_receptionist
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
e1.profile_id = profile_boss.pk
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
def test_select_related_only_interaction(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
profile_boss.name = 'Clerk'
profile_boss.salary = 1000
profile_boss.save()
# The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
# overwritten.
with self.assertNumQueries(1):
e1.profile.save()
reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
self.assertEqual(reloaded_profile.name, profile_boss.name)
self.assertEqual(reloaded_profile.salary, 3000)
def test_update_fields_m2m(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
a1 = Account.objects.create(num=1)
a2 = Account.objects.create(num=2)
e1.accounts.set([a1, a2])
with self.assertRaises(ValueError):
e1.save(update_fields=['accounts'])
def test_update_fields_inheritance(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
with self.assertNumQueries(1):
e3.profile = profile_boss
e3.save(update_fields=['profile_id'])
e4 = Employee.objects.get(pk=e3.pk)
self.assertEqual(e4.profile, profile_boss)
self.assertEqual(e4.profile_id, profile_boss.pk)
def test_update_fields_inheritance_with_proxy_model(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = ProxyEmployee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
def test_update_fields_signals(self):
p = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
p.save(update_fields=['name'])
self.assertEqual(len(pre_save_data), 1)
self.assertEqual(len(pre_save_data[0]), 1)
self.assertIn('name', pre_save_data[0])
self.assertEqual(len(post_save_data), 1)
self.assertEqual(len(post_save_data[0]), 1)
self.assertIn('name', post_save_data[0])
pre_save.disconnect(pre_save_receiver)
post_save.disconnect(post_save_receiver)
def test_update_fields_incorrect_params(self):
s = Person.objects.create(name='Sara', gender='F')
with self.assertRaises(ValueError):
s.save(update_fields=['first_name'])
with self.assertRaises(ValueError):
s.save(update_fields="name")
def test_empty_update_fields(self):
s = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
# Save is skipped.
with self.assertNumQueries(0):
s.save(update_fields=[])
# Signals were skipped, too...
self.assertEqual(len(pre_save_data), 0)
self.assertEqual(len(post_save_data), 0)
pre_save.disconnect(pre_save_receiver)
post_save.disconnect(post_save_receiver)
def test_num_queries_inheritance(self):
s = Employee.objects.create(name='Sara', gender='F')
s.employee_num = 1
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['employee_num'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.employee_num, 1)
self.assertEqual(s.name, 'Sara')
s.employee_num = 2
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['name'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.name, 'Emily')
self.assertEqual(s.employee_num, 1)
# A little sanity check that we actually did updates...
self.assertEqual(Employee.objects.count(), 1)
self.assertEqual(Person.objects.count(), 1)
with self.assertNumQueries(2):
s.save(update_fields=['name', 'employee_num']) | unknown | codeparrot/codeparrot-clean | ||
"""Taken and modified from the dbsettings project.
http://code.google.com/p/django-values/
"""
from decimal import Decimal
from django import forms
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.cache import cache
from django.utils import simplejson
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.files import storage
from askbot.deps.livesettings.models import find_setting, LongSetting, Setting, SettingNotSet
from askbot.deps.livesettings.overrides import get_overrides
from askbot.deps.livesettings.utils import load_module, is_string_like, is_list_or_tuple
from askbot.deps.livesettings.widgets import ImageInput
import datetime
import logging
import signals
import os
__all__ = ['BASE_GROUP', 'BASE_SUPER_GROUP', 'ConfigurationGroup', 'Value', 'BooleanValue',
'DecimalValue', 'DurationValue',
'FloatValue', 'IntegerValue', 'ModuleValue', 'PercentValue', 'PositiveIntegerValue', 'SortedDotDict',
'StringValue', 'SuperGroup', 'ImageValue', 'LongStringValue', 'MultipleStringValue', 'URLValue']
_WARN = {}
log = logging.getLogger('configuration')
NOTSET = object()
class SortedDotDict(SortedDict):
def __getattr__(self, key):
try:
return self[key]
except:
raise AttributeError, key
def __iter__(self):
vals = self.values()
for k in vals:
yield k
def values(self):
vals = super(SortedDotDict, self).values()
vals = [v for v in vals if isinstance(v, (ConfigurationGroup, Value))]
vals.sort()
return vals
class SuperGroup(object):
"""Aggregates ConfigurationGroup's into super-groups
that are used only for the presentation in the UI"""
def __init__(self, name, ordering = 0):
self.name = name
self.ordering = ordering
self.groups = list()
def append(self, group):
"""adds instance of :class:`ConfigurationGroup`
to the super group
"""
if group not in self.groups:
self.groups.append(group)
BASE_SUPER_GROUP = SuperGroup(ugettext_lazy('Main'))
class ConfigurationGroup(SortedDotDict):
"""A simple wrapper for a group of configuration values"""
def __init__(self, key, name, *args, **kwargs):
"""Create a new ConfigurationGroup.
Arguments:
- key
- group name - for display to user
Named Arguments:
- ordering: integer, optional, defaults to 1.
- requires: See `Value` requires. The default `requires` all member values will have if not overridden.
- requiresvalue: See `Values` requires_value. The default `requires_value` if not overridden on the `Value` objects.
"""
self.key = key
self.name = name
self.ordering = kwargs.pop('ordering', 1)
self.requires = kwargs.pop('requires', None)
self.super_group = kwargs.pop('super_group', BASE_SUPER_GROUP)
self.super_group.append(self)
if self.requires:
reqval = kwargs.pop('requiresvalue', key)
if not is_list_or_tuple(reqval):
reqval = (reqval, reqval)
self.requires_value = reqval[0]
self.requires.add_choice(reqval)
super(ConfigurationGroup, self).__init__(*args, **kwargs)
def __cmp__(self, other):
return cmp((self.ordering, self.name), (other.ordering, other.name))
def __eq__(self, other):
return (type(self) == type(other)
and self.ordering == other.ordering
and self.name == other.name)
def __ne__(self, other):
return not self == other
def dict_values(self, load_modules=True):
vals = {}
keys = super(ConfigurationGroup, self).keys()
for key in keys:
v = self[key]
if isinstance(v, Value):
value = v.value
else:
value = v
vals[key] = value
return vals
def values(self):
vals = super(ConfigurationGroup, self).values()
return [v for v in vals if v.enabled()]
BASE_GROUP = ConfigurationGroup(
'BASE',
ugettext_lazy('Base Settings'),
ordering=0
)
class Value(object):
creation_counter = 0
def __init__(self, group, key, **kwargs):
"""
Create a new Value object for configuration.
Args:
- `ConfigurationGroup`
- key - a string key
Named arguments:
- `description` - Will be passed to the field for form usage. Should be a translation proxy. Ex: _('example')
- `help_text` - Will be passed to the field for form usage.
- `choices` - If given, then the form field will use a select box
- `ordering` - Defaults to alphabetical by key if not given.
- `requires` - If given as a `Value`, then this field will only be rendered if that Value evaluates true (for Boolean requires) or the proper key is in the associated value.
- `requiresvalue` - If set, then this field will only be rendered if that value is in the list returned by self.value. Defaults to self.key.
- `hidden` - If true, then render a hidden field.
- `default` - If given, then this Value will return that default whenever it has no assocated `Setting`.
- `update_callback` - if given, then this value will call the callback whenever updated
- `clear_cache` - if `True` - clear all the caches on updates
"""
self.group = group
self.key = key
self.description = kwargs.get('description', None)
self.help_text = kwargs.get('help_text')
self.choices = kwargs.get('choices',[])
self.ordering = kwargs.pop('ordering', 0)
self.hidden = kwargs.pop('hidden', False)
self.update_callback = kwargs.pop('update_callback', None)
self.requires = kwargs.pop('requires', None)
self.clear_cache = kwargs.pop('clear_cache', False)
if self.requires:
reqval = kwargs.pop('requiresvalue', key)
if not is_list_or_tuple(reqval):
reqval = (reqval, reqval)
self.requires_value = reqval[0]
self.requires.add_choice(reqval)
elif group.requires:
self.requires = group.requires
self.requires_value = group.requires_value
if kwargs.has_key('default'):
self.default = kwargs.pop('default')
self.use_default = True
else:
self.use_default = False
self.creation_counter = Value.creation_counter
Value.creation_counter += 1
def __cmp__(self, other):
return cmp((self.ordering, self.description, self.creation_counter), (other.ordering, other.description, other.creation_counter))
def __eq__(self, other):
if type(self) == type(other):
return self.value == other.value
else:
return self.value == other
def __iter__(self):
return iter(self.value)
def __unicode__(self):
return unicode(self.value)
def __str__(self):
return str(self.value)
def add_choice(self, choice):
"""Add a choice if it doesn't already exist."""
if not is_list_or_tuple(choice):
choice = (choice, choice)
skip = False
for k, v in self.choices:
if k == choice[0]:
skip = True
break
if not skip:
self.choices += (choice, )
def choice_field(self, **kwargs):
if self.hidden:
kwargs['widget'] = forms.MultipleHiddenInput()
return forms.ChoiceField(choices=self.choices, **kwargs)
def _choice_values(self):
choices = self.choices
vals = self.value
return [x for x in choices if x[0] in vals]
choice_values = property(fget=_choice_values)
def copy(self):
new_value = self.__class__(self.key)
new_value.__dict__ = self.__dict__.copy()
return new_value
def _default_text(self):
if not self.use_default:
note = ""
else:
if self.default == "":
note = _('Default value: ""')
elif self.choices:
work = []
for x in self.choices:
if x[0] in self.default:
work.append(smart_str(x[1]))
note = _('Default value: ') + ", ".join(work)
else:
note = _("Default value: %s") % unicode(self.default)
return note
default_text = property(fget=_default_text)
def enabled(self):
enabled = False
try:
if not self.requires:
enabled = True
else:
v = self.requires.value
if self.requires.choices:
enabled = self.requires_value == v or self.requires_value in v
elif v:
enabled = True
except SettingNotSet:
pass
return enabled
def make_field(self, **kwargs):
if self.choices:
if self.hidden:
kwargs['widget'] = forms.MultipleHiddenInput()
field = self.choice_field(**kwargs)
else:
if self.hidden:
kwargs['widget'] = forms.HiddenInput()
field = self.field(**kwargs)
field.group = self.group
field.default_text = self.default_text
return field
def make_setting(self, db_value):
log.debug('new setting %s.%s', self.group.key, self.key)
return Setting(group=self.group.key, key=self.key, value=db_value)
def _setting(self):
return find_setting(self.group.key, self.key)
setting = property(fget = _setting)
def _value(self):
use_db, overrides = get_overrides()
if not use_db:
try:
val = overrides[self.group.key][self.key]
except KeyError:
if self.use_default:
val = self.default
else:
raise SettingNotSet('%s.%s is not in your LIVESETTINGS_OPTIONS' % (self.group.key, self.key))
else:
try:
val = self.setting.value
except SettingNotSet, sns:
if self.use_default:
val = self.default
if overrides:
# maybe override the default
grp = overrides.get(self.group.key, {})
if grp.has_key(self.key):
val = grp[self.key]
else:
val = NOTSET
except AttributeError, ae:
log.error("Attribute error: %s", ae)
log.error("%s: Could not get _value of %s", self.key, self.setting)
raise(ae)
except Exception, e:
global _WARN
log.error(e)
if str(e).find("configuration_setting") > -1:
if not _WARN.has_key('configuration_setting'):
log.warn('Error loading setting %s.%s from table, OK if you are in syncdb', self.group.key, self.key)
_WARN['configuration_setting'] = True
if self.use_default:
val = self.default
else:
raise ImproperlyConfigured("All settings used in startup must have defaults, %s.%s does not", self.group.key, self.key)
else:
import traceback
traceback.print_exc()
log.warn("Problem finding settings %s.%s, %s", self.group.key, self.key, e)
raise SettingNotSet("Startup error, couldn't load %s.%s" %(self.group.key, self.key))
return val
def update(self, value):
use_db, overrides = get_overrides()
if use_db:
current_value = self.value
new_value = self.to_python(value)
if current_value != new_value:
if self.update_callback:
new_value = apply(self.update_callback, (current_value, new_value))
db_value = self.get_db_prep_save(new_value)
try:
s = self.setting
s.value = db_value
except SettingNotSet:
s = self.make_setting(db_value)
if self.use_default and self.default == new_value:
if s.id:
log.info("Deleted setting %s.%s", self.group.key, self.key)
s.delete()
else:
log.info("Updated setting %s.%s = %s", self.group.key, self.key, value)
s.save()
signals.configuration_value_changed.send(self, old_value=current_value, new_value=new_value, setting=self)
if self.clear_cache:
cache.clear()
return True
else:
log.debug('not updating setting %s.%s - askbot.deps.livesettings db is disabled',self.group.key, self.key)
return False
@property
def value(self):
val = self._value()
return self.to_python(val)
@property
def editor_value(self):
val = self._value()
return self.to_editor(val)
# Subclasses should override the following methods where applicable
def to_python(self, value):
"Returns a native Python object suitable for immediate use"
if value == NOTSET:
value = None
return value
def get_db_prep_save(self, value):
"Returns a value suitable for storage into a CharField"
if value == NOTSET:
value = ""
return unicode(value)
def to_editor(self, value):
"Returns a value suitable for display in a form widget"
if value == NOTSET:
return NOTSET
return unicode(value)
###############
# VALUE TYPES #
###############
class BooleanValue(Value):
class field(forms.BooleanField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.BooleanField.__init__(self, *args, **kwargs)
def add_choice(self, choice):
# ignore choice adding for boolean types
pass
def to_python(self, value):
if value in (True, 't', 'True', 1, '1'):
return True
return False
to_editor = to_python
class DecimalValue(Value):
class field(forms.DecimalField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.DecimalField.__init__(self, *args, **kwargs)
def to_python(self, value):
if value==NOTSET:
return Decimal("0")
try:
return Decimal(value)
except TypeError, te:
log.warning("Can't convert %s to Decimal for settings %s.%s", value, self.group.key, self.key)
raise TypeError(te)
def to_editor(self, value):
if value == NOTSET:
return "0"
else:
return unicode(value)
# DurationValue has a lot of duplication and ugliness because of issue #2443
# Until DurationField is sorted out, this has to do some extra work
class DurationValue(Value):
class field(forms.CharField):
def clean(self, value):
try:
return datetime.timedelta(seconds=float(value))
except (ValueError, TypeError):
raise forms.ValidationError('This value must be a real number.')
except OverflowError:
raise forms.ValidationError('The maximum allowed value is %s' % datetime.timedelta.max)
def to_python(self, value):
if value == NOTSET:
value = 0
if isinstance(value, datetime.timedelta):
return value
try:
return datetime.timedelta(seconds=float(value))
except (ValueError, TypeError):
raise forms.ValidationError('This value must be a real number.')
except OverflowError:
raise forms.ValidationError('The maximum allowed value is %s' % datetime.timedelta.max)
def get_db_prep_save(self, value):
if value == NOTSET:
return NOTSET
else:
return unicode(value.days * 24 * 3600 + value.seconds + float(value.microseconds) / 1000000)
class FloatValue(Value):
class field(forms.FloatField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.FloatField.__init__(self, *args, **kwargs)
def to_python(self, value):
if value == NOTSET:
value = 0
return float(value)
def to_editor(self, value):
if value == NOTSET:
return "0"
else:
return unicode(value)
class IntegerValue(Value):
class field(forms.IntegerField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.IntegerField.__init__(self, *args, **kwargs)
def to_python(self, value):
if value == NOTSET:
value = 0
return int(value)
def to_editor(self, value):
if value == NOTSET:
return "0"
else:
return unicode(value)
class PercentValue(Value):
class field(forms.DecimalField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.DecimalField.__init__(self, 100, 0, 5, 2, *args, **kwargs)
class widget(forms.TextInput):
def render(self, *args, **kwargs):
# Place a percent sign after a smaller text field
attrs = kwargs.pop('attrs', {})
attrs['size'] = attrs['max_length'] = 6
return forms.TextInput.render(self, attrs=attrs, *args, **kwargs) + '%'
def to_python(self, value):
if value == NOTSET:
value = 0
return Decimal(value) / 100
def to_editor(self, value):
if value == NOTSET:
return "0"
else:
return unicode(value)
class PositiveIntegerValue(IntegerValue):
class field(forms.IntegerField):
def __init__(self, *args, **kwargs):
kwargs['min_value'] = 0
forms.IntegerField.__init__(self, *args, **kwargs)
class StringValue(Value):
class field(forms.CharField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.CharField.__init__(self, *args, **kwargs)
def to_python(self, value):
if value == NOTSET:
value = ""
return unicode(value)
to_editor = to_python
class URLValue(Value):
class field(forms.URLField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.URLField.__init__(self, *args, **kwargs)
class LongStringValue(Value):
class field(forms.CharField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
kwargs['widget'] = forms.Textarea()
forms.CharField.__init__(self, *args, **kwargs)
def make_setting(self, db_value):
log.debug('new long setting %s.%s', self.group.key, self.key)
return LongSetting(group=self.group.key, key=self.key, value=db_value)
def to_python(self, value):
if value == NOTSET:
value = ""
return unicode(value)
to_editor = to_python
class ImageValue(StringValue):
def __init__(self, *args, **kwargs):
self.allowed_file_extensions = kwargs.pop(
'allowed_file_extensions',
('jpg', 'gif', 'png')
)
self.upload_directory = kwargs.pop(
'upload_directory',
django_settings.MEDIA_ROOT
)
self.upload_url = kwargs.pop(
'upload_url',
django_settings.MEDIA_URL
)
self.url_resolver = kwargs.pop('url_resolver', None)
super(ImageValue, self).__init__(*args, **kwargs)
class field(forms.FileField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
self.allowed_file_extensions = kwargs.pop('allowed_file_extensions')
url_resolver = kwargs.pop('url_resolver')
kwargs['widget'] = ImageInput(url_resolver = url_resolver)
forms.FileField.__init__(self, *args, **kwargs)
def clean(self, file_data, initial=None):
if not file_data and initial:
return initial
(base_name, ext) = os.path.splitext(file_data.name)
#first character in ext is .
if ext[1:].lower() not in self.allowed_file_extensions:
error_message = _('Allowed image file types are %(types)s') \
% {'types': ', '.join(self.allowed_file_extensions)}
raise forms.ValidationError(error_message)
def make_field(self, **kwargs):
kwargs['url_resolver'] = self.url_resolver
kwargs['allowed_file_extensions'] = self.allowed_file_extensions
return super(StringValue, self).make_field(**kwargs)
def update(self, uploaded_file):
"""uploaded_file is an instance of
django UploadedFile object
"""
#0) initialize file storage
file_storage_class = storage.get_storage_class()
storage_settings = {}
if django_settings.DEFAULT_FILE_STORAGE == \
'django.core.files.storage.FileSystemStorage':
storage_settings = {
'location': self.upload_directory,
'base_url': self.upload_url
}
file_storage = file_storage_class(**storage_settings)
#1) come up with a file name
#todo: need better function here to calc name
file_name = file_storage.get_available_name(uploaded_file.name)
file_storage.save(file_name, uploaded_file)
url = file_storage.url(file_name)
old_file = self.value
old_file = old_file.replace(self.upload_url, '', 1)
old_file_path = os.path.join(self.upload_directory, old_file)
if os.path.isfile(old_file_path):
os.unlink(old_file_path)
#saved file path is relative to the upload_directory
#so that things could be easily relocated
super(ImageValue, self).update(url)
class MultipleStringValue(Value):
class field(forms.CharField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.CharField.__init__(self, *args, **kwargs)
def choice_field(self, **kwargs):
kwargs['required'] = False
return forms.MultipleChoiceField(choices=self.choices, **kwargs)
def get_db_prep_save(self, value):
if is_string_like(value):
value = [value]
return simplejson.dumps(value)
def to_python(self, value):
if not value or value == NOTSET:
return []
if is_list_or_tuple(value):
return value
else:
try:
return simplejson.loads(value)
except:
if is_string_like(value):
return [value]
else:
log.warning('Could not decode returning empty list: %s', value)
return []
to_editor = to_python
class ModuleValue(Value):
"""Handles setting modules, storing them as strings in the db."""
class field(forms.CharField):
def __init__(self, *args, **kwargs):
kwargs['required'] = False
forms.CharField.__init__(self, *args, **kwargs)
def load_module(self, module):
"""Load a child module"""
value = self._value()
if value == NOTSET:
raise SettingNotSet("%s.%s", self.group.key, self.key)
else:
return load_module("%s.%s" % (value, module))
def to_python(self, value):
if value == NOTSET:
v = {}
else:
v = load_module(value)
return v
def to_editor(self, value):
if value == NOTSET:
value = ""
return value | unknown | codeparrot/codeparrot-clean | ||
from distutils import log, dir_util
import os, sys
from setuptools import Command
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
self.install_layout = None
self.prefix_option = None
def finalize_options(self):
self.set_undefined_options('install_lib',
('install_dir', 'install_dir'))
self.set_undefined_options('install',('install_layout','install_layout'))
if sys.hexversion > 0x2060000:
self.set_undefined_options('install',('prefix_option','prefix_option'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name() + '.egg-info'
if self.install_layout:
if not self.install_layout.lower() in ['deb']:
raise DistutilsOptionError(
"unknown value for --install-layout")
basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
elif self.prefix_option or 'real_prefix' in sys.__dict__:
# don't modify for virtualenv
pass
else:
basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
self.run_command('egg_info')
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink, (self.target,), "Removing " + self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(
self.copytree, (), "Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src, dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/', 'CVS/':
if src.startswith(skip) or '/' + skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self.target)
filename += '-nspkg.pth'
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
_nspkg_tmpl = (
"import sys, types, os",
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
"ie = os.path.exists(os.path.join(p,'__init__.py'))",
"m = not ie and "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
@classmethod
def _gen_nspkg_line(cls, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
tmpl_lines = cls._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += cls._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
nsp = set()
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp.add('.'.join(pkg))
pkg.pop()
return sorted(nsp) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
class report_stock_lines_date(osv.osv):
_name = "report.stock.lines.date"
_description = "Dates of Inventories and latest Moves"
_auto = False
_order = "date"
_columns = {
'id': fields.integer('Product Id', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True, select=True),
'date': fields.datetime('Date of latest Inventory', readonly=True),
'move_date': fields.datetime('Date of latest Stock Move', readonly=True),
"active": fields.boolean("Active", readonly=True),
}
def init(self, cr):
drop_view_if_exists(cr, 'report_stock_lines_date')
cr.execute("""
create or replace view report_stock_lines_date as (
select
p.id as id,
p.id as product_id,
max(s.date) as date,
max(m.date) as move_date,
p.active as active
from
product_product p
left join (
stock_inventory_line l
inner join stock_inventory s on (l.inventory_id=s.id and s.state = 'done')
) on (p.id=l.product_id)
left join stock_move m on (m.product_id=p.id and m.state = 'done')
group by p.id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# The LLVM/Offload Subproject
The Offload subproject aims at providing tooling, runtimes, and APIs that allow
users to execute code on accelerators or other "co-processors" that may or may
not match the architecture of their "host". In the long run, all kinds of
targets are in scope of this effort, including but not limited to: CPUs, GPUs,
FPGAs, AI/ML accelerators, distributed resources, etc.
For OpenMP offload users, the project is ready and fully usable.
The final API design is still under development. More content will show up here
and on our webpage soon. In the meantime, people are encouraged to participate
in our meetings (see below) and check our
[development board](https://github.com/orgs/llvm/projects/24/) as well as the
discussions on [Discourse](https://discourse.llvm.org/tag/offload).
# Meetings
Every second Wednesday, 7:00 - 8:00am PT, starting Jan 24, 2024.
Alternates with the OpenMP in LLVM meeting.
[invite.ics](https://drive.google.com/file/d/1AYwKdnM01aV9Gv9k435ArEAhn7PAer7z/view?usp=sharing)
[Meeting Minutes and Agenda](https://docs.google.com/document/d/1PAeEshxHCv22JDBCPA9GXGggLp0t7rsnD_jL04MBbzw/edit?usp=sharing) | unknown | github | https://github.com/llvm/llvm-project | offload/README.md |
#!/usr/bin/python3
# example call
# ./master.py /dev/ttyUSB0 /dev/ttyUSB1
## python system imports
import csv
import threading
import queue
import time
import sys
import os.path
import AEMmailer
from casyncosc import SerialServer
## diskSpaceLimit : in MB, when limit reached, processing halts
diskSpaceLimit = 100 # MB
class WriterThread(threading.Thread):
def __init__(self, name, q, lock,fileLockDict, syncTimeFunc, dataDir): #,syncTimeFunc):
"""
Consumer class instanciation:
The Writer thread implements the consumer in the producer/consumer paradigm.
The number of threads created is not known by the individual threads. They
simply pop things off the shared thread-safe queue, extract the data, and
write to the correct data (csv) file, creating it and the data directory if needed.
The file names for the data files are created from the data elts themselves, in combination
with the information obtained by callingthe syncTimeFunc provided as argument.
Data file names are used as keys in the shared fileLockDict, whre the values are semaphores
ensuring unique access to each file.
@param self
@param name a string naming the thread, legacy but left to allow for easier debugging
@param q the thread-safe q which will be popped to get data
@param lock the semaphore ensure unique access to the fileLockDict
@param dataDir the path to the data directory
@param syncTimeFunc a function that will be called to get the synch time to be used in naming the
data files.
"""
threading.Thread.__init__(self)
## for file name lookup
self.fileLockDict = fileLockDict
## string name of the thread, used for debugging or information messages
self.name = name
## work q, source of data to be written to files
self.q = q
## semaphore for exclusive access to the fileLockDict
self.dictLock = lock
## semaphore locking access to the file currently being written
self.fileLock = None
## path to the data file target directory
self.dataDir = dataDir
self.dictLock.acquire()
if not os.path.exists(dataDir):
os.makedirs(dataDir)
self.dictLock.release()
## function which when called will return the synchronisation time of the boards
self.getSynchTimeFunc = syncTimeFunc
def getFormattedRow(self,row):
"""
Formats a row by rounding the float values to 4 decimals
@param row the data row,
@return the row as a list ready to be written to the csv data file.
"""
row[2]= round(row[2],4)
return row[1:3]
def createDataFile(self,outFile):
"""
Called to create the data csv file and write the header row.
"""
headers = ('Timestamp','Value')
with open(outFile, 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
#print(headers)
writer.writerow(headers)
print('created file:',outFile)
def decodeADCCID(self,coded):
"""
Decodes the ADC and Channel values which were encoded onto a single byte such that
the top 4 bits are the ADC id, and the lower four are the channel id.
@param coded a single byte containing the encoded values
@return a list as decoded into [ADC_ID, Channel_ID]
"""
maskR = 0b1111
return [((coded>>4)& 0b1111), coded & 0b1111]
def getFile(self,row):
"""
the filename is computed, then the dictLock is acquired to access the fileLockDict and
get a lock for the data file, creating the lock if needed.
the dictLock is released and the fileLock is aquire before returning
@param row the data to be used to get the file name
"""
[adcID,chID] = self.decodeADCCID(row[0])
filename = self.dataDir + '/{bid}_{adc}_{cid}_{syc}.csv'.format(bid=row[3],adc=adcID,cid=chID,syc=self.getSynchTimeFunc())
#print('getting filenam',filename)
self.dictLock.acquire()
try:
self.fileLock = self.fileLockDict[filename]
except KeyError:
self.fileLock = threading.Lock()
self.fileLockDict[filename] = self.fileLock
if not os.path.exists(filename):
self.createDataFile(filename)
self.dictLock.release()
self.fileLock.acquire()
return filename
def releaseFile(self,filename):
"""
Simply releases the fileLock
"""
self.fileLock.release()
def do_work(self,thing):
"""
This method handles the 'consuming' of the thing popped from the queue.
After obtaining a lock for the appropriate data file, a csv row is written,
the lock is released.
@param the object popped from the queue, in our case a list of length 4:
ADCCID,Timestamp,value, Board_ID
"""
filename = self.getFile(thing)
#print('writing a row',thing)
with open(filename, 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.getFormattedRow(thing))
self.releaseFile(filename)
def run(self):
"""
Thread run method. pops the queue, sends the popped thing to be consumed.
if a None value is popped, the method exits properply and the thread ends.
"""
try:
while True:
item = self.q.get()
#print(item)
if item is None:
break
self.do_work(item)
self.q.task_done()
except Exception as e:
print(e)
print('thread exiting...')
finally:
self.q.task_done()
class ReaderThread(threading.Thread):
def __init__(self, q, stopEv, mailerFunc, portT):
"""
Consumer class instanciation:
The Writer thread implements the consumer in the producer/consumer paradigm.
The number of threads created is not known by the individual threads. They
simply pop things off the shared thread-safe queue, extract the data, and
write to the correct data (csv) file, creating it and the data directory if needed.
The file names for the data files are created from the data elts themselves, in combination
with the information obtained by callingthe syncTimeFunc provided as argument.
Data file names are used as keys in the shared fileLockDict, whre the values are semaphores
ensuring unique access to each file.
@param self
@param portT a string naming serial port
@param q the thread-safe q which will be popped to get data
@param lock the semaphore ensure unique access to the fileLockDict
@param dataDir the path to the data directory
@param syncTimeFunc a function that will be called to get the synch time to be used in naming the
data files.
"""
threading.Thread.__init__(self)
## string name of the thread, used for debugging or information messages
self.server = SerialServer(portT,stopEv,q,mailerFunc)
## event to set when disk space runs out we exit
self.stopEvent = stopEv
print('Reader created on port: ',portT)
def run(self):
"""
Thread run method. pops the queue, sends the popped thing to be consumed.
if a None value is popped, the method exits properply and the thread ends.
"""
try:
self.server.serve()
except KeyboardInterrupt:
self.stopEvent.set()
###### Master Class CODE ##################
class Master:
def __init__(self, ports = ['/dev/ttyUSB0'], nbThreads=4, dataDir='./DATA'):
"""
Constructor for class Master, implements the start and Producer part of
the Producer/Consumer paradigm for parallel processing.
At instanciation,
a FIFO, thread-safe queue is created which will be where the producer puts
his production and where the consumers get it.
A semaphore (threading.Lock) object is created to ensure unicity of access
the the fileLockDict used by all the consumer threads.
An insnce of the spiCommsMgr it created.
Finally the conusmer threads are created.
@param self,
@param nbThreads defaults to the initializer
@param dataDir default value provided, the directory where the conusmers
will write the csv files.
"""
## file lock dire for writer threads
## keys: full filename
## values: threading.lock object used to guarranty exclusive access to the file for writing
self.fileLockDict = {}
## Directory for data files
self.dataDir = dataDir
## Synchronized work queue
self.q = queue.Queue()
## Semaphore object to be passed to consumer threads for their use
self.lock = threading.Lock()
self.stopEvent = threading.Event()
self.stopEvent.clear()
self.q = queue.Queue()
try:
self.mailer = AEMmailer.AEMMailer()
except AEMmailer.NoPasswordException:
print("No password provided; no mail will be sent...")
self.mailer = None
self.sendMsg("AEM session started!")
self.startTime = time.strftime('%Y_%m_%d_%H.%M.%S', time.localtime())
self.createWriterThreads(nbThreads)
self.createReaderThreads(ports)
def getSyncTime(self):
return self.startTime
def sendMsg(self,msg):
if self.mailer:
self.mailer.connectAndSend(msg)
print(msg)
def createReaderThreads(self,portLis):
self.readerThreads=[]
for port in portLis:
reader = ReaderThread(self.q,self.stopEvent,self.sendMsg,port)
reader.start()
self.readerThreads.append(reader)
def createWriterThreads(self,num):
"""
Creates the number of consumer threads according to the argument.
@param num the number of threads to create
"""
self.writerThreads = []
for i in range(num):
name='WriterThread-' + str(i)
t = WriterThread(name,self.q,self.lock,self.fileLockDict, self.getSyncTime, self.dataDir)
t.start()
self.writerThreads.append(t)
def stopAll(self):
"""
Called at the end of a run, it allows all the consumer threads to exit properly
"""
# block until all tasks are done
print('Shutting down all threads...')
self.stopEvent.set()
self.q.join()
# stop workers
for t in self.writerThreads:
self.q.put(None)
threadCounter = 0
for t in self.writerThreads + self.readerThreads:
#print('Thread', threadCounter,' shut down...')
#threadCounter+=1
t.join()
print('All threads shut down, exiting...')
time.sleep(0.01)
def diskSpaceLimitReached(self):
st = os.statvfs(os.getcwd())
free = st.f_bavail * st.f_frsize
diskFreeMB = free / 1000000
res = diskFreeMB <= diskSpaceLimit
if res:
print('Disk Space Limit Reached :',diskSpaceLimit,'MB')
self.sendMsg('Disk Space Limit ' + str(diskSpaceLimit) + ' MB reached!')
return res
def run(self):
"""
called to start a data run, after the consumer threads have been started.
will display the elapsed time on exit.
To exit, ctrl-c will be handled properly.
"""
startTime = time.time()
try:
while True:
if self.diskSpaceLimitReached():
self.stopEvent.set()
break
time.sleep(10)
except:
pass
finally:
self.stopAll()
elapsedTime = round(time.time()-startTime)
print('Elapsed Time :', elapsedTime, 'seconds')
self.sendMsg('Shutting down!\nElapsed Time : ' + str(elapsedTime) + ' seconds.')
if __name__ == '__main__':
if any(['-h' in sys.argv, '--h' in sys.argv, '--help' in sys.argv]):
print('Usage: $ ./master.py <ports defaults to /dev/ttyUSB0>' )
print('examples;')
print('Usage: $ ./master.py # uses default port /dev/ttyUSB00' )
print('Usage: $ ./master.py /dev/ttyUSB0 # same as previous' )
print('Usage: $ ./master.py /dev/ttyACM0 /dev/ttyUSB0 # use these ports')
print('Note: the AEM board must be running the appropriate software')
sys.exit(0)
if len(sys.argv) < 2:
## instance of Master class running the entire show!
master = Master()
else:
master = Master(sys.argv[1:])
master.run() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.exceptions;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.mock;
import java.lang.reflect.Field;
import java.util.Collections;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.exceptions.base.MockitoException;
import org.mockito.exceptions.verification.NoInteractionsWanted;
import org.mockito.exceptions.verification.TooFewActualInvocations;
import org.mockito.exceptions.verification.VerificationInOrderFailure;
import org.mockito.internal.invocation.InvocationBuilder;
import org.mockito.internal.stubbing.answers.Returns;
import org.mockito.invocation.Invocation;
import org.mockitousage.IMethods;
import org.mockitoutil.TestBase;
public class ReporterTest extends TestBase {
@Test
public void should_let_passing_null_last_actual_stack_trace() {
assertThatThrownBy(
() -> {
throw Reporter.tooFewActualInvocations(
new org.mockito.internal.reporting.Discrepancy(1, 2),
new InvocationBuilder().toInvocation(),
null);
})
.isInstanceOf(TooFewActualInvocations.class)
.hasMessageContainingAll(
"iMethods.simpleMethod();", "Wanted 1 time:", "But was 2 times:");
}
@Test
public void should_throw_correct_exception_for_null_invocation_listener() {
assertThatThrownBy(
() -> {
throw Reporter.methodDoesNotAcceptParameter(
"invocationListeners", "null vararg array");
})
.isInstanceOf(MockitoException.class)
.hasMessage(
"invocationListeners() does not accept null vararg array See the Javadoc.");
}
@Test
public void
can_use_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_no_more_interaction_wanted() {
Invocation invocation_with_bogus_default_answer =
new InvocationBuilder()
.mock(mock(IMethods.class, new Returns(false)))
.toInvocation();
assertThatThrownBy(
() -> {
throw Reporter.noMoreInteractionsWanted(
invocation_with_bogus_default_answer,
Collections.<VerificationAwareInvocation>emptyList());
})
.isInstanceOf(NoInteractionsWanted.class)
.hasMessageContainingAll(
"No interactions wanted here:",
"But found this interaction on mock 'iMethods':");
}
@Test
public void
can_use_print_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_no_more_interaction_wanted_in_order() {
Invocation invocation_with_bogus_default_answer =
new InvocationBuilder()
.mock(mock(IMethods.class, new Returns(false)))
.toInvocation();
assertThatThrownBy(
() -> {
throw Reporter.noMoreInteractionsWantedInOrder(
invocation_with_bogus_default_answer);
})
.isInstanceOf(VerificationInOrderFailure.class)
.hasMessageContainingAll(
"No interactions wanted here:",
"But found this interaction on mock 'iMethods':");
}
@Test
public void
can_use_print_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_invalid_argument_position() {
Invocation invocation_with_bogus_default_answer =
new InvocationBuilder()
.mock(mock(IMethods.class, new Returns(false)))
.toInvocation();
assertThatThrownBy(
() -> {
throw Reporter.invalidArgumentPositionRangeAtInvocationTime(
invocation_with_bogus_default_answer, true, 0);
})
.isInstanceOf(MockitoException.class)
.hasMessageContainingAll(
"Invalid argument index for the current invocation of method :",
" -> iMethods.simpleMethod()",
"Last parameter wanted but the method has no arguments.");
}
@Test
public void
can_use_print_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_wrong_argument_to_return() {
Invocation invocation_with_bogus_default_answer =
new InvocationBuilder()
.mock(mock(IMethods.class, new Returns(false)))
.toInvocation();
assertThatThrownBy(
() -> {
throw Reporter.wrongTypeOfArgumentToReturn(
invocation_with_bogus_default_answer, "", String.class, 0);
})
.isInstanceOf(MockitoException.class)
.hasMessageContainingAll(
"The argument of type 'String' cannot be returned because the following",
"method should return the type ''",
" -> iMethods.simpleMethod()",
"The reason for this error can be :",
"1. The wanted argument position is incorrect.",
"2. The answer is used on the wrong interaction.",
"Position of the wanted argument is 0 and the method has no arguments.");
}
@Test
public void
can_use_print_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_delegate_method_dont_exists() {
Invocation dumb_invocation = new InvocationBuilder().toInvocation();
IMethods mock_with_bogus_default_answer = mock(IMethods.class, new Returns(false));
assertThatThrownBy(
() -> {
throw Reporter.delegatedMethodDoesNotExistOnDelegate(
dumb_invocation.getMethod(),
mock_with_bogus_default_answer,
String.class);
})
.isInstanceOf(MockitoException.class)
.hasMessageContainingAll(
"Methods called on mock must exist in delegated instance.",
"When calling: public abstract java.lang.String org.mockitousage.IMethods.simpleMethod() on mock: iMethods",
"no such method was found.",
"Check that the instance passed to delegatesTo() is of the correct type or contains compatible methods",
"(delegate instance had type: Class)");
}
@Test
public void
can_use_print_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_delegate_method_has_wrong_return_type() {
Invocation dumb_invocation = new InvocationBuilder().toInvocation();
IMethods mock_with_bogus_default_answer = mock(IMethods.class, new Returns(false));
assertThatThrownBy(
() -> {
throw Reporter.delegatedMethodHasWrongReturnType(
dumb_invocation.getMethod(),
dumb_invocation.getMethod(),
mock_with_bogus_default_answer,
String.class);
})
.isInstanceOf(MockitoException.class)
.hasMessageContainingAll(
"Methods called on delegated instance must have compatible return types with the mock.",
"When calling: public abstract java.lang.String org.mockitousage.IMethods.simpleMethod() on mock: iMethods",
"return type should be: String, but was: String",
"Check that the instance passed to delegatesTo() is of the correct type or contains compatible methods",
"(delegate instance had type: Class)");
}
@Test
public void
can_use_print_mock_name_even_when_mock_bogus_default_answer_and_when_reporting_injection_failure() {
IMethods mock_with_bogus_default_answer = mock(IMethods.class, new Returns(false));
assertThatThrownBy(
() -> {
throw Reporter.cannotInjectDependency(
someField(), mock_with_bogus_default_answer, new Exception());
})
.isInstanceOf(MockitoException.class)
.hasMessageContainingAll(
"Mockito couldn't inject mock dependency 'iMethods' on field",
"'static final org.mockito.internal.MockitoCore org.mockito.Mockito.MOCKITO_CORE'",
"whose type 'org.mockito.Mockito' was annotated by @InjectMocks in your test.",
"Also I failed because: null");
}
private Field someField() {
return Mockito.class.getDeclaredFields()[0];
}
} | java | github | https://github.com/mockito/mockito | mockito-core/src/test/java/org/mockito/internal/exceptions/ReporterTest.java |
import React, { Component } from 'react';
import { Alert, Button, Col, Nav, NavItem, NavLink, Row, TabContent, TabPane } from 'reactstrap';
import moment from 'moment-timezone';
import ExpressionInput from './ExpressionInput';
import GraphControls from './GraphControls';
import { GraphTabContent } from './GraphTabContent';
import DataTable from './DataTable';
import TimeInput from './TimeInput';
import QueryStatsView, { QueryStats } from './QueryStatsView';
import { QueryParams, ExemplarData } from '../../types/types';
import { API_PATH } from '../../constants/constants';
import { debounce } from '../../utils';
import { isHeatmapData } from './GraphHeatmapHelpers';
interface PanelProps {
options: PanelOptions;
onOptionsChanged: (opts: PanelOptions) => void;
useLocalTime: boolean;
pastQueries: string[];
metricNames: string[];
removePanel: () => void;
onExecuteQuery: (query: string) => void;
pathPrefix: string;
enableAutocomplete: boolean;
enableHighlighting: boolean;
enableLinter: boolean;
id: string;
}
interface PanelState {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
data: any; // TODO: Type data.
exemplars: ExemplarData;
lastQueryParams: QueryParams | null;
loading: boolean;
warnings: string[] | null;
infos: string[] | null;
error: string | null;
stats: QueryStats | null;
exprInputValue: string;
isHeatmapData: boolean;
}
export interface PanelOptions {
expr: string;
type: PanelType;
range: number; // Range in milliseconds.
endTime: number | null; // Timestamp in milliseconds.
resolution: number | null; // Resolution in seconds.
displayMode: GraphDisplayMode;
showExemplars: boolean;
}
export enum PanelType {
Graph = 'graph',
Table = 'table',
}
export enum GraphDisplayMode {
Lines = 'lines',
Stacked = 'stacked',
Heatmap = 'heatmap',
}
export const PanelDefaultOptions: PanelOptions = {
type: PanelType.Table,
expr: '',
range: 60 * 60 * 1000,
endTime: null,
resolution: null,
displayMode: GraphDisplayMode.Lines,
showExemplars: false,
};
class Panel extends Component<PanelProps, PanelState> {
private abortInFlightFetch: (() => void) | null = null;
private debounceExecuteQuery: () => void;
constructor(props: PanelProps) {
super(props);
this.state = {
data: null,
exemplars: [],
lastQueryParams: null,
loading: false,
warnings: null,
infos: null,
error: null,
stats: null,
exprInputValue: props.options.expr,
isHeatmapData: false,
};
this.debounceExecuteQuery = debounce(this.executeQuery.bind(this), 250);
}
componentDidUpdate({ options: prevOpts }: PanelProps): void {
const { endTime, range, resolution, showExemplars, type } = this.props.options;
if (prevOpts.endTime !== endTime || prevOpts.range !== range) {
this.debounceExecuteQuery();
return;
}
if (prevOpts.resolution !== resolution || prevOpts.type !== type || showExemplars !== prevOpts.showExemplars) {
this.executeQuery();
}
}
componentDidMount(): void {
this.executeQuery();
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
executeQuery = async (): Promise<any> => {
const { exprInputValue: expr } = this.state;
const queryStart = Date.now();
this.props.onExecuteQuery(expr);
if (this.props.options.expr !== expr) {
this.setOptions({ expr });
}
if (expr === '') {
return;
}
if (this.abortInFlightFetch) {
this.abortInFlightFetch();
this.abortInFlightFetch = null;
}
const abortController = new AbortController();
this.abortInFlightFetch = () => abortController.abort();
this.setState({ loading: true });
const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueOf only work when it's a moment?
const startTime = endTime - this.props.options.range / 1000;
const resolution = this.props.options.resolution || Math.max(Math.floor(this.props.options.range / 250000), 1);
const params: URLSearchParams = new URLSearchParams({
query: expr,
});
let path: string;
switch (this.props.options.type) {
case 'graph':
path = 'query_range';
params.append('start', startTime.toString());
params.append('end', endTime.toString());
params.append('step', resolution.toString());
break;
case 'table':
path = 'query';
params.append('time', endTime.toString());
break;
default:
throw new Error('Invalid panel type "' + this.props.options.type + '"');
}
let query;
let exemplars;
try {
query = await fetch(`${this.props.pathPrefix}/${API_PATH}/${path}?${params}`, {
cache: 'no-store',
credentials: 'same-origin',
signal: abortController.signal,
}).then((resp) => resp.json());
if (query.status !== 'success') {
throw new Error(query.error || 'invalid response JSON');
}
if (this.props.options.type === 'graph' && this.props.options.showExemplars) {
params.delete('step'); // Not needed for this request.
exemplars = await fetch(`${this.props.pathPrefix}/${API_PATH}/query_exemplars?${params}`, {
cache: 'no-store',
credentials: 'same-origin',
signal: abortController.signal,
}).then((resp) => resp.json());
if (exemplars.status !== 'success') {
throw new Error(exemplars.error || 'invalid response JSON');
}
}
let resultSeries = 0;
if (query.data) {
const { resultType, result } = query.data;
if (resultType === 'scalar') {
resultSeries = 1;
} else if (result && result.length > 0) {
resultSeries = result.length;
}
}
const isHeatmap = isHeatmapData(query.data);
const isHeatmapDisplayMode = this.props.options.displayMode === GraphDisplayMode.Heatmap;
if (!isHeatmap && isHeatmapDisplayMode) {
this.setOptions({ displayMode: GraphDisplayMode.Lines });
}
this.setState({
error: null,
data: query.data,
exemplars: exemplars?.data,
warnings: query.warnings,
infos: query.infos,
lastQueryParams: {
startTime,
endTime,
resolution,
},
stats: {
loadTime: Date.now() - queryStart,
resolution,
resultSeries,
},
loading: false,
isHeatmapData: isHeatmap,
});
this.abortInFlightFetch = null;
} catch (err: unknown) {
const error = err as Error;
if (error.name === 'AbortError') {
// Aborts are expected, don't show an error for them.
return;
}
this.setState({
error: 'Error executing query: ' + error.message,
loading: false,
});
}
};
setOptions(opts: Partial<PanelOptions>): void {
const newOpts = { ...this.props.options, ...opts };
this.props.onOptionsChanged(newOpts);
}
handleExpressionChange = (expr: string): void => {
this.setState({ exprInputValue: expr });
};
handleChangeRange = (range: number): void => {
this.setOptions({ range: range });
};
getEndTime = (): number | moment.Moment => {
if (this.props.options.endTime === null) {
return moment();
}
return this.props.options.endTime;
};
handleChangeEndTime = (endTime: number | null): void => {
this.setOptions({ endTime: endTime });
};
handleChangeResolution = (resolution: number | null): void => {
this.setOptions({ resolution: resolution });
};
handleChangeType = (type: PanelType): void => {
if (this.props.options.type === type) {
return;
}
this.setState({ data: null });
this.setOptions({ type: type });
};
handleChangeDisplayMode = (mode: GraphDisplayMode): void => {
this.setOptions({ displayMode: mode });
};
handleChangeShowExemplars = (show: boolean): void => {
this.setOptions({ showExemplars: show });
};
handleTimeRangeSelection = (startTime: number, endTime: number): void => {
this.setOptions({ range: endTime - startTime, endTime: endTime });
};
render(): JSX.Element {
const { pastQueries, metricNames, options } = this.props;
return (
<div className="panel">
<Row>
<Col>
<ExpressionInput
value={this.state.exprInputValue}
onExpressionChange={this.handleExpressionChange}
executeQuery={this.executeQuery}
loading={this.state.loading}
enableAutocomplete={this.props.enableAutocomplete}
enableHighlighting={this.props.enableHighlighting}
enableLinter={this.props.enableLinter}
queryHistory={pastQueries}
metricNames={metricNames}
/>
</Col>
</Row>
<Row>
<Col>{this.state.error && <Alert color="danger">{this.state.error}</Alert>}</Col>
</Row>
{this.state.warnings?.map((warning, index) => (
<Row key={index}>
<Col>{warning && <Alert color="warning">{warning}</Alert>}</Col>
</Row>
))}
{this.state.infos?.map((info, index) => (
<Row key={index}>
<Col>{info && <Alert color="info">{info}</Alert>}</Col>
</Row>
))}
<Row>
<Col>
<Nav tabs>
<NavItem>
<NavLink
className={options.type === 'table' ? 'active' : ''}
onClick={() => this.handleChangeType(PanelType.Table)}
>
Table
</NavLink>
</NavItem>
<NavItem>
<NavLink
className={options.type === 'graph' ? 'active' : ''}
onClick={() => this.handleChangeType(PanelType.Graph)}
>
Graph
</NavLink>
</NavItem>
{!this.state.loading && !this.state.error && this.state.stats && <QueryStatsView {...this.state.stats} />}
</Nav>
<TabContent activeTab={options.type}>
<TabPane tabId="table">
{options.type === 'table' && (
<>
<div className="table-controls">
<TimeInput
time={options.endTime}
useLocalTime={this.props.useLocalTime}
range={options.range}
placeholder="Evaluation time"
onChangeTime={this.handleChangeEndTime}
/>
</div>
<DataTable data={this.state.data} useLocalTime={this.props.useLocalTime} />
</>
)}
</TabPane>
<TabPane tabId="graph">
{this.props.options.type === 'graph' && (
<>
<GraphControls
range={options.range}
endTime={options.endTime}
useLocalTime={this.props.useLocalTime}
resolution={options.resolution}
displayMode={options.displayMode}
isHeatmapData={this.state.isHeatmapData}
showExemplars={options.showExemplars}
onChangeRange={this.handleChangeRange}
onChangeEndTime={this.handleChangeEndTime}
onChangeResolution={this.handleChangeResolution}
onChangeDisplayMode={this.handleChangeDisplayMode}
onChangeShowExemplars={this.handleChangeShowExemplars}
/>
<GraphTabContent
data={this.state.data}
exemplars={this.state.exemplars}
displayMode={options.displayMode}
useLocalTime={this.props.useLocalTime}
showExemplars={options.showExemplars}
lastQueryParams={this.state.lastQueryParams}
id={this.props.id}
handleTimeRangeSelection={this.handleTimeRangeSelection}
/>
</>
)}
</TabPane>
</TabContent>
</Col>
</Row>
<Row>
<Col>
<Button className="float-right" color="link" onClick={this.props.removePanel} size="sm">
Remove Panel
</Button>
</Col>
</Row>
</div>
);
}
}
export default Panel; | typescript | github | https://github.com/prometheus/prometheus | web/ui/react-app/src/pages/graph/Panel.tsx |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import cinder
from cinder.api.openstack import wsgi
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
UUID = fakes.FAKE_UUID
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = stubs.stub_volume(1, uuid=UUID)
self.fake_instance['created_at'] =\
datetime.datetime(2013, 1, 1, 1, 1, 1)
self.fake_instance['launched_at'] =\
datetime.datetime(2013, 1, 1, 1, 1, 1)
self.flags(
osapi_volume_extension=[
'cinder.api.contrib.select_extensions'],
osapi_volume_ext_list=['Scheduler_hints'])
self.app = fakes.wsgi_app()
def test_create_server_without_hints(self):
@wsgi.response(202)
def fake_create(*args, **kwargs):
self.assertNotIn('scheduler_hints', kwargs['body'])
return self.fake_instance
self.stubs.Set(cinder.api.v2.volumes.VolumeController, 'create',
fake_create)
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
req.content_type = 'application/json'
body = {'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1', }
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
@wsgi.response(202)
def fake_create(*args, **kwargs):
self.assertIn('scheduler_hints', kwargs['body'])
self.assertEqual(kwargs['body']['scheduler_hints'], {"a": "b"})
return self.fake_instance
self.stubs.Set(cinder.api.v2.volumes.VolumeController, 'create',
fake_create)
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
req.content_type = 'application/json'
body = {'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1',
'scheduler_hints': {'a': 'b'}, }
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
req.content_type = 'application/json'
body = {'volume': {
'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1',
'scheduler_hints': 'a', }}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int) | unknown | codeparrot/codeparrot-clean | ||
"""
An implementation of IdReader and IdGenerator that manages ids for the SplitMongo storage
mechanism.
"""
from opaque_keys.edx.locator import LocalId, DefinitionLocator
from xmodule.x_module import OpaqueKeyReader, AsideKeyGenerator
from xmodule.modulestore.split_mongo import BlockKey
# TODO: Migrate split_mongo to use this class for all key mapping/creation.
class SplitMongoIdManager(OpaqueKeyReader, AsideKeyGenerator): # pylint: disable=abstract-method
"""
An IdManager that knows how to retrieve the DefinitionLocator, given
a usage_id and a :class:`.CachingDescriptorSystem`.
"""
def __init__(self, caching_descriptor_system):
self._cds = caching_descriptor_system
def get_definition_id(self, usage_id):
if isinstance(usage_id.block_id, LocalId):
# a LocalId indicates that this block hasn't been persisted yet, and is instead stored
# in-memory in the local_modules dictionary.
return self._cds.local_modules[usage_id].scope_ids.def_id
else:
block_key = BlockKey.from_usage_key(usage_id)
module_data = self._cds.get_module_data(block_key, usage_id.course_key)
if module_data.definition is not None:
return DefinitionLocator(usage_id.block_type, module_data.definition)
else:
raise ValueError("All non-local blocks should have a definition specified") | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true

# Polymorphic association whose owner is resolved through custom columns:
# :imageable_identifier holds the owner's key and :imageable_class its class
# name, instead of the conventional imageable_id/imageable_type pair.
class Image < ActiveRecord::Base
  belongs_to :imageable, polymorphic: true, foreign_key: :imageable_identifier, foreign_type: :imageable_class
end
/*
* AltiVec optimizations for libjpeg-turbo
*
* Copyright (C) 2014, D. R. Commander. All Rights Reserved.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
/* RGB --> YCC CONVERSION */

#include "jsimd_altivec.h"

/* 16-bit fixed-point representations of the RGB->YCbCr coefficients:
 * FIX(x) = round(x * 2^16)  (SCALEBITS == 16 below). */
#define F_0_081 5329 /* FIX(0.08131) */
#define F_0_114 7471 /* FIX(0.11400) */
#define F_0_168 11059 /* FIX(0.16874) */
#define F_0_250 16384 /* FIX(0.25000) */
#define F_0_299 19595 /* FIX(0.29900) */
#define F_0_331 21709 /* FIX(0.33126) */
#define F_0_418 27439 /* FIX(0.41869) */
#define F_0_500 32768 /* FIX(0.50000) */
#define F_0_587 38470 /* FIX(0.58700) */
#define F_0_337 (F_0_587 - F_0_250) /* FIX(0.58700) - FIX(0.25000) */

/* Intermediate results carry SCALEBITS fractional bits; ONE_HALF is the
 * rounding constant added before those bits are shifted away. */
#define SCALEBITS 16
#define ONE_HALF (1 << (SCALEBITS - 1))

/* Byte-gather index tables consumed by jccolext-altivec.c (re-included
 * below once per pixel format).  Each 16-entry list selects bytes out of
 * interleaved pixel data.  3-byte/pixel formats define four phase-shifted
 * tables (start offsets 0, 12, 8, 4 — 16 is not a multiple of 3), while
 * 4-byte/pixel formats need only one; note that 4-byte tables never select
 * the padding byte (indices 3, 7, 11, 15 are skipped for RGBX). */
#define RGBG_INDEX0 \
  { 0, 1, 3, 4, 6, 7, 9, 10, 2, 1, 5, 4, 8, 7, 11, 10 }
#define RGBG_INDEX1 \
  { 12, 13, 15, 16, 18, 19, 21, 22, 14, 13, 17, 16, 20, 19, 23, 22 }
#define RGBG_INDEX2 \
  { 8, 9, 11, 12, 14, 15, 17, 18, 10, 9, 13, 12, 16, 15, 19, 18 }
#define RGBG_INDEX3 \
  { 4, 5, 7, 8, 10, 11, 13, 14, 6, 5, 9, 8, 12, 11, 15, 14 }

/* First instantiation: default RGB_PIXELSIZE (from jsimd_altivec.h) yields
 * the baseline jsimd_rgb_ycc_convert_altivec. */
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE

/* EXT_RGB reuses the RGB index tables defined above. */
#define RGB_PIXELSIZE EXT_RGB_PIXELSIZE
#define jsimd_rgb_ycc_convert_altivec jsimd_extrgb_ycc_convert_altivec
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE
#undef RGBG_INDEX0
#undef RGBG_INDEX1
#undef RGBG_INDEX2
#undef RGBG_INDEX3
#undef jsimd_rgb_ycc_convert_altivec

/* EXT_RGBX: 4 bytes/pixel, padding byte ignored. */
#define RGB_PIXELSIZE EXT_RGBX_PIXELSIZE
#define RGBG_INDEX \
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 1, 6, 5, 10, 9, 14, 13 }
#define jsimd_rgb_ycc_convert_altivec jsimd_extrgbx_ycc_convert_altivec
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE
#undef RGBG_INDEX
#undef jsimd_rgb_ycc_convert_altivec

/* EXT_BGR: 3 bytes/pixel; R and B byte positions swapped vs. RGB. */
#define RGB_PIXELSIZE EXT_BGR_PIXELSIZE
#define RGBG_INDEX0 \
  { 2, 1, 5, 4, 8, 7, 11, 10, 0, 1, 3, 4, 6, 7, 9, 10 }
#define RGBG_INDEX1 \
  { 14, 13, 17, 16, 20, 19, 23, 22, 12, 13, 15, 16, 18, 19, 21, 22 }
#define RGBG_INDEX2 \
  { 10, 9, 13, 12, 16, 15, 19, 18, 8, 9, 11, 12, 14, 15, 17, 18 }
#define RGBG_INDEX3 \
  { 6, 5, 9, 8, 12, 11, 15, 14, 4, 5, 7, 8, 10, 11, 13, 14 }
#define jsimd_rgb_ycc_convert_altivec jsimd_extbgr_ycc_convert_altivec
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE
#undef RGBG_INDEX0
#undef RGBG_INDEX1
#undef RGBG_INDEX2
#undef RGBG_INDEX3
#undef jsimd_rgb_ycc_convert_altivec

/* EXT_BGRX */
#define RGB_PIXELSIZE EXT_BGRX_PIXELSIZE
#define RGBG_INDEX \
  { 2, 1, 6, 5, 10, 9, 14, 13, 0, 1, 4, 5, 8, 9, 12, 13 }
#define jsimd_rgb_ycc_convert_altivec jsimd_extbgrx_ycc_convert_altivec
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE
#undef RGBG_INDEX
#undef jsimd_rgb_ycc_convert_altivec

/* EXT_XBGR */
#define RGB_PIXELSIZE EXT_XBGR_PIXELSIZE
#define RGBG_INDEX \
  { 3, 2, 7, 6, 11, 10, 15, 14, 1, 2, 5, 6, 9, 10, 13, 14 }
#define jsimd_rgb_ycc_convert_altivec jsimd_extxbgr_ycc_convert_altivec
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE
#undef RGBG_INDEX
#undef jsimd_rgb_ycc_convert_altivec

/* EXT_XRGB */
#define RGB_PIXELSIZE EXT_XRGB_PIXELSIZE
#define RGBG_INDEX \
  { 1, 2, 5, 6, 9, 10, 13, 14, 3, 2, 7, 6, 11, 10, 15, 14 }
#define jsimd_rgb_ycc_convert_altivec jsimd_extxrgb_ycc_convert_altivec
#include "jccolext-altivec.c"
#undef RGB_PIXELSIZE
#undef RGBG_INDEX
#undef jsimd_rgb_ycc_convert_altivec
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.testing.AbstractPackageSanityTests;
import java.lang.reflect.Method;
import java.nio.channels.FileChannel.MapMode;
import java.nio.charset.CharsetEncoder;
import org.jspecify.annotations.NullUnmarked;
/**
* Basic sanity tests for the entire package.
*
* @author Ben Yu
*/
@NullUnmarked
public class PackageSanityTests extends AbstractPackageSanityTests {
public PackageSanityTests() {
setDefault(BaseEncoding.class, BaseEncoding.base64());
setDefault(int.class, 32);
setDefault(String.class, "abcd");
setDefault(Method.class, AbstractPackageSanityTests.class.getDeclaredMethods()[0]);
setDefault(MapMode.class, MapMode.READ_ONLY);
setDefault(CharsetEncoder.class, UTF_8.newEncoder());
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/io/PackageSanityTests.java |
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql
import (
"context"
"sync"
"github.com/cockroachdb/cockroach/pkg/util/cancelchecker"
"github.com/cockroachdb/cockroach/pkg/util/metamorphic"
"github.com/cockroachdb/errors"
)
// ieResultReader is used to read internalExecutor results.
// It is managed by the rowsIterator.
type ieResultReader interface {
// firstResult returns the first result. The return values carry the same
// semantics as of nextResult. This method assumes that the writer is not
// currently blocked and waits for the initial result to be written.
firstResult(ctx context.Context) (_ ieIteratorResult, done bool, err error)
// nextResult returns the next result. Done will always be true if err
// is non-nil. Err will be non-nil if either close has been called or
// the passed context is finished.
nextResult(ctx context.Context) (_ ieIteratorResult, done bool, err error)
// close ensures that either writer has finished writing. The writer will
// receive a signal to drain, and close will drain the writer's channel.
close() error
}
// ieResultWriter is used by the internalExecutor to write results to an
// iterator.
type ieResultWriter interface {
// addResult adds a result. It may block until the next result is requested
// by the reader, depending on the synchronization strategy.
addResult(ctx context.Context, result ieIteratorResult) error
// finish is used to indicate that the writer is done writing rows.
finish()
}
// asyncIEResultChannelBufferSize is the buffer size of an async
// ieResultChannel's dataCh: 32 in production builds, randomized within
// [1, 32] by the metamorphic framework in test builds.
var asyncIEResultChannelBufferSize = metamorphic.ConstantWithTestRange(
	"async-IE-result-channel-buffer-size",
	32, /* defaultValue */
	1, /* min */
	32, /* max */
)
// newAsyncIEResultChannel constructs an ieResultChannel whose writer is
// decoupled from the reader: sends go through a buffered dataCh and the
// writer is never parked on waitCh (which stays nil).
func newAsyncIEResultChannel() *ieResultChannel {
	ch := &ieResultChannel{doneCh: make(chan struct{})}
	ch.dataCh = make(chan ieIteratorResult, asyncIEResultChannelBufferSize)
	return ch
}
// ieResultChannel is used to coordinate passing results from an
// internalExecutor to its corresponding iterator. It can be constructed to
// ensure that there is no concurrency between the reader and writer.
type ieResultChannel struct {

	// dataCh is the channel on which the connExecutor goroutine sends the rows
	// (in addResult) and, in the synchronous case, will block on waitCh after
	// each send. The iterator goroutine blocks on dataCh until there is
	// something to receive (rows or other metadata) and will return the data to
	// the caller. On the next call to Next(), the iterator goroutine unblocks
	// the producer and will block itself again. dataCh will be closed (in
	// finish()) when the connExecutor goroutine exits its run() loop whereas
	// waitCh is closed when closing the iterator.
	dataCh chan ieIteratorResult

	// waitCh is nil for async ieResultChannels. It is never closed. In all places
	// where the caller may interact with it the doneCh is also used. This policy
	// is in place to make it safe to unblock both the reader and the writer
	// without any hazards of a blocked reader attempting to send on a closed
	// channel.
	waitCh chan struct{}

	// doneCh is used to indicate that the ieResultReader has been closed and is
	// closed under the doneOnce, the writer will transition to draining. This
	// is crucial to ensure that a synchronous writer does not attempt to
	// continue to operate after the reader has called close.
	doneCh chan struct{}

	// doneErr records the first error observed while close() drains the
	// channel; it is the value returned from close().
	doneErr error

	// doneOnce guards both closing doneCh and draining, so close() is
	// idempotent.
	doneOnce sync.Once
}
// newSyncIEResultChannel constructs an ieResultChannel that permits no
// concurrency between writer and reader: dataCh is unbuffered, and the
// writer parks after each send until the reader signals it.
func newSyncIEResultChannel() *ieResultChannel {
	ch := &ieResultChannel{
		dataCh: make(chan ieIteratorResult),
		doneCh: make(chan struct{}),
	}
	// A non-nil waitCh is what marks the channel as synchronous (see async()).
	ch.waitCh = make(chan struct{})
	return ch
}
// firstResult implements ieResultReader. Note the subtle nil-error case: if
// doneCh has been closed but ctx is still live, getCtxErr returns nil and
// errors.Wrap(nil, ...) is nil, so the caller sees done=true with err=nil.
func (i *ieResultChannel) firstResult(
	ctx context.Context,
) (_ ieIteratorResult, done bool, err error) {
	// errors.Wrap returns nil if err is nil.
	const wrapMsg = "failed to read query result"
	// Surface context cancellation as the SQL-level "query canceled" error
	// rather than the raw context error.
	getCtxErr := func(ctx context.Context) error {
		if ctx.Err() == nil {
			return nil
		}
		return cancelchecker.QueryCanceledError
	}
	select {
	case <-ctx.Done():
		return ieIteratorResult{}, true, errors.Wrap(getCtxErr(ctx), wrapMsg)
	case <-i.doneCh:
		return ieIteratorResult{}, true, errors.Wrap(getCtxErr(ctx), wrapMsg)
	case res, ok := <-i.dataCh:
		if !ok {
			// dataCh was closed by finish(): the writer has no more results.
			return ieIteratorResult{}, true, errors.Wrap(getCtxErr(ctx), wrapMsg)
		}
		return res, false, nil
	}
}
// maybeUnblockWriter releases a synchronous writer that is parked in
// maybeBlock by sending on waitCh; it is a no-op for async channels. As in
// firstResult, done=true may be accompanied by a nil error when doneCh
// fired while the context is still live.
func (i *ieResultChannel) maybeUnblockWriter(ctx context.Context) (done bool, err error) {
	if i.async() {
		return false, nil
	}
	// errors.Wrap returns nil if ctx.Err() is nil.
	const wrapMsg = "maybe unblock writer"
	select {
	case <-ctx.Done():
		return true, errors.Wrap(ctx.Err(), wrapMsg)
	case <-i.doneCh:
		return true, errors.Wrap(ctx.Err(), wrapMsg)
	case i.waitCh <- struct{}{}:
		return false, nil
	}
}
// async reports whether this channel was built by newAsyncIEResultChannel:
// a nil waitCh means the writer is never parked after a send.
func (i *ieResultChannel) async() bool {
	return i.waitCh == nil
}
// nextResult implements ieResultReader: it first lets a parked synchronous
// writer produce the next result, then receives it exactly like firstResult.
func (i *ieResultChannel) nextResult(
	ctx context.Context,
) (ieIteratorResult, bool, error) {
	done, err := i.maybeUnblockWriter(ctx)
	if done {
		return ieIteratorResult{}, true, err
	}
	return i.firstResult(ctx)
}
// close implements ieResultReader: it signals the writer to drain (by
// closing doneCh) and then consumes the writer's channel until exhaustion,
// remembering the first error observed in doneErr.
// NOTE(review): context.TODO() appears deliberate so that draining is not
// cut short by the caller's (possibly canceled) context — confirm.
func (i *ieResultChannel) close() error {
	i.doneOnce.Do(func() {
		close(i.doneCh)
		for {
			// In the async case, res might contain some actual rows, but we're
			// not interested in them; in the sync case, only errors are
			// expected to be retrieved from now on because the writer
			// transitions to draining.
			res, done, err := i.nextResult(context.TODO())
			if i.doneErr == nil {
				if res.err != nil {
					i.doneErr = res.err
				} else if err != nil {
					i.doneErr = err
				}
			}
			if done {
				return
			}
		}
	})
	return i.doneErr
}
// errIEResultChannelClosed is returned by the writer when the reader has closed
// ieResultChannel. The error indicates to the writer to drain the query
// execution, but the reader won't propagate it further.
var errIEResultChannelClosed = errors.New("ieResultReader closed")

// addResult implements ieResultWriter: it sends result to the reader and,
// in the synchronous case, parks in maybeBlock until the reader asks for
// the next result or shuts down.
func (i *ieResultChannel) addResult(ctx context.Context, result ieIteratorResult) error {
	// errors.Wrap returns nil if ctx.Err() is nil.
	const wrapMsg = "add result"
	select {
	case <-ctx.Done():
		return errors.Wrap(ctx.Err(), wrapMsg)
	case <-i.doneCh:
		// Prefer the context error if there is one.
		if ctxErr := ctx.Err(); ctxErr != nil {
			// Use the captured ctxErr (previously this re-read ctx.Err()),
			// matching the identical branch in maybeBlock.
			return errors.Wrap(ctxErr, wrapMsg)
		}
		return errIEResultChannelClosed
	case i.dataCh <- result:
	}
	return i.maybeBlock(ctx)
}
// maybeBlock parks a synchronous writer after a send until the reader either
// requests the next result (via waitCh) or shuts down (doneCh). It is a
// no-op for async channels.
func (i *ieResultChannel) maybeBlock(ctx context.Context) error {
	// errors.Wrap returns nil if ctx.Err() is nil.
	const wrapMsg = "maybe block"
	if i.async() {
		return nil
	}
	select {
	case <-ctx.Done():
		return errors.Wrap(ctx.Err(), wrapMsg)
	case <-i.doneCh:
		// Prefer the context error if there is one.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return errors.Wrap(ctxErr, wrapMsg)
		}
		return errIEResultChannelClosed
	case <-i.waitCh:
		return nil
	}
}
// finish implements ieResultWriter. Closing dataCh makes the reader's
// receive observe ok==false and report done; it must therefore be called
// exactly once, after the last addResult (a second close would panic).
func (i *ieResultChannel) finish() {
	close(i.dataCh)
}
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v0alpha1.Repeating-a-row-with-a-repeating-horizontal-panel.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "",
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Panel Title $horizontal",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Row title $row",
"collapse": false,
"repeat": {
"mode": "variable",
"value": "row"
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 8,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-4"
},
"repeat": {
"mode": "variable",
"value": "horizontal",
"direction": "h"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "utc",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Repeating a row with a repeating horizontal panel",
"variables": [
{
"kind": "CustomVariable",
"spec": {
"name": "vertical",
"query": "1,2,3",
"current": {
"text": [
"All"
],
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "1",
"value": "1"
},
{
"selected": false,
"text": "2",
"value": "2"
},
{
"selected": false,
"text": "3",
"value": "3"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
},
{
"kind": "CustomVariable",
"spec": {
"name": "horizontal",
"query": "1,2,3",
"current": {
"text": [
"All"
],
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "1",
"value": "1"
},
{
"selected": false,
"text": "2",
"value": "2"
},
{
"selected": false,
"text": "3",
"value": "3"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
},
{
"kind": "CustomVariable",
"spec": {
"name": "row",
"query": "1,2,3",
"current": {
"text": [
"All"
],
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "1",
"value": "1"
},
{
"selected": false,
"text": "2",
"value": "2"
},
{
"selected": false,
"text": "3",
"value": "3"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
}
]
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/e2e-repeats/v0alpha1.Repeating-a-row-with-a-repeating-horizontal-panel.v42.v2alpha1.json |
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
)
// CategorizeEndpoints returns:
//
// - The service's usable Cluster-traffic-policy endpoints (taking topology into account, if
// relevant). This will be nil if the service does not ever use Cluster traffic policy.
//
// - The service's usable Local-traffic-policy endpoints. This will be nil if the
// service does not ever use Local traffic policy.
//
// - The combined list of all endpoints reachable from this node (which is the union of the
// previous two lists, but in the case where it is identical to one or the other, we avoid
// allocating a separate list).
//
// - An indication of whether the service has any endpoints reachable from anywhere in the
// cluster. (This may be true even if allReachableEndpoints is empty.)
//
// "Usable endpoints" means Ready endpoints by default, but will fall back to
// Serving-Terminating endpoints (independently for Cluster and Local) if no Ready
// endpoints are available.
//
// Note: NodeTopologyConfig.handleNodeEvent (pkg/proxy/config) filters topology labels
// before notifying proxiers. If you modify the logic over here to watch other endpoint
// types or labels, ensure the filtering logic in NodeTopologyConfig is updated accordingly.
func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeName string, topologyLabels map[string]string) (clusterEndpoints, localEndpoints, allReachableEndpoints []Endpoint, hasAnyEndpoints bool) {
	if len(endpoints) == 0 {
		// If there are no endpoints, we have nothing to categorize
		return
	}

	var topologyMode string
	// useServingTerminatingEndpoints records whether the Local-policy list
	// fell back to serving-terminating endpoints (set in the Local branch
	// below, consulted when deciding whether the lists can be merged cheaply).
	var useServingTerminatingEndpoints bool

	if svcInfo.UsesClusterEndpoints() {
		zone := topologyLabels[v1.LabelTopologyZone]
		topologyMode = topologyModeFromHints(svcInfo, endpoints, nodeName, zone)
		// Keep Ready endpoints that are reachable under the chosen topology.
		clusterEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool {
			if !ep.IsReady() {
				return false
			}
			if !availableForTopology(ep, topologyMode, nodeName, zone) {
				return false
			}
			return true
		})

		// If we didn't get any endpoints, try again using terminating endpoints.
		// (Note that we would already have chosen to ignore topology if there
		// were no ready endpoints for the given topology, so the problem at this
		// point must be that there are no ready endpoints anywhere.)
		if len(clusterEndpoints) == 0 {
			clusterEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool {
				if ep.IsServing() && ep.IsTerminating() {
					return true
				}
				return false
			})
		}

		// If there are any Ready endpoints anywhere in the cluster, we are
		// guaranteed to get one in clusterEndpoints.
		if len(clusterEndpoints) > 0 {
			hasAnyEndpoints = true
		}
	}

	if !svcInfo.UsesLocalEndpoints() {
		allReachableEndpoints = clusterEndpoints
		return
	}

	// Pre-scan the endpoints, to figure out which type of endpoint Local
	// traffic policy will use, and also to see if there are any usable
	// endpoints anywhere in the cluster.
	var hasLocalReadyEndpoints, hasLocalServingTerminatingEndpoints bool
	for _, ep := range endpoints {
		if ep.IsReady() {
			hasAnyEndpoints = true
			if ep.IsLocal() {
				hasLocalReadyEndpoints = true
			}
		} else if ep.IsServing() && ep.IsTerminating() {
			hasAnyEndpoints = true
			if ep.IsLocal() {
				hasLocalServingTerminatingEndpoints = true
			}
		}
	}

	// Ready local endpoints take priority; serving-terminating ones are only
	// used when no local endpoint is Ready.
	if hasLocalReadyEndpoints {
		localEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool {
			return ep.IsLocal() && ep.IsReady()
		})
	} else if hasLocalServingTerminatingEndpoints {
		useServingTerminatingEndpoints = true
		localEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool {
			return ep.IsLocal() && ep.IsServing() && ep.IsTerminating()
		})
	}

	if !svcInfo.UsesClusterEndpoints() {
		allReachableEndpoints = localEndpoints
		return
	}

	if topologyMode == "" && !useServingTerminatingEndpoints {
		// !useServingTerminatingEndpoints means that localEndpoints contains only
		// Ready endpoints. topologyMode=="" means that clusterEndpoints contains *every*
		// Ready endpoint. So clusterEndpoints must be a superset of localEndpoints.
		allReachableEndpoints = clusterEndpoints
		return
	}

	// clusterEndpoints may contain remote endpoints that aren't in localEndpoints, while
	// localEndpoints may contain terminating or topologically-unavailable local endpoints
	// that aren't in clusterEndpoints. So we have to merge the two lists.
	endpointsMap := make(map[string]Endpoint, len(clusterEndpoints)+len(localEndpoints))
	for _, ep := range clusterEndpoints {
		endpointsMap[ep.String()] = ep
	}
	for _, ep := range localEndpoints {
		endpointsMap[ep.String()] = ep
	}
	// Go map iteration order is random, so the order of allReachableEndpoints
	// is unspecified in the merged case.
	allReachableEndpoints = make([]Endpoint, 0, len(endpointsMap))
	for _, ep := range endpointsMap {
		allReachableEndpoints = append(allReachableEndpoints, ep)
	}

	return
}
// topologyModeFromHints returns a topology mode ("", "PreferSameZone", or
// "PreferSameNode") based on the Endpoint hints:
//   - If the PreferSameTrafficDistribution feature gate is enabled, and every ready
//     endpoint has a node hint, and at least one endpoint is hinted for this node, then
//     it returns "PreferSameNode".
//   - Otherwise, if every ready endpoint has a zone hint, and at least one endpoint is
//     hinted for this node's zone, then it returns "PreferSameZone".
//   - Otherwise it returns "" (meaning, no topology / default traffic distribution).
func topologyModeFromHints(svcInfo ServicePort, endpoints []Endpoint, nodeName, zone string) string {
	hasReadyEndpoints := false
	hasEndpointForNode := false
	allEndpointsHaveNodeHints := true
	hasEndpointForZone := false
	allEndpointsHaveZoneHints := true
	// Only Ready endpoints participate in the hint tally.
	for _, endpoint := range endpoints {
		if !endpoint.IsReady() {
			continue
		}
		hasReadyEndpoints = true

		if endpoint.NodeHints().Len() == 0 {
			allEndpointsHaveNodeHints = false
		} else if endpoint.NodeHints().Has(nodeName) {
			hasEndpointForNode = true
		}
		if endpoint.ZoneHints().Len() == 0 {
			allEndpointsHaveZoneHints = false
		} else if endpoint.ZoneHints().Has(zone) {
			hasEndpointForZone = true
		}
	}

	// If no ready endpoints exist, there are no hints to consider
	if !hasReadyEndpoints {
		return ""
	}

	if utilfeature.DefaultFeatureGate.Enabled(features.PreferSameTrafficDistribution) {
		if allEndpointsHaveNodeHints {
			if hasEndpointForNode {
				return v1.ServiceTrafficDistributionPreferSameNode
			}
			klog.V(2).InfoS("Ignoring same-node topology hints for service since no hints were provided for node", "service", svcInfo, "node", nodeName)
		} else {
			klog.V(7).InfoS("Ignoring same-node topology hints for service since one or more endpoints is missing a node hint", "service", svcInfo)
		}
	}

	if allEndpointsHaveZoneHints {
		if hasEndpointForZone {
			return v1.ServiceTrafficDistributionPreferSameZone
		}
		if zone == "" {
			klog.V(2).InfoS("Ignoring same-zone topology hints for service since node is missing label", "service", svcInfo, "label", v1.LabelTopologyZone)
		} else {
			klog.V(2).InfoS("Ignoring same-zone topology hints for service since no hints were provided for zone", "service", svcInfo, "zone", zone)
		}
	} else {
		// Consistency: pass svcInfo directly (previously svcInfo.String()),
		// matching every other structured-log call in this function; klog
		// formats Stringer values itself.
		klog.V(7).InfoS("Ignoring same-zone topology hints for service since one or more endpoints is missing a zone hint", "service", svcInfo)
	}

	return ""
}
// availableForTopology reports whether endpoint may be used from this node
// under the given topologyMode. (No fallback happens here; fallback is part
// of choosing the mode, not of applying it.)
func availableForTopology(endpoint Endpoint, topologyMode, nodeName, zone string) bool {
	if topologyMode == "" {
		// No topology constraint: every endpoint qualifies.
		return true
	}
	if topologyMode == v1.ServiceTrafficDistributionPreferSameNode {
		return endpoint.NodeHints().Has(nodeName)
	}
	if topologyMode == v1.ServiceTrafficDistributionPreferSameZone {
		return endpoint.ZoneHints().Has(zone)
	}
	// Unknown mode: treat the endpoint as unavailable.
	return false
}
// filterEndpoints filters endpoints according to predicate
func filterEndpoints(endpoints []Endpoint, predicate func(Endpoint) bool) []Endpoint {
filteredEndpoints := make([]Endpoint, 0, len(endpoints))
for _, ep := range endpoints {
if predicate(ep) {
filteredEndpoints = append(filteredEndpoints, ep)
}
}
return filteredEndpoints
} | go | github | https://github.com/kubernetes/kubernetes | pkg/proxy/topology.go |
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"math"
"sort"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/testutil"
)
// backfillSample is a flattened (timestamp, value, series labels) triple used
// to compare the contents of a backfilled TSDB against expectations.
type backfillSample struct {
	Timestamp int64
	Value float64
	Labels labels.Labels
}
// sortSamples orders samples by timestamp, breaking ties by value.
func sortSamples(samples []backfillSample) {
	sort.Slice(samples, func(a, b int) bool {
		if samples[a].Timestamp == samples[b].Timestamp {
			return samples[a].Value < samples[b].Value
		}
		return samples[a].Timestamp < samples[b].Timestamp
	})
}
// queryAllSeries returns every float sample of every series matched by a
// match-all regexp selector, flattened into backfillSamples. The two int64
// parameters are currently unused.
func queryAllSeries(t testing.TB, q storage.Querier, _, _ int64) []backfillSample {
	// An empty label name with the ".*" regexp matches all series.
	ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
	samples := []backfillSample{}
	for ss.Next() {
		series := ss.At()
		it := series.Iterator(nil)
		require.NoError(t, it.Err())
		// Only float samples are collected; iteration stops at the first
		// non-float value type.
		for it.Next() == chunkenc.ValFloat {
			ts, v := it.At()
			samples = append(samples, backfillSample{Timestamp: ts, Value: v, Labels: series.Labels()})
		}
	}
	return samples
}
// testBlocks asserts that db contains exactly expectedNumBlocks blocks, that
// each block's data fits within one aligned expectedBlockDuration window, and
// that querying the full time range yields exactly expectedSamples, spanning
// [expectedMinTime, expectedMaxTime]. Both sample slices are sorted in place.
func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, expectedBlockDuration int64, expectedSamples []backfillSample, expectedNumBlocks int) {
	blocks := db.Blocks()
	require.Len(t, blocks, expectedNumBlocks, "did not create correct number of blocks")

	for i, block := range blocks {
		// The -1 treats MaxTime as exclusive — TODO confirm against the
		// tsdb block time-range convention.
		require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i)
	}

	q, err := db.Querier(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, q.Close())
	}()

	// Sort both actual and expected so comparison is order-independent.
	allSamples := queryAllSeries(t, q, expectedMinTime, expectedMaxTime)
	sortSamples(allSamples)
	sortSamples(expectedSamples)
	testutil.RequireEqual(t, expectedSamples, allSamples, "did not create correct samples")

	if len(allSamples) > 0 {
		require.Equal(t, expectedMinTime, allSamples[0].Timestamp, "timestamp of first sample is not the expected minimum time")
		require.Equal(t, expectedMaxTime, allSamples[len(allSamples)-1].Timestamp, "timestamp of last sample is not the expected maximum time")
	}
}
// TestBackfill is a table-driven test for the promtool backfill command: each
// case feeds OpenMetrics text to backfill, then (for expected successes)
// opens the produced TSDB and verifies block count, block alignment, and the
// exact set of samples via testBlocks.
func TestBackfill(t *testing.T) {
	t.Parallel()
	tests := []struct {
		ToParse              string            // OpenMetrics input fed to backfill
		IsOk                 bool              // whether backfill is expected to succeed
		Description          string
		MaxSamplesInAppender int               // appender commit batch size
		MaxBlockDuration     time.Duration     // zero selects the default block duration
		Labels               map[string]string // extra labels applied to every series
		Expected             struct {
			MinTime       int64
			MaxTime       int64
			NumBlocks     int
			BlockDuration int64
			Samples       []backfillSample
		}
	}{
		{
			ToParse:              `# EOF`,
			IsOk:                 true,
			Description:          "Empty file.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       math.MaxInt64,
				MaxTime:       math.MinInt64,
				NumBlocks:     0,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples:       []backfillSample{},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1565133713.989
http_requests_total{code="400"} 1 1565133713.990
# EOF
`,
			IsOk:                 true,
			Description:          "Multiple samples with different timestamp for different series.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1565133713989,
				MaxTime:       1565133713990,
				NumBlocks:     1,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1565133713989,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565133713990,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1565133713.989
http_requests_total{code="200"} 1022 1565392913.989
http_requests_total{code="200"} 1023 1565652113.989
# EOF
`,
			IsOk:                 true,
			Description:          "Multiple samples separated by 3 days.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1565133713989,
				MaxTime:       1565652113989,
				NumBlocks:     3,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1565133713989,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565392913989,
						Value:     1022,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565652113989,
						Value:     1023,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
				},
			},
		},
		{
			ToParse: `# TYPE go info
go_info{version="go1.15.3"} 1 1565392913.989
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1565133713.989
# EOF
`,
			IsOk:                 true,
			Description:          "Unordered samples from multiple series, which end in different blocks.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1565133713989,
				MaxTime:       1565392913989,
				NumBlocks:     2,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1565133713989,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565392913989,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "go_info", "version", "go1.15.3"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1565133713.989
http_requests_total{code="200"} 1 1565133714.989
http_requests_total{code="400"} 2 1565133715.989
# EOF
`,
			IsOk:                 true,
			Description:          "Multiple samples with different timestamp for the same series.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1565133713989,
				MaxTime:       1565133715989,
				NumBlocks:     1,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1565133713989,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565133714989,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565133715989,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1624463088.000
http_requests_total{code="200"} 1 1627055153.000
http_requests_total{code="400"} 2 1627056153.000
# EOF
`,
			IsOk:                 true,
			Description:          "Long maximum block duration puts all data into one block.",
			MaxSamplesInAppender: 5000,
			MaxBlockDuration:     2048 * time.Hour,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1624463088000,
				MaxTime:       1627056153000,
				NumBlocks:     1,
				BlockDuration: int64(1458 * time.Hour / time.Millisecond),
				Samples: []backfillSample{
					{
						Timestamp: 1624463088000,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1627055153000,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1627056153000,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1 1624463088.000
http_requests_total{code="200"} 2 1629503088.000
http_requests_total{code="200"} 3 1629863088.000
# EOF
`,
			IsOk:                 true,
			Description:          "Long maximum block duration puts all data into two blocks.",
			MaxSamplesInAppender: 5000,
			MaxBlockDuration:     2048 * time.Hour,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1624463088000,
				MaxTime:       1629863088000,
				NumBlocks:     2,
				BlockDuration: int64(1458 * time.Hour / time.Millisecond),
				Samples: []backfillSample{
					{
						Timestamp: 1624463088000,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1629503088000,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1629863088000,
						Value:     3,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1 1624463088.000
http_requests_total{code="200"} 2 1765943088.000
http_requests_total{code="200"} 3 1768463088.000
# EOF
`,
			IsOk:                 true,
			Description:          "Maximum block duration longer than longest possible duration, uses largest duration, puts all data into two blocks.",
			MaxSamplesInAppender: 5000,
			MaxBlockDuration:     200000 * time.Hour,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1624463088000,
				MaxTime:       1768463088000,
				NumBlocks:     2,
				BlockDuration: int64(39366 * time.Hour / time.Millisecond),
				Samples: []backfillSample{
					{
						Timestamp: 1624463088000,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1765943088000,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1768463088000,
						Value:     3,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1565133713.989
http_requests_total{code="200"} 1022 1565144513.989
http_requests_total{code="400"} 2 1565155313.989
http_requests_total{code="400"} 1 1565166113.989
# EOF
`,
			IsOk:                 true,
			Description:          "Multiple samples that end up in different blocks.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1565133713989,
				MaxTime:       1565166113989,
				NumBlocks:     4,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1565133713989,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565144513989,
						Value:     1022,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565155313989,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
					{
						Timestamp: 1565166113989,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 1565133713.989
http_requests_total{code="200"} 1022 1565133714
http_requests_total{code="200"} 1023 1565133716
http_requests_total{code="200"} 1022 1565144513.989
http_requests_total{code="400"} 2 1565155313.989
http_requests_total{code="400"} 3 1565155314
http_requests_total{code="400"} 1 1565166113.989
# EOF
`,
			IsOk:                 true,
			Description:          "Number of samples are greater than the sample batch size.",
			MaxSamplesInAppender: 2,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1565133713989,
				MaxTime:       1565166113989,
				NumBlocks:     4,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1565133713989,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565133714000,
						Value:     1022,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565133716000,
						Value:     1023,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565144513989,
						Value:     1022,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 1565155313989,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
					{
						Timestamp: 1565155314000,
						Value:     3,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
					{
						Timestamp: 1565166113989,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
				},
			},
		},
		{ // For https://github.com/prometheus/prometheus/issues/8476.
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1021 0
http_requests_total{code="200"} 1022 7199
http_requests_total{code="400"} 1023 0
http_requests_total{code="400"} 1024 7199
# EOF
`,
			IsOk:                 true,
			Description:          "One series spanning 2h in same block should not cause problems to other series.",
			MaxSamplesInAppender: 1,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       0,
				MaxTime:       7199000,
				NumBlocks:     1,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 0,
						Value:     1021,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 7199000,
						Value:     1022,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
					},
					{
						Timestamp: 0,
						Value:     1023,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
					{
						Timestamp: 7199000,
						Value:     1024,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
					},
				},
			},
		},
		{
			ToParse: `no_help_no_type{foo="bar"} 42 6900
# EOF
`,
			IsOk:                 true,
			Description:          "Sample with no #HELP or #TYPE keyword.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       6900000,
				MaxTime:       6900000,
				NumBlocks:     1,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 6900000,
						Value:     42,
						Labels:    labels.FromStrings("__name__", "no_help_no_type", "foo", "bar"),
					},
				},
			},
		},
		{
			ToParse: `no_newline_after_eof 42 6900
# EOF`,
			IsOk:                 true,
			Description:          "Sample without newline after # EOF.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       6900000,
				MaxTime:       6900000,
				NumBlocks:     1,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 6900000,
						Value:     42,
						Labels:    labels.FromStrings("__name__", "no_newline_after_eof"),
					},
				},
			},
		},
		{
			ToParse: `bare_metric 42.24 1001
# EOF
`,
			IsOk:                 true,
			Description:          "Bare sample.",
			MaxSamplesInAppender: 5000,
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1001000,
				MaxTime:       1001000,
				NumBlocks:     1,
				BlockDuration: tsdb.DefaultBlockDuration,
				Samples: []backfillSample{
					{
						Timestamp: 1001000,
						Value:     42.24,
						Labels:    labels.FromStrings("__name__", "bare_metric"),
					},
				},
			},
		},
		{
			ToParse: `# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1 1624463088.000
http_requests_total{code="200"} 2 1629503088.000
http_requests_total{code="200"} 3 1629863088.000
# EOF
`,
			IsOk:                 true,
			Description:          "Sample with external labels.",
			MaxSamplesInAppender: 5000,
			MaxBlockDuration:     2048 * time.Hour,
			Labels:               map[string]string{"cluster_id": "123", "org_id": "999"},
			Expected: struct {
				MinTime       int64
				MaxTime       int64
				NumBlocks     int
				BlockDuration int64
				Samples       []backfillSample
			}{
				MinTime:       1624463088000,
				MaxTime:       1629863088000,
				NumBlocks:     2,
				BlockDuration: int64(1458 * time.Hour / time.Millisecond),
				Samples: []backfillSample{
					{
						Timestamp: 1624463088000,
						Value:     1,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
					},
					{
						Timestamp: 1629503088000,
						Value:     2,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
					},
					{
						Timestamp: 1629863088000,
						Value:     3,
						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
					},
				},
			},
		},
		{
			ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.01"} 3102
rpc_duration_seconds{quantile="0.05"} 3272
# EOF
`,
			IsOk:        false,
			Description: "Does not have timestamp.",
		},
		{
			ToParse: `# HELP bad_metric This a bad metric
# TYPE bad_metric bad_type
bad_metric{type="has a bad type information"} 0.0 111
# EOF
`,
			IsOk:        false,
			Description: "Has a bad type information.",
		},
		{
			ToParse: `# HELP no_nl This test has no newline so will fail
# TYPE no_nl gauge
no_nl{type="no newline"}
# EOF
`,
			IsOk:        false,
			Description: "No newline.",
		},
		{
			ToParse: `# HELP no_eof This test has no EOF so will fail
# TYPE no_eof gauge
no_eof 1 1
`,
			IsOk:        false,
			Description: "No EOF.",
		},
		{
			ToParse: `# HELP after_eof There is data after EOF.
# TYPE after_eof gauge
after_eof 1 1
# EOF
after_eof 1 2
`,
			IsOk:        false,
			Description: "Data after EOF.",
		},
	}
	for _, test := range tests {
		t.Run(test.Description, func(t *testing.T) {
			t.Parallel()
			t.Logf("Test:%s", test.Description)
			outputDir := t.TempDir()
			err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration, test.Labels)
			if !test.IsOk {
				require.Error(t, err, test.Description)
				return
			}
			require.NoError(t, err)
			options := tsdb.DefaultOptions()
			options.RetentionDuration = int64(10 * 365 * 24 * time.Hour / time.Millisecond) // maximum duration tests require a long retention
			db, err := tsdb.Open(outputDir, nil, nil, options, nil)
			require.NoError(t, err)
			defer func() {
				require.NoError(t, db.Close())
			}()
			testBlocks(t, db, test.Expected.MinTime, test.Expected.MaxTime, test.Expected.BlockDuration, test.Expected.Samples, test.Expected.NumBlocks)
		})
	}
}
import _parca
import os
import os.path
from MatrixInfo import *
class alignment:
    """Pure-Python snapshot of an alignment object from the C extension.

    Copies the m/d/g scores and the aligned index pairs out of the
    extension object so the result lives independently of it.
    """
    m = int()
    g = int()
    d = int()
    data = list()
    def __init__(self, c_al):
        self.m = c_al.m
        self.d = c_al.d
        self.g = c_al.g
        # Coerce every (a, b) pair to plain ints so the stored pairs are
        # ordinary Python tuples.
        self.data = [(int(a), int(b)) for a, b in c_al.data]
def _shift_matrix(matrix, shift):
r = dict()
for key in matrix.keys():
val = matrix[key]
val += shift
r[key] = int(val)
return r
# Module-wide aligner instance from the C extension. Scratch files are
# namespaced by PID ("parca-<pid>") so concurrent processes do not collide.
_aligner = _parca.aligner(40, "parca-"+str(os.getpid()), 350)
def _utf8_matrix_keys(matrix):
return matrix
r = dict()
for a, b in matrix.keys():
val = matrix[a,b]
ua = a.encode('utf-8')
ub = b.encode('utf-8')
key = ua, ub
r[ua,ub] = val
return r
def set_score_matrix(matrix):
    """Install *matrix* (after key conversion) on the shared aligner."""
    global _aligner
    converted = _utf8_matrix_keys(matrix)
    _aligner.set_score_matrix(converted)
def process_direct_stage(sequence1, sequence2,
                         gep=1.0,
                         matrix=blosum62,
                         limit=40):
    """Run the forward (direct) alignment stage and return per-result info.

    Reinitialises the shared aligner, installs *matrix* shifted by
    ``int(gep * 2)`` (the gap-extension cost folded into the scores),
    aligns the two sequences, and returns the info record for every
    computed result.
    """
    global _aligner
    _aligner.init(limit, "parca-" + str(os.getpid()), 350)
    set_score_matrix(_shift_matrix(matrix, int(gep * 2)))
    _aligner.process_direct_stage(sequence1, sequence2)
    return [_aligner.get_alignment_info(i)
            for i in range(_aligner.result_count())]
def process_backward_stage(no):
    """Run the backward stage for result *no*; return its (a, b) index pairs."""
    global _aligner
    result = _aligner.get_alignment(no)
    return result.data
def get_pareto_alignments(sequence1, sequence2,
                          gep=1.0,
                          matrix=blosum62,
                          limit=40):
    """Align two sequences and return every Pareto-optimal alignment.

    Convenience wrapper: runs the direct stage, then wraps each C-side
    result in a Python-side ``alignment`` instance.
    """
    global _aligner
    process_direct_stage(sequence1, sequence2, gep, matrix, limit)
    return [alignment(_aligner.get_alignment(i))
            for i in range(_aligner.result_count())]
def set_temporary_directory(dirname):
    """Point the aligner's scratch files at *dirname* (made absolute, UTF-8)."""
    global _aligner
    absolute = os.path.abspath(dirname)
    _aligner.set_temporary_directory(absolute.encode("utf-8"))
def get_last_error():
    """Return the most recent error message recorded by the C aligner."""
    global _aligner
    message = _aligner.get_last_error()
    return message
# perform self-tests on import
#_aligner.selftest_matrix_1()
#_aligner.selftest_matrix_1()
#_aligner.selftest_matrix_1() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""Tests for the teams API at the HTTP request level."""
import json
from datetime import datetime
import pytz
from dateutil import parser
import ddt
from elasticsearch.exceptions import ConnectionError
from mock import patch
from search.search_engine_base import SearchEngine
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db.models.signals import post_save
from django.utils import translation
from nose.plugins.attrib import attr
import unittest
from rest_framework.test import APITestCase, APIClient
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.factories import StaffFactory
from common.test.utils import skip_signal
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from util.testing import EventTestMixin
from .factories import CourseTeamFactory, LAST_ACTIVITY_AT
from ..models import CourseTeamMembership
from ..search_indexes import CourseTeamIndexer, CourseTeam, course_team_post_save_callback
from django_comment_common.models import Role, FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
@attr('shard_1')
class TestDashboard(SharedModuleStoreTestCase):
    """Tests for the Teams dashboard."""
    # Password shared by every user created in these tests.
    test_password = "test"
    # Number of topics configured on the shared test course.
    NUM_TOPICS = 10
    @classmethod
    def setUpClass(cls):
        # Create one course with NUM_TOPICS team topics, shared by all tests.
        super(TestDashboard, cls).setUpClass()
        cls.course = CourseFactory.create(
            teams_configuration={
                "max_team_size": 10,
                "topics": [
                    {
                        "name": "Topic {}".format(topic_id),
                        "id": topic_id,
                        "description": "Description for topic {}".format(topic_id)
                    }
                    for topic_id in range(cls.NUM_TOPICS)
                ]
            }
        )
    def setUp(self):
        """
        Set up tests
        """
        super(TestDashboard, self).setUp()
        # will be assigned to self.client by default
        self.user = UserFactory.create(password=self.test_password)
        self.teams_url = reverse('teams_dashboard', args=[self.course.id])
    def test_anonymous(self):
        """Verifies that an anonymous client cannot access the team
        dashboard, and is redirected to the login page."""
        anonymous_client = APIClient()
        response = anonymous_client.get(self.teams_url)
        redirect_url = '{0}?next={1}'.format(settings.LOGIN_URL, self.teams_url)
        self.assertRedirects(response, redirect_url)
    def test_not_enrolled_not_staff(self):
        """ Verifies that a student who is not enrolled cannot access the team dashboard. """
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.client.get(self.teams_url)
        self.assertEqual(404, response.status_code)
    def test_not_enrolled_staff(self):
        """
        Verifies that a user with global access who is not enrolled in the course can access the team dashboard.
        """
        staff_user = UserFactory(is_staff=True, password=self.test_password)
        staff_client = APIClient()
        staff_client.login(username=staff_user.username, password=self.test_password)
        response = staff_client.get(self.teams_url)
        self.assertContains(response, "TeamsTabFactory", status_code=200)
    def test_enrolled_not_staff(self):
        """
        Verifies that a user without global access who is enrolled in the course can access the team dashboard.
        """
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.client.get(self.teams_url)
        self.assertContains(response, "TeamsTabFactory", status_code=200)
    def test_enrolled_teams_not_enabled(self):
        """
        Verifies that a user without global access who is enrolled in the course cannot access the team dashboard
        if the teams feature is not enabled.
        """
        course = CourseFactory.create()
        teams_url = reverse('teams_dashboard', args=[course.id])
        CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.client.get(teams_url)
        self.assertEqual(404, response.status_code)
    @unittest.skip("Fix this - getting unreliable query counts")
    def test_query_counts(self):
        """Pins the number of DB queries issued when rendering the dashboard."""
        # Enroll in the course and log in
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
        self.client.login(username=self.user.username, password=self.test_password)
        # Check the query count on the dashboard with no teams
        with self.assertNumQueries(18):
            self.client.get(self.teams_url)
        # Create some teams
        for topic_id in range(self.NUM_TOPICS):
            team = CourseTeamFactory.create(
                name=u"Team for topic {}".format(topic_id),
                course_id=self.course.id,
                topic_id=topic_id,
            )
        # Add the user to the last team
        team.add_user(self.user)
        # Check the query count on the dashboard again
        with self.assertNumQueries(24):
            self.client.get(self.teams_url)
    def test_bad_course_id(self):
        """
        Verifies expected behavior when course_id does not reference an existing course or is invalid.
        """
        bad_org = "badorgxxx"
        bad_team_url = self.teams_url.replace(self.course.id.org, bad_org)
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.client.get(bad_team_url)
        self.assertEqual(404, response.status_code)
        bad_team_url = bad_team_url.replace(bad_org, "invalid/course/id")
        response = self.client.get(bad_team_url)
        self.assertEqual(404, response.status_code)
    # NOTE(review): this method lacks the ``test_`` prefix, so the test runner
    # never executes it — confirm whether that is intentional (a method of the
    # same name also exists on TeamAPITestCase below).
    def get_user_course_specific_teams_list(self):
        """Gets the list of user course specific teams."""
        # Create a course two
        course_two = CourseFactory.create(
            teams_configuration={
                "max_team_size": 1,
                "topics": [
                    {
                        "name": "Test topic for course two",
                        "id": 1,
                        "description": "Description for test topic for course two."
                    }
                ]
            }
        )
        # Login and enroll user in both course course
        self.client.login(username=self.user.username, password=self.test_password)
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
        CourseEnrollmentFactory.create(user=self.user, course_id=course_two.id)
        # Create teams in both courses
        course_one_team = CourseTeamFactory.create(name="Course one team", course_id=self.course.id, topic_id=1)
        course_two_team = CourseTeamFactory.create(name="Course two team", course_id=course_two.id, topic_id=1)  # pylint: disable=unused-variable
        # Check that initially list of user teams in course one is empty
        course_one_teams_url = reverse('teams_dashboard', args=[self.course.id])
        response = self.client.get(course_one_teams_url)
        self.assertIn('"teams": {"count": 0', response.content)
        # Add user to a course one team
        course_one_team.add_user(self.user)
        # Check that list of user teams in course one is not empty, it is one now
        response = self.client.get(course_one_teams_url)
        self.assertIn('"teams": {"count": 1', response.content)
        # Check that list of user teams in course two is still empty
        course_two_teams_url = reverse('teams_dashboard', args=[course_two.id])
        response = self.client.get(course_two_teams_url)
        self.assertIn('"teams": {"count": 0', response.content)
class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
"""Base class for Team API test cases."""
test_password = 'password'
    @classmethod
    def setUpClass(cls):
        """Create the two shared test courses with their team topic configs."""
        with super(TeamAPITestCase, cls).setUpClassAndTestData():
            # Course 1: four topics, no team-size limit.
            teams_configuration_1 = {
                'topics':
                    [
                        {
                            'id': 'topic_{}'.format(i),
                            'name': name,
                            'description': 'Description for topic {}.'.format(i)
                        } for i, name in enumerate([u'Sólar power', 'Wind Power', 'Nuclear Power', 'Coal Power'])
                    ]
            }
            cls.test_course_1 = CourseFactory.create(
                org='TestX',
                course='TS101',
                display_name='Test Course',
                teams_configuration=teams_configuration_1
            )
            # Course 2: three topics and a max team size of 1.
            teams_configuration_2 = {
                'topics':
                    [
                        {
                            'id': 'topic_5',
                            'name': 'Other Interests',
                            'description': 'Description for topic 5.'
                        },
                        {
                            'id': 'topic_6',
                            'name': 'Public Profiles',
                            'description': 'Description for topic 6.'
                        },
                        {
                            'id': 'Topic_6.5',
                            'name': 'Test Accessibility Topic',
                            'description': 'Description for Topic_6.5'
                        },
                    ],
                'max_team_size': 1
            }
            cls.test_course_2 = CourseFactory.create(
                org='MIT',
                course='6.002x',
                display_name='Circuits',
                teams_configuration=teams_configuration_2
            )
    @classmethod
    def setUpTestData(cls):
        """Create the users, teams and team memberships shared by all tests."""
        super(TeamAPITestCase, cls).setUpTestData()
        cls.topics_count = 4
        cls.users = {
            'staff': AdminFactory.create(password=cls.test_password),
            'course_staff': StaffFactory.create(course_key=cls.test_course_1.id, password=cls.test_password)
        }
        cls.create_and_enroll_student(username='student_enrolled')
        cls.create_and_enroll_student(username='student_enrolled_not_on_team')
        cls.create_and_enroll_student(username='student_unenrolled', courses=[])
        # Make this student a community TA.
        cls.create_and_enroll_student(username='community_ta')
        seed_permissions_roles(cls.test_course_1.id)
        community_ta_role = Role.objects.get(name=FORUM_ROLE_COMMUNITY_TA, course_id=cls.test_course_1.id)
        community_ta_role.users.add(cls.users['community_ta'])
        # This student is enrolled in both test courses and is a member of a team in each course, but is not on the
        # same team as student_enrolled.
        cls.create_and_enroll_student(
            courses=[cls.test_course_1, cls.test_course_2],
            username='student_enrolled_both_courses_other_team'
        )
        # Make this student have a public profile
        cls.create_and_enroll_student(
            courses=[cls.test_course_2],
            username='student_enrolled_public_profile'
        )
        profile = cls.users['student_enrolled_public_profile'].profile
        profile.year_of_birth = 1970
        profile.save()
        # This student is enrolled in the other course, but not yet a member of a team. This is to allow
        # course_2 to use a max_team_size of 1 without breaking other tests on course_1
        cls.create_and_enroll_student(
            courses=[cls.test_course_2],
            username='student_enrolled_other_course_not_on_team'
        )
        # Suppress the post_save search-indexing signal while bulk-creating
        # teams so no indexing side effects run during fixture setup.
        with skip_signal(
            post_save,
            receiver=course_team_post_save_callback,
            sender=CourseTeam,
            dispatch_uid='teams.signals.course_team_post_save_callback'
        ):
            cls.solar_team = CourseTeamFactory.create(
                name=u'Sólar team',
                course_id=cls.test_course_1.id,
                topic_id='topic_0'
            )
            cls.wind_team = CourseTeamFactory.create(name='Wind Team', course_id=cls.test_course_1.id)
            cls.nuclear_team = CourseTeamFactory.create(name='Nuclear Team', course_id=cls.test_course_1.id)
            cls.another_team = CourseTeamFactory.create(name='Another Team', course_id=cls.test_course_2.id)
            cls.public_profile_team = CourseTeamFactory.create(
                name='Public Profile Team',
                course_id=cls.test_course_2.id,
                topic_id='topic_6'
            )
            cls.search_team = CourseTeamFactory.create(
                name='Search',
                description='queryable text',
                country='GS',
                language='to',
                course_id=cls.test_course_2.id,
                topic_id='topic_7'
            )
            cls.chinese_team = CourseTeamFactory.create(
                name=u'著文企臺個',
                description=u'共樣地面較,件展冷不護者這與民教過住意,國制銀產物助音是勢一友',
                country='CN',
                language='zh_HANS',
                course_id=cls.test_course_2.id,
                topic_id='topic_7'
            )
        cls.test_team_name_id_map = {team.name: team for team in (
            cls.solar_team,
            cls.wind_team,
            cls.nuclear_team,
            cls.another_team,
            cls.public_profile_team,
            cls.search_team,
            cls.chinese_team,
        )}
        for user, course in [('staff', cls.test_course_1), ('course_staff', cls.test_course_1)]:
            CourseEnrollment.enroll(
                cls.users[user], course.id, check_access=True
            )
        # Django Rest Framework v3 requires us to pass a request to serializers
        # that have URL fields. Since we're invoking this code outside the context
        # of a request, we need to simulate that there's a request.
        cls.solar_team.add_user(cls.users['student_enrolled'])
        cls.nuclear_team.add_user(cls.users['student_enrolled_both_courses_other_team'])
        cls.another_team.add_user(cls.users['student_enrolled_both_courses_other_team'])
        cls.public_profile_team.add_user(cls.users['student_enrolled_public_profile'])
def build_membership_data_raw(self, username, team):
"""Assembles a membership creation payload based on the raw values provided."""
return {'username': username, 'team_id': team}
def build_membership_data(self, username, team):
"""Assembles a membership creation payload based on the username and team model provided."""
return self.build_membership_data_raw(self.users[username].username, team.team_id)
@classmethod
def create_and_enroll_student(cls, courses=None, username=None):
""" Creates a new student and enrolls that student in the course.
Adds the new user to the cls.users dictionary with the username as the key.
Returns the username once the user has been created.
"""
if username is not None:
user = UserFactory.create(password=cls.test_password, username=username)
else:
user = UserFactory.create(password=cls.test_password)
courses = courses if courses is not None else [cls.test_course_1]
for course in courses:
CourseEnrollment.enroll(user, course.id, check_access=True)
cls.users[user.username] = user
return user.username
def login(self, user):
"""Given a user string, logs the given user in.
Used for testing with ddt, which does not have access to self in
decorators. If user is 'student_inactive', then an inactive user will
be both created and logged in.
"""
if user == 'student_inactive':
student_inactive = UserFactory.create(password=self.test_password)
self.client.login(username=student_inactive.username, password=self.test_password)
student_inactive.is_active = False
student_inactive.save()
else:
self.client.login(username=self.users[user].username, password=self.test_password)
def make_call(self, url, expected_status=200, method='get', data=None, content_type=None, **kwargs):
"""Makes a call to the Team API at the given url with method and data.
If a user is specified in kwargs, that user is first logged in.
"""
user = kwargs.pop('user', 'student_enrolled_not_on_team')
if user:
self.login(user)
func = getattr(self.client, method)
if content_type:
response = func(url, data=data, content_type=content_type)
else:
response = func(url, data=data)
self.assertEqual(
expected_status,
response.status_code,
msg="Expected status {expected} but got {actual}: {content}".format(
expected=expected_status,
actual=response.status_code,
content=response.content,
)
)
if expected_status == 200:
return json.loads(response.content)
else:
return response
def get_teams_list(self, expected_status=200, data=None, no_course_id=False, **kwargs):
"""Gets the list of teams as the given user with data as query params. Verifies expected_status."""
data = data if data else {}
if 'course_id' not in data and not no_course_id:
data.update({'course_id': self.test_course_1.id})
return self.make_call(reverse('teams_list'), expected_status, 'get', data, **kwargs)
    def get_user_course_specific_teams_list(self):
        """Verify that a user's team list, filtered by username, is scoped to the requested course."""
        # Create and enroll user in both courses
        user = self.create_and_enroll_student(
            courses=[self.test_course_1, self.test_course_2],
            username='test_user_enrolled_both_courses'
        )
        course_one_data = {'course_id': self.test_course_1.id, 'username': user}
        course_two_data = {'course_id': self.test_course_2.id, 'username': user}
        # Check that initially list of user teams in course one is empty
        team_list = self.get_teams_list(user=user, expected_status=200, data=course_one_data)
        self.assertEqual(team_list['count'], 0)
        # Add user to a course one team
        self.solar_team.add_user(self.users[user])
        # Check that list of user teams in course one is not empty now
        team_list = self.get_teams_list(user=user, expected_status=200, data=course_one_data)
        self.assertEqual(team_list['count'], 1)
        # Check that list of user teams in course two is still empty
        team_list = self.get_teams_list(user=user, expected_status=200, data=course_two_data)
        self.assertEqual(team_list['count'], 0)
def build_team_data(self, name="Test team", course=None, description="Filler description", **kwargs):
"""Creates the payload for creating a team. kwargs can be used to specify additional fields."""
data = kwargs
course = course if course else self.test_course_1
data.update({
'name': name,
'course_id': str(course.id),
'description': description,
})
return data
def post_create_team(self, expected_status=200, data=None, **kwargs):
"""Posts data to the team creation endpoint. Verifies expected_status."""
return self.make_call(reverse('teams_list'), expected_status, 'post', data, **kwargs)
def get_team_detail(self, team_id, expected_status=200, data=None, **kwargs):
"""Gets detailed team information for team_id. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'get', data, **kwargs)
def delete_team(self, team_id, expected_status, **kwargs):
"""Delete the given team. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'delete', **kwargs)
def patch_team_detail(self, team_id, expected_status, data=None, **kwargs):
"""Patches the team with team_id using data. Verifies expected_status."""
return self.make_call(
reverse('teams_detail', args=[team_id]),
expected_status,
'patch',
json.dumps(data) if data else None,
'application/merge-patch+json',
**kwargs
)
def get_topics_list(self, expected_status=200, data=None, **kwargs):
"""Gets the list of topics, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('topics_list'), expected_status, 'get', data, **kwargs)
def get_topic_detail(self, topic_id, course_id, expected_status=200, data=None, **kwargs):
"""Gets a single topic, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('topics_detail', kwargs={'topic_id': topic_id, 'course_id': str(course_id)}),
expected_status,
'get',
data,
**kwargs
)
def get_membership_list(self, expected_status=200, data=None, **kwargs):
"""Gets the membership list, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'get', data, **kwargs)
def post_create_membership(self, expected_status=200, data=None, **kwargs):
"""Posts data to the membership creation endpoint. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'post', data, **kwargs)
def get_membership_detail(self, team_id, username, expected_status=200, data=None, **kwargs):
"""Gets an individual membership record, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('team_membership_detail', args=[team_id, username]),
expected_status,
'get',
data,
**kwargs
)
def delete_membership(self, team_id, username, expected_status=200, **kwargs):
"""Deletes an individual membership record. Verifies expected_status."""
url = reverse('team_membership_detail', args=[team_id, username]) + '?admin=true'
return self.make_call(url, expected_status, 'delete', **kwargs)
def verify_expanded_public_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'bio', 'country', 'profile_image', 'time_zone', 'language_proficiencies']:
self.assertIn(field, user)
def verify_expanded_private_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'profile_image']:
self.assertIn(field, user)
for field in ['bio', 'country', 'time_zone', 'language_proficiencies']:
self.assertNotIn(field, user)
def verify_expanded_team(self, team):
"""Verifies that fields exist on the returned team json indicating that it is expanded."""
for field in ['id', 'name', 'course_id', 'topic_id', 'date_created', 'description']:
self.assertIn(field, team)
@ddt.ddt
class TestListTeamsAPI(EventTestMixin, TeamAPITestCase):
    """Test cases for the team listing API endpoint."""
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestListTeamsAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only enrolled or privileged users may list teams."""
        teams = self.get_teams_list(user=user, expected_status=status)
        if status == 200:
            self.assertEqual(3, teams['count'])
    def test_missing_course_id(self):
        """Omitting course_id is a bad request."""
        self.get_teams_list(400, no_course_id=True)
    def verify_names(self, data, status, names=None, **kwargs):
        """Gets a team listing with data as query params, verifies status, and then verifies team names if specified."""
        teams = self.get_teams_list(data=data, expected_status=status, **kwargs)
        if names is not None and 200 <= status < 300:
            results = teams['results']
            self.assertEqual(names, [team['name'] for team in results])
    def test_filter_invalid_course_id(self):
        self.verify_names({'course_id': 'no_such_course'}, 400)
    def test_filter_course_id(self):
        self.verify_names(
            {'course_id': self.test_course_2.id},
            200,
            ['Another Team', 'Public Profile Team', 'Search', u'著文企臺個'],
            user='staff'
        )
    def test_filter_topic_id(self):
        self.verify_names({'course_id': self.test_course_1.id, 'topic_id': 'topic_0'}, 200, [u'Sólar team'])
    def test_filter_username(self):
        self.verify_names({'course_id': self.test_course_1.id, 'username': 'student_enrolled'}, 200, [u'Sólar team'])
        self.verify_names({'course_id': self.test_course_1.id, 'username': 'staff'}, 200, [])
    @ddt.data(
        (None, 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
        ('name', 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
        # Note that "Nuclear Team" and "Solar team" have the same open_slots.
        # "Solar team" comes first due to secondary sort by last_activity_at.
        ('open_slots', 200, ['Wind Team', u'Sólar team', 'Nuclear Team']),
        # Note that "Wind Team" and "Nuclear Team" have the same last_activity_at.
        # "Wind Team" comes first due to secondary sort by open_slots.
        ('last_activity_at', 200, [u'Sólar team', 'Wind Team', 'Nuclear Team']),
    )
    @ddt.unpack
    def test_order_by(self, field, status, names):
        """Teams are ordered by the requested field, with documented secondary sorts."""
        # Make "Solar team" the most recently active team.
        # The CourseTeamFactory sets the last_activity_at to a fixed time (in the past), so all of the
        # other teams have the same last_activity_at.
        with skip_signal(
            post_save,
            receiver=course_team_post_save_callback,
            sender=CourseTeam,
            dispatch_uid='teams.signals.course_team_post_save_callback'
        ):
            solar_team = self.test_team_name_id_map[u'Sólar team']
            solar_team.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
            solar_team.save()
        data = {'order_by': field} if field else {}
        self.verify_names(data, status, names)
    def test_order_by_with_text_search(self):
        """order_by may not be combined with text_search; no search event is emitted."""
        data = {'order_by': 'name', 'text_search': 'search'}
        self.verify_names(data, 400, [])
        self.assert_no_events_were_emitted()
    @ddt.data((404, {'course_id': 'no/such/course'}), (400, {'topic_id': 'no_such_topic'}))
    @ddt.unpack
    def test_no_results(self, status, data):
        self.get_teams_list(status, data)
    def test_page_size(self):
        result = self.get_teams_list(200, {'page_size': 2})
        self.assertEqual(2, result['num_pages'])
    def test_page(self):
        result = self.get_teams_list(200, {'page_size': 1, 'page': 3})
        self.assertEqual(3, result['num_pages'])
        self.assertIsNone(result['next'])
        self.assertIsNotNone(result['previous'])
    def test_expand_private_user(self):
        # The default user's profile is served in its private (limited) form.
        result = self.get_teams_list(200, {'expand': 'user', 'topic_id': 'topic_0'})
        self.verify_expanded_private_user(result['results'][0]['membership'][0]['user'])
    def test_expand_public_user(self):
        result = self.get_teams_list(
            200,
            {
                'expand': 'user',
                'topic_id': 'topic_6',
                'course_id': self.test_course_2.id
            },
            user='student_enrolled_public_profile'
        )
        self.verify_expanded_public_user(result['results'][0]['membership'][0]['user'])
    @ddt.data(
        ('search', ['Search']),
        ('queryable', ['Search']),
        ('Tonga', ['Search']),
        ('Island', ['Search']),
        ('not-a-query', []),
        ('team', ['Another Team', 'Public Profile Team']),
        (u'著文企臺個', [u'著文企臺個']),
    )
    @ddt.unpack
    def test_text_search(self, text_search, expected_team_names):
        """Text search returns matching teams and emits a search event."""
        def reset_search_index():
            """Clear out the search index and reindex the teams."""
            CourseTeamIndexer.engine().destroy()
            for team in self.test_team_name_id_map.values():
                CourseTeamIndexer.index(team)
        reset_search_index()
        self.verify_names(
            {'course_id': self.test_course_2.id, 'text_search': text_search},
            200,
            expected_team_names,
            user='student_enrolled_public_profile'
        )
        self.assert_event_emitted(
            'edx.team.searched',
            search_text=text_search,
            topic_id=None,
            number_of_results=len(expected_team_names)
        )
        # Verify that the searches still work for a user from a different locale
        with translation.override('ar'):
            reset_search_index()
            self.verify_names(
                {'course_id': self.test_course_2.id, 'text_search': text_search},
                200,
                expected_team_names,
                user='student_enrolled_public_profile'
            )
    def test_delete_removed_from_search(self):
        """Deleting a team removes it from search results."""
        team = CourseTeamFactory.create(
            name=u'zoinks',
            course_id=self.test_course_1.id,
            topic_id='topic_0'
        )
        self.verify_names(
            {'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
            200,
            [team.name],
            user='staff'
        )
        team.delete()
        self.verify_names(
            {'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
            200,
            [],
            user='staff'
        )
@ddt.ddt
class TestCreateTeamAPI(EventTestMixin, TeamAPITestCase):
    """Test cases for the team creation endpoint."""
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestCreateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled_not_on_team', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only enrolled (teamless) or privileged users may create a team."""
        team = self.post_create_team(status, self.build_team_data(name="New Team"), user=user)
        if status == 200:
            self.verify_expected_team_id(team, 'new-team')
            teams = self.get_teams_list(user=user)
            self.assertIn("New Team", [team['name'] for team in teams['results']])
    def _expected_team_id(self, team, expected_prefix):
        """ Return the team id that we'd expect given this team data and this prefix. """
        return expected_prefix + '-' + team['discussion_topic_id']
    def verify_expected_team_id(self, team, expected_prefix):
        """ Verifies that the team id starts with the specified prefix and ends with the discussion_topic_id """
        self.assertIn('id', team)
        self.assertIn('discussion_topic_id', team)
        self.assertEqual(team['id'], self._expected_team_id(team, expected_prefix))
    def test_naming(self):
        """Team ids are slugified from names, made unique, and truncated for long names."""
        new_teams = [
            self.post_create_team(data=self.build_team_data(name=name), user=self.create_and_enroll_student())
            for name in ["The Best Team", "The Best Team", "A really long team name"]
        ]
        # Check that teams with the same name have unique IDs.
        self.verify_expected_team_id(new_teams[0], 'the-best-team')
        self.verify_expected_team_id(new_teams[1], 'the-best-team')
        self.assertNotEqual(new_teams[0]['id'], new_teams[1]['id'])
        # Verify expected truncation behavior with names > 20 characters.
        self.verify_expected_team_id(new_teams[2], 'a-really-long-team-n')
    @ddt.data((400, {
        'name': 'Bad Course ID',
        'course_id': 'no_such_course',
        'description': "Filler Description"
    }), (404, {
        'name': "Non-existent course ID",
        'course_id': 'no/such/course',
        'description': "Filler Description"
    }))
    @ddt.unpack
    def test_bad_course_data(self, status, data):
        """Malformed course ids are a 400; well-formed but unknown ids are a 404."""
        self.post_create_team(status, data)
    def test_student_in_team(self):
        """A student already on a team in the course may not create another team there."""
        response = self.post_create_team(
            400,
            data=self.build_team_data(
                name="Doomed team",
                course=self.test_course_1,
                description="Overly ambitious student"
            ),
            user='student_enrolled'
        )
        self.assertEqual(
            "You are already in a team in this course.",
            json.loads(response.content)["user_message"]
        )
    @ddt.data('staff', 'course_staff', 'community_ta')
    def test_privileged_create_multiple_teams(self, user):
        """ Privileged users can create multiple teams, even if they are already in one. """
        # First add the privileged user to a team.
        self.post_create_membership(
            200,
            self.build_membership_data(user, self.solar_team),
            user=user
        )
        self.post_create_team(
            data=self.build_team_data(
                name="Another team",
                course=self.test_course_1,
                description="Privileged users are the best"
            ),
            user=user
        )
    @ddt.data({'description': ''}, {'name': 'x' * 1000}, {'name': ''})
    def test_bad_fields(self, kwargs):
        """Empty or oversized name/description fields are rejected."""
        self.post_create_team(400, self.build_team_data(**kwargs))
    def test_missing_name(self):
        """A payload without a name field is rejected."""
        self.post_create_team(400, {
            'course_id': str(self.test_course_1.id),
            'description': "foobar"
        })
    def test_full_student_creator(self):
        """A fully-specified student create returns the expected payload and emits events."""
        creator = self.create_and_enroll_student()
        team = self.post_create_team(data=self.build_team_data(
            name="Fully specified team",
            course=self.test_course_1,
            description="Another fantastic team",
            topic_id='great-topic',
            country='CA',
            language='fr'
        ), user=creator)
        # Verify the id (it ends with a unique hash, which is the same as the discussion_id).
        self.verify_expected_team_id(team, 'fully-specified-team')
        del team['id']
        self.assert_event_emitted(
            'edx.team.created',
            team_id=self._expected_team_id(team, 'fully-specified-team'),
        )
        self.assert_event_emitted(
            'edx.team.learner_added',
            team_id=self._expected_team_id(team, 'fully-specified-team'),
            user_id=self.users[creator].id,
            add_method='added_on_create'
        )
        # Remove date_created and discussion_topic_id because they change between test runs
        del team['date_created']
        del team['discussion_topic_id']
        # Since membership is its own list, we want to examine this separately.
        team_membership = team['membership']
        del team['membership']
        # verify that it's been set to a time today.
        self.assertEqual(
            parser.parse(team['last_activity_at']).date(),
            datetime.utcnow().replace(tzinfo=pytz.utc).date()
        )
        del team['last_activity_at']
        # Verify that the creating user gets added to the team.
        self.assertEqual(len(team_membership), 1)
        member = team_membership[0]['user']
        self.assertEqual(member['username'], creator)
        self.assertEqual(team, {
            'name': 'Fully specified team',
            'language': 'fr',
            'country': 'CA',
            'topic_id': 'great-topic',
            'course_id': str(self.test_course_1.id),
            'description': 'Another fantastic team'
        })
    @ddt.data('staff', 'course_staff', 'community_ta')
    def test_membership_staff_creator(self, user):
        # Verify that staff do not automatically get added to a team
        # when they create one.
        team = self.post_create_team(data=self.build_team_data(
            name="New team",
            course=self.test_course_1,
            description="Another fantastic team",
        ), user=user)
        self.assertEqual(team['membership'], [])
@ddt.ddt
class TestDetailTeamAPI(TeamAPITestCase):
    """Test cases for the team detail endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only enrolled or privileged users may view team detail."""
        team = self.get_team_detail(self.solar_team.team_id, status, user=user)
        if status == 200:
            self.assertEqual(team['description'], self.solar_team.description)
            self.assertEqual(team['discussion_topic_id'], self.solar_team.discussion_topic_id)
            self.assertEqual(parser.parse(team['last_activity_at']), LAST_ACTIVITY_AT)
    def test_does_not_exist(self):
        """Requesting an unknown team id returns 404."""
        self.get_team_detail('no_such_team', 404)
    def test_expand_private_user(self):
        # The default user's profile is served in its private (limited) form.
        result = self.get_team_detail(self.solar_team.team_id, 200, {'expand': 'user'})
        self.verify_expanded_private_user(result['membership'][0]['user'])
    def test_expand_public_user(self):
        """Users with public profiles are fully expanded when requested."""
        result = self.get_team_detail(
            self.public_profile_team.team_id,
            200,
            {'expand': 'user'},
            user='student_enrolled_public_profile'
        )
        self.verify_expanded_public_user(result['membership'][0]['user'])
@ddt.ddt
class TestDeleteTeamAPI(EventTestMixin, TeamAPITestCase):
    """Test cases for the team delete endpoint."""
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestDeleteTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 403),
        ('staff', 204),
        ('course_staff', 204),
        ('community_ta', 204)
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only privileged users may delete; successful deletes emit team and learner events."""
        self.delete_team(self.solar_team.team_id, status, user=user)
        if status == 204:
            self.assert_event_emitted(
                'edx.team.deleted',
                team_id=self.solar_team.team_id,
            )
            self.assert_event_emitted(
                'edx.team.learner_removed',
                team_id=self.solar_team.team_id,
                remove_method='team_deleted',
                user_id=self.users['student_enrolled'].id
            )
    def test_does_not_exist(self):
        """Deleting an unknown team id returns 404."""
        self.delete_team('nonexistent', 404)
    def test_memberships_deleted(self):
        """Deleting a team also deletes its membership records."""
        self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 1)
        self.delete_team(self.solar_team.team_id, 204, user='staff')
        self.assert_event_emitted(
            'edx.team.deleted',
            team_id=self.solar_team.team_id,
        )
        self.assert_event_emitted(
            'edx.team.learner_removed',
            team_id=self.solar_team.team_id,
            remove_method='team_deleted',
            user_id=self.users['student_enrolled'].id
        )
        self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 0)
@ddt.ddt
class TestUpdateTeamAPI(EventTestMixin, TeamAPITestCase):
    """Test cases for the team update endpoint."""
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestUpdateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 403),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only privileged users may patch a team; a change event is emitted on success."""
        prev_name = self.solar_team.name
        team = self.patch_team_detail(self.solar_team.team_id, status, {'name': 'foo'}, user=user)
        if status == 200:
            self.assertEqual(team['name'], 'foo')
            self.assert_event_emitted(
                'edx.team.changed',
                team_id=self.solar_team.team_id,
                truncated=[],
                field='name',
                old=prev_name,
                new='foo'
            )
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled', 404),
        ('staff', 404),
        ('course_staff', 404),
        ('community_ta', 404),
    )
    @ddt.unpack
    def test_access_bad_id(self, user, status):
        """Patching an unknown team id fails (401 when unauthenticated, 404 otherwise)."""
        self.patch_team_detail("no_such_team", status, {'name': 'foo'}, user=user)
    @ddt.data(
        ('id', 'foobar'),
        ('description', ''),
        ('country', 'no_such_country'),
        ('language', 'no_such_language')
    )
    @ddt.unpack
    def test_bad_requests(self, key, value):
        """Read-only, empty, or invalid field values are rejected."""
        self.patch_team_detail(self.solar_team.team_id, 400, {key: value}, user='staff')
    @ddt.data(('country', 'US'), ('language', 'en'), ('foo', 'bar'))
    @ddt.unpack
    def test_good_requests(self, key, value):
        """Valid fields patch successfully; unknown keys are ignored without error."""
        # Initialize so prev_value is always bound, even for keys the model lacks.
        prev_value = None
        if hasattr(self.solar_team, key):
            prev_value = getattr(self.solar_team, key)
        self.patch_team_detail(self.solar_team.team_id, 200, {key: value}, user='staff')
        if hasattr(self.solar_team, key):
            self.assert_event_emitted(
                'edx.team.changed',
                team_id=self.solar_team.team_id,
                truncated=[],
                field=key,
                old=prev_value,
                new=value
            )
    def test_does_not_exist(self):
        """Patching an unknown team id as staff returns 404."""
        self.patch_team_detail('no_such_team', 404, user='staff')
@ddt.ddt
class TestListTopicsAPI(TeamAPITestCase):
    """Test cases for the topic listing endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only enrolled or privileged users may list topics."""
        topics = self.get_topics_list(status, {'course_id': self.test_course_1.id}, user=user)
        if status == 200:
            self.assertEqual(topics['count'], self.topics_count)
    @ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
    def test_invalid_course_key(self, course_id):
        """Unknown course keys (either key format) return 404."""
        self.get_topics_list(404, {'course_id': course_id})
    def test_without_course_id(self):
        """Omitting course_id is a bad request."""
        self.get_topics_list(400)
    @ddt.data(
        (None, 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
        ('name', 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
        # Note that "Nuclear Power" and "Solar power" both have 2 teams. "Coal Power" and "Wind Power"
        # both have 0 teams. The secondary sort is alphabetical by name.
        ('team_count', 200, ['Nuclear Power', u'Sólar power', 'Coal Power', 'Wind Power'], 'team_count'),
        ('no_such_field', 400, [], None),
    )
    @ddt.unpack
    def test_order_by(self, field, status, names, expected_ordering):
        """Topics are ordered by the requested field; invalid fields are a 400."""
        with skip_signal(
            post_save,
            receiver=course_team_post_save_callback,
            sender=CourseTeam,
            dispatch_uid='teams.signals.course_team_post_save_callback'
        ):
            # Add 2 teams to "Nuclear Power", which previously had no teams.
            CourseTeamFactory.create(
                name=u'Nuclear Team 1', course_id=self.test_course_1.id, topic_id='topic_2'
            )
            CourseTeamFactory.create(
                name=u'Nuclear Team 2', course_id=self.test_course_1.id, topic_id='topic_2'
            )
        data = {'course_id': self.test_course_1.id}
        if field:
            data['order_by'] = field
        topics = self.get_topics_list(status, data)
        if status == 200:
            self.assertEqual(names, [topic['name'] for topic in topics['results']])
            self.assertEqual(topics['sort_order'], expected_ordering)
    def test_order_by_team_count_secondary(self):
        """
        Ensure that the secondary sort (alphabetical) when primary sort is team_count
        works across pagination boundaries.
        """
        with skip_signal(
            post_save,
            receiver=course_team_post_save_callback,
            sender=CourseTeam,
            dispatch_uid='teams.signals.course_team_post_save_callback'
        ):
            # Add 2 teams to "Wind Power", which previously had no teams.
            CourseTeamFactory.create(
                name=u'Wind Team 1', course_id=self.test_course_1.id, topic_id='topic_1'
            )
            CourseTeamFactory.create(
                name=u'Wind Team 2', course_id=self.test_course_1.id, topic_id='topic_1'
            )
        topics = self.get_topics_list(data={
            'course_id': self.test_course_1.id,
            'page_size': 2,
            'page': 1,
            'order_by': 'team_count'
        })
        self.assertEqual(["Wind Power", u'Sólar power'], [topic['name'] for topic in topics['results']])
        topics = self.get_topics_list(data={
            'course_id': self.test_course_1.id,
            'page_size': 2,
            'page': 2,
            'order_by': 'team_count'
        })
        self.assertEqual(["Coal Power", "Nuclear Power"], [topic['name'] for topic in topics['results']])
    def test_pagination(self):
        """Topic lists are paginated and expose next/previous links."""
        response = self.get_topics_list(data={
            'course_id': self.test_course_1.id,
            'page_size': 2,
        })
        self.assertEqual(2, len(response['results']))
        self.assertIn('next', response)
        self.assertIn('previous', response)
        self.assertIsNone(response['previous'])
        self.assertIsNotNone(response['next'])
    def test_default_ordering(self):
        """When no order_by is supplied, topics are sorted by name."""
        response = self.get_topics_list(data={'course_id': self.test_course_1.id})
        self.assertEqual(response['sort_order'], 'name')
    def test_team_count(self):
        """Test that team_count is included for each topic"""
        response = self.get_topics_list(data={'course_id': self.test_course_1.id})
        for topic in response['results']:
            self.assertIn('team_count', topic)
            if topic['id'] == u'topic_0':
                self.assertEqual(topic['team_count'], 1)
            else:
                self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestDetailTopicAPI(TeamAPITestCase):
    """Test cases for the topic detail endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only enrolled or privileged users may view topic detail."""
        topic = self.get_topic_detail('topic_0', self.test_course_1.id, status, user=user)
        if status == 200:
            for field in ('id', 'name', 'description'):
                self.assertIn(field, topic)
    @ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
    def test_invalid_course_id(self, course_id):
        """Unknown course keys (either key format) return 404."""
        self.get_topic_detail('topic_0', course_id, 404)
    def test_invalid_topic_id(self):
        """An unknown topic id returns 404."""
        self.get_topic_detail('no_such_topic', self.test_course_1.id, 404)
    def test_topic_detail_with_caps_and_dot_in_id(self):
        """Topic ids containing capital letters and dots resolve correctly."""
        self.get_topic_detail('Topic_6.5', self.test_course_2.id, user='student_enrolled_public_profile')
    def test_team_count(self):
        """Test that team_count is included with a topic"""
        topic = self.get_topic_detail(topic_id='topic_0', course_id=self.test_course_1.id)
        self.assertEqual(topic['team_count'], 1)
        topic = self.get_topic_detail(topic_id='topic_1', course_id=self.test_course_1.id)
        self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestListMembershipAPI(TeamAPITestCase):
    """Test cases for the membership list endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled', 200),
        ('student_enrolled_both_courses_other_team', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Memberships filtered by team are visible only to enrolled or privileged users."""
        membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id}, user=user)
        if status == 200:
            self.assertEqual(membership['count'], 1)
            self.assertEqual(membership['results'][0]['user']['username'], self.users['student_enrolled'].username)
    @ddt.data(
        (None, 401, False),
        ('student_inactive', 401, False),
        ('student_unenrolled', 200, False),
        ('student_enrolled', 200, True),
        ('student_enrolled_both_courses_other_team', 200, True),
        ('staff', 200, True),
        ('course_staff', 200, True),
        ('community_ta', 200, True),
    )
    @ddt.unpack
    def test_access_by_username(self, user, status, has_content):
        """Memberships filtered by username return content only for users with access."""
        membership = self.get_membership_list(status, {'username': self.users['student_enrolled'].username}, user=user)
        if status == 200:
            if has_content:
                self.assertEqual(membership['count'], 1)
                self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
            else:
                self.assertEqual(membership['count'], 0)
    @ddt.data(
        ('student_enrolled_both_courses_other_team', 'TestX/TS101/Test_Course', 200, 'Nuclear Team'),
        ('student_enrolled_both_courses_other_team', 'MIT/6.002x/Circuits', 200, 'Another Team'),
        ('student_enrolled', 'TestX/TS101/Test_Course', 200, u'Sólar team'),
        ('student_enrolled', 'MIT/6.002x/Circuits', 400, ''),
    )
    @ddt.unpack
    def test_course_filter_with_username(self, user, course_id, status, team_name):
        """Combining username and course_id narrows the results to that course."""
        membership = self.get_membership_list(
            status,
            {
                # Pass the username string explicitly rather than relying on the
                # User object's string representation when it is serialized.
                'username': self.users[user].username,
                'course_id': course_id
            },
            user=user
        )
        if status == 200:
            self.assertEqual(membership['count'], 1)
            self.assertEqual(membership['results'][0]['team']['team_id'], self.test_team_name_id_map[team_name].team_id)
    @ddt.data(
        ('TestX/TS101/Test_Course', 200),
        ('MIT/6.002x/Circuits', 400),
    )
    @ddt.unpack
    def test_course_filter_with_team_id(self, course_id, status):
        """A team_id filter must be consistent with the course_id filter."""
        membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id, 'course_id': course_id})
        if status == 200:
            self.assertEqual(membership['count'], 1)
            self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
    def test_bad_course_id(self):
        """An unparseable course id returns 404."""
        self.get_membership_list(404, {'course_id': 'no_such_course'})
    def test_no_username_or_team_id(self):
        """At least one of username or team_id must be supplied."""
        self.get_membership_list(400, {})
    def test_bad_team_id(self):
        """An unknown team id returns 404."""
        self.get_membership_list(404, {'team_id': 'no_such_team'})
    def test_expand_private_user(self):
        # The default user's profile is served in its private (limited) form.
        result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'user'})
        self.verify_expanded_private_user(result['results'][0]['user'])
    def test_expand_public_user(self):
        """Users with public profiles are fully expanded when requested."""
        result = self.get_membership_list(
            200,
            {'team_id': self.public_profile_team.team_id, 'expand': 'user'},
            user='student_enrolled_public_profile'
        )
        self.verify_expanded_public_user(result['results'][0]['user'])
    def test_expand_team(self):
        """The team record can be expanded inside membership results."""
        result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'team'})
        self.verify_expanded_team(result['results'][0]['team'])
@ddt.ddt
class TestCreateMembershipAPI(EventTestMixin, TeamAPITestCase):
    """Test cases for the membership creation endpoint."""
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestCreateMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled_not_on_team', 200),
        ('student_enrolled', 404),
        ('student_enrolled_both_courses_other_team', 404),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Memberships may be created by the learner themselves or by privileged users."""
        membership = self.post_create_membership(
            status,
            self.build_membership_data('student_enrolled_not_on_team', self.solar_team),
            user=user
        )
        if status == 200:
            self.assertEqual(membership['user']['username'], self.users['student_enrolled_not_on_team'].username)
            self.assertEqual(membership['team']['team_id'], self.solar_team.team_id)
            memberships = self.get_membership_list(200, {'team_id': self.solar_team.team_id})
            self.assertEqual(memberships['count'], 2)
            # The add_method in the emitted event distinguishes self-joins from adds by others.
            add_method = 'joined_from_team_view' if user == 'student_enrolled_not_on_team' else 'added_by_another_user'
            self.assert_event_emitted(
                'edx.team.learner_added',
                team_id=self.solar_team.team_id,
                user_id=self.users['student_enrolled_not_on_team'].id,
                add_method=add_method
            )
        else:
            self.assert_no_events_were_emitted()
    def test_no_username(self):
        """A payload without a username yields a field error."""
        response = self.post_create_membership(400, {'team_id': self.solar_team.team_id})
        self.assertIn('username', json.loads(response.content)['field_errors'])
    def test_no_team(self):
        """A payload without a team_id yields a field error."""
        response = self.post_create_membership(400, {'username': self.users['student_enrolled_not_on_team'].username})
        self.assertIn('team_id', json.loads(response.content)['field_errors'])
    def test_bad_team(self):
        """An unknown team id returns 404."""
        self.post_create_membership(
            404,
            self.build_membership_data_raw(self.users['student_enrolled'].username, 'no_such_team')
        )
    def test_bad_username(self):
        """An unknown username returns 404 even for staff."""
        self.post_create_membership(
            404,
            self.build_membership_data_raw('no_such_user', self.solar_team.team_id),
            user='staff'
        )
    @ddt.data('student_enrolled', 'staff', 'course_staff')
    def test_join_twice(self, user):
        """Adding a user who is already on the team is rejected."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_enrolled', self.solar_team),
            user=user
        )
        self.assertIn('already a member', json.loads(response.content)['developer_message'])
    def test_join_second_team_in_course(self):
        """A learner may not join two teams within the same course."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_enrolled_both_courses_other_team', self.solar_team),
            user='student_enrolled_both_courses_other_team'
        )
        self.assertIn('already a member', json.loads(response.content)['developer_message'])
    @ddt.data('staff', 'course_staff')
    def test_not_enrolled_in_team_course(self, user):
        """Users not enrolled in the team's course cannot be added to it."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_unenrolled', self.solar_team),
            user=user
        )
        self.assertIn('not enrolled', json.loads(response.content)['developer_message'])
    def test_over_max_team_size_in_course_2(self):
        """Joining a team that is already at its maximum size is rejected."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_enrolled_other_course_not_on_team', self.another_team),
            user='student_enrolled_other_course_not_on_team'
        )
        self.assertIn('full', json.loads(response.content)['developer_message'])
@ddt.ddt
class TestDetailMembershipAPI(TeamAPITestCase):
    """Test cases for the membership detail endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled_not_on_team', 200),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
        ('community_ta', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only enrolled or privileged users may view a membership record."""
        self.get_membership_detail(
            self.solar_team.team_id,
            self.users['student_enrolled'].username,
            status,
            user=user
        )
    def test_bad_team(self):
        """An unknown team id returns 404."""
        self.get_membership_detail('no_such_team', self.users['student_enrolled'].username, 404)
    def test_bad_username(self):
        """An unknown username returns 404."""
        self.get_membership_detail(self.solar_team.team_id, 'no_such_user', 404)
    def test_no_membership(self):
        """A valid team/user pair with no membership record returns 404."""
        self.get_membership_detail(
            self.solar_team.team_id,
            self.users['student_enrolled_not_on_team'].username,
            404
        )
    def test_expand_private_user(self):
        # The default user's profile is served in its private (limited) form.
        result = self.get_membership_detail(
            self.solar_team.team_id,
            self.users['student_enrolled'].username,
            200,
            {'expand': 'user'}
        )
        self.verify_expanded_private_user(result['user'])
    def test_expand_public_user(self):
        """Users with public profiles are fully expanded when requested."""
        result = self.get_membership_detail(
            self.public_profile_team.team_id,
            self.users['student_enrolled_public_profile'].username,
            200,
            {'expand': 'user'},
            user='student_enrolled_public_profile'
        )
        self.verify_expanded_public_user(result['user'])
    def test_expand_team(self):
        """The team record can be expanded inside a membership detail."""
        result = self.get_membership_detail(
            self.solar_team.team_id,
            self.users['student_enrolled'].username,
            200,
            {'expand': 'team'}
        )
        self.verify_expanded_team(result['team'])
@ddt.ddt
class TestDeleteMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership deletion endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 404),
('student_enrolled', 204),
('staff', 204),
('course_staff', 204),
('community_ta', 204),
)
@ddt.unpack
def test_access(self, user, status):
self.delete_membership(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
if status == 204:
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='removed_by_admin'
)
else:
self.assert_no_events_were_emitted()
def test_leave_team(self):
"""
The key difference between this test and test_access above is that
removal via "Edit Membership" and "Leave Team" emit different events
despite hitting the same API endpoint, due to the 'admin' query string.
"""
url = reverse('team_membership_detail', args=[self.solar_team.team_id, self.users['student_enrolled'].username])
self.make_call(url, 204, 'delete', user='student_enrolled')
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='self_removal'
)
def test_bad_team(self):
self.delete_membership('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.delete_membership(self.solar_team.team_id, 'no_such_user', 404)
def test_missing_membership(self):
self.delete_membership(self.wind_team.team_id, self.users['student_enrolled'].username, 404)
class TestElasticSearchErrors(TeamAPITestCase):
"""Test that the Team API is robust to Elasticsearch connection errors."""
ES_ERROR = ConnectionError('N/A', 'connection error', {})
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_list_teams(self, __):
"""Test that text searches return a 503 when Elasticsearch is down.
The endpoint should still return 200 when a search is not supplied."""
self.get_teams_list(
expected_status=503,
data={'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
user='staff'
)
self.get_teams_list(
expected_status=200,
data={'course_id': self.test_course_1.id},
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_create_team(self, __):
"""Test that team creation is robust to Elasticsearch errors."""
self.post_create_team(
expected_status=200,
data=self.build_team_data(name='zoinks'),
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_delete_team(self, __):
"""Test that team deletion is robust to Elasticsearch errors."""
self.delete_team(self.wind_team.team_id, 204, user='staff')
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_patch_team(self, __):
"""Test that team updates are robust to Elasticsearch errors."""
self.patch_team_detail(
self.wind_team.team_id,
200,
data={'description': 'new description'},
user='staff'
) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/windows/rpc_terminal.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import os
import signal
import sys
from king_phisher import find
from king_phisher import serializers
from king_phisher import utilities
from king_phisher import version
from king_phisher.client import client_rpc
from king_phisher.client import dialogs
from king_phisher.client import gui_utilities
from gi.repository import GLib
from gi.repository import Gtk
try:
from gi.repository import Vte
except ImportError:
has_vte = False
"""Whether the :py:mod:`Vte` module is available or not."""
else:
has_vte = True
__all__ = ('RPCTerminal', 'RPCTerminalAppWindow')
ZOOM_RATE = 0.2
class RPCTerminalAppWindow(gui_utilities.GladeGObject):
dependencies = gui_utilities.GladeDependencies(
children=(
'box_main',
'menu_edit',
'menu_help'
),
top_level=(
'StockDialogQuestionImage',
'StockHelpImage'
)
)
top_gobject = 'window'
def __init__(self, terminal, *args, **kwargs):
super(RPCTerminalAppWindow, self).__init__(*args, **kwargs)
self.terminal = terminal
self.child_pid = None
self.gobjects['box_main'].pack_end(self.terminal, True, True, 0)
if hasattr(self.terminal.props, 'rewrap_on_resize'):
self.terminal.set_property('rewrap-on-resize', True)
self.terminal.set_property('scroll-on-keystroke', True)
self.terminal.set_property('scrollback-lines', 2048)
def signal_menuitem_edit_copy(self, menuitem):
self.terminal.copy_clipboard()
def signal_menuitem_edit_paste(self, menuitem):
self.terminal.paste_clipboard()
def signal_menuitem_help_about(self, menuitem):
dialogs.AboutDialog(self.application).interact()
def signal_menuitem_help_api_docs(self, menuitem):
rpc_api_docs_url = "https://king-phisher.readthedocs.io/en/{0}/server/rpc_api.html".format('latest' if version.version_label in ('alpha', 'beta') else 'stable')
utilities.open_uri(rpc_api_docs_url)
def signal_menuitem_help_wiki(self, menuitem):
utilities.open_uri('https://github.com/securestate/king-phisher/wiki')
def signal_menuitem_view_zoom_in(self, menuitem):
font_scale = self.terminal.get_property('font-scale')
font_scale += font_scale * ZOOM_RATE
self.terminal.set_property('font-scale', font_scale)
def signal_menuitem_view_zoom_out(self, menuitem):
font_scale = self.terminal.get_property('font-scale')
font_scale = font_scale / (1.0 + ZOOM_RATE)
self.terminal.set_property('font-scale', font_scale)
def signal_menuitem_view_zoom_reset(self, menuitem):
self.terminal.set_property('font-scale', 1.0)
def signal_window_destroy(self, window):
if self.child_pid is None:
self.logger.error('signal_window_destory was called but the child pid is None')
return
if os.path.exists("/proc/{0}".format(self.child_pid)):
self.logger.debug("sending sigkill to child process: {0}".format(self.child_pid))
os.kill(self.child_pid, signal.SIGKILL)
class RPCTerminal(object):
"""
A terminal using VTE that allows raw RPC methods to be called from
within the King Phisher client. This is primarily useful for
unofficial and advanced features or debugging and development.
"""
def __init__(self, application):
"""
:param application: The application instance to which this window belongs.
:type application: :py:class:`.KingPhisherClientApplication`
"""
utilities.assert_arg_type(application, Gtk.Application, arg_pos=1)
self.application = application
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
if not has_vte:
gui_utilities.show_dialog_error('RPC Terminal Is Unavailable', self.application.get_active_window(), 'VTE is not installed')
return
config = application.config
self.terminal = Vte.Terminal()
self.rpc_window = RPCTerminalAppWindow(self.terminal, self.application)
rpc = self.application.rpc
config = {
'campaign_id': config['campaign_id'],
'campaign_name': config['campaign_name'],
'rpc_data': {
'address': (rpc.host, rpc.port),
'use_ssl': rpc.use_ssl,
'username': rpc.username,
'uri_base': rpc.uri_base,
'headers': rpc.headers
},
'user_data_path': self.application.user_data_path,
'user_library_path': self.application.user_library_path
}
module_path = os.path.dirname(client_rpc.__file__) + ((os.path.sep + '..') * client_rpc.__name__.count('.'))
module_path = os.path.normpath(module_path)
python_command = [
"import {0}".format(client_rpc.__name__),
"{0}.vte_child_routine('{1}')".format(client_rpc.__name__, serializers.JSON.dumps(config, pretty=False))
]
python_command = '; '.join(python_command)
if hasattr(self.terminal, 'pty_new_sync'):
# Vte._version >= 2.91
vte_pty = self.terminal.pty_new_sync(Vte.PtyFlags.DEFAULT)
self.terminal.set_pty(vte_pty)
self.terminal.connect('child-exited', lambda vt, status: self.rpc_window.window.destroy())
else:
# Vte._version <= 2.90
vte_pty = self.terminal.pty_new(Vte.PtyFlags.DEFAULT)
self.terminal.set_pty_object(vte_pty)
self.terminal.connect('child-exited', lambda vt: self.rpc_window.window.destroy())
child_pid, _, _, _ = GLib.spawn_async(
working_directory=os.getcwd(),
argv=[sys.executable, '-c', python_command],
envp=[
find.ENV_VAR + '=' + os.environ[find.ENV_VAR],
'DISPLAY=' + os.environ['DISPLAY'],
'PATH=' + os.environ['PATH'],
'PYTHONDONTWRITEBYTECODE=x',
'PYTHONPATH=' + module_path,
'TERM=' + os.environ.get('TERM', 'xterm')
],
flags=(GLib.SpawnFlags.SEARCH_PATH | GLib.SpawnFlags.DO_NOT_REAP_CHILD),
child_setup=self._child_setup,
user_data=vte_pty
)
self.logger.info("vte spawned child process with pid: {0}".format(child_pid))
self.child_pid = child_pid
self.terminal.watch_child(child_pid)
GLib.spawn_close_pid(child_pid)
self.rpc_window.window.show_all()
self.rpc_window.child_pid = child_pid
return
def _child_setup(self, vte_pty):
vte_pty.child_setup() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding:utf-8 -*-
"""
上海银行间同业拆放利率(Shibor)数据接口
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
def shibor_data(year=None):
"""
获取上海银行间同业拆放利率(Shibor)
Parameters
------
year:年份(int)
Return
------
date:日期
ON:隔夜拆放利率
1W:1周拆放利率
2W:2周拆放利率
1M:1个月拆放利率
3M:3个月拆放利率
6M:6个月拆放利率
9M:9个月拆放利率
1Y:1年拆放利率
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Shibor']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor',
year, lab,
year))
df.columns = ct.SHIBOR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_quote_data(year=None):
"""
获取Shibor银行报价数据
Parameters
------
year:年份(int)
Return
------
date:日期
bank:报价银行名称
ON:隔夜拆放利率
ON_B:隔夜拆放买入价
ON_A:隔夜拆放卖出价
1W_B:1周买入
1W_A:1周卖出
2W_B:买入
2W_A:卖出
1M_B:买入
1M_A:卖出
3M_B:买入
3M_A:卖出
6M_B:买入
6M_A:卖出
9M_B:买入
9M_A:卖出
1Y_B:买入
1Y_A:卖出
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Quote']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Quote',
year, lab,
year), skiprows=[0])
df.columns = ct.QUOTE_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_ma_data(year=None):
"""
获取Shibor均值数据
Parameters
------
year:年份(int)
Return
------
date:日期
其它分别为各周期5、10、20均价
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.SHIBOR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_data(year=None):
"""
获取贷款基础利率(LPR)
Parameters
------
year:年份(int)
Return
------
date:日期
1Y:1年贷款基础利率
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR',
year, lab,
year))
df.columns = ct.LPR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_ma_data(year=None):
"""
获取贷款基础利率均值数据
Parameters
------
year:年份(int)
Return
------
date:日期
1Y_5:5日均值
1Y_10:10日均值
1Y_20:20日均值
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR_Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.LPR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None | unknown | codeparrot/codeparrot-clean | ||
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tableofcontents.py
__version__=''' $Id$ '''
__doc__="""Experimental class to generate Tables of Contents easily
This module defines a single TableOfContents() class that can be used to
create automatically a table of tontents for Platypus documents like
this:
story = []
toc = TableOfContents()
story.append(toc)
# some heading paragraphs here...
doc = MyTemplate(path)
doc.multiBuild(story)
The data needed to create the table is a list of (level, text, pageNum)
triplets, plus some paragraph styles for each level of the table itself.
The triplets will usually be created in a document template's method
like afterFlowable(), making notification calls using the notify()
method with appropriate data like this:
(level, text, pageNum) = ...
self.notify('TOCEntry', (level, text, pageNum))
Optionally the list can contain four items in which case the last item
is a destination key which the entry should point to. A bookmark
with this key needs to be created first like this:
key = 'ch%s' % self.seq.nextf('chapter')
self.canv.bookmarkPage(key)
self.notify('TOCEntry', (level, text, pageNum, key))
As the table of contents need at least two passes over the Platypus
story which is why the moultiBuild0() method must be called.
The level<NUMBER>ParaStyle variables are the paragraph styles used
to format the entries in the table of contents. Their indentation
is calculated like this: each entry starts at a multiple of some
constant named delta. If one entry spans more than one line, all
lines after the first are indented by the same constant named
epsilon.
"""
from reportlab.lib import enums
from reportlab.lib.units import cm
from reportlab.lib.utils import commasplit, escapeOnce, encode_label, decode_label, strTypes
from reportlab.lib.styles import ParagraphStyle, _baseFontName
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import IndexingFlowable
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.flowables import Spacer, Flowable
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.pdfgen import canvas
def unquote(txt):
from xml.sax.saxutils import unescape
return unescape(txt, {"'": "'", """: '"'})
try:
set
except:
class set(list):
def add(self,x):
if x not in self:
list.append(self,x)
def drawPageNumbers(canvas, style, pages, availWidth, availHeight, dot=' . '):
'''
Draws pagestr on the canvas using the given style.
If dot is None, pagestr is drawn at the current position in the canvas.
If dot is a string, pagestr is drawn right-aligned. If the string is not empty,
the gap is filled with it.
'''
pages.sort()
pagestr = ', '.join([str(p) for p, _ in pages])
x, y = canvas._curr_tx_info['cur_x'], canvas._curr_tx_info['cur_y']
fontSize = style.fontSize
pagestrw = stringWidth(pagestr, style.fontName, fontSize)
#if it's too long to fit, we need to shrink to fit in 10% increments.
#it would be very hard to output multiline entries.
#however, we impose a minimum size of 1 point as we don't want an
#infinite loop. Ultimately we should allow a TOC entry to spill
#over onto a second line if needed.
freeWidth = availWidth-x
while pagestrw > freeWidth and fontSize >= 1.0:
fontSize = 0.9 * fontSize
pagestrw = stringWidth(pagestr, style.fontName, fontSize)
if isinstance(dot, strTypes):
if dot:
dotw = stringWidth(dot, style.fontName, fontSize)
dotsn = int((availWidth-x-pagestrw)/dotw)
else:
dotsn = dotw = 0
text = '%s%s' % (dotsn * dot, pagestr)
newx = availWidth - dotsn*dotw - pagestrw
pagex = availWidth - pagestrw
elif dot is None:
text = ', ' + pagestr
newx = x
pagex = newx
else:
raise TypeError('Argument dot should either be None or an instance of basestring.')
tx = canvas.beginText(newx, y)
tx.setFont(style.fontName, fontSize)
tx.setFillColor(style.textColor)
tx.textLine(text)
canvas.drawText(tx)
commaw = stringWidth(', ', style.fontName, fontSize)
for p, key in pages:
if not key:
continue
w = stringWidth(str(p), style.fontName, fontSize)
canvas.linkRect('', key, (pagex, y, pagex+w, y+style.leading), relative=1)
pagex += w + commaw
# Default paragraph styles for tables of contents.
# (This could also be generated automatically or even
# on-demand if it is not known how many levels the
# TOC will finally need to display...)
delta = 1*cm
epsilon = 0.5*cm
defaultLevelStyles = [
ParagraphStyle(
name='Level 0',
fontName=_baseFontName,
fontSize=10,
leading=11,
firstLineIndent = 0,
leftIndent = epsilon)]
defaultTableStyle = \
TableStyle([
('VALIGN', (0,0), (-1,-1), 'TOP'),
('RIGHTPADDING', (0,0), (-1,-1), 0),
('LEFTPADDING', (0,0), (-1,-1), 0),
])
class TableOfContents(IndexingFlowable):
"""This creates a formatted table of contents.
It presumes a correct block of data is passed in.
The data block contains a list of (level, text, pageNumber)
triplets. You can supply a paragraph style for each level
(starting at zero).
Set dotsMinLevel to determine from which level on a line of
dots should be drawn between the text and the page number.
If dotsMinLevel is set to a negative value, no dotted lines are drawn.
"""
def __init__(self):
self.rightColumnWidth = 72
self.levelStyles = defaultLevelStyles
self.tableStyle = defaultTableStyle
self.dotsMinLevel = 1
self._table = None
self._entries = []
self._lastEntries = []
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries[:]
self.clearEntries()
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'TOCEntry' events only.
"""
if kind == 'TOCEntry':
self.addEntry(*stuff)
def clearEntries(self):
self._entries = []
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
try:
return self.levelStyles[n]
except IndexError:
prevstyle = self.getLevelStyle(n-1)
self.levelStyles.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+delta,
leftIndent = prevstyle.leftIndent+delta))
return self.levelStyles[n]
def addEntry(self, level, text, pageNum, key=None):
"""Adds one entry to the table of contents.
This allows incremental buildup by a doctemplate.
Requires that enough styles are defined."""
assert type(level) == type(1), "Level must be an integer"
self._entries.append((level, text, pageNum, key))
def addEntries(self, listOfEntries):
"""Bulk creation of entries in the table of contents.
If you knew the titles but not the page numbers, you could
supply them to get sensible output on the first run."""
for entryargs in listOfEntries:
self.addEntry(*entryargs)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0,'Placeholder for table of contents',0,None)]
else:
_tempEntries = self._lastEntries
def drawTOCEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
label = label.split(',')
page, level, key = int(label[0]), int(label[1]), eval(label[2],{})
style = self.getLevelStyle(level)
if self.dotsMinLevel >= 0 and level >= self.dotsMinLevel:
dot = ' . '
else:
dot = ''
drawPageNumbers(canvas, style, [(page, key)], availWidth, availHeight, dot)
self.canv.drawTOCEntryEnd = drawTOCEntryEnd
tableData = []
for (level, text, pageNum, key) in _tempEntries:
style = self.getLevelStyle(level)
if key:
text = '<a href="#%s">%s</a>' % (key, text)
keyVal = repr(key).replace(',','\\x2c').replace('"','\\x2c')
else:
keyVal = None
para = Paragraph('%s<onDraw name="drawTOCEntryEnd" label="%d,%d,%s"/>' % (text, pageNum, level, keyVal), style)
if style.spaceBefore:
tableData.append([Spacer(1, style.spaceBefore),])
tableData.append([para,])
self._table = Table(tableData, colWidths=(availWidth,), style=self.tableStyle)
self.width, self.height = self._table.wrapOn(self.canv,availWidth, availHeight)
return (self.width, self.height)
def split(self, availWidth, availHeight):
"""At this stage we do not care about splitting the entries,
we will just return a list of platypus tables. Presumably the
calling app has a pointer to the original TableOfContents object;
Platypus just sees tables.
"""
return self._table.splitOn(self.canv,availWidth, availHeight)
def drawOn(self, canvas, x, y, _sW=0):
"""Don't do this at home! The standard calls for implementing
draw(); we are hooking this in order to delegate ALL the drawing
work to the embedded table object.
"""
self._table.drawOn(canvas, x, y, _sW)
def makeTuple(x):
if hasattr(x, '__iter__'):
return tuple(x)
return (x,)
class SimpleIndex(IndexingFlowable):
"""Creates multi level indexes.
The styling can be cutomized and alphabetic headers turned on and off.
"""
def __init__(self, **kwargs):
"""
Constructor of SimpleIndex.
Accepts the same arguments as the setup method.
"""
#keep stuff in a dictionary while building
self._entries = {}
self._lastEntries = {}
self._flowable = None
self.setup(**kwargs)
def getFormatFunc(self,format):
try:
D = {}
exec('from reportlab.lib.sequencer import _format_%s as formatFunc' % format, D)
return D['formatFunc']
except ImportError:
raise ValueError('Unknown format %r' % format)
def setup(self, style=None, dot=None, tableStyle=None, headers=True, name=None, format='123', offset=0):
"""
This method makes it possible to change styling and other parameters on an existing object.
style is the paragraph style to use for index entries.
dot can either be None or a string. If it's None, entries are immediatly followed by their
corresponding page numbers. If it's a string, page numbers are aligned on the right side
of the document and the gap filled with a repeating sequence of the string.
tableStyle is the style used by the table which the index uses to draw itself. Use this to
change properties like spacing between elements.
headers is a boolean. If it is True, alphabetic headers are displayed in the Index when the first
letter changes. If False, we just output some extra space before the next item
name makes it possible to use several indexes in one document. If you want this use this
parameter to give each index a unique name. You can then index a term by refering to the
name of the index which it should appear in:
<index item="term" name="myindex" />
format can be 'I', 'i', '123', 'ABC', 'abc'
"""
if style is None:
style = ParagraphStyle(name='index',
fontName=_baseFontName,
fontSize=11)
self.textStyle = style
self.tableStyle = tableStyle or defaultTableStyle
self.dot = dot
self.headers = headers
if name is None:
from reportlab.platypus.paraparser import DEFAULT_INDEX_NAME as name
self.name = name
self.formatFunc = self.getFormatFunc(format)
self.offset = offset
def __call__(self,canv,kind,label):
try:
terms, format, offset = decode_label(label)
except:
terms = label
format = offset = None
if format is None:
formatFunc = self.formatFunc
else:
formatFunc = self.getFormatFunc(format)
if offset is None:
offset = self.offset
terms = commasplit(terms)
pns = formatFunc(canv.getPageNumber()-offset)
key = 'ix_%s_%s_p_%s' % (self.name, label, pns)
info = canv._curr_tx_info
canv.bookmarkHorizontal(key, info['cur_x'], info['cur_y'] + info['leading'])
self.addEntry(terms, pns, key)
def getCanvasMaker(self, canvasmaker=canvas.Canvas):
def newcanvasmaker(*args, **kwargs):
from reportlab.pdfgen import canvas
c = canvasmaker(*args, **kwargs)
setattr(c,self.name,self)
return c
return newcanvasmaker
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries.copy()
self.clearEntries()
def clearEntries(self):
self._entries = {}
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'IndexEntry' events only.
"""
if kind == 'IndexEntry':
(text, pageNum) = stuff
self.addEntry(text, pageNum)
def addEntry(self, text, pageNum, key=None):
"""Allows incremental buildup"""
self._entries.setdefault(makeTuple(text),set([])).add((pageNum, key))
def split(self, availWidth, availHeight):
"""At this stage we do not care about splitting the entries,
we will just return a list of platypus tables. Presumably the
calling app has a pointer to the original TableOfContents object;
Platypus just sees tables.
"""
return self._flowable.splitOn(self.canv,availWidth, availHeight)
def _getlastEntries(self, dummy=[(['Placeholder for index'],enumerate((None,)*3))]):
'''Return the last run's entries! If there are none, returns dummy.'''
if not self._lastEntries:
if self._entries:
return list(self._entries.items())
return dummy
return list(self._lastEntries.items())
def _build(self,availWidth,availHeight):
_tempEntries = self._getlastEntries()
def getkey(seq):
return [x.upper() for x in seq[0]]
_tempEntries.sort(key=getkey)
leveloffset = self.headers and 1 or 0
def drawIndexEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
style = self.getLevelStyle(leveloffset)
pages = decode_label(label)
drawPageNumbers(canvas, style, pages, availWidth, availHeight, self.dot)
self.canv.drawIndexEntryEnd = drawIndexEntryEnd
alpha = ''
tableData = []
lastTexts = []
alphaStyle = self.getLevelStyle(0)
for texts, pageNumbers in _tempEntries:
texts = list(texts)
#track when the first character changes; either output some extra
#space, or the first letter on a row of its own. We cannot do
#widow/orphan control, sadly.
nalpha = texts[0][0].upper()
if alpha != nalpha:
alpha = nalpha
if self.headers:
header = alpha
else:
header = ' '
tableData.append([Spacer(1, alphaStyle.spaceBefore),])
tableData.append([Paragraph(header, alphaStyle),])
tableData.append([Spacer(1, alphaStyle.spaceAfter),])
i, diff = listdiff(lastTexts, texts)
if diff:
lastTexts = texts
texts = texts[i:]
label = encode_label(list(pageNumbers))
texts[-1] = '%s<onDraw name="drawIndexEntryEnd" label="%s"/>' % (texts[-1], label)
for text in texts:
#Platypus and RML differ on how parsed XML attributes are escaped.
#e.g. <index item="M&S"/>. The only place this seems to bite us is in
#the index entries so work around it here.
text = escapeOnce(text)
style = self.getLevelStyle(i+leveloffset)
para = Paragraph(text, style)
if style.spaceBefore:
tableData.append([Spacer(1, style.spaceBefore),])
tableData.append([para,])
i += 1
self._flowable = Table(tableData, colWidths=[availWidth], style=self.tableStyle)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
self._build(availWidth,availHeight)
self.width, self.height = self._flowable.wrapOn(self.canv,availWidth, availHeight)
return self.width, self.height
def drawOn(self, canvas, x, y, _sW=0):
"""Don't do this at home! The standard calls for implementing
draw(); we are hooking this in order to delegate ALL the drawing
work to the embedded table object.
"""
self._flowable.drawOn(canvas, x, y, _sW)
def draw(self):
t = self._flowable
ocanv = getattr(t,'canv',None)
if not ocanv:
t.canv = self.canv
try:
t.draw()
finally:
if not ocanv:
del t.canv
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
if not hasattr(self.textStyle, '__iter__'):
self.textStyle = [self.textStyle]
try:
return self.textStyle[n]
except IndexError:
self.textStyle = list(self.textStyle)
prevstyle = self.getLevelStyle(n-1)
self.textStyle.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+.2*cm,
leftIndent = prevstyle.leftIndent+.2*cm))
return self.textStyle[n]
AlphabeticIndex = SimpleIndex
def listdiff(l1, l2):
m = min(len(l1), len(l2))
for i in range(m):
if l1[i] != l2[i]:
return i, l2[i:]
return m, l2[m:]
class ReferenceText(IndexingFlowable):
    """Illustrative stand-in for a real cross-reference: renders a text
    pattern filled with the page number recorded for the target key
    during the previous build pass."""
    def __init__(self, textPattern, targetKey):
        self.textPattern = textPattern
        self.target = targetKey
        self.paraStyle = ParagraphStyle('tmp')
        self._lastPageNum = None
        self._pageNum = -999
        self._para = None
    def beforeBuild(self):
        # The page number only becomes usable one build pass late.
        self._lastPageNum = self._pageNum
    def notify(self, kind, stuff):
        # Only 'Target' notifications for our own key are of interest.
        if kind != 'Target':
            return
        key, pageNum = stuff
        if key == self.target:
            self._pageNum = pageNum
    def wrap(self, availWidth, availHeight):
        rendered = self.textPattern % self._lastPageNum
        self._para = Paragraph(rendered, self.paraStyle)
        return self._para.wrap(availWidth, availHeight)
    def drawOn(self, canvas, x, y, _sW=0):
        # Delegate all drawing to the paragraph built in wrap().
        self._para.drawOn(canvas, x, y, _sW)
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,msm8996-apcc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm clock controller for MSM8996 CPUs
maintainers:
- Loic Poulain <loic.poulain@linaro.org>
description: |
Qualcomm CPU clock controller for MSM8996 CPUs, clock 0 is for Power cluster
and clock 1 is for Perf cluster.
properties:
compatible:
enum:
- qcom,msm8996-apcc
reg:
maxItems: 1
'#clock-cells':
const: 1
clocks:
items:
- description: XO source
- description: SYS APCS AUX clock
clock-names:
items:
- const: xo
- const: sys_apcs_aux
required:
- compatible
- reg
- '#clock-cells'
- clocks
- clock-names
additionalProperties: false
examples:
- |
kryocc: clock-controller@6400000 {
compatible = "qcom,msm8996-apcc";
reg = <0x6400000 0x90000>;
#clock-cells = <1>;
clocks = <&xo_board>, <&apcs_glb>;
clock-names = "xo", "sys_apcs_aux";
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/qcom,msm8996-apcc.yaml |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Sphinx extension module names, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles; execute "export SPHINX_DEBUG=1" in your terminal to
# disable it.
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fastdraw'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\HttpCache;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\HttpCache\Esi;
use Symfony\Component\HttpKernel\HttpCache\HttpCache as BaseHttpCache;
use Symfony\Component\HttpKernel\HttpCache\Store;
use Symfony\Component\HttpKernel\HttpCache\StoreInterface;
use Symfony\Component\HttpKernel\HttpCache\SurrogateInterface;
use Symfony\Component\HttpKernel\KernelInterface;
/**
* Manages HTTP cache objects in a Container.
*
* @author Fabien Potencier <fabien@symfony.com>
*/
class HttpCache extends BaseHttpCache
{
    // Cache directory used when no explicit store instance was supplied.
    protected ?string $cacheDir = null;
    // Explicit store instance, when one was passed to the constructor.
    private ?StoreInterface $store = null;
    private array $options;
    /**
     * @param $cache The cache directory (default used if null) or the storage instance
     */
    public function __construct(
        protected KernelInterface $kernel,
        string|StoreInterface|null $cache = null,
        private ?SurrogateInterface $surrogate = null,
        ?array $options = null,
    ) {
        $this->options = $options ?? [];
        if ($cache instanceof StoreInterface) {
            $this->store = $cache;
        } else {
            $this->cacheDir = $cache;
        }
        // With no explicit options, inherit debug mode from the kernel.
        if (null === $options && $kernel->isDebug()) {
            $this->options = ['debug' => true];
        }
        // In debug mode, default stale_if_error to 0 unless set explicitly.
        if ($this->options['debug'] ?? false) {
            $this->options += ['stale_if_error' => 0];
        }
        parent::__construct($kernel, $this->createStore(), $this->createSurrogate(), array_merge($this->options, $this->getOptions()));
    }
    /**
     * Boots the kernel and registers this cache as the "cache" service
     * before delegating to the parent implementation.
     */
    protected function forward(Request $request, bool $catch = false, ?Response $entry = null): Response
    {
        $this->getKernel()->boot();
        $this->getKernel()->getContainer()->set('cache', $this);
        return parent::forward($request, $catch, $entry);
    }
    /**
     * Returns an array of options to customize the Cache configuration.
     */
    protected function getOptions(): array
    {
        return [];
    }
    /**
     * Returns the configured surrogate, defaulting to ESI.
     */
    protected function createSurrogate(): SurrogateInterface
    {
        return $this->surrogate ?? new Esi();
    }
    /**
     * Returns the configured store, or builds a filesystem Store under the
     * kernel's share/cache directory.
     */
    protected function createStore(): StoreInterface
    {
        return $this->store ?? new Store($this->cacheDir ?: ($this->kernel->getShareDir() ?? $this->kernel->getCacheDir()).'/http_cache');
    }
}
def test_namedtuple():
    # NOTE(review): several cases below exercise Python 2-era behaviour --
    # the ``verbose``/``rename`` keyword form and bytes type/field names.
    # ``verbose`` was removed in CPython 3.7 and bytes names are rejected
    # by CPython 3, so presumably this suite targets an interpreter with
    # py2 semantics; confirm before running under modern CPython.
    from collections import namedtuple
    assert namedtuple("Point", "x y")(1, 2).x == 1
    assert namedtuple(u"Point", u"x y", verbose=False, rename=False)(1, 2).x == 1
    assert namedtuple(b"Point", b"x y", verbose=True, rename=True)(1, 2).x == 1
    assert namedtuple("Point", ["x", "y"])(1, 2).x == 1
    assert namedtuple(u"Point", [u"x", u"y"])(1, 2).x == 1
    assert namedtuple(b"Point", [b"x", b"y"])(1, 2).x == 1
    assert namedtuple("Point", [b"x", u"y"])(1, 2).x == 1
    Point = namedtuple('Point', 'x y')
    p = Point(1, 2)
    assert p == Point(1, 2)
    # _replace returns a new tuple; _asdict/_fields expose the schema.
    assert p._replace(y=3.14).y == 3.14
    assert p._asdict()['x'] == 1
    assert p._fields == ('x', 'y')
    # namedtuples behave as plain tuples too.
    assert p == (1, 2)
    assert (p.x, p.y) == (1, 2)
    assert p[0] + p[1] == 3
    assert p.index(1) == 0
    assert Point._make([1, 3.14]).y == 3.14
def test_deque():
    from collections import deque
    dq = deque([2])
    # Constructors: plain iterable, and maxlen truncating from the left.
    assert list(deque([1, 2, 3])) == [1, 2, 3]
    assert list(deque([1, 2, 3], 2)) == [2, 3]
    assert deque([1, 2, 3]).maxlen is None
    assert deque([1, 2, 3], 2).maxlen == 2
    # append/appendleft grow either end.
    dq.append(3)
    dq.appendleft(1)
    assert list(dq) == [1, 2, 3]
    dq.clear()
    assert not dq
    # extend appends on the right; extendleft reverses its argument.
    dq.extend([1, 2, 3])
    dq.extendleft([5, 3, 1])
    assert dq.count(3) == 2
    assert list(dq) == [1, 3, 5, 1, 2, 3]
    # pop/popleft/remove mutate in place.
    assert dq.pop() == 3
    assert dq.popleft() == 1
    dq.remove(5)
    assert list(dq) == [3, 1, 2]
    dq.reverse()
    assert list(dq) == [2, 1, 3]
    # rotate: positive moves items right-to-front, negative the reverse.
    dq.rotate(1)
    assert list(dq) == [3, 2, 1]
    dq.rotate(-2)
    assert list(dq) == [1, 3, 2]
    # Sequence protocol: len, membership, indexing, item assignment.
    assert len(dq) == 3
    assert 3 in dq
    assert dq[1] == 3
    dq[1] = 4
    assert list(dq) == [1, 4, 2]
    assert list(reversed(dq)) == [2, 4, 1]
def test_counter():
    from collections import Counter
    cnt = Counter()
    # Constructors: iterable, mapping, keyword form.
    assert Counter("abc") == {"a": 1, "b": 1, "c": 1}
    assert Counter({"a": 1, "b": 2, "c": 3}) == {"a": 1, "b": 2, "c": 3}
    assert Counter(a=1, b=2, c=3) == {"a": 1, "b": 2, "c": 3}
    cnt["abc"] = 1
    cnt["def"] = 2
    assert cnt == {"abc": 1, "def": 2}
    # Setting a count to zero keeps the key; only del removes it.
    cnt["def"] = 0
    assert cnt == {"abc": 1, "def": 0}
    del cnt["def"]
    # Missing keys read as zero without being inserted.
    assert cnt["ghi"] == 0
    cnt.update({"ghi": 2})
    assert list(cnt.elements()) == ["abc", "ghi", "ghi"]
    cnt.update(["a", "a", "b", "a", "a", "a", "b", "a"])
    assert cnt == {"abc": 1, "ghi": 2, "a": 6, "b": 2}
    # Keyword update adds (possibly negative) deltas.
    cnt.update(a=-3, b=-1)
    assert cnt == {"abc": 1, "ghi": 2, "a": 3, "b": 1}
    assert cnt.most_common(2) == [("a", 3), ("ghi", 2)]
    assert cnt.most_common() == [("a", 3), ("ghi", 2), ("abc", 1), ("b", 1)]
    cnt.subtract({"abc": 1, "ghi": -2, "a": 3, "b": -1})
    assert cnt == {"abc": 0, "ghi": 4, "a": 0, "b": 2}
    # Binary operators drop non-positive counts; in-place variants match.
    cnt = Counter(a=3, b=1)
    rhs = Counter(a=1, b=2)
    assert cnt + rhs == Counter({'a': 4, 'b': 3})
    assert cnt - rhs == Counter({'a': 2})
    assert cnt & rhs == Counter({'a': 1, 'b': 1})
    assert cnt | rhs == Counter({'a': 3, 'b': 2})
    cnt = Counter(a=3, b=1)
    cnt += rhs
    assert cnt == Counter({'a': 4, 'b': 3})
    cnt = Counter(a=3, b=1)
    cnt -= rhs
    assert cnt == Counter({'a': 2})
    cnt = Counter(a=3, b=1)
    cnt &= rhs
    assert cnt == Counter({'a': 1, 'b': 1})
    cnt = Counter(a=3, b=1)
    cnt |= rhs
    assert cnt == Counter({'a': 3, 'b': 2})
    # subtract (unlike the - operator) keeps zero/negative counts.
    cnt = Counter(a=3, b=1)
    cnt.subtract(["a", "b"])
    assert cnt == {"a": 2, "b": 0}
def test_ordered_dict():
    from collections import OrderedDict
    d = OrderedDict([("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    # popitem defaults to LIFO; last=False pops from the front.
    assert d.popitem() == ("d", 4)
    assert d == OrderedDict([("a", 1), ("b", 2), ("c", 3)])
    assert d.popitem(last=False) == ("a", 1)
    assert d == OrderedDict([("b", 2), ("c", 3)])
    assert d.popitem(last=True) == ("c", 3)
    assert d == OrderedDict([("b", 2)])
    # Equality with a plain dict ignores ordering.
    assert d == {"b": 2}
def test_defaultdict():
    from collections import defaultdict
    # No-arg / keyword construction behaves like a plain dict.
    assert defaultdict() == {}
    assert defaultdict(k1=1, k2=2) == {"k1": 1, "k2": 2}
    # The default_factory does not affect equality with existing contents.
    assert defaultdict(lambda: 1) == {}
    assert defaultdict(lambda: 2, {"k1": 1, "k2": 2}) == {"k1": 1, "k2": 2}
    assert defaultdict(lambda: 3, [("k1", 1), ("k2", 2)]) == {"k1": 1, "k2": 2}
    # None is an accepted (defaulting-disabled) factory.
    assert defaultdict(None) == {}
    assert defaultdict(None, {"k1": 1, "k2": 2}) == {"k1": 1, "k2": 2}
    assert defaultdict(None, [("k1", 1), ("k2", 2)]) == {"k1": 1, "k2": 2}
    # __missing__ consults the factory directly.
    dd = defaultdict(lambda: 4)
    assert dd.__missing__("key") == 4
def test_abc():
    # NOTE(review): importing these ABCs directly from ``collections``
    # (rather than ``collections.abc``) was deprecated in Python 3.3 and
    # removed in 3.10, so this test only runs on older interpreters.
    from collections import (Container, Hashable, Iterable, Iterator, Sized, Callable, Sequence, MutableSequence, Set,
                             MutableSet, Mapping, MutableMapping, MappingView, ItemsView, KeysView, ValuesView)
    # Merely asserts that every name imported successfully (the list is truthy).
    assert [Container, Hashable, Iterable, Iterator, Sized, Callable, Sequence, MutableSequence, Set, MutableSet,
            Mapping, MutableMapping, MappingView, ItemsView, KeysView, ValuesView]
# -*- coding: utf-8 -*-
###############################################################################
#
# GetComments
# Returns all comments for a specific company update.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetComments(Choreography):
    # Thin wrapper binding this Choreo to its Temboo library path.
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetComments Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetComments, self).__init__(temboo_session, '/Library/LinkedIn/Companies/GetComments')

    def new_input_set(self):
        # Factory for the input-set object used to supply parameters.
        return GetCommentsInputSet()

    def _make_result_set(self, result, path):
        # Factory hook: wraps raw results in the typed result set.
        return GetCommentsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory hook: wraps an execution handle for this Choreo.
        return GetCommentsChoreographyExecution(session, exec_id, path)
class GetCommentsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetComments
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter simply records its value under the corresponding input name
    # via the base class's _set_input.
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by LinkedIn (AKA the Client ID).)
        """
        super(GetCommentsInputSet, self)._set_input('APIKey', value)
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process (AKA the OAuth User Secret).)
        """
        super(GetCommentsInputSet, self)._set_input('AccessTokenSecret', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process (AKA the OAuth User Token).)
        """
        super(GetCommentsInputSet, self)._set_input('AccessToken', value)
    def set_CompanyID(self, value):
        """
        Set the value of the CompanyID input for this Choreo. ((required, integer) A LinkedIn assigned ID associated with the company.)
        """
        super(GetCommentsInputSet, self)._set_input('CompanyID', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
        """
        super(GetCommentsInputSet, self)._set_input('ResponseFormat', value)
    def set_SecretKey(self, value):
        """
        Set the value of the SecretKey input for this Choreo. ((required, string) The Secret Key provided by LinkedIn (AKA the Client Secret).)
        """
        super(GetCommentsInputSet, self)._set_input('SecretKey', value)
    def set_UpdateKey(self, value):
        """
        Set the value of the UpdateKey input for this Choreo. ((required, string) The key of the update to retrieve comments for.)
        """
        super(GetCommentsInputSet, self)._set_input('UpdateKey', value)
class GetCommentsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetComments Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON string into Python objects. (Parameter shadows the
        # builtin name ``str``; kept as-is for interface compatibility.)
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from LinkedIn.)
        """
        return self._output.get('Response', None)
class GetCommentsChoreographyExecution(ChoreographyExecution):
    # Execution handle; only specializes the result-set wrapper type.
    def _make_result_set(self, response, path):
        return GetCommentsResultSet(response, path)
/*
* Copyright 2014-2024 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.utils.io
private const val DEVELOPMENT_MODE_KEY: String = "io.ktor.development"

/**
 * `true` when the "io.ktor.development" JVM system property is set to `"true"`.
 * The property is re-read from [System.getProperty] on every access.
 */
internal actual val DEVELOPMENT_MODE: Boolean
    get() = System.getProperty(DEVELOPMENT_MODE_KEY)?.toBoolean() == true
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
import sys
import ujson
def error(*args):
    """Failure callback handed to queue_json_publish: abort instead of
    silently dropping the message when RabbitMQ is unavailable."""
    message = 'We cannot enqueue because settings.USING_RABBITMQ is False.'
    raise Exception(message)
class Command(BaseCommand):
    help = """Read JSON lines from a file and enqueue them to a worker queue.
Each line in the file should either be a JSON payload or two tab-separated
fields, the second of which is a JSON payload. (The latter is to accomodate
the format of error files written by queue workers that catch exceptions--their
first field is a timestamp that we ignore.)
You can use "-" to represent stdin.
"""
    def add_arguments(self, parser):
        parser.add_argument('queue_name', metavar='<queue>', type=str,
                            help="name of worker queue to enqueue to")
        parser.add_argument('file_name', metavar='<file>', type=str,
                            help="name of file containing JSON lines")
    def handle(self, *args, **options):
        """Read each line, extract its JSON payload, and publish it.

        Fixes over the previous revision: the input file is now closed
        (it was leaked), blank lines are skipped instead of crashing
        ujson.loads(''), and print is called as a function so the code
        is valid under both Python 2 and 3.
        """
        queue_name = options['queue_name']
        file_name = options['file_name']
        if file_name == '-':
            f = sys.stdin
        else:
            f = open(file_name)
        try:
            for line in f:
                line = line.strip()
                if not line:
                    # Skip blank lines rather than failing to parse them.
                    continue
                try:
                    # Error files are "<timestamp>\t<payload>"; take the payload.
                    payload = line.split('\t')[1]
                except IndexError:
                    # Plain JSON-lines file: the whole line is the payload.
                    payload = line
                print('Queueing to queue %s: %s' % (queue_name, payload))
                # Verify that payload is valid json.
                data = ujson.loads(payload)
                queue_json_publish(queue_name, data, error)
        finally:
            # Close the file we opened; never close the shared stdin.
            if f is not sys.stdin:
                f.close()
from nltk.parse.stanford import StanfordParser
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import math
class SentenceParser:
    # NOTE(review): this class uses Python 2 idioms (``result.next()``,
    # ``dict.iteritems``) and so only runs under Python 2.
    # Cached StanfordParser instance (set in __init__).
    __parser = None
    # Mixing weights for the token-distance, sentence-distance and
    # term-frequency components of the noun-phrase score.
    __alpha = 1.0
    __beta = 1.0
    __gamma = 0.1
    # Gaussian variances for the two distance kernels (set in __init__).
    __var_d = 0.0
    __var_s = 0.0
    def __init__(self):
        self.__parser = StanfordParser()
        # Variances for the Gaussian kernels, scaled by 1/log(2).
        self.__var_d = 12.0/math.log(2.0)
        self.__var_s = 4.0 * 1.0/math.log(2)
    def __parse_sent(self, sentence):
        # StanfordParser yields an iterator of parse trees; take the first
        # (Python 2 ``.next()``).
        result = self.__parser.raw_parse(sentence)
        return result.next()
    def __obtain_nps(self, sentence):
        # Return (space-joined tokens of the sentence, set of minimal NPs).
        parse_tree = self.__parse_sent(sentence)
        nps = set()
        for phrase in parse_tree.subtrees():
            if phrase.label() != "NP": continue
            nps.add(' '.join(phrase.leaves()))
        sent_tokens = " ".join(parse_tree.leaves())
        # Keep only the smallest NPs: those containing no other NP as a substring.
        nps_smallest = set()
        for np1 in nps:
            if all(np2 not in np1 for np2 in nps if np2 != np1):
                nps_smallest.add(np1)
        return sent_tokens, nps_smallest
    def __gaussian_weight(self, distance, variance):
        # Unnormalized Gaussian kernel exp(-d^2 / (2 * variance)).
        return math.exp(-0.5 * (distance**2)/variance)
    def __weight_tokens(self, mid, nps, sentences, sent_id):
        # Score each NP by closeness (in tokens and in sentences) to the
        # mention ``mid`` found in sentence ``sent_id``, plus term frequency.
        st = PorterStemmer()
        sent_target = sentences[sent_id]
        token_id = [idx for idx, token in enumerate(sent_target.strip().split(" ")) if mid in token][0]
        sent_lengths= [len(s.split(" ")) for s in sentences]
        # Map each NP to its stemmed form for substring matching.
        nps_base = {np:" ".join(st.stem(token) for token in np.split(" ")) for np in nps}
        nps_proc = {}
        for sent_idx, sent in enumerate(sentences):
            sent_stem = " ".join(st.stem(token) for token in sent.split(" "))
            for np_ori, np in nps_base.iteritems():
                if np_ori not in nps_proc: nps_proc[np_ori] = {}
                # Only record distances for the occurrence in the sentence
                # nearest to the target sentence.
                if "dist_sent" not in nps_proc[np_ori] or abs(sent_idx - sent_id) < nps_proc[np_ori]["dist_sent"]:
                    #always update the info
                    if np not in sent_stem:
                        continue
                    np_idx = sent_stem.rindex(np)
                    # NOTE(review): np_idx indexes into sent_stem but is used to
                    # slice sent_target here -- verify this is intentional.
                    np_token_idx= len(sent_target[:np_idx].strip().split(" "))
                    dist_start = len(sent_stem[:np_idx].strip().split(" "))
                    dist_end = len(sent_stem[np_idx+len(np):].strip().split(" "))
                    dist_sent = abs(sent_idx - sent_id)
                    dist_token = -1
                    # Token distance: within the target sentence, count tokens
                    # between the NP and the mention; across sentences, sum the
                    # intervening sentence lengths plus the offsets at each end.
                    if dist_sent == 0:
                        if mid in np_ori:
                            dist_token = 0
                        elif np_token_idx < token_id:
                            dist_token = token_id - np_token_idx - (len(np.split(" ")) - 1) - 1
                        elif np_token_idx > token_id:
                            dist_token = np_token_idx - token_id - 1
                    elif sent_idx < sent_id:
                        dist_token = dist_end + sum(sent_lengths[sent_idx+1:sent_id]) + token_id
                    elif sent_idx > sent_id:
                        dist_token = (len(sent_target.strip().split(" "))-1-token_id) + sum(sent_lengths[sent_id+1:sent_idx]) + dist_start
                    nps_proc[np_ori]["dist_sent"] = dist_sent
                    nps_proc[np_ori]["dist_token"] = dist_token
                # Term frequency accumulates over every sentence.
                np_count = sent_stem.count(np)
                nps_proc[np_ori]["tf"] = (nps_proc[np_ori].get("tf") or 0) + np_count
        # Combine the three components into a normalized weighted average.
        nps_weight = {}
        for np, vals in nps_proc.iteritems():
            term1 = self.__alpha * self.__gaussian_weight(vals["dist_token"], self.__var_d)
            term2 = self.__beta * self.__gaussian_weight(vals["dist_sent"], self.__var_s)
            term3 = self.__gamma * vals["tf"]
            nps_weight[np] = (term1 + term2 + term3) / (self.__alpha + self.__beta + self.__gamma)
        return nps_weight
    def obtain_nps_from_sentences(self, mid, text):
        # Public entry point: tokenize ``text`` into sentences, collect the
        # minimal NPs of each, and weight them relative to the first sentence
        # containing the mention ``mid``.
        lst_sentences = sent_tokenize(text)
        lst_sent_pr = []
        set_nps = set()
        sent_match_id= -1
        for sent_idx, sent in enumerate(lst_sentences):
            if sent_match_id == -1 and mid in sent:
                sent_match_id = sent_idx
            sent_tokens, nps = self.__obtain_nps(sent)
            lst_sent_pr.append(sent_tokens)
            set_nps.update(nps)
        dct_nps_weight = self.__weight_tokens(mid, set_nps, lst_sent_pr, sent_match_id)
        return lst_sent_pr, dct_nps_weight
# Fixture: class-based error handlers whose resulting view callables take no
# arguments (not even ``request``) -- presumably used to exercise checks that
# validate error-handler signatures; confirm against the test importing this.
urlpatterns = []
class HandlerView:
    @classmethod
    def as_view(cls):
        # Deliberately returns a zero-argument view.
        def view():
            pass
        return view
handler400 = HandlerView.as_view()
handler403 = HandlerView.as_view()
handler404 = HandlerView.as_view()
handler500 = HandlerView.as_view()
from django.conf.urls import include, url
from . import views
from .tests import URLObject
# URLObject fixtures: (app namespace, instance namespace) pairs exercised by
# the reverse()/namespace tests.
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')
otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')
newappobj1 = URLObject('newapp')
urlpatterns = [
    # Plain named views, with and without captured arguments.
    url(r'^normal/$', views.empty_view, name='normal-view'),
    url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='normal-view'),
    url(r'^resolver_match/$', views.pass_resolver_match_view, name='test-resolver-match'),
    # Pattern containing regex metacharacters that must be escaped on reverse.
    url(r'^\+\\\$\*/$', views.empty_view, name='special-view'),
    url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='mixed-args'),
    url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', views.empty_view, name='no-kwargs'),
    url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance, name='view-class'),
    # Unnamed patterns (resolvable only by view reference).
    url(r'^unnamed/normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view),
    url(r'^unnamed/view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance),
    # Includes via URLObject fixtures defined above.
    url(r'^test1/', include(testobj1.urls)),
    url(r'^test2/', include(testobj2.urls)),
    url(r'^default/', include(default_testobj.urls)),
    url(r'^other1/', include(otherobj1.urls)),
    url(r'^other[246]/', include(otherobj2.urls)),
    url(r'^newapp1/', include(newappobj1.app_urls, 'new-ns1')),
    url(r'^new-default/', include(newappobj1.app_urls)),
    # Includes by dotted path, with explicit app/instance namespaces.
    url(r'^app-included[135]/', include('urlpatterns_reverse.included_app_urls', namespace='app-ns1')),
    url(r'^app-included2/', include('urlpatterns_reverse.included_app_urls', namespace='app-ns2')),
    url(r'^ns-included[135]/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
    url(r'^ns-included2/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
    url(r'^app-included/', include('urlpatterns_reverse.included_namespace_urls', 'inc-app', 'inc-app')),
    url(r'^included/', include('urlpatterns_reverse.included_namespace_urls')),
    # Includes nested under captured (named and unnamed) outer arguments.
    url(r'^inc(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls', namespace='inc-ns5')),
    url(r'^included/([0-9]+)/', include('urlpatterns_reverse.included_namespace_urls')),
    url(
        r'^ns-outer/(?P<outer>[0-9]+)/',
        include('urlpatterns_reverse.included_namespace_urls', namespace='inc-outer')
    ),
    url(r'^\+\\\$\*/', include('urlpatterns_reverse.namespace_urls', namespace='special')),
]
# orm/relationships.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`.relationship`.
"""
from .. import sql, util, exc as sa_exc, schema, log
from .util import CascadeOptions, _orm_annotate, _orm_deannotate
from . import dependency
from . import attributes
from ..sql.util import (
ClauseAdapter,
join_condition, _shallow_annotate, visit_binary_product,
_deep_deannotate, selectables_overlap
)
from ..sql import operators, expression, visitors
from .interfaces import MANYTOMANY, MANYTOONE, ONETOMANY, StrategizedProperty, PropComparator
from ..inspection import inspect
from . import mapper as mapperlib
def remote(expr):
    """Apply a 'remote' annotation to part of a primaryjoin expression.

    Used with :func:`.relationship` to explicitly mark which columns of a
    custom join condition belong to the "remote" side.

    .. versionadded:: 0.8

    .. seealso::

        :ref:`relationship_custom_foreign`

        :func:`.foreign`

    """
    element = expression._clause_element_as_expr(expr)
    return _annotate_columns(element, {"remote": True})
def foreign(expr):
    """Apply a 'foreign' annotation to part of a primaryjoin expression.

    Used with :func:`.relationship` to explicitly mark which columns of a
    custom join condition are the "foreign" (referencing) side.

    .. versionadded:: 0.8

    .. seealso::

        :ref:`relationship_custom_foreign`

        :func:`.remote`

    """
    element = expression._clause_element_as_expr(expr)
    return _annotate_columns(element, {"foreign": True})
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
See also:
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = 'relationship'
_dependency_processor = None
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False, extension=None,
viewonly=False, lazy=True,
collection_class=None, passive_deletes=False,
passive_updates=True, remote_side=None,
enable_typechecks=True, join_depth=None,
comparator_factory=None,
single_parent=False, innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
strategy_class=None, _local_remote_pairs=None,
query_class=None,
info=None):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship. The
constructed class is an instance of :class:`.RelationshipProperty`.
A typical :func:`.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`.relationship` optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`.Mapper` at "mapper
initialization" time, which happens only when mappers are first used, and
is assumed to be after all mappings have been constructed. This can be
used to resolve order-of-declaration and other dependency issues, such as
if ``Child`` is declared below ``Parent`` in the same file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to :func:`.relationship`.
These string arguments are converted into callables that evaluate
the string as Python code, using the Declarative
class-registry as a namespace. This allows the lookup of related
classes to be automatic via their string name, and removes the need to
import related classes at all into the local module space::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and reference
documentation for :func:`.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
a mapped class, or actual :class:`.Mapper` instance, representing the
target of the relationship.
:paramref:`~.relationship.argument` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`.Table`.
In less common circumstances, the argument may also be specified
as an :class:`.Alias` construct, or even a :class:`.Join` construct.
:paramref:`~.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`.Table` that is
present in the :class:`.MetaData` collection associated with the
parent-mapped :class:`.Table`.
The :paramref:`~.relationship.secondary` keyword argument is typically
applied in the case where the intermediary :class:`.Table` is not
otherwise expressed in any direct class mapping. If the "secondary" table
is also explicitly mapped elsewhere
(e.g. as in :ref:`association_pattern`), one should consider applying
the :paramref:`~.relationship.viewonly` flag so that this :func:`.relationship`
is not used for persistence operations which may conflict with those
of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using many-to-many
in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to :paramref:`~.relationship.secondary`
when composing association table relationships, allowing additional
attributes to be specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which in some
cases can enable complex :func:`.relationship` SQL conditions
to be used.
.. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works
more effectively when referring to a :class:`.Join` instance.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`.relationship`
configuration when using :paramref:`~.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as :paramref:`~.relationship.backref`,
except the complementing property is **not** created automatically,
and instead must be configured explicitly on the other mapper. The
complementing property should also indicate :paramref:`~.relationship.back_populates`
to this relationship to ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.backref` - alternative form
of backref specification.
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`~.relationship.cascade_backrefs` option is used.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
a class which extends :class:`.RelationshipProperty.Comparator` which
provides custom SQL clause generation for comparison operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the innermost
SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT is
reducing performance of the innermost subquery beyond that of what
duplicate innermost rows may be causing.
.. versionadded:: 0.8.3 - :paramref:`~.relationship.distinct_target_key`
allows the
subquery eager loader to apply a DISTINCT modifier to the
innermost SELECT.
.. versionchanged:: 0.9.0 - :paramref:`~.relationship.distinct_target_key`
now defaults to ``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
.. deprecated:: 0.7 Please see :class:`.AttributeEvents`.
:param foreign_keys:
a list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`.relationship`
object's :paramref:`~.relationship.primaryjoin` condition.
That is, if the :paramref:`~.relationship.primaryjoin`
condition of this :func:`.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`.relationship` is ``b.a_id``.
In normal cases, the :paramref:`~.relationship.foreign_keys`
parameter is **not required.** :func:`.relationship` will
automatically determine which columns in the
:paramref:`~.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`.Column` objects that specify :class:`.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`.ForeignKeyConstraint` construct.
:paramref:`~.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`.relationship` to consider just those columns specified
here as "foreign".
.. versionchanged:: 0.8
A multiple-foreign key join ambiguity can be resolved by
setting the :paramref:`~.relationship.foreign_keys` parameter alone, without the
need to explicitly set :paramref:`~.relationship.primaryjoin` as well.
2. The :class:`.Table` being mapped does not actually have
:class:`.ForeignKey` or :class:`.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`~.relationship.primaryjoin` argument is used to construct a non-standard
join condition, which makes use of columns or expressions that do
not normally refer to their "parent" column, such as a join condition
expressed by a complex comparison using a SQL function.
The :func:`.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`~.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`.relationship` doesn't raise any exceptions, the
:paramref:`~.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`~.relationship.foreign_keys` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign" columns
within a :paramref:`~.relationship.primaryjoin` condition.
.. versionadded:: 0.8
The :func:`.foreign` annotation can also be applied
directly to the :paramref:`~.relationship.primaryjoin` expression, which is an alternate,
more specific system of describing which columns in a particular
:paramref:`~.relationship.primaryjoin` should be considered "foreign".
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not nullable,
or when the reference is one-to-one or a collection that is guaranteed
to have one or at least one entry.
If the joined-eager load is chained onto an existing LEFT OUTER JOIN,
``innerjoin=True`` will be bypassed and the join will continue to
chain as LEFT OUTER JOIN so that the results don't change. As an alternative,
specify the value ``"nested"``. This will instead nest the join
on the right side, e.g. using the form "a LEFT OUTER JOIN (b JOIN c)".
.. versionadded:: 0.9.4 Added ``innerjoin="nested"`` option to support
nesting of eager "inner" joins.
.. seealso::
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:paramref:`.joinedload.innerjoin` - loader option version
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is first
accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`~.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to a
subquery of the original statement, for each collection requested.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``dynamic`` - the attribute will return a pre-configured
:class:`.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading` - Full documentation on relationship loader
configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it has
never been flushed. This may take effect for a pending object when
autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`~.relationship.load_on_pending` flag does not improve behavior
when the ORM is used normally - object references should be constructed
at the object level, not at the foreign key level, so that they
are present in an ordinary way before a flush proceeds. This flag
is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method establishes
"load on pending" behavior for the whole object, and also allows
loading on objects that remain transient or detached.
:param order_by:
indicates the ordering that should be applied when loading these
items. :paramref:`~.relationship.order_by` is expected to refer to one
of the :class:`.Column`
objects to which the target class is mapped, or
the attribute itself bound to the target class which refers
to the column.
:paramref:`~.relationship.order_by` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when there
is no delete or delete-orphan cascade enabled. This is
typically used when a triggering or error raise scenario is in
place on the database side. Note that the foreign key
attributes on in-session child objects will not be changed
after a flush occurs so this is a very special use-case
setting.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates loading and INSERT/UPDATE/DELETE behavior when the
source of a foreign key value changes (i.e. an "on update"
cascade), which are typically the primary key columns of the
source row.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. Note that with databases which enforce
referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
ON UPDATE CASCADE is required for this operation. The
relationship() will update the value of the attribute on related
items which are locally present in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE
(i.e. SQLite, MySQL MyISAM tables).
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`~.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
a SQL expression that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or association
table).
:paramref:`~.relationship.primaryjoin` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`.relationship.remote_side` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
.. versionchanged:: 0.8
The :func:`.remote` annotation can also be applied
directly to the ``primaryjoin`` expression, which is an alternate,
more specific system of describing which columns in a particular
``primaryjoin`` should be considered "remote".
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`~.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the same
purpose as :paramref:`~.relationship.remote_side`, typically
when a custom :paramref:`~.relationship.primaryjoin` condition
is used.
:param query_class:
a :class:`.Query` subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`.orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic" relationship
loaders.
:param secondaryjoin:
a SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association and
child tables.
:paramref:`~.relationship.secondaryjoin` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The :func:`.relationship`
construct itself will raise an error instructing when this option
is required.
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`~.relationship.single_parent` flag may be appropriate.
:param uselist:
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`~.relationship.uselist` to False.
The :paramref:`~.relationship.uselist` flag is also available on an
existing :func:`.relationship` construct as a read-only attribute, which
can be used to determine if this :func:`.relationship` deals with
collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to one"
relationship pattern, which is typically when the
:paramref:`~.relationship.uselist` flag is needed.
:param viewonly=False:
when set to True, the relationship is used only for loading objects,
and not for any persistence operation. A :func:`.relationship`
which specifies :paramref:`~.relationship.viewonly` can work
with a wider range of SQL operations within the :paramref:`~.relationship.primaryjoin`
condition, including operations that feature the use of
a variety of comparison operators as well as SQL functions such
as :func:`~.sql.expression.cast`. The :paramref:`~.relationship.viewonly`
flag is also of general use when defining any kind of :func:`~.relationship`
that doesn't represent the full set of related objects, to prevent
modifications of the collection from resulting in persistence operations.
"""
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
if strategy_class:
self.strategy_class = strategy_class
else:
self.strategy_class = self._strategy_lookup(("lazy", self.lazy))
self._reverse_property = set()
self.cascade = cascade if cascade is not False \
else "save-update, merge"
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive")
self.backref = None
else:
self.backref = backref
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief overview
of ORM level operator definition.
See also:
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(self, prop, parentmapper, adapt_to_entity=None, of_type=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parentmapper = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(self.property, self._parentmapper,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type)
@util.memoized_property
def mapper(self):
"""The target :class:`.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type = inspect(self._of_type).mapper
else:
of_type = None
pj, sj, source, dest, \
secondary, target_adapter = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type=of_type)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
"""
return RelationshipProperty.Comparator(
self.property,
self._parentmapper,
adapt_to_entity=self._adapt_to_entity,
of_type=cls)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple many-to-one, use '
'in_() against the set of foreign key values.')
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(self.property._optimized_compare(
None, adapt_source=self.adapter))
elif self.property.uselist:
raise sa_exc.InvalidRequestError("Can't compare a colle"
"ction to an object or collection; use "
"contains() to test for membership.")
else:
return _orm_annotate(self.property._optimized_compare(other,
adapt_source=self.adapter))
        def _criterion_exists(self, criterion=None, **kwargs):
            """Build the correlated EXISTS subquery shared by ``any()``
            and ``has()``.

            ``criterion`` is an optional SQL expression ANDed into the
            subquery; each keyword argument adds an equality test against
            the corresponding attribute of the target class.  Honors an
            ``of_type()`` target when one was established on this
            comparator.
            """
            # When of_type() was applied, target the subtype's mapper and
            # selectable, folding in any single-table-inheritance criterion.
            if getattr(self, '_of_type', None):
                info = inspect(self._of_type)
                target_mapper, to_selectable, is_aliased_class = \
                    info.mapper, info.selectable, info.is_aliased_class
                if self.property._is_self_referential and not is_aliased_class:
                    to_selectable = to_selectable.alias()

                single_crit = target_mapper._single_table_criterion
                if single_crit is not None:
                    if criterion is not None:
                        criterion = single_crit & criterion
                    else:
                        criterion = single_crit
            else:
                is_aliased_class = False
                to_selectable = None

            if self.adapter:
                source_selectable = self._source_selectable()
            else:
                source_selectable = None

            pj, sj, source, dest, secondary, target_adapter = \
                self.property._create_joins(dest_polymorphic=True,
                                            dest_selectable=to_selectable,
                                            source_selectable=source_selectable)

            # Fold keyword equality conditions into the criterion.
            for k in kwargs:
                crit = getattr(self.property.mapper.class_, k) == kwargs[k]
                if criterion is None:
                    criterion = crit
                else:
                    criterion = criterion & crit

            # annotate the *local* side of the join condition, in the case
            # of pj + sj this is the full primaryjoin, in the case of just
            # pj its the local side of the primaryjoin.
            if sj is not None:
                j = _orm_annotate(pj) & sj
            else:
                j = _orm_annotate(pj, exclude=self.property.remote_side)

            if criterion is not None and target_adapter and not is_aliased_class:
                # limit this adapter to annotated only?
                criterion = target_adapter.traverse(criterion)

            # only have the "joined left side" of what we
            # return be subject to Query adaption. The right
            # side of it is used for an exists() subquery and
            # should not correlate or otherwise reach out
            # to anything in the enclosing query.
            if criterion is not None:
                criterion = criterion._annotate(
                    {'no_replacement_traverse': True})

            crit = j & sql.True_._ifnone(criterion)

            # Correlate everything except the destination (and secondary)
            # tables so the subquery correlates to the enclosing query.
            ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest)
            if secondary is not None:
                ex = ex.correlate_except(secondary)
            return ex
def any(self, criterion=None, **kwargs):
    """Produce an EXISTS expression that tests the collection
    against particular criterion.

    E.g.::

        session.query(MyClass).filter(
            MyClass.somereference.any(SomeRelated.x==2)
        )

    renders a correlated subquery along the lines of::

        SELECT * FROM my_table WHERE
        EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
        AND related.x=2)

    Because a correlated subquery is used, performance against
    large target tables is not nearly as good as that of an
    explicit join.

    Calling :meth:`~.RelationshipProperty.Comparator.any` with no
    criterion is a handy way to test for an empty collection::

        session.query(MyClass).filter(
            ~MyClass.somereference.any()
        )

    Only valid for collection-style relationships, i.e. a
    :func:`.relationship` with ``uselist=True``; for scalar
    references use :meth:`~.RelationshipProperty.Comparator.has`.

    """
    # any() is meaningless against a scalar reference; direct the
    # user toward has() instead.
    if self.property.uselist:
        return self._criterion_exists(criterion, **kwargs)
    raise sa_exc.InvalidRequestError(
        "'any()' not implemented for scalar "
        "attributes. Use has()."
    )
def has(self, criterion=None, **kwargs):
    """Produce an EXISTS expression that tests a scalar reference
    against particular criterion.

    E.g.::

        session.query(MyClass).filter(
            MyClass.somereference.has(SomeRelated.x==2)
        )

    renders a correlated subquery along the lines of::

        SELECT * FROM my_table WHERE
        EXISTS (SELECT 1 FROM related WHERE
        related.id==my_table.related_id AND related.x=2)

    Because a correlated subquery is used, performance against
    large target tables is not nearly as good as that of an
    explicit join.

    Only valid for scalar references, i.e. a :func:`.relationship`
    with ``uselist=False``; for collections use
    :meth:`~.RelationshipProperty.Comparator.any`.

    """
    # has() is meaningless against a collection; direct the user
    # toward any() instead.
    if not self.property.uselist:
        return self._criterion_exists(criterion, **kwargs)
    raise sa_exc.InvalidRequestError(
        "'has()' not implemented for collections. "
        "Use any().")
def contains(self, other, **kwargs):
    """Return a simple expression that tests a collection for
    containment of a particular item.

    :meth:`~.RelationshipProperty.Comparator.contains` is
    only valid for a collection, i.e. a
    :func:`~.orm.relationship` that implements
    one-to-many or many-to-many with ``uselist=True``.

    When used in a simple one-to-many context, an
    expression like::

        MyClass.contains(other)

    Produces a clause like::

        mytable.id == <some id>

    Where ``<some id>`` is the value of the foreign key
    attribute on ``other`` which refers to the primary
    key of its parent object. From this it follows that
    :meth:`~.RelationshipProperty.Comparator.contains` is
    very useful when used with simple one-to-many
    operations.

    For many-to-many operations, the behavior of
    :meth:`~.RelationshipProperty.Comparator.contains`
    has more caveats. The association table will be
    rendered in the statement, producing an "implicit"
    join, that is, includes multiple tables in the FROM
    clause which are equated in the WHERE clause::

        query(MyClass).filter(MyClass.contains(other))

    Produces a query like::

        SELECT * FROM my_table, my_association_table AS
        my_association_table_1 WHERE
        my_table.id = my_association_table_1.parent_id
        AND my_association_table_1.child_id = <some id>

    Where ``<some id>`` would be the primary key of
    ``other``. From the above, it is clear that
    :meth:`~.RelationshipProperty.Comparator.contains`
    will **not** work with many-to-many collections when
    used in queries that move beyond simple AND
    conjunctions, such as multiple
    :meth:`~.RelationshipProperty.Comparator.contains`
    expressions joined by OR. In such cases subqueries or
    explicit "outer joins" will need to be used instead.
    See :meth:`~.RelationshipProperty.Comparator.any` for
    a less-performant alternative using EXISTS, or refer
    to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
    for more details on constructing outer joins.

    """
    if not self.property.uselist:
        raise sa_exc.InvalidRequestError(
            "'contains' not implemented for scalar "
            "attributes.  Use ==")
    # delegate to the lazy loader's clause: compares the FK
    # columns of 'other' directly rather than using EXISTS
    clause = self.property._optimized_compare(other,
                                              adapt_source=self.adapter)

    if self.property.secondaryjoin is not None:
        # many-to-many: the simple negation (~) of the implicit-join
        # form is wrong, so attach an explicit negation clause
        clause.negation_clause = \
            self.__negated_contains_or_equals(other)

    return clause
def __negated_contains_or_equals(self, other):
    """Build the negated form of ``contains()`` / ``==`` for this
    relationship, used for ``!=`` and for the negation clause of
    many-to-many ``contains()``.
    """
    if self.property.direction == MANYTOONE:
        state = attributes.instance_state(other)

        def state_bindparam(x, state, col):
            # deferred bindparam: resolves the committed attribute
            # value at execution time; keep a strong ref so the
            # object isn't garbage collected before execution
            o = state.obj()  # strong ref
            return sql.bindparam(x, unique=True, callable_=lambda: \
                self.property.mapper._get_committed_attr_by_column(o, col))

        def adapt(col):
            # apply the aliasing adapter, if any, to the column
            if self.adapter:
                return self.adapter(col)
            else:
                return col

        if self.property._use_get:
            # simple FK-based comparison: "fk != value OR fk IS NULL"
            # for each local/remote column pair
            return sql.and_(*[
                sql.or_(
                    adapt(x) != state_bindparam(adapt(x), state, y),
                    adapt(x) == None)
                for (x, y) in self.property.local_remote_pairs])

    # general case: NOT EXISTS against the target's primary key
    criterion = sql.and_(*[x == y for (x, y) in
                           zip(
                               self.property.mapper.primary_key,
                               self.property.
                               mapper.
                               primary_key_from_instance(other))
                           ])
    return ~self._criterion_exists(criterion)
def __ne__(self, other):
    """Implement the ``!=`` operator.

    In a many-to-one context, such as::

        MyClass.some_prop != <some object>

    This will typically produce a clause such as::

        mytable.related_id != <some id>

    Where ``<some id>`` is the primary key of the
    given object.

    The ``!=`` operator provides partial functionality for non-
    many-to-one comparisons:

    * Comparisons against collections are not supported.
      Use
      :meth:`~.RelationshipProperty.Comparator.contains`
      in conjunction with :func:`~.expression.not_`.
    * Compared to a scalar one-to-many, will produce a
      clause that compares the target columns in the parent to
      the given target.
    * Compared to a scalar many-to-many, an alias
      of the association table will be rendered as
      well, forming a natural join that is part of the
      main body of the query. This will not work for
      queries that go beyond simple AND conjunctions of
      comparisons, such as those which use OR. Use
      explicit joins, outerjoins, or
      :meth:`~.RelationshipProperty.Comparator.has` in
      conjunction with :func:`~.expression.not_` for
      more comprehensive non-many-to-one scalar
      membership tests.
    * Comparisons against ``None`` given in a one-to-many
      or many-to-many context produce an EXISTS clause.

    """
    if isinstance(other, (util.NoneType, expression.Null)):
        if self.property.direction == MANYTOONE:
            # "related != None" -> at least one FK column is non-NULL
            return sql.or_(*[x != None for x in
                             self.property._calculated_foreign_keys])
        else:
            # one-to-many/many-to-many: "collection is not empty"
            return self._criterion_exists()
    elif self.property.uselist:
        raise sa_exc.InvalidRequestError("Can't compare a collection"
                                         " to an object or collection; use "
                                         "contains() to test for membership.")
    else:
        return self.__negated_contains_or_equals(other)
@util.memoized_property
def property(self):
    """Return the :class:`.RelationshipProperty` this comparator
    refers to, first ensuring all mappers are configured so the
    property's join conditions and direction are resolved.
    """
    if mapperlib.Mapper._new_mappers:
        mapperlib.Mapper._configure_all()
    return self.prop
def compare(self, op, value,
            value_is_parent=False,
            alias_secondary=True):
    """Render a comparison of this relationship against ``value``
    using operator ``op``.

    Equality comparisons are routed through the optimized
    FK-based clause; ``== None`` against a collection renders a
    NOT EXISTS.  All other operators are applied directly to the
    comparator.
    """
    if op != operators.eq:
        return op(self.comparator, value)
    if value is None and self.uselist:
        return ~sql.exists([1], self.primaryjoin)
    # scalar comparison (including "== None" for scalar refs)
    return self._optimized_compare(
        value,
        value_is_parent=value_is_parent,
        alias_secondary=alias_secondary)
def _optimized_compare(self, value, value_is_parent=False,
                       adapt_source=None,
                       alias_secondary=True):
    """Render a comparison clause via the lazy loader strategy,
    comparing foreign key columns directly rather than via EXISTS.
    """
    if value is not None:
        # pass the instance's state, not the instance itself
        value = attributes.instance_state(value)
    return self._lazy_strategy.lazy_clause(value,
                                           reverse_direction=not value_is_parent,
                                           alias_secondary=alias_secondary,
                                           adapt_source=adapt_source)
def __str__(self):
    """Render as ``ClassName.attrname``."""
    return "%s.%s" % (self.parent.class_.__name__, self.key)
def merge(self,
          session,
          source_state,
          source_dict,
          dest_state,
          dest_dict,
          load, _recursive):
    """Merge the value of this relationship from ``source_state``
    into ``dest_state`` as part of :meth:`.Session.merge`.
    """

    if load:
        # cycle guard: if the reverse property already merged this
        # pair, stop here to avoid infinite recursion
        for r in self._reverse_property:
            if (source_state, r) in _recursive:
                return

    if not "merge" in self._cascade:
        return

    if self.key not in source_dict:
        # attribute not loaded on the source; nothing to merge
        return

    if self.uselist:
        instances = source_state.get_impl(self.key).\
            get(source_state, source_dict)
        if hasattr(instances, '_sa_adapter'):
            # convert collections to adapters to get a true iterator
            instances = instances._sa_adapter

        if load:
            # for a full merge, pre-load the destination collection,
            # so that individual _merge of each item pulls from identity
            # map for those already present.
            # also assumes CollectionAttributeImpl behavior of loading
            # "old" list in any case
            dest_state.get_impl(self.key).get(dest_state, dest_dict)

        dest_list = []
        for current in instances:
            current_state = attributes.instance_state(current)
            current_dict = attributes.instance_dict(current)
            # mark this pair as in-progress before recursing
            _recursive[(current_state, self)] = True
            obj = session._merge(current_state, current_dict,
                                 load=load, _recursive=_recursive)
            if obj is not None:
                dest_list.append(obj)

        if not load:
            # "don't load" merge: populate the collection without
            # firing attribute events
            coll = attributes.init_state_collection(dest_state,
                                                    dest_dict, self.key)
            for c in dest_list:
                coll.append_without_event(c)
        else:
            dest_state.get_impl(self.key)._set_iterable(dest_state,
                                                        dest_dict, dest_list)
    else:
        # scalar reference
        current = source_dict[self.key]
        if current is not None:
            current_state = attributes.instance_state(current)
            current_dict = attributes.instance_dict(current)
            _recursive[(current_state, self)] = True
            obj = session._merge(current_state, current_dict,
                                 load=load, _recursive=_recursive)
        else:
            obj = None

        if not load:
            dest_dict[self.key] = obj
        else:
            dest_state.get_impl(self.key).set(dest_state,
                                              dest_dict, obj, None)
def _value_as_iterable(self, state, dict_, key,
                       passive=attributes.PASSIVE_OFF):
    """Fetch attribute ``key`` from ``state`` and normalize it into
    a list of ``(InstanceState, instance)`` tuples.

    Returns an empty list when the value is None, empty, or
    PASSIVE_NO_RESULT; collection values are expanded into their
    members, scalar values wrapped in a one-element list.
    """

    impl = state.manager[key].impl
    value = impl.get(state, dict_, passive=passive)
    if value is None or value is attributes.PASSIVE_NO_RESULT:
        return []
    if hasattr(impl, 'get_collection'):
        members = impl.get_collection(state, dict_, value, passive=passive)
        return [(attributes.instance_state(obj), obj) for obj in members]
    return [(attributes.instance_state(value), value)]
def cascade_iterator(self, type_, state, dict_,
                     visited_states, halt_on=None):
    """Yield ``(instance, mapper, state, dict)`` tuples for objects
    reachable through this relationship for the given cascade type.
    """
    #assert type_ in self._cascade

    # only actively lazy load on the 'delete' cascade
    if type_ != 'delete' or self.passive_deletes:
        passive = attributes.PASSIVE_NO_INITIALIZE
    else:
        passive = attributes.PASSIVE_OFF

    if type_ == 'save-update':
        # include items removed from the collection but still pending
        tuples = state.manager[self.key].impl.\
            get_all_pending(state, dict_)

    else:
        tuples = self._value_as_iterable(state, dict_, self.key,
                                         passive=passive)

    skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
        not in self._cascade

    for instance_state, c in tuples:
        if instance_state in visited_states:
            continue

        if c is None:
            # would like to emit a warning here, but
            # would not be consistent with collection.append(None)
            # current behavior of silently skipping.
            # see [ticket:2229]
            continue

        instance_dict = attributes.instance_dict(c)

        if halt_on and halt_on(instance_state):
            continue

        if skip_pending and not instance_state.key:
            continue

        instance_mapper = instance_state.manager.mapper

        # sanity check: the related object must be an instance of
        # the relationship's target class (or a subclass)
        if not instance_mapper.isa(self.mapper.class_manager.mapper):
            raise AssertionError("Attribute '%s' on class '%s' "
                                 "doesn't handle objects "
                                 "of type '%s'" % (
                                     self.key,
                                     self.parent.class_,
                                     c.__class__
                                 ))

        visited_states.add(instance_state)

        yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
    """Link this relationship with its reverse (backref/back_populates)
    partner named ``key`` on the target mapper, validating that the
    two sides are compatible.
    """
    other = self.mapper.get_property(key, _configure_mappers=False)
    # cross-link both sides
    self._reverse_property.add(other)
    other._reverse_property.add(self)

    # the reverse property must point back at our parent mapper
    if not other.mapper.common_parent(self.parent):
        raise sa_exc.ArgumentError('reverse_property %r on '
                                   'relationship %s references relationship %s, which '
                                   'does not reference mapper %s' % (key, self, other,
                                                                     self.parent))
    # the two sides of a backref pair must have opposite directions;
    # matching ONETOMANY/MANYTOONE directions indicates a
    # misconfiguration (usually a missing remote_side)
    if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
            == other.direction:
        raise sa_exc.ArgumentError('%s and back-reference %s are '
                                   'both of the same direction %r. Did you mean to '
                                   'set remote_side on the many-to-one side ?'
                                   % (other, self, self.direction))
@util.memoized_property
def mapper(self):
    """Return the targeted :class:`.Mapper` for this
    :class:`.RelationshipProperty`.

    The ``argument`` given to :func:`.relationship` may be a mapped
    class, a :class:`.Mapper`, or a callable (e.g. a string-config
    lambda from declarative) returning either; it is resolved here.

    This is a lazy-initializing static attribute.

    """
    # resolve a callable argument (e.g. declarative string config)
    # to the class or mapper it returns
    if util.callable(self.argument) and \
            not isinstance(self.argument, (type, mapperlib.Mapper)):
        argument = self.argument()
    else:
        argument = self.argument

    if isinstance(argument, type):
        mapper_ = mapperlib.class_mapper(argument,
                                         configure=False)
    # BUGFIX: test the *resolved* argument, not self.argument;
    # previously a callable returning a Mapper (rather than a class)
    # fell through to the error case.
    elif isinstance(argument, mapperlib.Mapper):
        mapper_ = argument
    else:
        raise sa_exc.ArgumentError("relationship '%s' expects "
                                   "a class or a mapper argument (received: %s)"
                                   % (self.key, type(argument)))
    return mapper_
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
    """Return the selectable linked to this
    :class:`.RelationshipProperty` object's target
    :class:`.Mapper`.

    """
    # deprecated alias; .target is assigned in
    # _process_dependent_arguments()
    return self.target
def do_init(self):
    """Perform deferred initialization once all mappers are
    available: resolve arguments, build join conditions, validate
    cascades, and create the backref.  Order matters here.
    """
    self._check_conflicts()
    self._process_dependent_arguments()
    self._setup_join_conditions()
    self._check_cascade_settings(self._cascade)
    self._post_init()
    self._generate_backref()
    super(RelationshipProperty, self).do_init()
    # cache the lazy="select" strategy; used by comparisons and merge
    self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
    """Convert incoming configuration arguments to their
    proper form.

    Callables are resolved, ORM annotations removed.

    """
    # accept callables for other attributes which may require
    # deferred initialization.  This technique is used
    # by declarative "string configs" and some recipes.
    for attr in (
        'order_by', 'primaryjoin', 'secondaryjoin',
        'secondary', '_user_defined_foreign_keys', 'remote_side',
    ):
        attr_value = getattr(self, attr)
        if util.callable(attr_value):
            setattr(self, attr, attr_value())

    # remove "annotations" which are present if mapped class
    # descriptors are used to create the join expression.
    for attr in 'primaryjoin', 'secondaryjoin':
        val = getattr(self, attr)
        if val is not None:
            setattr(self, attr, _orm_deannotate(
                expression._only_column_elements(val, attr))
            )

    # ensure expressions in self.order_by, foreign_keys,
    # remote_side are all columns, not strings.
    if self.order_by is not False and self.order_by is not None:
        self.order_by = [
            expression._only_column_elements(x, "order_by")
            for x in
            util.to_list(self.order_by)]

    self._user_defined_foreign_keys = \
        util.column_set(
            expression._only_column_elements(x, "foreign_keys")
            for x in util.to_column_set(
                self._user_defined_foreign_keys
            ))

    self.remote_side = \
        util.column_set(
            expression._only_column_elements(x, "remote_side")
            for x in
            util.to_column_set(self.remote_side))

    # the relationship's target selectable; also exposed via the
    # deprecated .table property
    self.target = self.mapper.mapped_table
def _setup_join_conditions(self):
    """Build the :class:`.JoinCondition` for this relationship and
    copy its computed results (joins, direction, column sets) onto
    this property.
    """
    self._join_condition = jc = JoinCondition(
        parent_selectable=self.parent.mapped_table,
        child_selectable=self.mapper.mapped_table,
        parent_local_selectable=self.parent.local_table,
        child_local_selectable=self.mapper.local_table,
        primaryjoin=self.primaryjoin,
        secondary=self.secondary,
        secondaryjoin=self.secondaryjoin,
        parent_equivalents=self.parent._equivalent_columns,
        child_equivalents=self.mapper._equivalent_columns,
        consider_as_foreign_keys=self._user_defined_foreign_keys,
        local_remote_pairs=self.local_remote_pairs,
        remote_side=self.remote_side,
        self_referential=self._is_self_referential,
        prop=self,
        support_sync=not self.viewonly,
        can_be_synced_fn=self._columns_are_mapped
    )
    # store the deannotated (public) forms of the join conditions
    self.primaryjoin = jc.deannotated_primaryjoin
    self.secondaryjoin = jc.deannotated_secondaryjoin
    self.direction = jc.direction
    self.local_remote_pairs = jc.local_remote_pairs
    self.remote_side = jc.remote_columns
    self.local_columns = jc.local_columns
    self.synchronize_pairs = jc.synchronize_pairs
    self._calculated_foreign_keys = jc.foreign_key_columns
    self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
    """Test that this relationship is legal, warn about
    inheritance conflicts."""

    # new relationships may only be attached to the primary mapper
    # for a class, not to non-primary (ad-hoc) mappers
    if not self.is_primary() \
            and not mapperlib.class_mapper(
                self.parent.class_,
                configure=False).has_property(self.key):
        raise sa_exc.ArgumentError("Attempting to assign a new "
                                   "relationship '%s' to a non-primary mapper on "
                                   "class '%s'. New relationships can only be added "
                                   "to the primary mapper, i.e. the very first mapper "
                                   "created for class '%s' " % (self.key,
                                                                self.parent.class_.__name__,
                                                                self.parent.class_.__name__))

    # check for conflicting relationship() on superclass
    if not self.parent.concrete:
        for inheriting in self.parent.iterate_to_root():
            if inheriting is not self.parent \
                    and inheriting.has_property(self.key):
                util.warn("Warning: relationship '%s' on mapper "
                          "'%s' supersedes the same relationship "
                          "on inherited mapper '%s'; this can "
                          "cause dependency issues during flush"
                          % (self.key, self.parent, inheriting))
def _get_cascade(self):
    """Return the current cascade setting for this
    :class:`.RelationshipProperty`.
    """
    return self._cascade

def _set_cascade(self, cascade):
    # normalize string/iterable input into a CascadeOptions
    cascade = CascadeOptions(cascade)
    # validation requires the target mapper; only possible once
    # .mapper has been resolved (it is memoized into __dict__)
    if 'mapper' in self.__dict__:
        self._check_cascade_settings(cascade)
    self._cascade = cascade

    # keep the dependency processor in sync with the new setting
    if self._dependency_processor:
        self._dependency_processor.cascade = cascade

cascade = property(_get_cascade, _set_cascade)
def _check_cascade_settings(self, cascade):
    """Validate the given :class:`.CascadeOptions` against this
    relationship's direction and flags, raising or warning on
    incompatible combinations.
    """
    # delete-orphan needs a single owning parent; many-to-many and
    # many-to-one can't guarantee that without single_parent=True
    if cascade.delete_orphan and not self.single_parent \
            and (self.direction is MANYTOMANY or self.direction
                 is MANYTOONE):
        raise sa_exc.ArgumentError(
            'On %s, delete-orphan cascade is not supported '
            'on a many-to-many or many-to-one relationship '
            'when single_parent is not set.   Set '
            'single_parent=True on the relationship().'
            % self)
    if self.direction is MANYTOONE and self.passive_deletes:
        util.warn("On %s, 'passive_deletes' is normally configured "
                  "on one-to-many, one-to-one, many-to-many "
                  "relationships only."
                  % self)

    # passive_deletes='all' means "never touch the related rows",
    # which contradicts delete/delete-orphan cascading
    if self.passive_deletes == 'all' and \
            ("delete" in cascade or
             "delete-orphan" in cascade):
        raise sa_exc.ArgumentError(
            "On %s, can't set passive_deletes='all' in conjunction "
            "with 'delete' or 'delete-orphan' cascade" % self)

    if cascade.delete_orphan:
        # register with the mapper so orphan detection runs at flush
        self.mapper.primary_mapper()._delete_orphans.append(
            (self.key, self.parent.class_)
        )
def _columns_are_mapped(self, *cols):
    """Return True if every given column is present in either the
    secondary table, the parent's mapped table, or the target
    table of this relationship.
    """
    secondary = self.secondary
    parent_cols = self.parent.mapped_table.c
    target_cols = self.target.c
    for col in cols:
        if secondary is not None and secondary.c.contains_column(col):
            continue
        if parent_cols.contains_column(col):
            continue
        if target_cols.contains_column(col):
            continue
        return False
    return True
def _generate_backref(self):
    """Interpret the 'backref' instruction to create a
    :func:`.relationship` complementary to this one."""

    if not self.is_primary():
        return
    if self.backref is not None and not self.back_populates:
        # backref may be a plain name or a (name, kwargs) tuple
        # produced by the backref() function
        if isinstance(self.backref, util.string_types):
            backref_key, kwargs = self.backref, {}
        else:
            backref_key, kwargs = self.backref
        mapper = self.mapper.primary_mapper()

        # the backref name must not collide with any existing
        # property anywhere in the target's hierarchy
        check = set(mapper.iterate_to_root()).\
            union(mapper.self_and_descendants)
        for m in check:
            if m.has_property(backref_key):
                raise sa_exc.ArgumentError("Error creating backref "
                                           "'%s' on relationship '%s': property of that "
                                           "name exists on mapper '%s'" % (backref_key,
                                                                           self, m))

        # determine primaryjoin/secondaryjoin for the
        # backref.  Use the one we had, so that
        # a custom join doesn't have to be specified in
        # both directions.
        if self.secondary is not None:
            # for many to many, just switch primaryjoin/
            # secondaryjoin.   use the annotated
            # pj/sj on the _join_condition.
            pj = kwargs.pop('primaryjoin',
                            self._join_condition.secondaryjoin_minus_local)
            sj = kwargs.pop('secondaryjoin',
                            self._join_condition.primaryjoin_minus_local)
        else:
            pj = kwargs.pop('primaryjoin',
                            self._join_condition.primaryjoin_reverse_remote)
            sj = kwargs.pop('secondaryjoin', None)
            if sj:
                raise sa_exc.InvalidRequestError(
                    "Can't assign 'secondaryjoin' on a backref "
                    "against a non-secondary relationship."
                )

        foreign_keys = kwargs.pop('foreign_keys',
                                  self._user_defined_foreign_keys)
        parent = self.parent.primary_mapper()
        # propagate flags that must agree on both sides
        kwargs.setdefault('viewonly', self.viewonly)
        kwargs.setdefault('post_update', self.post_update)
        kwargs.setdefault('passive_updates', self.passive_updates)
        self.back_populates = backref_key
        relationship = RelationshipProperty(
            parent, self.secondary,
            pj, sj,
            foreign_keys=foreign_keys,
            back_populates=self.key,
            **kwargs)
        mapper._configure_property(backref_key, relationship)

    if self.back_populates:
        self._add_reverse_property(self.back_populates)
def _post_init(self):
    """Finalize uselist and create the dependency processor."""
    # default to a collection unless many-to-one, which is scalar
    if self.uselist is None:
        self.uselist = self.direction is not MANYTOONE
    # viewonly relationships don't participate in flush
    if not self.viewonly:
        self._dependency_processor = \
            dependency.DependencyProcessor.from_relationship(self)
@util.memoized_property
def _use_get(self):
    """memoize the 'use_get' attribute of this RelationshipLoader's
    lazyloader."""
    strategy = self._lazy_strategy
    return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
    # True when the target and parent mappers share a common base,
    # i.e. the relationship links a class hierarchy to itself
    return self.mapper.common_parent(self.parent)
def _create_joins(self, source_polymorphic=False,
                  source_selectable=None, dest_polymorphic=False,
                  dest_selectable=None, of_type=None):
    """Produce the join elements for this relationship.

    Returns a tuple ``(primaryjoin, secondaryjoin,
    source_selectable, dest_selectable, secondary,
    target_adapter)``.
    """
    if source_selectable is None:
        if source_polymorphic and self.parent.with_polymorphic:
            source_selectable = self.parent._with_polymorphic_selectable

    # 'aliased' tracks whether the join targets need adaption;
    # note the assignments below are order-sensitive
    aliased = False
    if dest_selectable is None:
        if dest_polymorphic and self.mapper.with_polymorphic:
            dest_selectable = self.mapper._with_polymorphic_selectable
            aliased = True
        else:
            dest_selectable = self.mapper.mapped_table

        # self-referential joins must alias the destination side
        if self._is_self_referential and source_selectable is None:
            dest_selectable = dest_selectable.alias()
            aliased = True
    else:
        # an explicit destination always implies adaption
        aliased = True

    dest_mapper = of_type or self.mapper

    single_crit = dest_mapper._single_table_criterion
    aliased = aliased or (source_selectable is not None)

    primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \
        self._join_condition.join_targets(
            source_selectable, dest_selectable, aliased, single_crit
        )
    if source_selectable is None:
        source_selectable = self.parent.local_table
    if dest_selectable is None:
        dest_selectable = self.mapper.local_table
    return (primaryjoin, secondaryjoin, source_selectable,
            dest_selectable, secondary, target_adapter)
def _annotate_columns(element, annotations):
    """Deep copy ``element``, annotating every
    :class:`.ColumnClause` within it with a copy of ``annotations``.

    Returns None unchanged if ``element`` is None.
    """
    def clone(elem):
        if isinstance(elem, expression.ColumnClause):
            elem = elem._annotate(annotations.copy())
        # recurse into child elements using this same callable
        elem._copy_internals(clone=clone)
        return elem

    if element is not None:
        element = clone(element)
    return element
class JoinCondition(object):
def __init__(self,
             parent_selectable,
             child_selectable,
             parent_local_selectable,
             child_local_selectable,
             primaryjoin=None,
             secondary=None,
             secondaryjoin=None,
             parent_equivalents=None,
             child_equivalents=None,
             consider_as_foreign_keys=None,
             local_remote_pairs=None,
             remote_side=None,
             self_referential=False,
             prop=None,
             support_sync=True,
             can_be_synced_fn=lambda *c: True
             ):
    """Analyze the relationship between two selectables, producing
    annotated join conditions, direction, and local/remote/foreign
    column collections.
    """
    self.parent_selectable = parent_selectable
    self.parent_local_selectable = parent_local_selectable
    self.child_selectable = child_selectable
    self.child_local_selectable = child_local_selectable
    self.parent_equivalents = parent_equivalents
    self.child_equivalents = child_equivalents
    self.primaryjoin = primaryjoin
    self.secondaryjoin = secondaryjoin
    self.secondary = secondary
    self.consider_as_foreign_keys = consider_as_foreign_keys
    self._local_remote_pairs = local_remote_pairs
    self._remote_side = remote_side
    self.prop = prop
    self.self_referential = self_referential
    self.support_sync = support_sync
    self.can_be_synced_fn = can_be_synced_fn

    # the analysis pipeline; each step builds on annotations
    # produced by the previous one
    self._determine_joins()
    self._annotate_fks()
    self._annotate_remote()
    self._annotate_local()
    self._setup_pairs()
    self._check_foreign_cols(self.primaryjoin, True)
    if self.secondaryjoin is not None:
        self._check_foreign_cols(self.secondaryjoin, False)
    self._determine_direction()
    self._check_remote_side()
    self._log_joins()
def _log_joins(self):
    """Emit INFO-level logging describing the computed join
    conditions and column pairs; no-op without an owning prop."""
    if self.prop is None:
        return
    log = self.prop.logger
    log.info('%s setup primary join %s', self.prop,
             self.primaryjoin)
    log.info('%s setup secondary join %s', self.prop,
             self.secondaryjoin)
    log.info('%s synchronize pairs [%s]', self.prop,
             ','.join('(%s => %s)' % (l, r) for (l, r) in
                      self.synchronize_pairs))
    log.info('%s secondary synchronize pairs [%s]', self.prop,
             ','.join('(%s => %s)' % (l, r) for (l, r) in
                      self.secondary_synchronize_pairs or []))
    log.info('%s local/remote pairs [%s]', self.prop,
             ','.join('(%s / %s)' % (l, r) for (l, r) in
                      self.local_remote_pairs))
    log.info('%s remote columns [%s]', self.prop,
             ','.join('%s' % col for col in self.remote_columns)
             )
    log.info('%s local columns [%s]', self.prop,
             ','.join('%s' % col for col in self.local_columns)
             )
    log.info('%s relationship direction %s', self.prop,
             self.direction)
def _determine_joins(self):
    """Determine the 'primaryjoin' and 'secondaryjoin' attributes,
    if not passed to the constructor already.

    This is based on analysis of the foreign key relationships
    between the parent and target mapped selectables.

    """
    if self.secondaryjoin is not None and self.secondary is None:
        raise sa_exc.ArgumentError(
            "Property %s specified with secondary "
            "join condition but "
            "no secondary argument" % self.prop)

    # find a join between the given mapper's mapped table and
    # the given table.  will try the mapper's local table first
    # for more specificity, then if not found will try the more
    # general mapped table, which in the case of inheritance is
    # a join.
    try:
        consider_as_foreign_keys = self.consider_as_foreign_keys or None
        if self.secondary is not None:
            # many-to-many: derive each half against the secondary
            if self.secondaryjoin is None:
                self.secondaryjoin = \
                    join_condition(
                        self.child_selectable,
                        self.secondary,
                        a_subset=self.child_local_selectable,
                        consider_as_foreign_keys=consider_as_foreign_keys
                    )
            if self.primaryjoin is None:
                self.primaryjoin = \
                    join_condition(
                        self.parent_selectable,
                        self.secondary,
                        a_subset=self.parent_local_selectable,
                        consider_as_foreign_keys=consider_as_foreign_keys
                    )
        else:
            if self.primaryjoin is None:
                self.primaryjoin = \
                    join_condition(
                        self.parent_selectable,
                        self.child_selectable,
                        a_subset=self.parent_local_selectable,
                        consider_as_foreign_keys=consider_as_foreign_keys
                    )
    # re-raise the core errors with relationship-specific guidance
    except sa_exc.NoForeignKeysError:
        if self.secondary is not None:
            raise sa_exc.NoForeignKeysError("Could not determine join "
                                            "condition between parent/child tables on "
                                            "relationship %s - there are no foreign keys "
                                            "linking these tables via secondary table '%s'.  "
                                            "Ensure that referencing columns are associated "
                                            "with a ForeignKey or ForeignKeyConstraint, or "
                                            "specify 'primaryjoin' and 'secondaryjoin' "
                                            "expressions."
                                            % (self.prop, self.secondary))
        else:
            raise sa_exc.NoForeignKeysError("Could not determine join "
                                            "condition between parent/child tables on "
                                            "relationship %s - there are no foreign keys "
                                            "linking these tables.  "
                                            "Ensure that referencing columns are associated "
                                            "with a ForeignKey or ForeignKeyConstraint, or "
                                            "specify a 'primaryjoin' expression."
                                            % self.prop)
    except sa_exc.AmbiguousForeignKeysError:
        if self.secondary is not None:
            raise sa_exc.AmbiguousForeignKeysError(
                "Could not determine join "
                "condition between parent/child tables on "
                "relationship %s - there are multiple foreign key "
                "paths linking the tables via secondary table '%s'.  "
                "Specify the 'foreign_keys' "
                "argument, providing a list of those columns which "
                "should be counted as containing a foreign key "
                "reference from the secondary table to each of the "
                "parent and child tables."
                % (self.prop, self.secondary))
        else:
            raise sa_exc.AmbiguousForeignKeysError(
                "Could not determine join "
                "condition between parent/child tables on "
                "relationship %s - there are multiple foreign key "
                "paths linking the tables.  Specify the "
                "'foreign_keys' argument, providing a list of those "
                "columns which should be counted as containing a "
                "foreign key reference to the parent table."
                % self.prop)
@property
def primaryjoin_minus_local(self):
    # primaryjoin with "local"/"remote" annotations stripped
    return _deep_deannotate(self.primaryjoin, values=("local", "remote"))

@property
def secondaryjoin_minus_local(self):
    # secondaryjoin with "local"/"remote" annotations stripped
    return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
    """Return the primaryjoin condition suitable for the
    "reverse" direction.

    If the primaryjoin was delivered here with pre-existing
    "remote" annotations, the local/remote annotations
    are reversed.  Otherwise, the local/remote annotations
    are removed.

    """
    if self._has_remote_annotations:
        def replace(element):
            # swap "remote" <-> "local" on each annotated element
            if "remote" in element._annotations:
                v = element._annotations.copy()
                del v['remote']
                v['local'] = True
                return element._with_annotations(v)
            elif "local" in element._annotations:
                v = element._annotations.copy()
                del v['local']
                v['remote'] = True
                return element._with_annotations(v)
        return visitors.replacement_traverse(
            self.primaryjoin, {}, replace)
    else:
        if self._has_foreign_annotations:
            # TODO: coverage
            return _deep_deannotate(self.primaryjoin,
                                    values=("local", "remote"))
        else:
            return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
    """Return True if any element within ``clause`` carries the
    given annotation key."""
    return any(
        annotation in element._annotations
        for element in visitors.iterate(clause, {})
    )
@util.memoized_property
def _has_foreign_annotations(self):
    # True if the primaryjoin arrived with "foreign" markers already
    # present (e.g. via the foreign() annotation function)
    return self._has_annotation(self.primaryjoin, "foreign")

@util.memoized_property
def _has_remote_annotations(self):
    # True if the primaryjoin arrived with "remote" markers already
    # present (e.g. via the remote() annotation function)
    return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
    """Annotate the primaryjoin and secondaryjoin
    structures with 'foreign' annotations marking columns
    considered as foreign.

    """
    if self._has_foreign_annotations:
        # user already marked columns with foreign(); nothing to do
        return

    if self.consider_as_foreign_keys:
        # explicit foreign_keys argument drives the annotation
        self._annotate_from_fk_list()
    else:
        # otherwise infer from ForeignKey metadata / secondary table
        self._annotate_present_fks()
def _annotate_from_fk_list(self):
    """Apply "foreign" annotations to columns named in the
    user-supplied ``foreign_keys`` collection."""
    def check_fk(col):
        # returning a new element replaces it in the traversal;
        # returning None leaves the element unchanged
        if col in self.consider_as_foreign_keys:
            return col._annotate({"foreign": True})
    self.primaryjoin = visitors.replacement_traverse(
        self.primaryjoin,
        {},
        check_fk
    )
    if self.secondaryjoin is not None:
        self.secondaryjoin = visitors.replacement_traverse(
            self.secondaryjoin,
            {},
            check_fk
        )
def _annotate_present_fks(self):
    """Apply "foreign" annotations by inspecting ForeignKey
    metadata on the columns in the join conditions, falling back
    to secondary-table membership for many-to-many."""
    if self.secondary is not None:
        secondarycols = util.column_set(self.secondary.c)
    else:
        secondarycols = set()

    def is_foreign(a, b):
        # prefer actual ForeignKey references between real columns
        if isinstance(a, schema.Column) and \
                isinstance(b, schema.Column):
            if a.references(b):
                return a
            elif b.references(a):
                return b

        # for many-to-many, the column belonging to the secondary
        # table is the foreign side
        if secondarycols:
            if a in secondarycols and b not in secondarycols:
                return a
            elif b in secondarycols and a not in secondarycols:
                return b

    def visit_binary(binary):
        if not isinstance(binary.left, sql.ColumnElement) or \
                not isinstance(binary.right, sql.ColumnElement):
            return

        if "foreign" not in binary.left._annotations and \
                "foreign" not in binary.right._annotations:
            col = is_foreign(binary.left, binary.right)
            if col is not None:
                if col.compare(binary.left):
                    binary.left = binary.left._annotate(
                        {"foreign": True})
                elif col.compare(binary.right):
                    binary.right = binary.right._annotate(
                        {"foreign": True})

    self.primaryjoin = visitors.cloned_traverse(
        self.primaryjoin,
        {},
        {"binary": visit_binary}
    )
    if self.secondaryjoin is not None:
        self.secondaryjoin = visitors.cloned_traverse(
            self.secondaryjoin,
            {},
            {"binary": visit_binary}
        )
def _refers_to_parent_table(self):
    """Return True if the join condition contains column
    comparisons where both columns are in both tables.

    """
    pt = self.parent_selectable
    mt = self.child_selectable
    # use a one-element list so the nested visitor can mutate it
    result = [False]

    def visit_binary(binary):
        c, f = binary.left, binary.right
        if (
            isinstance(c, expression.ColumnClause) and
            isinstance(f, expression.ColumnClause) and
            pt.is_derived_from(c.table) and
            pt.is_derived_from(f.table) and
            mt.is_derived_from(c.table) and
            mt.is_derived_from(f.table)
        ):
            result[0] = True
    visitors.traverse(
        self.primaryjoin,
        {},
        {"binary": visit_binary}
    )
    return result[0]
def _tables_overlap(self):
    """Return True if parent/child tables have some overlap."""
    return selectables_overlap(self.parent_selectable, self.child_selectable)
def _annotate_remote(self):
    """Annotate the primaryjoin and secondaryjoin
    structures with 'remote' annotations marking columns
    considered as part of the 'remote' side.

    """
    if self._has_remote_annotations:
        # user already marked columns with remote(); nothing to do
        return

    # dispatch to the appropriate strategy, most specific first
    if self.secondary is not None:
        self._annotate_remote_secondary()
    elif self._local_remote_pairs or self._remote_side:
        self._annotate_remote_from_args()
    elif self._refers_to_parent_table():
        # fully self-referential: FK-marked columns are the remote side
        self._annotate_selfref(lambda col: "foreign" in col._annotations)
    elif self._tables_overlap():
        self._annotate_remote_with_overlap()
    else:
        self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
    """annotate 'remote' in primaryjoin, secondaryjoin
    when 'secondary' is present.

    """
    def repl(element):
        # any column belonging to the secondary table is "remote"
        if self.secondary.c.contains_column(element):
            return element._annotate({"remote": True})
    self.primaryjoin = visitors.replacement_traverse(
        self.primaryjoin, {}, repl)
    self.secondaryjoin = visitors.replacement_traverse(
        self.secondaryjoin, {}, repl)
def _annotate_selfref(self, fn):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and \
isinstance(binary.right, expression.ColumnClause):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate(
{"remote": True})
else:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument.")
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(binary.left,
binary.right)
binary.right, binary.left = proc_left_right(binary.right,
binary.left)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and \
isinstance(right, expression.ColumnClause):
if self.child_selectable.c.contains_column(right) and \
self.parent_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_selectable.c.contains_column(element) and \
(
not self.parent_local_selectable.c.\
contains_column(element)
or self.child_local_selectable.c.\
contains_column(element)):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side."
% self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set([l for (l, r)
in self._local_remote_pairs])
else:
local_side = util.column_set(self.parent_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and \
elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError('Relationship %s could '
'not determine any unambiguous local/remote column '
'pairs based on join condition and remote_side '
'arguments. '
'Consider using the remote() annotation to '
'accurately mark those elements of the join '
'condition that are on the remote side of '
'the relationship.'
% (self.prop, ))
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign")
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if self.support_sync and can_sync or \
(not self.support_sync and has_foreign):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = "Could not locate any simple equality expressions "\
"involving locally mapped foreign key columns for "\
"%s join condition "\
"'%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
" Ensure that referencing columns are associated "\
"with a ForeignKey or ForeignKeyConstraint, or are "\
"annotated in the join condition with the foreign() "\
"annotation. To allow comparison operators other than "\
"'==', the relationship can be marked as viewonly=True."
raise sa_exc.ArgumentError(err)
else:
err = "Could not locate any relevant foreign key columns "\
"for %s join condition '%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
' Ensure that referencing columns are associated '\
'with a ForeignKey or ForeignKeyConstraint, or are '\
'annotated in the join condition with the foreign() '\
'annotation.'
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_selectable.c)
targetcols = util.column_set(self.child_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(
self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(
self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = self.remote_columns.\
intersection(self.foreign_key_columns).\
difference(self_equated)
manytoone_local = self.local_columns.\
intersection(self.foreign_key_columns).\
difference(self_equated)
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError("Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate())
for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if "remote" in right._annotations and \
"remote" not in left._annotations and \
self.can_be_synced_fn(left):
lrp.add((left, right))
elif "remote" in left._annotations and \
"remote" not in right._annotations and \
self.can_be_synced_fn(right):
lrp.add((right, left))
if binary.operator is operators.eq and \
self.can_be_synced_fn(left, right):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs)
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = \
self._deannotate_pairs(secondary_sync_pairs)
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
@util.memoized_property
def deannotated_primaryjoin(self):
return _deep_deannotate(self.primaryjoin)
@util.memoized_property
def deannotated_secondaryjoin(self):
if self.secondaryjoin is not None:
return _deep_deannotate(self.secondaryjoin)
else:
return None
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(
self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation)
)
return set([x._deannotate() for x in s])
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set([
col for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
])
def join_targets(self, source_selectable,
dest_selectable,
aliased,
single_crit=None):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
{'no_replacement_traverse': True})
primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
self.secondaryjoin, self.secondary
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = \
ClauseAdapter(dest_selectable,
equivalents=self.child_equivalents).\
chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = \
ClauseAdapter(secondary).\
chain(ClauseAdapter(source_selectable,
equivalents=self.parent_equivalents))
secondaryjoin = \
secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents))
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return primaryjoin, secondaryjoin, secondary, \
target_adapter, dest_selectable
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
lookup = util.column_dict()
equated_columns = util.column_dict()
being_replaced = set()
if reverse_direction and self.secondaryjoin is None:
for l, r in self.local_remote_pairs:
_list = lookup.setdefault(r, [])
_list.append((r, l))
equated_columns[l] = r
else:
# replace all "local side" columns, which is
# anything that isn't marked "remote"
being_replaced.update(self.local_columns)
for l, r in self.local_remote_pairs:
_list = lookup.setdefault(l, [])
_list.append((l, r))
equated_columns[r] = l
def col_to_bind(col):
if col in being_replaced or col in lookup:
if col in lookup:
for tobind, equated in lookup[col]:
if equated in binds:
return None
else:
assert not reverse_direction
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True)
return binds[col]
return None
lazywhere = self.deannotated_primaryjoin
if self.deannotated_secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind)
if self.deannotated_secondaryjoin is not None:
secondaryjoin = self.deannotated_secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = dict((binds[col].key, col) for col in binds)
return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations | unknown | codeparrot/codeparrot-clean | ||
from sympy import S, Integral, sin, cos, pi, sqrt, symbols
from sympy.physics.vector import (Dyadic, Point, ReferenceFrame, \
Vector)
from sympy.physics.vector import (cross, dot, express, \
time_derivative, kinematic_equations, \
outer, partial_velocity, \
get_motion_params)
from sympy.physics.vector.functions import dynamicsymbols
from sympy.utilities.pytest import raises
Vector.simp = True
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_dot():
assert dot(A.x, A.x) == 1
assert dot(A.x, A.y) == 0
assert dot(A.x, A.z) == 0
assert dot(A.y, A.x) == 0
assert dot(A.y, A.y) == 1
assert dot(A.y, A.z) == 0
assert dot(A.z, A.x) == 0
assert dot(A.z, A.y) == 0
assert dot(A.z, A.z) == 1
def test_dot_different_frames():
assert dot(N.x, A.x) == cos(q1)
assert dot(N.x, A.y) == -sin(q1)
assert dot(N.x, A.z) == 0
assert dot(N.y, A.x) == sin(q1)
assert dot(N.y, A.y) == cos(q1)
assert dot(N.y, A.z) == 0
assert dot(N.z, A.x) == 0
assert dot(N.z, A.y) == 0
assert dot(N.z, A.z) == 1
assert dot(N.x, A.x + A.y) == sqrt(2)*cos(q1 + pi/4) == dot(A.x + A.y, N.x)
assert dot(A.x, C.x) == cos(q3)
assert dot(A.x, C.y) == 0
assert dot(A.x, C.z) == sin(q3)
assert dot(A.y, C.x) == sin(q2)*sin(q3)
assert dot(A.y, C.y) == cos(q2)
assert dot(A.y, C.z) == -sin(q2)*cos(q3)
assert dot(A.z, C.x) == -cos(q2)*sin(q3)
assert dot(A.z, C.y) == sin(q2)
assert dot(A.z, C.z) == cos(q2)*cos(q3)
def test_cross():
assert cross(A.x, A.x) == 0
assert cross(A.x, A.y) == A.z
assert cross(A.x, A.z) == -A.y
assert cross(A.y, A.x) == -A.z
assert cross(A.y, A.y) == 0
assert cross(A.y, A.z) == A.x
assert cross(A.z, A.x) == A.y
assert cross(A.z, A.y) == -A.x
assert cross(A.z, A.z) == 0
def test_cross_different_frames():
assert cross(N.x, A.x) == sin(q1)*A.z
assert cross(N.x, A.y) == cos(q1)*A.z
assert cross(N.x, A.z) == -sin(q1)*A.x - cos(q1)*A.y
assert cross(N.y, A.x) == -cos(q1)*A.z
assert cross(N.y, A.y) == sin(q1)*A.z
assert cross(N.y, A.z) == cos(q1)*A.x - sin(q1)*A.y
assert cross(N.z, A.x) == A.y
assert cross(N.z, A.y) == -A.x
assert cross(N.z, A.z) == 0
assert cross(N.x, A.x) == sin(q1)*A.z
assert cross(N.x, A.y) == cos(q1)*A.z
assert cross(N.x, A.x + A.y) == sin(q1)*A.z + cos(q1)*A.z
assert cross(A.x + A.y, N.x) == -sin(q1)*A.z - cos(q1)*A.z
assert cross(A.x, C.x) == sin(q3)*C.y
assert cross(A.x, C.y) == -sin(q3)*C.x + cos(q3)*C.z
assert cross(A.x, C.z) == -cos(q3)*C.y
assert cross(C.x, A.x) == -sin(q3)*C.y
assert cross(C.y, A.x) == sin(q3)*C.x - cos(q3)*C.z
assert cross(C.z, A.x) == cos(q3)*C.y
def test_operator_match():
"""Test that the output of dot, cross, outer functions match
operator behavior.
"""
A = ReferenceFrame('A')
v = A.x + A.y
d = v | v
zerov = Vector(0)
zerod = Dyadic(0)
# dot products
assert d & d == dot(d, d)
assert d & zerod == dot(d, zerod)
assert zerod & d == dot(zerod, d)
assert d & v == dot(d, v)
assert v & d == dot(v, d)
assert d & zerov == dot(d, zerov)
assert zerov & d == dot(zerov, d)
raises(TypeError, lambda: dot(d, S(0)))
raises(TypeError, lambda: dot(S(0), d))
raises(TypeError, lambda: dot(d, 0))
raises(TypeError, lambda: dot(0, d))
assert v & v == dot(v, v)
assert v & zerov == dot(v, zerov)
assert zerov & v == dot(zerov, v)
raises(TypeError, lambda: dot(v, S(0)))
raises(TypeError, lambda: dot(S(0), v))
raises(TypeError, lambda: dot(v, 0))
raises(TypeError, lambda: dot(0, v))
# cross products
raises(TypeError, lambda: cross(d, d))
raises(TypeError, lambda: cross(d, zerod))
raises(TypeError, lambda: cross(zerod, d))
assert d ^ v == cross(d, v)
assert v ^ d == cross(v, d)
assert d ^ zerov == cross(d, zerov)
assert zerov ^ d == cross(zerov, d)
assert zerov ^ d == cross(zerov, d)
raises(TypeError, lambda: cross(d, S(0)))
raises(TypeError, lambda: cross(S(0), d))
raises(TypeError, lambda: cross(d, 0))
raises(TypeError, lambda: cross(0, d))
assert v ^ v == cross(v, v)
assert v ^ zerov == cross(v, zerov)
assert zerov ^ v == cross(zerov, v)
raises(TypeError, lambda: cross(v, S(0)))
raises(TypeError, lambda: cross(S(0), v))
raises(TypeError, lambda: cross(v, 0))
raises(TypeError, lambda: cross(0, v))
# outer products
raises(TypeError, lambda: outer(d, d))
raises(TypeError, lambda: outer(d, zerod))
raises(TypeError, lambda: outer(zerod, d))
raises(TypeError, lambda: outer(d, v))
raises(TypeError, lambda: outer(v, d))
raises(TypeError, lambda: outer(d, zerov))
raises(TypeError, lambda: outer(zerov, d))
raises(TypeError, lambda: outer(zerov, d))
raises(TypeError, lambda: outer(d, S(0)))
raises(TypeError, lambda: outer(S(0), d))
raises(TypeError, lambda: outer(d, 0))
raises(TypeError, lambda: outer(0, d))
assert v | v == outer(v, v)
assert v | zerov == outer(v, zerov)
assert zerov | v == outer(zerov, v)
raises(TypeError, lambda: outer(v, S(0)))
raises(TypeError, lambda: outer(S(0), v))
raises(TypeError, lambda: outer(v, 0))
raises(TypeError, lambda: outer(0, v))
def test_express():
assert express(Vector(0), N) == Vector(0)
assert express(S(0), N) == S(0)
assert express(A.x, C) == cos(q3)*C.x + sin(q3)*C.z
assert express(A.y, C) == sin(q2)*sin(q3)*C.x + cos(q2)*C.y - \
sin(q2)*cos(q3)*C.z
assert express(A.z, C) == -sin(q3)*cos(q2)*C.x + sin(q2)*C.y + \
cos(q2)*cos(q3)*C.z
assert express(A.x, N) == cos(q1)*N.x + sin(q1)*N.y
assert express(A.y, N) == -sin(q1)*N.x + cos(q1)*N.y
assert express(A.z, N) == N.z
assert express(A.x, A) == A.x
assert express(A.y, A) == A.y
assert express(A.z, A) == A.z
assert express(A.x, B) == B.x
assert express(A.y, B) == cos(q2)*B.y - sin(q2)*B.z
assert express(A.z, B) == sin(q2)*B.y + cos(q2)*B.z
assert express(A.x, C) == cos(q3)*C.x + sin(q3)*C.z
assert express(A.y, C) == sin(q2)*sin(q3)*C.x + cos(q2)*C.y - \
sin(q2)*cos(q3)*C.z
assert express(A.z, C) == -sin(q3)*cos(q2)*C.x + sin(q2)*C.y + \
cos(q2)*cos(q3)*C.z
# Check to make sure UnitVectors get converted properly
assert express(N.x, N) == N.x
assert express(N.y, N) == N.y
assert express(N.z, N) == N.z
assert express(N.x, A) == (cos(q1)*A.x - sin(q1)*A.y)
assert express(N.y, A) == (sin(q1)*A.x + cos(q1)*A.y)
assert express(N.z, A) == A.z
assert express(N.x, B) == (cos(q1)*B.x - sin(q1)*cos(q2)*B.y +
sin(q1)*sin(q2)*B.z)
assert express(N.y, B) == (sin(q1)*B.x + cos(q1)*cos(q2)*B.y -
sin(q2)*cos(q1)*B.z)
assert express(N.z, B) == (sin(q2)*B.y + cos(q2)*B.z)
assert express(N.x, C) == (
(cos(q1)*cos(q3) - sin(q1)*sin(q2)*sin(q3))*C.x -
sin(q1)*cos(q2)*C.y +
(sin(q3)*cos(q1) + sin(q1)*sin(q2)*cos(q3))*C.z)
assert express(N.y, C) == (
(sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x +
cos(q1)*cos(q2)*C.y +
(sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z)
assert express(N.z, C) == (-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z)
assert express(A.x, N) == (cos(q1)*N.x + sin(q1)*N.y)
assert express(A.y, N) == (-sin(q1)*N.x + cos(q1)*N.y)
assert express(A.z, N) == N.z
assert express(A.x, A) == A.x
assert express(A.y, A) == A.y
assert express(A.z, A) == A.z
assert express(A.x, B) == B.x
assert express(A.y, B) == (cos(q2)*B.y - sin(q2)*B.z)
assert express(A.z, B) == (sin(q2)*B.y + cos(q2)*B.z)
assert express(A.x, C) == (cos(q3)*C.x + sin(q3)*C.z)
assert express(A.y, C) == (sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
sin(q2)*cos(q3)*C.z)
assert express(A.z, C) == (-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z)
assert express(B.x, N) == (cos(q1)*N.x + sin(q1)*N.y)
assert express(B.y, N) == (-sin(q1)*cos(q2)*N.x +
cos(q1)*cos(q2)*N.y + sin(q2)*N.z)
assert express(B.z, N) == (sin(q1)*sin(q2)*N.x -
sin(q2)*cos(q1)*N.y + cos(q2)*N.z)
assert express(B.x, A) == A.x
assert express(B.y, A) == (cos(q2)*A.y + sin(q2)*A.z)
assert express(B.z, A) == (-sin(q2)*A.y + cos(q2)*A.z)
assert express(B.x, B) == B.x
assert express(B.y, B) == B.y
assert express(B.z, B) == B.z
assert express(B.x, C) == (cos(q3)*C.x + sin(q3)*C.z)
assert express(B.y, C) == C.y
assert express(B.z, C) == (-sin(q3)*C.x + cos(q3)*C.z)
assert express(C.x, N) == (
(cos(q1)*cos(q3) - sin(q1)*sin(q2)*sin(q3))*N.x +
(sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*N.y -
sin(q3)*cos(q2)*N.z)
assert express(C.y, N) == (
-sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z)
assert express(C.z, N) == (
(sin(q3)*cos(q1) + sin(q1)*sin(q2)*cos(q3))*N.x +
(sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*N.y +
cos(q2)*cos(q3)*N.z)
assert express(C.x, A) == (cos(q3)*A.x + sin(q2)*sin(q3)*A.y -
sin(q3)*cos(q2)*A.z)
assert express(C.y, A) == (cos(q2)*A.y + sin(q2)*A.z)
assert express(C.z, A) == (sin(q3)*A.x - sin(q2)*cos(q3)*A.y +
cos(q2)*cos(q3)*A.z)
assert express(C.x, B) == (cos(q3)*B.x - sin(q3)*B.z)
assert express(C.y, B) == B.y
assert express(C.z, B) == (sin(q3)*B.x + cos(q3)*B.z)
assert express(C.x, C) == C.x
assert express(C.y, C) == C.y
assert express(C.z, C) == C.z == (C.z)
# Check to make sure Vectors get converted back to UnitVectors
assert N.x == express((cos(q1)*A.x - sin(q1)*A.y), N)
assert N.y == express((sin(q1)*A.x + cos(q1)*A.y), N)
assert N.x == express((cos(q1)*B.x - sin(q1)*cos(q2)*B.y +
sin(q1)*sin(q2)*B.z), N)
assert N.y == express((sin(q1)*B.x + cos(q1)*cos(q2)*B.y -
sin(q2)*cos(q1)*B.z), N)
assert N.z == express((sin(q2)*B.y + cos(q2)*B.z), N)
"""
These don't really test our code, they instead test the auto simplification
(or lack thereof) of SymPy.
assert N.x == express((
(cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*C.x -
sin(q1)*cos(q2)*C.y +
(sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*C.z), N)
assert N.y == express((
(sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x +
cos(q1)*cos(q2)*C.y +
(sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z), N)
assert N.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z), N)
"""
assert A.x == express((cos(q1)*N.x + sin(q1)*N.y), A)
assert A.y == express((-sin(q1)*N.x + cos(q1)*N.y), A)
assert A.y == express((cos(q2)*B.y - sin(q2)*B.z), A)
assert A.z == express((sin(q2)*B.y + cos(q2)*B.z), A)
assert A.x == express((cos(q3)*C.x + sin(q3)*C.z), A)
# Tripsimp messes up here too.
#print express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
# sin(q2)*cos(q3)*C.z), A)
assert A.y == express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
sin(q2)*cos(q3)*C.z), A)
assert A.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z), A)
assert B.x == express((cos(q1)*N.x + sin(q1)*N.y), B)
assert B.y == express((-sin(q1)*cos(q2)*N.x +
cos(q1)*cos(q2)*N.y + sin(q2)*N.z), B)
assert B.z == express((sin(q1)*sin(q2)*N.x -
sin(q2)*cos(q1)*N.y + cos(q2)*N.z), B)
assert B.y == express((cos(q2)*A.y + sin(q2)*A.z), B)
assert B.z == express((-sin(q2)*A.y + cos(q2)*A.z), B)
assert B.x == express((cos(q3)*C.x + sin(q3)*C.z), B)
assert B.z == express((-sin(q3)*C.x + cos(q3)*C.z), B)
"""
assert C.x == express((
(cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*N.x +
(sin(q1)*cos(q3)+sin(q2)*sin(q3)*cos(q1))*N.y -
sin(q3)*cos(q2)*N.z), C)
assert C.y == express((
-sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z), C)
assert C.z == express((
(sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*N.x +
(sin(q1)*sin(q3)-sin(q2)*cos(q1)*cos(q3))*N.y +
cos(q2)*cos(q3)*N.z), C)
"""
assert C.x == express((cos(q3)*A.x + sin(q2)*sin(q3)*A.y -
sin(q3)*cos(q2)*A.z), C)
assert C.y == express((cos(q2)*A.y + sin(q2)*A.z), C)
assert C.z == express((sin(q3)*A.x - sin(q2)*cos(q3)*A.y +
cos(q2)*cos(q3)*A.z), C)
assert C.x == express((cos(q3)*B.x - sin(q3)*B.z), C)
assert C.z == express((sin(q3)*B.x + cos(q3)*B.z), C)
def test_time_derivative():
#The use of time_derivative for calculations pertaining to scalar
#fields has been tested in test_coordinate_vars in test_essential.py
A = ReferenceFrame('A')
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
B = A.orientnew('B', 'Axis', [q, A.z])
d = A.x | A.x
assert time_derivative(d, B) == (-qd) * (A.y | A.x) + \
(-qd) * (A.x | A.y)
d1 = A.x | B.y
assert time_derivative(d1, A) == - qd*(A.x|B.x)
assert time_derivative(d1, B) == - qd*(A.y|B.y)
d2 = A.x | B.x
assert time_derivative(d2, A) == qd*(A.x|B.y)
assert time_derivative(d2, B) == - qd*(A.y|B.x)
d3 = A.x | B.z
assert time_derivative(d3, A) == 0
assert time_derivative(d3, B) == - qd*(A.y|B.z)
q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4')
q1d, q2d, q3d, q4d = dynamicsymbols('q1 q2 q3 q4', 1)
q1dd, q2dd, q3dd, q4dd = dynamicsymbols('q1 q2 q3 q4', 2)
C = B.orientnew('C', 'Axis', [q4, B.x])
v1 = q1 * A.z
v2 = q2*A.x + q3*B.y
v3 = q1*A.x + q2*A.y + q3*A.z
assert time_derivative(B.x, C) == 0
assert time_derivative(B.y, C) == - q4d*B.z
assert time_derivative(B.z, C) == q4d*B.y
assert time_derivative(v1, B) == q1d*A.z
assert time_derivative(v1, C) == - q1*sin(q)*q4d*A.x + \
q1*cos(q)*q4d*A.y + q1d*A.z
assert time_derivative(v2, A) == q2d*A.x - q3*qd*B.x + q3d*B.y
assert time_derivative(v2, C) == q2d*A.x - q2*qd*A.y + \
q2*sin(q)*q4d*A.z + q3d*B.y - q3*q4d*B.z
assert time_derivative(v3, B) == (q2*qd + q1d)*A.x + \
(-q1*qd + q2d)*A.y + q3d*A.z
assert time_derivative(d, C) == - qd*(A.y|A.x) + \
sin(q)*q4d*(A.z|A.x) - qd*(A.x|A.y) + sin(q)*q4d*(A.x|A.z)
def test_get_motion_methods():
#Initialization
t = dynamicsymbols._t
s1, s2, s3 = symbols('s1 s2 s3')
S1, S2, S3 = symbols('S1 S2 S3')
S4, S5, S6 = symbols('S4 S5 S6')
t1, t2 = symbols('t1 t2')
a, b, c = dynamicsymbols('a b c')
ad, bd, cd = dynamicsymbols('a b c', 1)
a2d, b2d, c2d = dynamicsymbols('a b c', 2)
v0 = S1*N.x + S2*N.y + S3*N.z
v01 = S4*N.x + S5*N.y + S6*N.z
v1 = s1*N.x + s2*N.y + s3*N.z
v2 = a*N.x + b*N.y + c*N.z
v2d = ad*N.x + bd*N.y + cd*N.z
v2dd = a2d*N.x + b2d*N.y + c2d*N.z
#Test position parameter
assert get_motion_params(frame = N) == (0, 0, 0)
assert get_motion_params(N, position=v1) == (0, 0, v1)
assert get_motion_params(N, position=v2) == (v2dd, v2d, v2)
#Test velocity parameter
assert get_motion_params(N, velocity=v1) == (0, v1, v1 * t)
assert get_motion_params(N, velocity=v1, position=v0, timevalue1=t1) == \
(0, v1, v0 + v1*(t - t1))
answer = get_motion_params(N, velocity=v1, position=v2, timevalue1=t1)
answer_expected = (0, v1, v1*t - v1*t1 + v2.subs(t, t1))
assert answer == answer_expected
answer = get_motion_params(N, velocity=v2, position=v0, timevalue1=t1)
integral_vector = Integral(a, (t, t1, t))*N.x + Integral(b, (t, t1, t))*N.y \
+ Integral(c, (t, t1, t))*N.z
answer_expected = (v2d, v2, v0 + integral_vector)
assert answer == answer_expected
#Test acceleration parameter
assert get_motion_params(N, acceleration=v1) == \
(v1, v1 * t, v1 * t**2/2)
assert get_motion_params(N, acceleration=v1, velocity=v0,
position=v2, timevalue1=t1, timevalue2=t2) == \
(v1, (v0 + v1*t - v1*t2),
-v0*t1 + v1*t**2/2 + v1*t2*t1 - \
v1*t1**2/2 + t*(v0 - v1*t2) + \
v2.subs(t, t1))
assert get_motion_params(N, acceleration=v1, velocity=v0,
position=v01, timevalue1=t1, timevalue2=t2) == \
(v1, v0 + v1*t - v1*t2,
-v0*t1 + v01 + v1*t**2/2 + \
v1*t2*t1 - v1*t1**2/2 + \
t*(v0 - v1*t2))
answer = get_motion_params(N, acceleration=a*N.x, velocity=S1*N.x,
position=S2*N.x, timevalue1=t1, timevalue2=t2)
i1 = Integral(a, (t, t2, t))
answer_expected = (a*N.x, (S1 + i1)*N.x, \
(S2 + Integral(S1 + i1, (t, t1, t)))*N.x)
assert answer == answer_expected
def test_kin_eqs():
q0, q1, q2, q3 = dynamicsymbols('q0 q1 q2 q3')
q0d, q1d, q2d, q3d = dynamicsymbols('q0 q1 q2 q3', 1)
u1, u2, u3 = dynamicsymbols('u1 u2 u3')
kds = kinematic_equations([u1, u2, u3], [q0, q1, q2, q3], 'quaternion')
assert kds == [-0.5 * q0 * u1 - 0.5 * q2 * u3 + 0.5 * q3 * u2 + q1d,
-0.5 * q0 * u2 + 0.5 * q1 * u3 - 0.5 * q3 * u1 + q2d,
-0.5 * q0 * u3 - 0.5 * q1 * u2 + 0.5 * q2 * u1 + q3d,
0.5 * q1 * u1 + 0.5 * q2 * u2 + 0.5 * q3 * u3 + q0d]
def test_partial_velocity():
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3')
u4, u5 = dynamicsymbols('u4, u5')
r = symbols('r')
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z)
C = Point('C')
C.set_vel(N, u4 * L.x + u5 * (Y.z ^ L.x))
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
vel_list = [Dmc.vel(N), C.vel(N), R.ang_vel_in(N)]
u_list = [u1, u2, u3, u4, u5]
assert (partial_velocity(vel_list, u_list, N) ==
[[- r*L.y, r*L.x, 0, L.x, cos(q2)*L.y - sin(q2)*L.z],
[0, 0, 0, L.x, cos(q2)*L.y - sin(q2)*L.z],
[L.x, L.y, L.z, 0, 0]]) | unknown | codeparrot/codeparrot-clean | ||
from pippi import dsp, tune
from hcj import snds, keys
key = 'g'
rhodes = snds.load('hcj/rhodes1.wav')
rhodes = dsp.transpose(rhodes, 16.0/15.0)
def chord(length, freqs, amp):
layers = [ keys.rhodes(length, freq, amp * dsp.rand(0.25, 0.5)) for freq in freqs ]
layers = [ dsp.pan(layer, dsp.rand()) for layer in layers ]
return dsp.mix(layers)
def makeStab(length, i):
freqs = tune.fromdegrees([ dsp.randchoose([1,2,3,4,5,6,8]) for _ in range(dsp.randint(2,4)) ], octave=3, root=key)
stab = chord(length, freqs, dsp.rand(0.25, 0.75))
stab = dsp.taper(stab, 40)
stab = dsp.fill(stab, length, silence=True)
return stab
def makePulse(length, i):
freqs = tune.fromdegrees([ dsp.randchoose([1,2,3,4,5,6,8]) for _ in range(dsp.randint(2,4)) ], octave=2, root=key)
pulse = chord(length, freqs, dsp.rand(0.5, 0.75))
pulse = dsp.taper(pulse, 40)
pulse = dsp.amp(pulse, dsp.rand(0.5, 1))
pulse = dsp.fill(pulse, length, silence=True)
return pulse
def makeLongChord(seg):
degrees = [ dsp.randint(1, 9) for _ in range(dsp.randint(2,4)) ]
long_chord = chord(sum(seg), [ freq * 2**dsp.randint(0, 5) for freq in tune.fromdegrees(degrees, octave=1, root=key) ], dsp.rand(0.15, 0.35))
long_chord = dsp.fill(long_chord, sum(seg))
return long_chord
def makeGlitch(length, i):
g = dsp.cut(long_chord, dsp.randint(0, dsp.flen(long_chord) - length), length)
g = dsp.alias(g)
g = dsp.fill(g, length)
return g | unknown | codeparrot/codeparrot-clean | ||
:host {
display: block;
}
.insert-container {
border: 1px solid #dddddd;
margin-top: 1em;
padding: 20px 20px 0px 20px;
font-weight: bold;
font-size: 20px;
opacity: 1;
transition: opacity 200ms ease-in;
@starting-style {
opacity: 0;
}
}
.deleting {
opacity: 0;
transform: translateY(20px);
transition:
opacity 500ms ease-out,
transform 500ms ease-out;
}
.toggle-btn {
background: transparent;
border: 1px solid var(--primary-contrast, black);
color: var(--primary-contrast, black);
padding: 10px 24px;
border-radius: 8px;
cursor: pointer;
} | css | github | https://github.com/angular/angular | adev/src/content/examples/animations/src/app/native-css/remove.css |
"""
This test file will run through some XBlock test scenarios regarding the
recommender system
"""
from copy import deepcopy
import json
import itertools
import StringIO
import unittest
from ddt import ddt, data
from nose.plugins.attrib import attr
from django.conf import settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.courseware.tests.factories import GlobalStaffFactory
from lms.djangoapps.lms_xblock.runtime import quote_slashes
class TestRecommender(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Base class for Recommender XBlock tests: builds a course containing
    two recommender XBlocks (so state isolation between instances can be
    verified), plus shared fixtures and helpers for invoking the XBlock's
    AJAX handlers and checking responses.
    """
    # Two ordinary learners; a staff user is created separately in setUp.
    STUDENTS = [
        {'email': 'view@test.com', 'password': 'foo'},
        {'email': 'view2@test.com', 'password': 'foo'}
    ]
    # display_names of the two recommender XBlocks created in setUpClass.
    XBLOCK_NAMES = ['recommender', 'recommender_second']
    @classmethod
    def setUpClass(cls):
        # Nose runs setUpClass methods even if a class decorator says to skip
        # the class: https://github.com/nose-devs/nose/issues/946
        # So, skip the test class here if we are not in the LMS.
        if settings.ROOT_URLCONF != 'lms.urls':
            raise unittest.SkipTest('Test only valid in lms')
        super(TestRecommender, cls).setUpClass()
        cls.course = CourseFactory.create(
            display_name='Recommender_Test_Course'
        )
        # Build course structure in one bulk operation for speed; signals
        # are suppressed because no listeners are needed for these tests.
        with cls.store.bulk_operations(cls.course.id, emit_signals=False):
            cls.chapter = ItemFactory.create(
                parent=cls.course, display_name='Overview'
            )
            cls.section = ItemFactory.create(
                parent=cls.chapter, display_name='Welcome'
            )
            cls.unit = ItemFactory.create(
                parent=cls.section, display_name='New Unit'
            )
            # Two recommender instances in the same unit, so tests can
            # confirm that state does not leak between XBlock usages.
            cls.xblock = ItemFactory.create(
                parent=cls.unit,
                category='recommender',
                display_name='recommender'
            )
            cls.xblock2 = ItemFactory.create(
                parent=cls.unit,
                category='recommender',
                display_name='recommender_second'
            )
        # Courseware page holding both XBlocks; fetched after each handler
        # call to confirm the page still renders.
        cls.course_url = reverse(
            'courseware_section',
            kwargs={
                'course_id': cls.course.id.to_deprecated_string(),
                'chapter': 'Overview',
                'section': 'Welcome',
            }
        )
        # Resource URLs double as the resources' ids throughout the tests.
        cls.resource_urls = [
            (
                "https://courses.edx.org/courses/MITx/3.091X/"
                "2013_Fall/courseware/SP13_Week_4/"
                "SP13_Periodic_Trends_and_Bonding/"
            ),
            (
                "https://courses.edx.org/courses/MITx/3.091X/"
                "2013_Fall/courseware/SP13_Week_4/SP13_Covalent_Bonding/"
            )
        ]
        cls.test_recommendations = {
            cls.resource_urls[0]: {
                "title": "Covalent bonding and periodic trends",
                "url": cls.resource_urls[0],
                "description": (
                    "http://people.csail.mit.edu/swli/edx/"
                    "recommendation/img/videopage1.png"
                ),
                "descriptionText": (
                    "short description for Covalent bonding "
                    "and periodic trends"
                )
            },
            cls.resource_urls[1]: {
                "title": "Polar covalent bonds and electronegativity",
                "url": cls.resource_urls[1],
                "description": (
                    "http://people.csail.mit.edu/swli/edx/"
                    "recommendation/img/videopage2.png"
                ),
                "descriptionText": (
                    "short description for Polar covalent "
                    "bonds and electronegativity"
                )
            }
        }
    def setUp(self):
        """Create and activate the student accounts plus one staff user."""
        super(TestRecommender, self).setUp()
        for idx, student in enumerate(self.STUDENTS):
            username = "u{}".format(idx)
            self.create_account(username, student['email'], student['password'])
            self.activate_user(student['email'])
        self.staff_user = GlobalStaffFactory()
    def get_handler_url(self, handler, xblock_name=None):
        """
        Get url for the specified xblock handler
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        return reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(self.course.id.make_usage_key('recommender', xblock_name).to_deprecated_string()),
            'handler': handler,
            'suffix': ''
        })
    def enroll_student(self, email, password):
        """
        Student login and enroll for the course
        """
        self.login(email, password)
        self.enroll(self.course, verify=True)
    def enroll_staff(self, staff):
        """
        Staff login and enroll for the course
        """
        email = staff.email
        password = 'test'  # default password used by GlobalStaffFactory
        self.login(email, password)
        self.enroll(self.course, verify=True)
    def initialize_database_by_id(self, handler, resource_id, times, xblock_name=None):
        """
        Call a ajax event (vote, delete, endorse) on a resource by its id
        several times
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        url = self.get_handler_url(handler, xblock_name)
        for _ in range(times):
            self.client.post(url, json.dumps({'id': resource_id}), '')
    def call_event(self, handler, resource, xblock_name=None):
        """
        Call a ajax event (add, edit, flag, etc.) by specifying the resource
        it takes
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        url = self.get_handler_url(handler, xblock_name)
        return self.client.post(url, json.dumps(resource), '')
    def check_event_response_by_key(self, handler, resource, resp_key, resp_val, xblock_name=None):
        """
        Call the event specified by the handler with the resource, and check
        whether the key (resp_key) in response is as expected (resp_val)
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        resp = json.loads(self.call_event(handler, resource, xblock_name).content)
        self.assertEqual(resp[resp_key], resp_val)
        # Sanity check: the courseware page must still render afterwards.
        self.assert_request_status_code(200, self.course_url)
    def check_event_response_by_http_status(self, handler, resource, http_status_code, xblock_name=None):
        """
        Call the event specified by the handler with the resource, and check
        whether the http_status in response is as expected
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        resp = self.call_event(handler, resource, xblock_name)
        self.assertEqual(resp.status_code, http_status_code)
        # Sanity check: the courseware page must still render afterwards.
        self.assert_request_status_code(200, self.course_url)
@attr('shard_1')
class TestRecommenderCreateFromEmpty(TestRecommender):
    """
    Verify that resources can be added to an empty recommender database.
    """
    def test_add_resource(self):
        """
        A brand-new resource should be echoed back with zeroed vote counts.
        """
        student = self.STUDENTS[0]
        self.enroll_student(student['email'], student['password'])
        # Each (resource, xblock) combination must be accepted independently.
        for resource_id, resource in self.test_recommendations.iteritems():
            for xblock_name in self.XBLOCK_NAMES:
                response = self.call_event('add_resource', resource, xblock_name)
                expected = {'upvotes': 0, 'downvotes': 0, 'id': resource_id}
                expected.update(resource)
                self.assertDictEqual(json.loads(response.content), expected)
                self.assert_request_status_code(200, self.course_url)
@attr('shard_1')
class TestRecommenderResourceBase(TestRecommender):
    """Shared setup for tests that need pre-populated resources."""
    def setUp(self):
        super(TestRecommenderResourceBase, self).setUp()
        # Convenience aliases for the two seeded resources plus a bogus id.
        self.resource_id, self.resource_id_second = self.resource_urls[:2]
        self.non_existing_resource_id = 'An non-existing id'
        self.set_up_resources()
    def set_up_resources(self):
        """
        Log in as staff and seed every xblock with the test resources.
        """
        self.logout()
        self.enroll_staff(self.staff_user)
        # 'add_resource' correctness is covered in TestRecommenderCreateFromEmpty.
        for resource, xblock_name in itertools.product(self.test_recommendations.values(), self.XBLOCK_NAMES):
            self.call_event('add_resource', resource, xblock_name)
    def generate_edit_resource(self, resource_id):
        """
        Build an edited variant of the first test resource (every field
        suffixed with " edited"), keyed by ``resource_id``, for exercising
        the 'edit_resource' handler.
        """
        edited = {
            key: value + " edited" for key, value in self.test_recommendations[self.resource_id].iteritems()
        }
        edited["id"] = resource_id
        return edited
@attr('shard_1')
class TestRecommenderWithResources(TestRecommenderResourceBase):
    """
    Check whether we can add/edit/flag/export resources correctly
    """
    # The suffix variants below ('#...' and its percent-encoded form) check
    # that URLs differing only in fragment are treated as the same resource.
    def test_add_redundant_resource(self):
        """
        Verify the addition of a redundant resource (url) is rejected
        """
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = deepcopy(self.test_recommendations[self.resource_id])
            resource['url'] += suffix
            # 409 Conflict: the URL already exists.
            self.check_event_response_by_http_status('add_resource', resource, 409)
    def test_add_removed_resource(self):
        """
        Verify the addition of a removed resource (url) is rejected
        """
        self.call_event('remove_resource', {"id": self.resource_id, 'reason': ''})
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = deepcopy(self.test_recommendations[self.resource_id])
            resource['url'] += suffix
            # 405: re-adding a staff-removed resource is not allowed.
            self.check_event_response_by_http_status('add_resource', resource, 405)
    def test_edit_resource_non_existing(self):
        """
        Edit a non-existing resource
        """
        self.check_event_response_by_http_status(
            'edit_resource',
            self.generate_edit_resource(self.non_existing_resource_id),
            400
        )
    def test_edit_redundant_resource(self):
        """
        Check whether changing the url to the one of 'another' resource is
        rejected
        """
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = self.generate_edit_resource(self.resource_id)
            resource['url'] = self.resource_id_second + suffix
            self.check_event_response_by_http_status('edit_resource', resource, 409)
    def test_edit_removed_resource(self):
        """
        Check whether changing the url to the one of a removed resource is
        rejected
        """
        self.call_event('remove_resource', {"id": self.resource_id_second, 'reason': ''})
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = self.generate_edit_resource(self.resource_id)
            resource['url'] = self.resource_id_second + suffix
            self.check_event_response_by_http_status('edit_resource', resource, 405)
    def test_edit_resource(self):
        """
        Check whether changing the content of resource is successful
        """
        self.check_event_response_by_http_status(
            'edit_resource',
            self.generate_edit_resource(self.resource_id),
            200
        )
    def test_edit_resource_same_url(self):
        """
        Check whether changing the content (except for url) of resource is successful
        """
        resource = self.generate_edit_resource(self.resource_id)
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            # Same URL (modulo fragment) must not be flagged as a duplicate.
            resource['url'] = self.resource_id + suffix
            self.check_event_response_by_http_status('edit_resource', resource, 200)
    def test_edit_then_add_resource(self):
        """
        Check whether we can add back an edited resource
        """
        # Editing changes the stored URL, freeing the original one for re-adding.
        self.call_event('edit_resource', self.generate_edit_resource(self.resource_id))
        # Test
        self.check_event_response_by_key(
            'add_resource',
            self.test_recommendations[self.resource_id],
            'id',
            self.resource_id
        )
    def test_edit_resources_in_different_xblocks(self):
        """
        Check whether changing the content of resource is successful in two
        different xblocks
        """
        resource = self.generate_edit_resource(self.resource_id)
        for xblock_name in self.XBLOCK_NAMES:
            self.check_event_response_by_http_status('edit_resource', resource, 200, xblock_name)
    def test_flag_resource_wo_reason(self):
        """
        Flag a resource as problematic, without providing the reason
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': ''}
        # Test
        self.check_event_response_by_key('flag_resource', resource, 'reason', '')
    def test_flag_resource_w_reason(self):
        """
        Flag a resource as problematic, with providing the reason
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        # Test
        self.check_event_response_by_key('flag_resource', resource, 'reason', 'reason 0')
    def test_flag_resource_change_reason(self):
        """
        Flag a resource as problematic twice, with different reasons
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        self.call_event('flag_resource', resource)
        # Test: the response reports both the previous and the new reason.
        resource['reason'] = 'reason 1'
        resp = json.loads(self.call_event('flag_resource', resource).content)
        self.assertEqual(resp['oldReason'], 'reason 0')
        self.assertEqual(resp['reason'], 'reason 1')
        self.assert_request_status_code(200, self.course_url)
    def test_flag_resources_in_different_xblocks(self):
        """
        Flag resources as problematic in two different xblocks
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        # Test
        for xblock_name in self.XBLOCK_NAMES:
            self.check_event_response_by_key('flag_resource', resource, 'reason', 'reason 0', xblock_name)
    def test_flag_resources_by_different_users(self):
        """
        Different users can't see the flag result of each other
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        self.call_event('flag_resource', resource)
        self.logout()
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Test
        resp = json.loads(self.call_event('flag_resource', resource).content)
        # The second user won't see the reason provided by the first user
        self.assertNotIn('oldReason', resp)
        self.assertEqual(resp['reason'], 'reason 0')
        self.assert_request_status_code(200, self.course_url)
    def test_export_resources(self):
        """
        Test the function for exporting all resources from the Recommender.
        """
        self.call_event('remove_resource', {"id": self.resource_id, 'reason': ''})
        self.call_event('endorse_resource', {"id": self.resource_id_second, 'reason': ''})
        # Test: removed resources leave the active list but are still exported
        # under 'removed_recommendations'; endorsed ones appear in both lists.
        resp = json.loads(self.call_event('export_resources', {}).content)
        self.assertIn(self.resource_id_second, resp['export']['recommendations'])
        self.assertNotIn(self.resource_id, resp['export']['recommendations'])
        self.assertIn(self.resource_id_second, resp['export']['endorsed_recommendation_ids'])
        self.assertIn(self.resource_id, resp['export']['removed_recommendations'])
        self.assert_request_status_code(200, self.course_url)
@attr('shard_1')
@ddt
class TestRecommenderVoteWithResources(TestRecommenderResourceBase):
    """
    Check whether we can vote resources correctly
    """
    @data(
        {'event': 'recommender_upvote'},
        {'event': 'recommender_downvote'}
    )
    def test_vote_resource_non_existing(self, test_case):
        """
        Vote a non-existing resource
        """
        resource = {"id": self.non_existing_resource_id, 'event': test_case['event']}
        self.check_event_response_by_http_status('handle_vote', resource, 400)
    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_resource_once(self, test_case):
        """
        Vote a resource
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.check_event_response_by_key('handle_vote', resource, 'newVotes', test_case['new_votes'])
    @data(
        {'event': 'recommender_upvote', 'new_votes': 0},
        {'event': 'recommender_downvote', 'new_votes': 0}
    )
    def test_vote_resource_twice(self, test_case):
        """
        Vote a resource twice
        """
        # A repeated identical vote acts as an undo, returning the count to 0.
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        self.check_event_response_by_key('handle_vote', resource, 'newVotes', test_case['new_votes'])
    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_resource_thrice(self, test_case):
        """
        Vote a resource thrice
        """
        # Odd number of identical votes leaves the vote applied.
        resource = {"id": self.resource_id, 'event': test_case['event']}
        for _ in range(2):
            self.call_event('handle_vote', resource)
        # Test
        self.check_event_response_by_key('handle_vote', resource, 'newVotes', test_case['new_votes'])
    @data(
        {'event': 'recommender_upvote', 'event_second': 'recommender_downvote', 'new_votes': -1},
        {'event': 'recommender_downvote', 'event_second': 'recommender_upvote', 'new_votes': 1}
    )
    def test_switch_vote_resource(self, test_case):
        """
        Switch the vote of a resource
        """
        # Switching replaces the previous vote (net swing of 2, landing at +/-1).
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        resource['event'] = test_case['event_second']
        self.check_event_response_by_key('handle_vote', resource, 'newVotes', test_case['new_votes'])
    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_different_resources(self, test_case):
        """
        Vote two different resources
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test: votes on one resource must not affect another.
        resource['id'] = self.resource_id_second
        self.check_event_response_by_key('handle_vote', resource, 'newVotes', test_case['new_votes'])
    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_resources_in_different_xblocks(self, test_case):
        """
        Vote two resources in two different xblocks
        """
        # Vote state must be isolated per xblock instance.
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        self.check_event_response_by_key(
            'handle_vote', resource, 'newVotes', test_case['new_votes'], self.XBLOCK_NAMES[1]
        )
    @data(
        {'event': 'recommender_upvote', 'new_votes': 2},
        {'event': 'recommender_downvote', 'new_votes': -2}
    )
    def test_vote_resource_by_different_users(self, test_case):
        """
        Vote resource by two different users
        """
        # Votes from distinct users accumulate.
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        self.logout()
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Test
        self.check_event_response_by_key('handle_vote', resource, 'newVotes', test_case['new_votes'])
@attr('shard_1')
@ddt
class TestRecommenderStaffFeedbackWithResources(TestRecommenderResourceBase):
    """
    Check whether we can remove/endorse resources correctly
    """
    @data('remove_resource', 'endorse_resource')
    def test_remove_or_endorse_resource_non_existing(self, test_case):
        """
        Remove/endorse a non-existing resource
        """
        resource = {"id": self.non_existing_resource_id, 'reason': ''}
        self.check_event_response_by_http_status(test_case, resource, 400)
    @data(
        {'times': 1, 'key': 'status', 'val': 'endorsement'},
        {'times': 2, 'key': 'status', 'val': 'undo endorsement'},
        {'times': 3, 'key': 'status', 'val': 'endorsement'}
    )
    def test_endorse_resource_multiple_times(self, test_case):
        """
        Endorse a resource once/twice/thrice
        """
        # Endorsement is a toggle: each repeat flips the state.
        resource = {"id": self.resource_id, 'reason': ''}
        for _ in range(test_case['times'] - 1):
            self.call_event('endorse_resource', resource)
        # Test
        self.check_event_response_by_key('endorse_resource', resource, test_case['key'], test_case['val'])
    @data(
        {'times': 1, 'status': 200},
        {'times': 2, 'status': 400},
        {'times': 3, 'status': 400}
    )
    def test_remove_resource_multiple_times(self, test_case):
        """
        Remove a resource once/twice/thrice
        """
        # Removal is not a toggle: once gone, repeat removals are errors.
        resource = {"id": self.resource_id, 'reason': ''}
        for _ in range(test_case['times'] - 1):
            self.call_event('remove_resource', resource)
        # Test
        self.check_event_response_by_http_status('remove_resource', resource, test_case['status'])
    @data(
        {'handler': 'remove_resource', 'status': 200},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
    )
    def test_remove_or_endorse_different_resources(self, test_case):
        """
        Remove/endorse two different resources
        """
        self.call_event(test_case['handler'], {"id": self.resource_id, 'reason': ''})
        # Test
        resource = {"id": self.resource_id_second, 'reason': ''}
        if test_case['handler'] == 'remove_resource':
            self.check_event_response_by_http_status(test_case['handler'], resource, test_case['status'])
        else:
            self.check_event_response_by_key(test_case['handler'], resource, test_case['key'], test_case['val'])
    @data(
        {'handler': 'remove_resource', 'status': 200},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
    )
    def test_remove_or_endorse_resources_in_different_xblocks(self, test_case):
        """
        Remove/endorse two resources in two different xblocks
        """
        # Acting on the same resource id in another xblock must succeed
        # independently (state is per-xblock).
        self.call_event(test_case['handler'], {"id": self.resource_id, 'reason': ''})
        # Test
        resource = {"id": self.resource_id, 'reason': ''}
        if test_case['handler'] == 'remove_resource':
            self.check_event_response_by_http_status(
                test_case['handler'], resource, test_case['status'], self.XBLOCK_NAMES[1]
            )
        else:
            self.check_event_response_by_key(
                test_case['handler'], resource, test_case['key'], test_case['val'], self.XBLOCK_NAMES[1]
            )
    @data(
        {'handler': 'remove_resource', 'status': 400},
        {'handler': 'endorse_resource', 'status': 400}
    )
    def test_remove_or_endorse_resource_by_student(self, test_case):
        """
        Remove/endorse resource by a student
        """
        # Both operations are staff-only; students get a 400.
        self.logout()
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Test
        resource = {"id": self.resource_id, 'reason': ''}
        self.check_event_response_by_http_status(test_case['handler'], resource, test_case['status'])
@attr('shard_1')
@ddt
class TestRecommenderFileUploading(TestRecommender):
    """
    Check whether we can handle file uploading correctly
    """
    def setUp(self):
        super(TestRecommenderFileUploading, self).setUp()
        # Minimal valid export payload used as the import fixture.
        self.initial_configuration = {
            'flagged_accum_resources': {},
            'endorsed_recommendation_reasons': [],
            'endorsed_recommendation_ids': [],
            'removed_recommendations': {},
            'recommendations': self.test_recommendations[self.resource_urls[0]]
        }
    def attempt_upload_file_and_verify_result(self, test_case, event_name, content=None):
        """
        Running on a test case, creating a temp file, uploading it by
        calling the corresponding ajax event, and verifying that upload
        happens or is rejected as expected.
        """
        # 'magic_number' is the file's leading bytes as a hex string; it lets
        # tests forge files whose signature disagrees with their extension.
        if 'magic_number' in test_case:
            f_handler = StringIO.StringIO(test_case['magic_number'].decode('hex'))
        elif content is not None:
            f_handler = StringIO.StringIO(json.dumps(content, sort_keys=True))
        else:
            f_handler = StringIO.StringIO('')
        f_handler.content_type = test_case['mimetypes']
        f_handler.name = 'file' + test_case['suffixes']
        url = self.get_handler_url(event_name)
        resp = self.client.post(url, {'file': f_handler})
        self.assertEqual(resp.status_code, test_case['status'])
        self.assert_request_status_code(200, self.course_url)
    # Magic numbers used below: '89504e470d0a1a0a' is the PNG signature,
    # '474946383761'/'474946383961' are GIF87a/GIF89a, and 'ffd8...ffd9'
    # are the JPEG SOI/EOI markers. 415 = Unsupported Media Type.
    @data(
        {
            'suffixes': '.csv',
            'magic_number': 'ffff',
            'mimetypes': 'text/plain',
            'status': 415
        },  # Upload file with wrong extension name
        {
            'suffixes': '.gif',
            'magic_number': '89504e470d0a1a0a',
            'mimetypes': 'image/gif',
            'status': 415
        },  # Upload file with wrong magic number
        {
            'suffixes': '.jpg',
            'magic_number': '89504e470d0a1a0a',
            'mimetypes': 'image/jpeg',
            'status': 415
        },  # Upload file with wrong magic number
        {
            'suffixes': '.png',
            'magic_number': '474946383761',
            'mimetypes': 'image/png',
            'status': 415
        },  # Upload file with wrong magic number
        {
            'suffixes': '.jpg',
            'magic_number': '474946383761',
            'mimetypes': 'image/jpeg',
            'status': 415
        },  # Upload file with wrong magic number
        {
            'suffixes': '.png',
            'magic_number': 'ffd8ffd9',
            'mimetypes': 'image/png',
            'status': 415
        },  # Upload file with wrong magic number
        {
            'suffixes': '.gif',
            'magic_number': 'ffd8ffd9',
            'mimetypes': 'image/gif',
            'status': 415
        }
    )
    def test_upload_screenshot_wrong_file_type(self, test_case):
        """
        Verify the file uploading fails correctly when file with wrong type
        (extension/magic number) is provided
        """
        self.enroll_staff(self.staff_user)
        # Upload file with wrong extension name or magic number
        self.attempt_upload_file_and_verify_result(test_case, 'upload_screenshot')
    @data(
        {
            'suffixes': '.png',
            'magic_number': '89504e470d0a1a0a',
            'mimetypes': 'image/png',
            'status': 200
        },
        {
            'suffixes': '.gif',
            'magic_number': '474946383961',
            'mimetypes': 'image/gif',
            'status': 200
        },
        {
            'suffixes': '.gif',
            'magic_number': '474946383761',
            'mimetypes': 'image/gif',
            'status': 200
        },
        {
            'suffixes': '.jpg',
            'magic_number': 'ffd8ffd9',
            'mimetypes': 'image/jpeg',
            'status': 200
        }
    )
    def test_upload_screenshot_correct_file_type(self, test_case):
        """
        Verify the file type checking in the file uploading method is
        successful.
        """
        self.enroll_staff(self.staff_user)
        # Upload file with correct extension name and magic number
        self.attempt_upload_file_and_verify_result(test_case, 'upload_screenshot')
    @data(
        {
            'suffixes': '.json',
            'mimetypes': 'application/json',
            'status': 403
        }
    )
    def test_import_resources_by_student(self, test_case):
        """
        Test the function for importing all resources into the Recommender
        by a student.
        """
        # Import is staff-only; a student must receive 403 Forbidden.
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        self.attempt_upload_file_and_verify_result(test_case, 'import_resources', self.initial_configuration)
    @data(
        {
            'suffixes': '.csv',
            'mimetypes': 'application/json',
            'status': 415
        },  # Upload file with wrong extension name
        {
            'suffixes': '.json',
            'mimetypes': 'application/json',
            'status': 200
        }
    )
    def test_import_resources(self, test_case):
        """
        Test the function for importing all resources into the Recommender.
        """
        self.enroll_staff(self.staff_user)
        self.attempt_upload_file_and_verify_result(test_case, 'import_resources', self.initial_configuration)
    @data(
        {
            'suffixes': '.json',
            'mimetypes': 'application/json',
            'status': 415
        }
    )
    def test_import_resources_wrong_format(self, test_case):
        """
        Test the function for importing empty dictionary into the Recommender.
        This should fire an error.
        """
        # content={} is valid JSON but lacks the required export keys.
        self.enroll_staff(self.staff_user)
        self.attempt_upload_file_and_verify_result(test_case, 'import_resources', {})
import { NextRequest, NextResponse } from "next/server";
import { revalidateTag } from "next/cache";
export async function POST(request: NextRequest) {
const requestHeaders = new Headers(request.headers);
const secret = requestHeaders.get("x-vercel-reval-key");
if (secret !== process.env.CONTENTFUL_REVALIDATE_SECRET) {
return NextResponse.json({ message: "Invalid secret" }, { status: 401 });
}
revalidateTag("posts");
return NextResponse.json({ revalidated: true, now: Date.now() });
} | typescript | github | https://github.com/vercel/next.js | examples/cms-contentful/app/api/revalidate/route.ts |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'complexity': "normal",
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships, membership products (schemes), etc.
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'update_xml': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo_xml': [],
'installable': True,
'auto_install': False,
'certificate': '0078696047261',
'images': ['images/association1.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
"""
Created on 22/02/2017
@author: jrkarki
"""
class Reanimator:
    """A lifeless body that can be brought back to life."""
    def __init__(self, corpse, alive=False):
        """
        :param corpse: The name of the corpse
        :type corpse: str
        :param alive: Is the corpse alive? Defaults to False.
        :type alive: bool
        """
        # NOTE: the attribute is historically spelled ``corps``; callers may
        # rely on it, so the name is preserved.
        self.corps = corpse
        self.alive = alive
    def reanimate(self):
        """Bring the corpse to life; raise ValueError if it already lives."""
        if not self.alive:
            self.alive = True
            return
        raise ValueError("Can't reanimate something that's alive")
    def __str__(self):
        template = "{}... it's alive!!!" if self.alive else "The lifeless body of {}"
        return template.format(self.corps)
    def __repr__(self):
        return self.__str__()
def build_bodies(list_of_str):
    """Create one corpse object per name in *list_of_str*.

    :param list_of_str: iterable of corpse names
    :type list_of_str: iter of str
    :return: a list of (dead) Reanimator instances, in input order
    :rtype: list of Reanimator
    """
    return list(map(Reanimator, list_of_str))
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import image_metadata
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
import nova.image
ALIAS = 'image-metadata'
class ImageMetadataController(wsgi.Controller):
    """The image metadata API controller for the OpenStack API."""
    def __init__(self):
        self.image_api = nova.image.API()
    def _get_image(self, context, image_id):
        """Fetch an image dict, translating image-service errors to HTTP.

        :raises: HTTPForbidden if the caller is not authorized,
                 HTTPNotFound if the image does not exist.
        """
        try:
            return self.image_api.get(context, image_id)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        except exception.ImageNotFound:
            msg = _("Image not found.")
            raise exc.HTTPNotFound(explanation=msg)
    def _save_image(self, context, image_id, image):
        """Persist modified image metadata, translating auth errors to HTTP.

        Shared by create/update/update_all/delete, which previously each
        duplicated this try/except block.

        :returns: the updated image dict from the image service.
        :raises: HTTPForbidden if the caller is not authorized.
        """
        try:
            return self.image_api.update(context, image_id, image, data=None,
                                         purge_props=True)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
    @extensions.expected_errors((403, 404))
    def index(self, req, image_id):
        """Returns the list of metadata for a given instance."""
        context = req.environ['nova.context']
        metadata = self._get_image(context, image_id)['properties']
        return dict(metadata=metadata)
    @extensions.expected_errors((403, 404))
    def show(self, req, image_id, id):
        """Return a single metadata item, or 404 if the key is absent."""
        context = req.environ['nova.context']
        metadata = self._get_image(context, image_id)['properties']
        if id in metadata:
            return {'meta': {id: metadata[id]}}
        else:
            raise exc.HTTPNotFound()
    @extensions.expected_errors((400, 403, 404, 413))
    @validation.schema(image_metadata.create)
    def create(self, req, image_id, body):
        """Merge the supplied metadata into the image's properties."""
        context = req.environ['nova.context']
        image = self._get_image(context, image_id)
        for key, value in six.iteritems(body['metadata']):
            image['properties'][key] = value
        # Enforce the per-image metadata quota before persisting.
        common.check_img_metadata_properties_quota(context,
                                                   image['properties'])
        image = self._save_image(context, image_id, image)
        return dict(metadata=image['properties'])
    @extensions.expected_errors((400, 403, 404, 413))
    @validation.schema(image_metadata.update)
    def update(self, req, image_id, id, body):
        """Set a single metadata item; key in URI and body must match."""
        context = req.environ['nova.context']
        meta = body['meta']
        if id not in meta:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)
        image = self._get_image(context, image_id)
        image['properties'][id] = meta[id]
        common.check_img_metadata_properties_quota(context,
                                                   image['properties'])
        self._save_image(context, image_id, image)
        return dict(meta=meta)
    @extensions.expected_errors((400, 403, 404, 413))
    @validation.schema(image_metadata.update_all)
    def update_all(self, req, image_id, body):
        """Replace the entire metadata dict for the image."""
        context = req.environ['nova.context']
        image = self._get_image(context, image_id)
        metadata = body['metadata']
        common.check_img_metadata_properties_quota(context, metadata)
        image['properties'] = metadata
        self._save_image(context, image_id, image)
        return dict(metadata=metadata)
    @extensions.expected_errors((403, 404))
    @wsgi.response(204)
    def delete(self, req, image_id, id):
        """Remove a single metadata item, or 404 if the key is absent."""
        context = req.environ['nova.context']
        image = self._get_image(context, image_id)
        if id not in image['properties']:
            msg = _("Invalid metadata key")
            raise exc.HTTPNotFound(explanation=msg)
        image['properties'].pop(id)
        self._save_image(context, image_id, image)
class ImageMetadata(extensions.V21APIExtensionBase):
    """Image Metadata API extension registration."""
    name = "ImageMetadata"
    alias = ALIAS
    version = 1
    def get_resources(self):
        """Expose /images/{image_id}/metadata as an image sub-resource."""
        metadata_resource = extensions.ResourceExtension(
            'metadata',
            ImageMetadataController(),
            member_name='image_meta',
            parent={'member_name': 'image', 'collection_name': 'images'},
            custom_routes_fn=self.image_metadata_map)
        return [metadata_resource]
    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
    def image_metadata_map(self, mapper, wsgi_resource):
        """Route PUT on the metadata collection to the update_all action."""
        mapper.connect("metadata",
                       "/{project_id}/images/{image_id}/metadata",
                       controller=wsgi_resource,
                       action='update_all', conditions={"method": ['PUT']})
# Copyright 2019-2024 Tauri Programme within The Commons Conservancy
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT
# These are supported funding model platforms
github: tauri-apps
patreon: #
open_collective: tauri
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
custom: # Replace with a single custom sponsorship URL | unknown | github | https://github.com/tauri-apps/tauri | .github/FUNDING.yml |
import asyncio
import traceback
import inspect
from collections.abc import Iterable
from itertools import groupby
from typing import Union, List, Any, Callable, Type
import time
import ray
from ray.async_compat import sync_to_async
from ray.serve.utils import (parse_request_item, _get_logger, chain_future,
unpack_future)
from ray.serve.exceptions import RayServeException
from ray.experimental import metrics
from ray.serve.config import BackendConfig
from ray.serve.router import Query
from ray.serve.constants import DEFAULT_LATENCY_BUCKET_MS
from ray.exceptions import RayTaskError
logger = _get_logger()
class BatchQueue:
    """Queue that groups incoming queries into batches.

    ``wait_for_batch`` returns up to ``max_batch_size`` items, waiting at
    most ``timeout_s`` for a full batch before returning whatever is ready.
    """

    def __init__(self, max_batch_size: int, timeout_s: float) -> None:
        self.queue = asyncio.Queue()
        self.full_batch_event = asyncio.Event()
        self.max_batch_size = max_batch_size
        self.timeout_s = timeout_s

    def set_config(self, max_batch_size: int, timeout_s: float) -> None:
        """Update the batching parameters used for subsequent batches."""
        self.max_batch_size = max_batch_size
        self.timeout_s = timeout_s

    def put(self, request: Query) -> None:
        self.queue.put_nowait(request)
        # Wake wait_for_batch as soon as a full batch is available.
        # The event is cleared again inside wait_for_batch.
        if self.queue.qsize() == self.max_batch_size:
            self.full_batch_event.set()

    def qsize(self) -> int:
        return self.queue.qsize()

    async def wait_for_batch(self) -> List[Query]:
        """Wait for batch respecting self.max_batch_size and self.timeout_s.

        Returns a batch of up to self.max_batch_size items, waiting for up
        to self.timeout_s for a full batch. After the timeout, returns as
        many items as are ready.

        Always returns a batch with at least one item - will block
        indefinitely until an item comes in.
        """
        remaining = self.timeout_s
        batch = []
        while not batch:
            iter_start = time.time()
            if remaining == 0:
                # Zero timeout: simply block for any single item.
                batch.append(await self.queue.get())
            else:
                # Nonzero timeout: wait for either the full-batch signal
                # or the remaining time to elapse, whichever comes first.
                try:
                    await asyncio.wait_for(self.full_batch_event.wait(),
                                           remaining)
                except asyncio.TimeoutError:
                    pass
            # Drain whatever is available, capped at the batch size.
            while len(batch) < self.max_batch_size and not self.queue.empty():
                batch.append(self.queue.get_nowait())
            # Re-arm the event if we dropped below a full batch.
            if (self.queue.qsize() < self.max_batch_size
                    and self.full_batch_event.is_set()):
                self.full_batch_event.clear()
            # Account for the time spent in this iteration.
            remaining = max(0, remaining - (time.time() - iter_start))
        return batch
def create_backend_worker(func_or_class: Union[Callable, Type[Callable]]):
    """Creates a worker class wrapping the provided function or class.

    Args:
        func_or_class: either a plain function (stateless backend) or a
            class whose instances serve requests.

    Returns:
        A new class, suitable for use as a Ray actor, that instantiates the
        backend callable and forwards requests to a RayServeWorker.

    Raises:
        TypeError: if func_or_class is neither a function nor a class.
    """
    if inspect.isfunction(func_or_class):
        is_function = True
    elif inspect.isclass(func_or_class):
        is_function = False
    else:
        # Previously `assert False`, which is silently stripped under
        # `python -O`; raise explicitly so misuse always fails loudly.
        raise TypeError("func_or_class must be function or class.")

    # TODO(architkulkarni): Add type hints after upgrading cloudpickle
    class RayServeWrappedWorker(object):
        def __init__(self, backend_tag, replica_tag, init_args,
                     backend_config: BackendConfig, controller_name: str):
            # Set the controller name so that serve.connect() will connect to
            # the instance that this backend is running in.
            ray.serve.api._set_internal_controller_name(controller_name)
            if is_function:
                _callable = func_or_class
            else:
                _callable = func_or_class(*init_args)

            self.backend = RayServeWorker(backend_tag, replica_tag, _callable,
                                          backend_config, is_function)

        async def handle_request(self, request):
            return await self.backend.handle_request(request)

        def update_config(self, new_config: BackendConfig):
            return self.backend.update_config(new_config)

        def ready(self):
            pass

    RayServeWrappedWorker.__name__ = "RayServeWorker_" + func_or_class.__name__
    return RayServeWrappedWorker
def wrap_to_ray_error(exception: Exception) -> RayTaskError:
    """Utility method to wrap exceptions in user code."""
    try:
        # Re-raise so traceback.format_exc() captures the full stack.
        raise exception
    except Exception as e:
        formatted = ray.utils.format_error_message(traceback.format_exc())
        return ray.exceptions.RayTaskError(str(e), formatted, e.__class__)
def ensure_async(func: Callable) -> Callable:
    """Return ``func`` unchanged if it is already a coroutine function,
    otherwise wrap it so that it can be awaited."""
    if not inspect.iscoroutinefunction(func):
        return sync_to_async(func)
    return func
class RayServeWorker:
    """Handles requests with the provided callable.

    Pulls queries off an internal BatchQueue in ``main_loop`` and evaluates
    them either one at a time (``invoke_single``) or grouped by call method
    (``invoke_batch``), recording request/error/latency metrics throughout.
    """

    def __init__(self, backend_tag: str, replica_tag: str, _callable: Callable,
                 backend_config: BackendConfig, is_function: bool) -> None:
        self.backend_tag = backend_tag
        self.replica_tag = replica_tag
        self.callable = _callable
        self.is_function = is_function

        self.config = backend_config
        self.batch_queue = BatchQueue(self.config.max_batch_size or 1,
                                      self.config.batch_wait_timeout)

        # Requests accepted by handle_request but not yet resolved.
        self.num_ongoing_requests = 0

        self.request_counter = metrics.Count(
            "backend_request_counter", ("Number of queries that have been "
                                        "processed in this replica"),
            "requests", ["backend"])
        self.error_counter = metrics.Count("backend_error_counter",
                                           ("Number of exceptions that have "
                                            "occurred in the backend"),
                                           "errors", ["backend"])
        self.restart_counter = metrics.Count(
            "backend_worker_starts",
            ("The number of time this replica workers "
             "has been restarted due to failure."), "restarts",
            ["backend", "replica_tag"])

        self.queuing_latency_tracker = metrics.Histogram(
            "backend_queuing_latency_ms",
            ("The latency for queries waiting in the replica's queue "
             "waiting to be processed or batched."), "ms",
            DEFAULT_LATENCY_BUCKET_MS, ["backend", "replica_tag"])
        self.processing_latency_tracker = metrics.Histogram(
            "backend_processing_latency_ms",
            "The latency for queries to be processed", "ms",
            DEFAULT_LATENCY_BUCKET_MS,
            ["backend", "replica_tag", "batch_size"])

        self.num_queued_items = metrics.Gauge(
            "replica_queued_queries",
            "Current number of queries queued in the the backend replicas",
            "requests", ["backend", "replica_tag"])
        self.num_processing_items = metrics.Gauge(
            "replica_processing_queries",
            "Current number of queries being processed", "requests",
            ["backend", "replica_tag"])

        self.restart_counter.record(1, {
            "backend": self.backend_tag,
            "replica_tag": self.replica_tag
        })

        asyncio.get_event_loop().create_task(self.main_loop())

    def get_runner_method(self, request_item: Query) -> Callable:
        """Resolve the callable to run for this query's call_method.

        Raises:
            RayServeException: if the backend has no such method.
        """
        method_name = request_item.metadata.call_method
        if not hasattr(self.callable, method_name):
            raise RayServeException("Backend doesn't have method {} "
                                    "which is specified in the request. "
                                    "The available methods are {}".format(
                                        method_name, dir(self.callable)))
        if self.is_function:
            return self.callable
        return getattr(self.callable, method_name)

    async def invoke_single(self, request_item: Query) -> Any:
        """Evaluate a single query, returning its result or a wrapped error."""
        method_to_call = ensure_async(self.get_runner_method(request_item))
        arg = parse_request_item(request_item)

        start = time.time()
        try:
            result = await method_to_call(arg)
            self.request_counter.record(1, {"backend": self.backend_tag})
        except Exception as e:
            result = wrap_to_ray_error(e)
            self.error_counter.record(1, {"backend": self.backend_tag})

        self.processing_latency_tracker.record(
            (time.time() - start) * 1000, {
                "backend": self.backend_tag,
                # BUGFIX: tag key must be "replica_tag" to match the
                # Histogram declaration (and invoke_batch); it was "replica".
                "replica_tag": self.replica_tag,
                "batch_size": "1"
            })
        return result

    async def invoke_batch(self, request_item_list: List[Query]) -> List[Any]:
        """Evaluate a batch of queries sharing the same call method.

        Returns one result per query; on failure every slot holds the
        wrapped exception.
        """
        args = []
        call_methods = set()
        batch_size = len(request_item_list)

        # Construct the batch of requests
        for item in request_item_list:
            args.append(parse_request_item(item))
            call_methods.add(self.get_runner_method(item))

        timing_start = time.time()
        try:
            if len(call_methods) != 1:
                raise RayServeException(
                    f"Queries contain mixed calling methods: {call_methods}. "
                    "Please only send the same type of requests in batching "
                    "mode.")
            self.request_counter.record(batch_size,
                                        {"backend": self.backend_tag})

            call_method = ensure_async(call_methods.pop())
            result_list = await call_method(args)

            if not isinstance(result_list, Iterable) or isinstance(
                    result_list, (dict, set)):
                error_message = ("RayServe expects an ordered iterable object "
                                 "but the worker returned a {}".format(
                                     type(result_list)))
                raise RayServeException(error_message)

            # Normalize the result into a list type. This operation is fast
            # in Python because it doesn't copy anything.
            result_list = list(result_list)

            if (len(result_list) != batch_size):
                error_message = ("Worker doesn't preserve batch size. The "
                                 "input has length {} but the returned list "
                                 "has length {}. Please return a list of "
                                 "results with length equal to the batch size"
                                 ".".format(batch_size, len(result_list)))
                raise RayServeException(error_message)
        except Exception as e:
            wrapped_exception = wrap_to_ray_error(e)
            self.error_counter.record(1, {"backend": self.backend_tag})
            result_list = [wrapped_exception for _ in range(batch_size)]

        self.processing_latency_tracker.record(
            (time.time() - timing_start) * 1000, {
                "backend": self.backend_tag,
                "replica_tag": self.replica_tag,
                "batch_size": str(batch_size)
            })
        return result_list

    async def main_loop(self) -> None:
        """Forever: pull a batch off the queue, evaluate it, fulfil futures."""
        while True:
            # NOTE(simon): There's an issue when user updated batch size and
            # batch wait timeout during the execution, these values will not be
            # updated until after the current iteration.
            batch = await self.batch_queue.wait_for_batch()

            # Record queue-depth metrics for this iteration.
            self.num_queued_items.record(self.batch_queue.qsize(), {
                "backend": self.backend_tag,
                "replica_tag": self.replica_tag
            })
            self.num_processing_items.record(
                self.num_ongoing_requests - self.batch_queue.qsize(), {
                    "backend": self.backend_tag,
                    "replica_tag": self.replica_tag
                })
            for query in batch:
                queuing_time = (time.time() - query.tick_enter_replica) * 1000
                self.queuing_latency_tracker.record(queuing_time, {
                    "backend": self.backend_tag,
                    "replica_tag": self.replica_tag
                })

            all_evaluated_futures = []

            if not self.config.internal_metadata.accepts_batches:
                # Backend cannot batch: evaluate just the first query; the
                # rest remain in the next iterations' batches.
                query = batch[0]
                evaluated = asyncio.ensure_future(self.invoke_single(query))
                all_evaluated_futures = [evaluated]
                chain_future(evaluated, query.async_future)
            else:
                # Group queries by their call method so each batch invokes
                # exactly one method.
                get_call_method = (
                    lambda query: query.metadata.call_method  # noqa: E731
                )
                sorted_batch = sorted(batch, key=get_call_method)
                for _, group in groupby(sorted_batch, key=get_call_method):
                    group = list(group)
                    evaluated = asyncio.ensure_future(self.invoke_batch(group))
                    all_evaluated_futures.append(evaluated)
                    result_futures = [q.async_future for q in group]
                    chain_future(
                        unpack_future(evaluated, len(group)), result_futures)

            if self.config.internal_metadata.is_blocking:
                # We use asyncio.wait here so if the result is exception,
                # it will not be raised.
                await asyncio.wait(all_evaluated_futures)

    def update_config(self, new_config: BackendConfig) -> None:
        """Apply a new backend config, resizing the batch queue parameters."""
        self.config = new_config
        self.batch_queue.set_config(self.config.max_batch_size or 1,
                                    self.config.batch_wait_timeout)

    async def handle_request(self,
                             request: Union[Query, bytes]) -> asyncio.Future:
        """Enqueue one request and await its result from main_loop."""
        if isinstance(request, bytes):
            request = Query.ray_deserialize(request)

        request.tick_enter_replica = time.time()
        logger.debug("Worker {} got request {}".format(self.replica_tag,
                                                       request))
        request.async_future = asyncio.get_event_loop().create_future()
        self.num_ongoing_requests += 1
        self.batch_queue.put(request)
        try:
            result = await request.async_future
        finally:
            # Decrement even if awaiting the future raises, so the
            # ongoing-request count cannot drift upward over time.
            self.num_ongoing_requests -= 1
        return result
#include <ATen/cpu/FlushDenormal.h>
#include <ATen/cpu/vec/intrinsics.h>
#if !defined(__s390x__) && !defined(__powerpc__)
#include <cpuinfo.h>
#endif
namespace at::cpu {

#if defined(__SSE__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
// MXCSR control bits: Denormals-Are-Zero treats subnormal *inputs* as zero;
// Flush-To-Zero rounds subnormal *results* to zero.
static constexpr unsigned int DENORMALS_ZERO = 0x0040;
static constexpr unsigned int FLUSH_ZERO = 0x8000;

// Enable/disable flushing of denormal floats (x86, via the MXCSR register).
// Returns true if the setting was applied, false when unsupported.
bool set_flush_denormal(bool on) {
  // Compile if we have SSE support (GCC), x86-64 (MSVC), or x86 with SSE (MSVC)
  // Denormals-Are-Zero is supported by most SSE2 processors, with the exception
  // of some early Pentium 4 processors. We guard it with a runtime check.
  // Flush-To-Zero (FTZ) only requires SSE.
  if (cpuinfo_has_x86_daz()) {
    unsigned int csr = _mm_getcsr();
    // Clear both bits first, then set them only when enabling.
    csr &= ~DENORMALS_ZERO;
    csr &= ~FLUSH_ZERO;
    if (on) {
      csr |= DENORMALS_ZERO;
      csr |= FLUSH_ZERO;
    }
    _mm_setcsr(csr);
    return true;
  }
  return false;
}
#elif defined(__ARM_FP) && (__ARM_FP > 0)
// Imported from TensorFlow, tensorflow/third_party/xla/third_party/tsl/tsl/platform/denormal.cc
// Copyright 2015 The TensorFlow Authors. All Rights Reserved.
// Flush-to-zero bit on the ARM floating-point control register.
#define ARM_FPCR_FZ (1 << 24)

// Write the ARM floating-point control register (FPCR on AArch64,
// FPSCR on 32-bit ARM).
static inline void ArmSetFloatingPointControlRegister(uint32_t fpcr) {
#if defined(__aarch64__)
  __asm__ __volatile__("msr fpcr, %[fpcr]"
                       :
                       : [fpcr] "r"(static_cast<uint64_t>(fpcr)));
#else
  __asm__ __volatile__("vmsr fpscr, %[fpcr]" : : [fpcr] "r"(fpcr));
#endif
}

// Read the ARM floating-point control register.
static inline uint32_t ArmGetFloatingPointControlRegister() {
  uint32_t fpcr;
#if defined(__aarch64__)
  uint64_t fpcr64;
  __asm__ __volatile__("mrs %[fpcr], fpcr" : [fpcr] "=r"(fpcr64));
  fpcr = static_cast<uint32_t>(fpcr64);
#else
  __asm__ __volatile__("vmrs %[fpcr], fpscr" : [fpcr] "=r"(fpcr));
#endif
  return fpcr;
}

// Enable/disable flush-to-zero on ARM. Always succeeds on this path.
bool set_flush_denormal(bool on) {
  uint32_t fpcr = ArmGetFloatingPointControlRegister();
  if (on) {
    fpcr |= ARM_FPCR_FZ;
  } else {
    fpcr &= ~ ARM_FPCR_FZ;
  }
  ArmSetFloatingPointControlRegister(fpcr);
  return true;
}
#else
// No supported FPU-control mechanism on this architecture: report failure.
bool set_flush_denormal(bool on) {
  return false;
}
#endif

}  // namespace at::cpu
/* Copyright (c) 2015, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef TEMP_TABLE_PARAM_INCLUDED
#define TEMP_TABLE_PARAM_INCLUDED
#include <sys/types.h>
#include <vector>
#include "my_base.h"
#include "my_inttypes.h"
#include "sql/field.h"
#include "sql/mem_root_array.h"
#include "sql/thr_malloc.h"
class KEY;
class Item;
class Window;
struct CHARSET_INFO;
struct MEM_ROOT;
enum Copy_func_type : int;
/**
Helper class for copy_funcs(); represents an Item to copy from table to
next tmp table.
*/
class Func_ptr {
 public:
  Func_ptr(Item *item, Field *result_field, Item *result_item = nullptr);

  // The source expression that is evaluated and copied into the tmp table.
  Item *func() const { return m_func; }
  void set_func(Item *func);

  // The tmp table field the evaluated value is written into.
  Field *result_field() const { return m_result_field; }
  Item *result_item() const;

  // True if m_func is of the kind denoted by `type`; see m_func_bits below.
  bool should_copy(Copy_func_type type) const {
    return m_func_bits & (1 << type);
  }

 private:
  Item *m_func;
  Field *m_result_field;

  // A premade Item for m_result_field (may be nullptr if allocation failed).
  // This has two purposes:
  //
  // - It avoids repeated constructions if the field is used multiple times
  //   (e.g., first in a SELECT list, then in a sort order).
  // - It gives a canonical, unique item, so that we can compare it with ==
  //   (in FindReplacementItem(), where ->eq would have a metadata issues).
  //   This is important if we are to replace it with something else again
  //   later.
  //
  // It is created on-demand to avoid getting into the thd->stmt_arena field
  // list for a temporary table that is freed later anyway.
  // It is usually an Item_field, but if supplied from constructor, can be of
  // any type.
  mutable Item *m_result_item = nullptr;

  // A bitmap where all CFT_* enums are bit indexes, and we have a 1 if m_func
  // is of the type given by that enum. E.g., if m_func is an Item_field,
  // (1 << CFT_FIELDS) will be set here. This is used for quickly finding out
  // which items to copy in copy_funcs(), without having to look at the actual
  // items (which involves virtual function calls).
  int m_func_bits;
};
/// Used by copy_funcs()
typedef Mem_root_array<Func_ptr> Func_ptr_array;
/**
Object containing parameters used when creating and using temporary
tables. Temporary tables created with the help of this object are
used only internally by the query execution engine.
*/
class Temp_table_param {
 public:
  // Per-column copy operations; cleared by cleanup().
  Mem_root_array<Copy_field> copy_fields;

  // Buffer used for the grouping key (presumably sized per group_length;
  // confirm at usage sites).
  uchar *group_buff;
  Func_ptr_array *items_to_copy; /* Fields in tmp table */

  /**
    After temporary table creation, points to an index on the table
    created depending on the purpose of the table - grouping,
    duplicate elimination, etc. There is at most one such index.
  */
  KEY *keyinfo;

  /**
    LIMIT (maximum number of rows) for this temp table, or HA_POS_ERROR
    for no limit. Enforced by MaterializeIterator when writing to the table.
  */
  ha_rows end_write_records{HA_POS_ERROR};

  /**
    Number of items in the query. Includes both aggregate functions (e.g., SUM),
    and non-aggregates (e.g., RAND), window functions and fields.
    Also counts functions referred to from windowing or aggregate functions,
    i.e., "SELECT SUM(RAND())" sets this counter to 2.

    @see count_field_types
  */
  uint func_count;

  /**
    Number of fields in the query that have aggregate functions. Note
    that the optimizer may choose to optimize away these fields by
    replacing them with constants, in which case sum_func_count will
    need to be updated.

    @see optimize_aggregated_query, count_field_types
  */
  uint sum_func_count;

  // Number of hidden fields; consulted e.g. by
  // get_hidden_field_count_for_derived() (see note at operator= below).
  uint hidden_field_count;

  // GROUP BY key: number of key parts, total key length, and null parts.
  uint group_parts, group_length, group_null_parts;

  /**
    Whether we allow running GROUP BY processing into a temporary table,
    i.e., keeping many different aggregations going at once without
    having ordered input. This is usually the case, but is currently not
    supported for aggregation UDFs, aggregates with DISTINCT, or ROLLUP.

    Note that even if this is true, the optimizer may choose to not use
    a temporary table, as it is often more efficient to just read along
    an index.
  */
  bool allow_group_via_temp_table{true};

  /**
    Number of outer_sum_funcs i.e the number of set functions that are
    aggregated in a query block outer to this subquery.

    @see count_field_types
  */
  uint outer_sum_func_count;

  /**
    Enabled when we have at least one outer_sum_func. Needed when used
    along with distinct.

    @see create_tmp_table
  */
  bool using_outer_summary_function;

  CHARSET_INFO *table_charset;
  bool schema_table;

  /*
    True if GROUP BY and its aggregate functions are already computed
    by a table access method (e.g. by loose index scan). In this case
    query execution should not perform aggregation and should treat
    aggregate functions as normal functions.
  */
  bool precomputed_group_by;

  bool force_copy_fields;

  /**
    true <=> don't actually create table handler when creating the result
    table. This allows range optimizer to add indexes later.
    Used for materialized derived tables/views.
    @see Table_ref::update_derived_keys.
  */
  bool skip_create_table;

  /// Whether the UNIQUE index can be promoted to PK
  bool can_use_pk_for_unique;

  /// Whether UNIQUE keys should always be implemented by way of a hidden hash
  /// field, never a unique index. Needed for materialization of mixed
  /// UNION ALL / UNION DISTINCT queries (see comments in create_result_table())
  /// and for DISTINCT deduplication using materialization (See
  /// CreateTemporaryTableFromSelectList()).
  bool force_hash_field_for_unique{false};

  /// This tmp table is used for a window's frame buffer
  bool m_window_frame_buffer{false};

  /// For INTERSECT and EXCEPT computation
  enum {
    TTP_UNION_OR_TABLE,
    TTP_EXCEPT,
    TTP_INTERSECT
  } m_operation{TTP_UNION_OR_TABLE};

  /// The tempoary table rows need a counter to keep track of its
  /// duplicates: needed for EXCEPT and INTERSECT computation.
  bool needs_set_counter() { return m_operation != TTP_UNION_OR_TABLE; }

  /// For INTERSECT and EXCEPT computation.
  /// Cf. TABLE::m_last_operation_is_distinct.
  bool m_last_operation_is_distinct{false};

  /// If this is the out table of a window: the said window
  Window *m_window;

  // Default constructor: allocates copy_fields on the given mem_root.
  explicit Temp_table_param(MEM_ROOT *mem_root = *THR_MALLOC)
      : copy_fields(mem_root),
        group_buff(nullptr),
        items_to_copy(nullptr),
        keyinfo(nullptr),
        func_count(0),
        sum_func_count(0),
        hidden_field_count(0),
        group_parts(0),
        group_length(0),
        group_null_parts(0),
        outer_sum_func_count(0),
        using_outer_summary_function(false),
        table_charset(nullptr),
        schema_table(false),
        precomputed_group_by(false),
        force_copy_fields(false),
        skip_create_table(false),
        can_use_pk_for_unique(true),
        m_window(nullptr) {}

  // Partial copy: copy_fields starts empty on `mem_root`; pointer members
  // (group_buff, items_to_copy, keyinfo, m_window) are shared with `other`.
  Temp_table_param(MEM_ROOT *mem_root, const Temp_table_param &other)
      : copy_fields(mem_root),
        group_buff(other.group_buff),
        items_to_copy(other.items_to_copy),
        keyinfo(other.keyinfo),
        end_write_records(other.end_write_records),
        func_count(other.func_count),
        sum_func_count(other.sum_func_count),
        hidden_field_count(other.hidden_field_count),
        group_parts(other.group_parts),
        group_length(other.group_length),
        group_null_parts(other.group_null_parts),
        allow_group_via_temp_table(other.allow_group_via_temp_table),
        outer_sum_func_count(other.outer_sum_func_count),
        using_outer_summary_function(other.using_outer_summary_function),
        table_charset(other.table_charset),
        schema_table(other.schema_table),
        precomputed_group_by(other.precomputed_group_by),
        force_copy_fields(other.force_copy_fields),
        skip_create_table(other.skip_create_table),
        can_use_pk_for_unique(other.can_use_pk_for_unique),
        force_hash_field_for_unique(other.force_hash_field_for_unique),
        m_window_frame_buffer(other.m_window_frame_buffer),
        m_window(other.m_window) {}

  // Used by CTE derived table clones to set correct info, see
  // Common_table_expr::clone_tmp_table. The info may be consulted e.g.
  // by get_hidden_field_count_for_derived(), e.g. by HW.
  // NOTE: appends other's copy_fields to any existing entries (no clear()).
  Temp_table_param &operator=(const Temp_table_param &other) {
    if (this == &other) {
      return *this;
    }
    for (const auto &cf : other.copy_fields) copy_fields.push_back(cf);
    group_buff = other.group_buff;
    items_to_copy = other.items_to_copy;
    keyinfo = other.keyinfo;
    end_write_records = other.end_write_records;
    func_count = other.func_count;
    sum_func_count = other.sum_func_count;
    hidden_field_count = other.hidden_field_count;
    group_parts = other.group_parts;
    group_length = other.group_length;
    group_null_parts = other.group_null_parts;
    allow_group_via_temp_table = other.allow_group_via_temp_table;
    outer_sum_func_count = other.outer_sum_func_count;
    using_outer_summary_function = other.using_outer_summary_function;
    table_charset = other.table_charset;
    schema_table = other.schema_table;
    precomputed_group_by = other.precomputed_group_by;
    force_copy_fields = other.force_copy_fields;
    skip_create_table = other.skip_create_table;
    can_use_pk_for_unique = other.can_use_pk_for_unique;
    force_hash_field_for_unique = other.force_hash_field_for_unique;
    m_window_frame_buffer = other.m_window_frame_buffer;
    m_window = other.m_window;
    return *this;
  }

  /// Drop all column copy operations.
  void cleanup() { copy_fields.clear(); }
};
#endif // TEMP_TABLE_PARAM_INCLUDED | c | github | https://github.com/mysql/mysql-server | sql/temp_table_param.h |
# encoding: UTF-8
from datetime import datetime
from pymongo import Connection
from pymongo.errors import *
from eventEngine import *
# 常量定义
OFFSET_OPEN = '0' # 开仓
OFFSET_CLOSE = '1' # 平仓
DIRECTION_BUY = '0' # 买入
DIRECTION_SELL = '1' # 卖出
PRICETYPE_LIMIT = '2' # 限价
########################################################################
class Tick:
    """Container for one tick (snapshot) of market data."""

    #----------------------------------------------------------------------
    def __init__(self, symbol):
        """Constructor"""
        self.symbol = symbol            # contract symbol

        # OHLC and last traded price
        self.openPrice = 0
        self.highPrice = 0
        self.lowPrice = 0
        self.lastPrice = 0

        self.volume = 0                 # traded volume
        self.openInterest = 0           # open interest

        self.upperLimit = 0             # limit-up price
        self.lowerLimit = 0             # limit-down price

        self.time = ''                  # update time (string)
        self.ms = 0                     # update millisecond

        # Five levels of market depth on each side.
        for level in range(1, 6):
            setattr(self, 'bidPrice%d' % level, 0)
            setattr(self, 'askPrice%d' % level, 0)
        for level in range(1, 6):
            setattr(self, 'bidVolume%d' % level, 0)
            setattr(self, 'askVolume%d' % level, 0)
########################################################################
class Trade:
    """A single fill (trade) report for an order."""

    #----------------------------------------------------------------------
    def __init__(self, symbol):
        """Constructor"""
        self.symbol = symbol                  # contract symbol
        self.orderRef = self.tradeID = ''     # order reference / trade id
        self.direction = self.offset = None   # direction / open-close flag
        self.price = self.volume = 0          # fill price / fill volume
########################################################################
class Order:
    """Current state of a working (or finished) order."""

    #----------------------------------------------------------------------
    def __init__(self, symbol):
        """Constructor"""
        self.symbol = symbol
        # Identity and routing fields.
        self.orderRef = ''
        self.frontID = 0
        self.sessionID = 0
        # Direction and open/close flag.
        self.direction = None
        self.offset = None
        # Price and quantities.
        self.price = 0
        self.volumeOriginal = 0
        self.volumeTraded = 0
        # Timestamps and status.
        self.insertTime = ''
        self.cancelTime = ''
        self.status = ''
########################################################################
class StopOrder:
    """
    A stop (conditional) order held locally by the strategy engine.

    Fired once the last traded price crosses the trigger price, which
    implements stop-loss and breakout-entry behaviour.
    """

    #----------------------------------------------------------------------
    def __init__(self, symbol, direction, offset, price, volume, strategy):
        """Constructor"""
        for name, value in (('symbol', symbol), ('direction', direction),
                            ('offset', offset), ('price', price),
                            ('volume', volume), ('strategy', strategy)):
            setattr(self, name, value)
########################################################################
class StrategyEngine(object):
"""策略引擎"""
#----------------------------------------------------------------------
    def __init__(self, eventEngine, mainEngine):
        """Constructor"""
        self.__eventEngine = eventEngine
        self.mainEngine = mainEngine
        # Datetime representing today at midnight (used to stamp ticks).
        t = datetime.today()
        self.today = t.replace(hour=0, minute=0, second=0, microsecond=0)
        # All raw order data, keyed by order reference.
        self.__dictOrder = {}
        # Strategy objects:
        #   key:   strategy name
        #   value: strategy object
        self.dictStrategy = {}
        # Mapping from contract symbol to strategies:
        #   key:   contract symbol
        #   value: list of strategies trading that symbol
        self.__dictSymbolStrategy = {}
        # Mapping from order reference to the strategy that placed it:
        #   key:   order reference
        #   value: strategy object
        self.__dictOrderRefStrategy = {}
        # Mapping from contract symbol to its pending stop orders:
        #   key:   contract symbol
        #   value: list of stop orders on that symbol
        self.__dictStopOrder = {}
        # MongoDB connection state.
        self.__mongoConnected = False
        self.__mongoConnection = None
        self.__mongoTickDB = None
        # Connect to the database and register event handlers.
        self.__connectMongo()
        self.__registerEvent()
#----------------------------------------------------------------------
def createStrategy(self, strategyName, strategySymbol, strategyClass, strategySetting):
"""创建策略"""
strategy = strategyClass(strategyName, strategySymbol, self)
self.dictStrategy[strategyName] = strategy
strategy.loadSetting(strategySetting)
# 订阅合约行情,注意这里因为是CTP,所以ExchangeID可以忽略
self.mainEngine.subscribe(strategySymbol, None)
# 注册策略监听
self.registerStrategy(strategySymbol, strategy)
#----------------------------------------------------------------------
    def __connectMongo(self):
        """Connect to the MongoDB instance used for tick storage."""
        try:
            self.__mongoConnection = Connection()
            self.__mongoConnected = True
            self.__mongoTickDB = self.__mongoConnection['TickDB']
            self.writeLog(u'策略引擎连接MongoDB成功')
        except ConnectionFailure:
            # Engine still works without the DB; ticks just won't be stored.
            self.writeLog(u'策略引擎连接MongoDB失败')
#----------------------------------------------------------------------
    def __recordTick(self, data):
        """Insert one raw tick dict into MongoDB (one collection per symbol)."""
        if self.__mongoConnected:
            symbol = data['InstrumentID']
            # Stamp the tick with today's date so loadTick can filter by day.
            data['date'] = self.today
            self.__mongoTickDB[symbol].insert(data)
#----------------------------------------------------------------------
def loadTick(self, symbol, dt):
"""从MongoDB中读取Tick数据"""
if self.__mongoConnected:
collection = self.__mongoTickDB[symbol]
cx = collection.find({'date':{'$gte':dt}})
return cx
else:
return None
#----------------------------------------------------------------------
    def __updateMarketData(self, event):
        """Handle a market-data event: build a Tick, fire stop orders,
        push the tick to interested strategies, and persist the raw data."""
        data = event.dict_['data']
        symbol = data['InstrumentID']
        # Only process further if some strategy trades this symbol.
        if symbol in self.__dictSymbolStrategy:
            # Build the Tick object from the raw CTP field dict.
            tick = Tick(symbol)
            tick.openPrice = data['OpenPrice']
            tick.highPrice = data['HighestPrice']
            tick.lowPrice = data['LowestPrice']
            tick.lastPrice = data['LastPrice']
            tick.volume = data['Volume']
            tick.openInterest = data['OpenInterest']
            tick.upperLimit = data['UpperLimitPrice']
            tick.lowerLimit = data['LowerLimitPrice']
            tick.time = data['UpdateTime']
            tick.ms = data['UpdateMillisec']
            tick.bidPrice1 = data['BidPrice1']
            tick.bidPrice2 = data['BidPrice2']
            tick.bidPrice3 = data['BidPrice3']
            tick.bidPrice4 = data['BidPrice4']
            tick.bidPrice5 = data['BidPrice5']
            tick.askPrice1 = data['AskPrice1']
            tick.askPrice2 = data['AskPrice2']
            tick.askPrice3 = data['AskPrice3']
            tick.askPrice4 = data['AskPrice4']
            tick.askPrice5 = data['AskPrice5']
            tick.bidVolume1 = data['BidVolume1']
            tick.bidVolume2 = data['BidVolume2']
            tick.bidVolume3 = data['BidVolume3']
            tick.bidVolume4 = data['BidVolume4']
            tick.bidVolume5 = data['BidVolume5']
            tick.askVolume1 = data['AskVolume1']
            tick.askVolume2 = data['AskVolume2']
            tick.askVolume3 = data['AskVolume3']
            tick.askVolume4 = data['AskVolume4']
            tick.askVolume5 = data['AskVolume5']
            # First check whether any stop order must be triggered.
            self.__processStopOrder(tick)
            # Push this tick to every strategy trading the symbol.
            for strategy in self.__dictSymbolStrategy[symbol]:
                strategy.onTick(tick)
        # Persist the raw tick; in production, record ticks in a
        # separate process instead.
        self.__recordTick(data)
#----------------------------------------------------------------------
def __processStopOrder(self, tick):
"""处理停止单"""
symbol = tick.symbol
lastPrice = tick.lastPrice
upperLimit = tick.upperLimit
lowerLimit = tick.lowerLimit
# 如果当前有该合约上的止损单
if symbol in self.__dictStopOrder:
# 获取止损单列表
listSO = self.__dictStopOrder[symbol] # SO:stop order
# 准备一个空的已发止损单列表
listSent = []
for so in listSO:
# 如果是买入停止单,且最新成交价大于停止触发价
if so.direction == DIRECTION_BUY and lastPrice >= so.price:
# 以当日涨停价发出限价单买入
ref = self.sendOrder(symbol, DIRECTION_BUY, so.offset,
upperLimit, so.volume, strategy)
# 触发策略的止损单发出更新
so.strategy.onStopOrder(ref)
# 将该止损单对象保存到已发送列表中
listSent.append(so)
# 如果是卖出停止单,且最新成交价小于停止触发价
elif so.direction == DIRECTION_SELL and lastPrice <= so.price:
ref = self.sendOrder(symbol, DIRECTION_SELL, so.offset,
lowerLimit, so.volume, strategy)
so.strategy.onStopOrder(ref)
listSent.append(so)
# 从停止单列表中移除已经发单的停止单对象
if listSent:
for so in listSent:
listSO.remove(so)
# 检查停止单列表是否为空,若为空,则从停止单字典中移除该合约代码
if not listSO:
del self.__dictStopOrder[symbol]
#----------------------------------------------------------------------
    def __updateOrder(self, event):
        """Handle an order-status event and forward it to the owning strategy."""
        data = event.dict_['data']
        orderRef = data['OrderRef']
        # Only forward if a strategy placed (and thus watches) this order.
        if orderRef in self.__dictOrderRefStrategy:
            # Build the Order data object from the raw CTP dict.
            order = Order(data['InstrumentID'])
            order.orderRef = data['OrderRef']
            order.direction = data['Direction']
            order.offset = data['CombOffsetFlag']
            order.price = data['LimitPrice']
            order.volumeOriginal = data['VolumeTotalOriginal']
            order.volumeTraded = data['VolumeTraded']
            order.insertTime = data['InsertTime']
            order.cancelTime = data['CancelTime']
            order.frontID = data['FrontID']
            order.sessionID = data['SessionID']
            order.status = data['OrderStatus']
            # Push to the strategy that placed the order.
            strategy = self.__dictOrderRefStrategy[orderRef]
            strategy.onOrder(order)
        # Keep the raw data around; cancelOrder() reads it later.
        self.__dictOrder[orderRef] = data
#----------------------------------------------------------------------
def __updateTrade(self, event):
"""成交更新"""
print 'updateTrade'
data = event.dict_['data']
orderRef = data['OrderRef']
print 'trade:', orderRef
if orderRef in self.__dictOrderRefStrategy:
# 创建Trade数据对象
trade = Trade(data['InstrumentID'])
trade.orderRef = orderRef
trade.tradeID = data['TradeID']
trade.direction = data['Direction']
trade.offset = data['OffsetFlag']
trade.price = data['Price']
trade.volume = data['Volume']
# 推送给策略
strategy = self.__dictOrderRefStrategy[orderRef]
strategy.onTrade(trade)
#----------------------------------------------------------------------
def sendOrder(self, symbol, direction, offset, price, volume, strategy):
"""
发单(仅允许限价单)
symbol:合约代码
direction:方向,DIRECTION_BUY/DIRECTION_SELL
offset:开平,OFFSET_OPEN/OFFSET_CLOSE
price:下单价格
volume:下单手数
strategy:策略对象
"""
contract = self.mainEngine.selectInstrument(symbol)
if contract:
ref = self.mainEngine.sendOrder(symbol,
contract['ExchangeID'],
price,
PRICETYPE_LIMIT,
volume,
direction,
offset)
self.__dictOrderRefStrategy[ref] = strategy
print 'ref:', ref
print 'strategy:', strategy.name
return ref
#----------------------------------------------------------------------
def cancelOrder(self, orderRef):
"""
撤单
"""
order = self.__dictOrder[orderRef]
symbol = order['InstrumentID']
contract = self.mainEngine.selectInstrument(symbol)
if contract:
self.mainEngine.cancelOrder(symbol,
contract['ExchangeID'],
orderRef,
order['FrontID'],
order['SessionID'])
#----------------------------------------------------------------------
def __registerEvent(self):
"""注册事件监听"""
self.__eventEngine.register(EVENT_MARKETDATA, self.__updateMarketData)
self.__eventEngine.register(EVENT_ORDER, self.__updateOrder)
self.__eventEngine.register(EVENT_TRADE ,self.__updateTrade)
#----------------------------------------------------------------------
def writeLog(self, log):
"""写日志"""
event = Event(type_=EVENT_LOG)
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def registerStrategy(self, symbol, strategy):
"""注册策略对合约TICK数据的监听"""
# 尝试获取监听该合约代码的策略的列表,若无则创建
try:
listStrategy = self.__dictSymbolStrategy[symbol]
except KeyError:
listStrategy = []
self.__dictSymbolStrategy[symbol] = listStrategy
# 防止重复注册
if strategy not in listStrategy:
listStrategy.append(strategy)
#----------------------------------------------------------------------
def placeStopOrder(self, symbol, direction, offset, price, volume, strategy):
"""
下停止单(运行于本地引擎中)
注意这里的price是停止单的触发价
"""
# 创建止损单对象
so = StopOrder(symbol, direction, offset, price, volume, strategy)
# 获取该合约相关的止损单列表
try:
listSO = self.__dictStopOrder[symbol]
except KeyError:
listSO = []
self.__dictStopOrder[symbol] = listSO
# 将该止损单插入列表中
listSO.append(so)
return so
#----------------------------------------------------------------------
def cancelStopOrder(self, so):
"""撤销停止单"""
symbol = so.symbol
try:
listSO = self.__dictStopOrder[symbol]
if so in listSO:
listSO.remove(so)
if not listSO:
del self.__dictStopOrder[symbol]
except KeyError:
pass
#----------------------------------------------------------------------
def startAll(self):
"""启动所有策略"""
for strategy in self.dictStrategy.values():
strategy.start()
#----------------------------------------------------------------------
def stopAll(self):
"""停止所有策略"""
for strategy in self.dictStrategy.values():
strategy.stop()
########################################################################
class StrategyTemplate(object):
    """Base class for trading strategies.

    Subclasses override the ``on*`` callbacks; the engine drives them with
    ticks, order updates, trade fills and stop-order notifications.  The
    trading helpers (buy/sell/short/cover) return an order reference (or a
    stop-order object) when trading is enabled, else None.
    """

    #----------------------------------------------------------------------
    def __init__(self, name, symbol, engine):
        """Constructor"""
        self.name = name        # strategy name (must be unique)
        self.symbol = symbol    # contract the strategy trades
        self.engine = engine    # owning strategy-engine object
        self.trading = False    # whether live trading is enabled

    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Market data update; must be overridden."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def onTrade(self, trade):
        """Trade (fill) update; must be overridden."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Order status update; must be overridden."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def onStopOrder(self, orderRef):
        """Stop-order fired notification; must be overridden."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def onBar(self, o, h, l, c, volume, time):
        """K-line (bar) data update; must be overridden."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def start(self):
        """Enable trading.

        The simplest possible implementation: flip self.trading.
        Override for more elaborate start-up behaviour if needed.
        """
        self.trading = True
        self.engine.writeLog(self.name + u'开始运行')

    #----------------------------------------------------------------------
    def stop(self):
        """Disable trading (see start)."""
        self.trading = False
        self.engine.writeLog(self.name + u'停止运行')

    #----------------------------------------------------------------------
    def loadSetting(self, setting):
        """Load configuration; ``setting`` is typically a dict of parameters."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def buy(self, price, volume, stopOrder=False):
        """Buy to open a long position."""
        if self.trading:
            if stopOrder:
                so = self.engine.placeStopOrder(self.symbol, DIRECTION_BUY,
                                                OFFSET_OPEN, price, volume, self)
                return so
            else:
                ref = self.engine.sendOrder(self.symbol, DIRECTION_BUY,
                                            OFFSET_OPEN, price, volume, self)
                return ref
        else:
            return None

    #----------------------------------------------------------------------
    def cover(self, price, volume, stopOrder=False):
        """Buy to close a short position.

        BUGFIX: the parameter was previously named ``StopOrder`` while the
        body tested ``stopOrder``, so any call with trading enabled raised
        NameError.  Renamed to match buy/sell/short.
        """
        if self.trading:
            if stopOrder:
                so = self.engine.placeStopOrder(self.symbol, DIRECTION_BUY,
                                                OFFSET_CLOSE, price, volume, self)
                return so
            else:
                ref = self.engine.sendOrder(self.symbol, DIRECTION_BUY,
                                            OFFSET_CLOSE, price, volume, self)
                return ref
        else:
            return None

    #----------------------------------------------------------------------
    def sell(self, price, volume, stopOrder=False):
        """Sell to close a long position."""
        if self.trading:
            if stopOrder:
                so = self.engine.placeStopOrder(self.symbol, DIRECTION_SELL,
                                                OFFSET_CLOSE, price, volume, self)
                return so
            else:
                ref = self.engine.sendOrder(self.symbol, DIRECTION_SELL,
                                            OFFSET_CLOSE, price, volume, self)
                return ref
        else:
            return None

    #----------------------------------------------------------------------
    def short(self, price, volume, stopOrder=False):
        """Sell to open a short position."""
        if self.trading:
            if stopOrder:
                so = self.engine.placeStopOrder(self.symbol, DIRECTION_SELL,
                                                OFFSET_OPEN, price, volume, self)
                return so
            else:
                ref = self.engine.sendOrder(self.symbol, DIRECTION_SELL,
                                            OFFSET_OPEN, price, volume, self)
                return ref
        else:
            return None

    #----------------------------------------------------------------------
    def cancelOrder(self, orderRef):
        """Cancel an order by reference (delegates to the engine)."""
        self.engine.cancelOrder(orderRef)

    #----------------------------------------------------------------------
    def cancelStopOrder(self, so):
        """Withdraw a stop order (delegates to the engine)."""
        self.engine.cancelStopOrder(so)
import traceback
import maya.cmds as cmds
import _functions as fnt
""" Attach to existing """
def attachTimeAndFile(node, jobInfo, isConstant=False):
    """Wire a reader node's inTime/fileName attributes to the job controllers.

    Fixes idiom issues in the original: the ``node`` parameter was
    overwritten with an attribute-path string, None was compared with
    ``!=``, and a trailing no-op ``pass`` was left in.
    """
    # Drive the node's time input unless the cache is constant or the
    # input is already connected.
    timeSrc = cmds.connectionInfo(node + ".inTime", sfd=True)
    if timeSrc == "" and not isConstant:
        cmds.connectAttr(jobInfo.timeCtrl + ".outTime", node + ".inTime")
    # Re-point the fileName attribute at the job's file node, breaking any
    # previous connection first.
    fileAttr = node + ".fileName"
    fileSrc = cmds.connectionInfo(fileAttr, sfd=True)
    if fileSrc is not None and fileSrc != "":
        cmds.disconnectAttr(fileSrc, fileAttr)
    cmds.connectAttr(jobInfo.filenode + ".outFileName", fileAttr)
def attachXform(name, identifier, jobInfo, isConstant=False):
    """Attach an ExocortexAlembicXform reader to the transform ``name``.

    Returns ``[node]`` on success or ``["!", error_text]`` on failure.
    """
    cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachXform")
    try:
        # If the transform is already driven by some node, reuse it when it
        # is an Alembic xform reader; refuse otherwise.
        conX = cmds.listConnections(name+".translate")
        if conX:
            # already receiving transformation from another node!
            conX = conX[0]
            if cmds.objectType(conX) == "ExocortexAlembicXform":
                attachTimeAndFile(conX, jobInfo, isConstant)
                return [conX]
            else:
                return ["!", "Cannot attach Xform to " + name + ", it's attach to a node that is not an \"ExocortexAlembicXform\""]
        # Otherwise create a fresh reader and wire its outputs into the
        # transform's TRS/visibility channels.
        newXform = cmds.createNode("ExocortexAlembicXform")
        cmds.setAttr(newXform+".identifier", identifier, type="string")
        cmds.connectAttr(newXform+".translate", name+".translate")
        cmds.connectAttr(newXform+".rotate", name+".rotate")
        cmds.connectAttr(newXform+".scale", name+".scale")
        cmds.connectAttr(newXform+".outVisibility", name+".visibility")
        attachTimeAndFile(newXform, jobInfo, isConstant)
    except:
        # Errors are reported as data (["!", traceback]) rather than raised.
        return ["!", traceback.format_exc()]
    finally:
        # Always close the profiling scope, on every exit path.
        cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachXform")
    # Reached only on the creation path (every other path returned above).
    return [newXform]
def attachPolyMesh(name, identifier, jobInfo, isConstant=False):
    """Attach an ExocortexAlembicPolyMeshDeform deformer to mesh ``name``.

    Returns ``[node]`` on success or ``["!", error_text]`` on failure.
    """
    cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachPolyMesh")
    try:
        if cmds.objectType(name) != "mesh":
            return ["!", "Only mesh can be attached too!"]
        existing = cmds.listConnections(name, d=False, type="ExocortexAlembicPolyMeshDeform")
        if existing:
            # Already deformed by an Alembic node: just refresh the wiring.
            deformNode = existing[0]
            attachTimeAndFile(deformNode, jobInfo, isConstant)
            return [deformNode]
        # Otherwise create the deformer and hook up time and file inputs.
        deformNode = cmds.deformer(name, type="ExocortexAlembicPolyMeshDeform")[0]
        cmds.setAttr(deformNode + ".identifier", identifier, type="string")
        attachTimeAndFile(deformNode, jobInfo, isConstant)
        if jobInfo.useFaceSets:
            cmds.ExocortexAlembic_createFaceSets(f=cmds.getAttr(jobInfo.filenode+".outFileName"), i=identifier, o=name)
    except:
        return ["!", traceback.format_exc()]
    finally:
        cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachPolyMesh")
    return [deformNode]
def attachCamera(name, identifier, jobInfo, isConstant=False):
    """Attach an ExocortexAlembicCamera reader to camera ``name``.

    Returns ``[node]`` on success or ``["!", error_text]`` on failure.
    """
    cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachCamera")
    try:
        existing = cmds.listConnections(name, d=False, type="ExocortexAlembicCamera")
        if existing:
            # Camera already has a reader: refresh its time/file wiring.
            reader = existing[0]
            attachTimeAndFile(reader, jobInfo, isConstant)
            return [reader]
        reader = cmds.createNode("ExocortexAlembicCamera")
        # Drive each camera attribute from the reader's matching output.
        for attr in ("focalLength", "focusDistance", "lensSqueezeRatio",
                     "horizontalFilmAperture", "verticalFilmAperture",
                     "horizontalFilmOffset", "verticalFilmOffset",
                     "fStop", "shutterAngle"):
            cmds.connectAttr(reader + "." + attr, name + "." + attr)
        attachTimeAndFile(reader, jobInfo, isConstant)
    except:
        return ["!", traceback.format_exc()]
    finally:
        cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachCamera")
    return [reader]
def attachCurves(name, identifier, jobInfo, isConstant=False):
    """Attach an Alembic curves reader/deformer to curve shape ``name``.

    Returns ``[node]`` on success or ``["!", error_text]`` on failure.
    """
    cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachCurves")
    try:
        # Reuse an existing Alembic curve node (deform or reader) if present.
        conX = (cmds.listConnections(name+".create", d=False, type="ExocortexAlembicCurvesDeform") or
                cmds.listConnections(name+".create", d=False, type="ExocortexAlembicCurves"))
        if conX:
            curObj = conX[0]
            attachTimeAndFile(curObj, jobInfo, isConstant)
            return [curObj]
        # create deformer, and attach time and file
        newDform = cmds.deformer(name, type="ExocortexAlembicCurvesDeform")[0]
        cmds.setAttr(newDform+".identifier", identifier, type="string")
        attachTimeAndFile(newDform, jobInfo, isConstant)
        # get curObj new "output" attribute connection
        # Swap the deformer for a plain reader wired straight to the shape.
        conX = cmds.listConnections(name+".create", d=False, type="ExocortexAlembicCurvesDeform")
        if conX:
            curObj = conX[0]
            originalCur = cmds.connectionInfo(curObj+".output", sfd=True).split('.')[0]
            cmds.delete(curObj)
            curObj = cmds.createNode("ExocortexAlembicCurves")
            attachTimeAndFile(curObj, jobInfo, isConstant)
            cmds.connectAttr(curObj+".outCurve", originalCur+".create")
            cmds.connectAttr(jobInfo.filenode+".outFileName", curObj+".fileName")
            cmds.setAttr(curObj+".identifier", identifier, type="string")
        # NOTE(review): if this second lookup returns nothing, `curObj` is
        # never bound and the return below raises NameError -- confirm
        # whether the deformer lookup can ever come back empty here.
    except:
        return ["!", traceback.format_exc()]
    finally:
        cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachCurves")
    return [curObj]
def attachPoints(name, identifier, jobInfo, isConstant=False):
    """Attach an ExocortexAlembicPoints reader to particle object ``name``.

    Returns ``[node]`` on success or ``["!", error_text]`` on failure.
    """
    cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachPoints")
    try:
        existing = cmds.listConnections(name, d=False, type="ExocortexAlembicPoints")
        if existing:
            # Particles already have a reader: refresh its wiring.
            reader = existing[0]
            attachTimeAndFile(reader, jobInfo, isConstant)
            return [reader]
        reader = cmds.createNode("ExocortexAlembicPoints")
        # Per-particle attributes the Alembic reader fills in.
        for attrName, attrType in (("rgbPP", "vectorArray"),
                                   ("opacityPP", "doubleArray"),
                                   ("agePP", "doubleArray"),
                                   ("shapeInstanceIdPP", "doubleArray"),
                                   ("orientationPP", "vectorArray")):
            cmds.addAttr(name, ln=attrName, dt=attrType)
        cmds.connectAttr(reader + ".output[0]", name + ".newParticles[0]")
        cmds.connectAttr(jobInfo.timeCtrl + ".outTime", name + ".currentTime")
        # Disable particle conservation so the cache fully drives the system.
        cmds.setAttr(name + ".conserve", 0)
        attachTimeAndFile(reader, jobInfo, isConstant)
    except:
        return ["!", traceback.format_exc()]
    finally:
        cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachPoints")
    return [reader]
#!/bin/sh
#
# Exercise SSH-based tag signing/verification (gpg.format=ssh):
# creation, `git verify-tag`, key validity windows, forged
# signatures, and --raw / --format output.

test_description='signed tag tests'

GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME

. ./test-lib.sh
. "$TEST_DIRECTORY/lib-gpg.sh"

# Build a history mixing SSH-signed, unsigned, and untrusted-key tags
# that the remaining tests verify against.
test_expect_success GPGSSH 'create signed tags ssh' '
	test_when_finished "test_unconfig commit.gpgsign" &&
	test_config gpg.format ssh &&
	test_config user.signingkey "${GPGSSH_KEY_PRIMARY}" &&

	echo 1 >file && git add file &&
	test_tick && git commit -m initial &&
	git tag -s -m initial initial &&
	git branch side &&

	echo 2 >file && test_tick && git commit -a -m second &&
	git tag -s -m second second &&

	git checkout side &&
	echo 3 >elif && git add elif &&
	test_tick && git commit -m "third on side" &&

	git checkout main &&
	test_tick && git merge -S side &&
	git tag -s -m merge merge &&

	echo 4 >file && test_tick && git commit -a -S -m "fourth unsigned" &&
	git tag -a -m fourth-unsigned fourth-unsigned &&

	test_tick && git commit --amend -S -m "fourth signed" &&
	git tag -s -m fourth fourth-signed &&

	echo 5 >file && test_tick && git commit -a -m "fifth" &&
	git tag fifth-unsigned &&

	git config commit.gpgsign true &&
	echo 6 >file && test_tick && git commit -a -m "sixth" &&
	git tag -a -m sixth sixth-unsigned &&

	test_tick && git rebase -f HEAD^^ && git tag -s -m 6th sixth-signed HEAD^ &&
	git tag -m seventh -s seventh-signed &&

	echo 8 >file && test_tick && git commit -a -m eighth &&
	git tag -u"${GPGSSH_KEY_UNTRUSTED}" -m eighth eighth-signed-alt
'

# Tags signed with keys that are expired, not yet valid, or valid only
# inside a time window; used by the validity-window tests below.
test_expect_success GPGSSH,GPGSSH_VERIFYTIME 'create signed tags with keys having defined lifetimes' '
	test_when_finished "test_unconfig commit.gpgsign" &&
	test_config gpg.format ssh &&

	echo expired >file && test_tick && git commit -a -m expired -S"${GPGSSH_KEY_EXPIRED}" &&
	git tag -s -u "${GPGSSH_KEY_EXPIRED}" -m expired-signed expired-signed &&

	echo notyetvalid >file && test_tick && git commit -a -m notyetvalid -S"${GPGSSH_KEY_NOTYETVALID}" &&
	git tag -s -u "${GPGSSH_KEY_NOTYETVALID}" -m notyetvalid-signed notyetvalid-signed &&

	echo timeboxedvalid >file && test_tick && git commit -a -m timeboxedvalid -S"${GPGSSH_KEY_TIMEBOXEDVALID}" &&
	git tag -s -u "${GPGSSH_KEY_TIMEBOXEDVALID}" -m timeboxedvalid-signed timeboxedvalid-signed &&

	echo timeboxedinvalid >file && test_tick && git commit -a -m timeboxedinvalid -S"${GPGSSH_KEY_TIMEBOXEDINVALID}" &&
	git tag -s -u "${GPGSSH_KEY_TIMEBOXEDINVALID}" -m timeboxedinvalid-signed timeboxedinvalid-signed
'

# Trusted tags verify; unsigned tags fail; untrusted-key tags fail with
# a good-but-untrusted signature message.
test_expect_success GPGSSH 'verify and show ssh signatures' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	(
		for tag in initial second merge fourth-signed sixth-signed seventh-signed
		do
			git verify-tag $tag 2>actual &&
			grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual &&
			! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
			echo $tag OK || exit 1
		done
	) &&
	(
		for tag in fourth-unsigned fifth-unsigned sixth-unsigned
		do
			test_must_fail git verify-tag $tag 2>actual &&
			! grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual &&
			! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
			echo $tag OK || exit 1
		done
	) &&
	(
		for tag in eighth-signed-alt
		do
			test_must_fail git verify-tag $tag 2>actual &&
			grep "${GPGSSH_GOOD_SIGNATURE_UNTRUSTED}" actual &&
			! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
			grep "${GPGSSH_KEY_NOT_TRUSTED}" actual &&
			echo $tag OK || exit 1
		done
	)
'

# Key validity windows: verification must respect the tag timestamp.
test_expect_success GPGSSH,GPGSSH_VERIFYTIME 'verify-tag exits failure on expired signature key' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	test_must_fail git verify-tag expired-signed 2>actual &&
	! grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual
'

test_expect_success GPGSSH,GPGSSH_VERIFYTIME 'verify-tag exits failure on not yet valid signature key' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	test_must_fail git verify-tag notyetvalid-signed 2>actual &&
	! grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual
'

test_expect_success GPGSSH,GPGSSH_VERIFYTIME 'verify-tag succeeds with tag date and key validity matching' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	git verify-tag timeboxedvalid-signed 2>actual &&
	grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual &&
	! grep "${GPGSSH_BAD_SIGNATURE}" actual
'

test_expect_success GPGSSH,GPGSSH_VERIFYTIME 'verify-tag fails with tag date outside of key validity' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	test_must_fail git verify-tag timeboxedinvalid-signed 2>actual &&
	! grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual
'

# Tampering with the signed payload must invalidate the signature.
test_expect_success GPGSSH 'detect fudged ssh signature' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	git cat-file tag seventh-signed >raw &&
	sed -e "/^tag / s/seventh/7th-forged/" raw >forged1 &&
	git hash-object -w -t tag forged1 >forged1.tag &&
	test_must_fail git verify-tag $(cat forged1.tag) 2>actual1 &&
	grep "${GPGSSH_BAD_SIGNATURE}" actual1 &&
	! grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual1 &&
	! grep "${GPGSSH_GOOD_SIGNATURE_UNTRUSTED}" actual1
'

# Same matrix as above, but through the --raw output path.
test_expect_success GPGSSH 'verify ssh signatures with --raw' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	(
		for tag in initial second merge fourth-signed sixth-signed seventh-signed
		do
			git verify-tag --raw $tag 2>actual &&
			grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual &&
			! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
			echo $tag OK || exit 1
		done
	) &&
	(
		for tag in fourth-unsigned fifth-unsigned sixth-unsigned
		do
			test_must_fail git verify-tag --raw $tag 2>actual &&
			! grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual &&
			! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
			echo $tag OK || exit 1
		done
	) &&
	(
		for tag in eighth-signed-alt
		do
			test_must_fail git verify-tag --raw $tag 2>actual &&
			grep "${GPGSSH_GOOD_SIGNATURE_UNTRUSTED}" actual &&
			! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
			echo $tag OK || exit 1
		done
	)
'

test_expect_success GPGSSH 'verify signatures with --raw ssh' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	git verify-tag --raw sixth-signed 2>actual &&
	grep "${GPGSSH_GOOD_SIGNATURE_TRUSTED}" actual &&
	! grep "${GPGSSH_BAD_SIGNATURE}" actual &&
	echo sixth-signed OK
'

# Verifying several tags in one invocation must match verifying them
# one at a time.
test_expect_success GPGSSH 'verify multiple tags ssh' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	tags="seventh-signed sixth-signed" &&
	for i in $tags
	do
		git verify-tag -v --raw $i || return 1
	done >expect.stdout 2>expect.stderr.1 &&
	grep "^${GPGSSH_GOOD_SIGNATURE_TRUSTED}" <expect.stderr.1 >expect.stderr &&
	git verify-tag -v --raw $tags >actual.stdout 2>actual.stderr.1 &&
	grep "^${GPGSSH_GOOD_SIGNATURE_TRUSTED}" <actual.stderr.1 >actual.stderr &&
	test_cmp expect.stdout actual.stdout &&
	test_cmp expect.stderr actual.stderr
'

test_expect_success GPGSSH 'verifying tag with --format - ssh' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	cat >expect <<-\EOF &&
	tagname : fourth-signed
	EOF
	git verify-tag --format="tagname : %(tag)" "fourth-signed" >actual &&
	test_cmp expect actual
'

# A bad signature must suppress --format output entirely.
test_expect_success GPGSSH 'verifying a forged tag with --format should fail silently - ssh' '
	test_must_fail git verify-tag --format="tagname : %(tag)" $(cat forged1.tag) >actual-forged &&
	test_must_be_empty actual-forged
'

# %G? pretty-format placeholder reports a good signature as "G".
test_expect_success GPGSSH 'rev-list --format=%G' '
	test_config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
	git rev-list -1 --format="%G? %H" sixth-signed >actual &&
	cat >expect <<-EOF &&
	commit $(git rev-parse sixth-signed^0)
	G $(git rev-parse sixth-signed^0)
	EOF
	test_cmp expect actual
'

test_done
const idx = process.execArgv.indexOf('--cpu-prof')
if (idx >= 0) process.execArgv.splice(idx, 1)
module.exports = {
eslint: {
ignoreDuringBuilds: true,
},
} | javascript | github | https://github.com/vercel/next.js | bench/nested-deps-app-router-many-pages/next.config.js |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# ansible-doc documentation.  BUGFIX: `id` was documented as
# `required: true`, but the module's argument_spec defaults it to None
# and falls back to listing all policies, so it is optional.
DOCUMENTATION = '''
---
module: ovirt_scheduling_policies_facts
short_description: Retrieve facts about one or more oVirt scheduling policies
author: "Ondra Machacek (@machacekondra)"
version_added: "2.4"
description:
    - "Retrieve facts about one or more oVirt scheduling policies."
notes:
    - "This module creates a new top-level C(ovirt_scheduling_policies) fact,
       which contains a list of scheduling policies."
options:
    id:
        description:
            - "ID of the scheduling policy."
    name:
        description:
            - "Name of the scheduling policy, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

# Gather facts about all scheduling policies with name InClusterUpgrade:
- ovirt_scheduling_policies_facts:
    name: InClusterUpgrade
- debug:
    var: ovirt_scheduling_policies
'''

RETURN = '''
ovirt_scheduling_policies:
    description: "List of dictionaries describing the scheduling policies.
                  Scheduling policies attributes are mapped to dictionary keys,
                  all scheduling policies attributes can be found at following
                  url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
    returned: On success.
    type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Entry point: gather oVirt scheduling-policy facts.

    Selection: glob match on ``name``, exact ``id`` lookup, or all
    policies when neither is given.

    BUGFIX: ``auth``/``connection`` were bound only inside ``try``, so a
    failure in the auth pop or ``create_connection`` raised a NameError
    in ``finally`` that masked the original error.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        id=dict(default=None),
        name=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        system_service = connection.system_service()
        sched_policies_service = system_service.scheduling_policies_service()
        if module.params['name']:
            # Name selection supports shell-style glob patterns.
            sched_policies = [
                e for e in sched_policies_service.list()
                if fnmatch.fnmatch(e.name, module.params['name'])
            ]
        elif module.params['id']:
            sched_policies = [
                sched_policies_service.service(module.params['id']).get()
            ]
        else:
            sched_policies = sched_policies_service.list()
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_scheduling_policies=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in sched_policies
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually opened; keep the SSO
        # session alive when the caller supplied a token.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
import sys
"""
Make Html a list and store
"""
class myContentHandler(ContentHandler):
    """SAX handler that collects Madrid-events <contenido> records.

    Each record's <atributo nombre="..."> values are accumulated into a
    dict and the finished dicts are appended to ``self.Html``.
    """
    def __init__ (self):
        # NOTE(review): ContentHandler.__init__ is never called -- confirm
        # that skipping it is intentional.
        self.inItem = False      # inside a <contenido> element
        self.inContent = False   # inside an <atributo> element
        self.Content = ""        # text accumulated for the current attribute
        self.Html = []           # finished activity dicts
        self.Activity = {}       # activity currently being built
        # Flags for which named attribute is currently open.
        self.inTitle = False
        self.inType = False
        self.inPrice = False
        self.inDate = False
        self.inEndDate = False
        self.inHour = False
        self.inDuration = False
        self.inUrl = False
    def startElement (self, name, attrs):
        """Raise the flag matching the attribute being opened."""
        if name == 'contenido':
            self.inItem = True
        elif self.inItem:
            if name == 'atributo':
                self.inContent = True
                nombre = attrs.getValue(u'nombre')
                if nombre == 'TITULO':
                    self.inTitle = True
                elif nombre == 'TIPO':
                    self.inType = True
                elif nombre == 'PRECIO':
                    self.inPrice = True
                elif nombre == 'FECHA-EVENTO':
                    self.inDate = True
                elif nombre == 'FECHA-FIN-EVENTO':
                    self.inEndDate = True
                elif nombre == 'HORA-EVENTO':
                    self.inHour = True
                elif nombre == 'EVENTO-LARGA-DURACION':
                    self.inDuration = True
                elif nombre == 'CONTENT-URL':
                    self.inUrl = True
    def endElement (self, name):
        """Store the accumulated text under the matching activity key."""
        if name == 'contenido':
            self.inItem = False
        elif self.inItem:
            if name == 'atributo':
                if self.inTitle:
                    self.Activity['title'] = self.Content
                    self.inTitle = False
                elif self.inPrice:
                    self.Activity['precio'] = self.Content
                    self.inPrice = False
                elif self.inDate:
                    self.Activity['fecha'] = self.Content
                    self.inDate = False
                elif self.inEndDate:
                    self.Activity['final'] = self.Content
                    self.inEndDate = False
                elif self.inHour:
                    self.Activity['hora'] = self.Content
                    self.inHour = False
                elif self.inDuration:
                    self.Activity['duracion'] = self.Content
                    self.inDuration = False
                elif self.inUrl:
                    self.Activity['url'] = self.Content
                    self.inUrl = False
                elif self.inType:
                    self.Activity['tipo'] = self.Content
                    self.inType = False
                    # NOTE(review): the activity is appended when the TIPO
                    # attribute closes, not when </contenido> closes -- this
                    # assumes TIPO is the last attribute of every record;
                    # confirm against the feed layout.
                    self.Html.append(self.Activity)
                    self.Activity = {}
                # To avoid Unicode trouble
                self.inContent = False
                self.Content = ""
    def characters (self, chars):
        # Accumulate character data; SAX may deliver text in chunks.
        if self.inContent:
            self.Content = self.Content + chars
# --- Main prog
def getNews():
    """Download and parse the Madrid cultural-events feed.

    Returns the list of activity dicts accumulated by the handler.
    """
    # Feed location (Madrid open-data portal, cultural events, XML).
    feedUrl = ("http://datos.madrid.es/portal/site/egob/"
               "menuitem.ac61933d6ee3c31cae77ae7784f1a5a0/"
               "?vgnextoid=00149033f2201410VgnVCM100000171f5"
               "a0aRCRD&format=xml&file=0&filename=206974-0-"
               "agenda-eventos-culturales-100&mgmtid=6c0b6d01"
               "df986410VgnVCM2000000c205a0aRCRD")
    # Wire up the SAX parser with our handler and run it over the feed.
    parser = make_parser()
    handler = myContentHandler()
    parser.setContentHandler(handler)
    parser.parse(feedUrl)
    return handler.Html
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
import sys
import logging
def setup_logging():
    """Configure root logging at INFO with a location-aware format.

    ``%(funcName)s`` only exists from Python 2.5 onward, so older
    interpreters get a format string without it.
    """
    hasFuncName = sys.version_info >= (2, 5)
    fmt = (
        '%(asctime)s [%(levelname)s] %(funcName)s() @%(filename)s:%(lineno)d\n%(message)s\n'
        if hasFuncName else
        '%(asctime)s [%(levelname)s] @%(filename)s:%(lineno)d\n%(message)s\n'
    )
    logging.basicConfig(level=logging.INFO, format=fmt)
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use crate::{
error::Context,
helpers::{config::Config as TauriConfig, template},
mobile::ios::LIB_OUTPUT_FILE_NAME,
Error, ErrorExt, Result,
};
use cargo_mobile2::{
apple::{
config::{Config, Metadata},
deps,
target::Target,
},
config::app::DEFAULT_ASSET_DIR,
target::TargetTrait as _,
util::{self, cli::TextWrapper},
};
use handlebars::Handlebars;
use include_dir::{include_dir, Dir};
use std::{
ffi::OsString,
fs::{create_dir_all, OpenOptions},
path::{Component, PathBuf},
};
const TEMPLATE_DIR: Dir<'_> = include_dir!("$CARGO_MANIFEST_DIR/templates/mobile/ios");
// NOTE: using an unprefixed app_root here looks risky (inherited from cargo-mobile).
// TODO: determine what cargo-mobile intended by the unprefixed path.
#[allow(clippy::too_many_arguments)]
pub fn gen(
tauri_config: &TauriConfig,
config: &Config,
metadata: &Metadata,
(handlebars, mut map): (Handlebars, template::JsonMap),
wrapper: &TextWrapper,
non_interactive: bool,
reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<()> {
if !skip_targets_install {
let installed_targets =
crate::interface::rust::installation::installed_targets().unwrap_or_default();
let missing_targets = Target::all()
.values()
.filter(|t| !installed_targets.contains(&t.triple().into()))
.collect::<Vec<&Target>>();
if !missing_targets.is_empty() {
log::info!("Installing iOS Rust targets...");
for target in missing_targets {
log::info!("Installing target {}", target.triple());
target.install().map_err(|error| Error::CommandFailed {
command: "rustup target add".to_string(),
error,
})?;
}
}
}
deps::install_all(wrapper, non_interactive, true, reinstall_deps).map_err(|error| {
Error::CommandFailed {
command: "pod install".to_string(),
error: std::io::Error::other(error),
}
})?;
let dest = config.project_dir();
let rel_prefix = util::relativize_path(config.app().root_dir(), &dest);
let source_dirs = vec![rel_prefix.join("src")];
let asset_catalogs = metadata.ios().asset_catalogs().unwrap_or_default();
let ios_pods = metadata.ios().pods().unwrap_or_default();
let macos_pods = metadata.macos().pods().unwrap_or_default();
#[cfg(target_arch = "aarch64")]
let default_archs = ["arm64"];
#[cfg(not(target_arch = "aarch64"))]
let default_archs = ["arm64", "x86_64"];
map.insert("lib-output-file-name", LIB_OUTPUT_FILE_NAME);
map.insert("file-groups", &source_dirs);
map.insert("ios-frameworks", metadata.ios().frameworks());
map.insert("ios-valid-archs", default_archs);
map.insert("ios-vendor-frameworks", metadata.ios().vendor_frameworks());
map.insert("ios-vendor-sdks", metadata.ios().vendor_sdks());
map.insert("macos-frameworks", metadata.macos().frameworks());
map.insert(
"macos-vendor-frameworks",
metadata.macos().vendor_frameworks(),
);
map.insert("macos-vendor-sdks", metadata.macos().vendor_frameworks());
map.insert("asset-catalogs", asset_catalogs);
map.insert("ios-pods", ios_pods);
map.insert("macos-pods", macos_pods);
map.insert(
"ios-additional-targets",
metadata.ios().additional_targets(),
);
map.insert(
"macos-additional-targets",
metadata.macos().additional_targets(),
);
map.insert("ios-pre-build-scripts", metadata.ios().pre_build_scripts());
map.insert(
"ios-post-compile-scripts",
metadata.ios().post_compile_scripts(),
);
map.insert(
"ios-post-build-scripts",
metadata.ios().post_build_scripts(),
);
map.insert(
"macos-pre-build-scripts",
metadata.macos().pre_build_scripts(),
);
map.insert(
"macos-post-compile-scripts",
metadata.macos().post_compile_scripts(),
);
map.insert(
"macos-post-build-scripts",
metadata.macos().post_build_scripts(),
);
map.insert(
"ios-command-line-arguments",
metadata.ios().command_line_arguments(),
);
map.insert(
"macos-command-line-arguments",
metadata.macos().command_line_arguments(),
);
let mut created_dirs = Vec::new();
template::render_with_generator(
&handlebars,
map.inner(),
&TEMPLATE_DIR,
&dest,
&mut |path| {
let mut components: Vec<_> = path.components().collect();
let mut new_component = None;
for component in &mut components {
if let Component::Normal(c) = component {
let c = c.to_string_lossy();
if c.contains("{{app.name}}") {
new_component.replace(OsString::from(
&c.replace("{{app.name}}", config.app().name()),
));
*component = Component::Normal(new_component.as_ref().unwrap());
break;
}
}
}
let path = dest.join(components.iter().collect::<PathBuf>());
let parent = path.parent().unwrap().to_path_buf();
if !created_dirs.contains(&parent) {
create_dir_all(&parent)?;
created_dirs.push(parent);
}
let mut options = OpenOptions::new();
options.write(true);
if !path.exists() {
options.create(true).open(path).map(Some)
} else {
Ok(None)
}
},
)
.with_context(|| "failed to process template")?;
if let Some(template_path) = tauri_config.bundle.ios.template.as_ref() {
let template = std::fs::read_to_string(template_path).fs_context(
"failed to read custom Xcode project template",
template_path.to_path_buf(),
)?;
let mut output_file = std::fs::File::create(dest.join("project.yml")).fs_context(
"failed to create project.yml file",
dest.join("project.yml"),
)?;
handlebars
.render_template_to_write(&template, map.inner(), &mut output_file)
.expect("Failed to render template");
}
let mut dirs_to_create = asset_catalogs.to_vec();
dirs_to_create.push(dest.join(DEFAULT_ASSET_DIR));
dirs_to_create.push(dest.join("Externals"));
dirs_to_create.push(dest.join(format!("{}_iOS", config.app().name())));
// Create all required project directories if they don't already exist
for dir in &dirs_to_create {
std::fs::create_dir_all(dir).fs_context("failed to create directory", dir.to_path_buf())?;
}
// Note that Xcode doesn't always reload the project nicely; reopening is
// often necessary.
println!("Generating Xcode project...");
duct::cmd(
"xcodegen",
[
"generate",
"--spec",
&dest.join("project.yml").to_string_lossy(),
],
)
.stdout_file(os_pipe::dup_stdout().unwrap())
.stderr_file(os_pipe::dup_stderr().unwrap())
.run()
.map_err(|error| Error::CommandFailed {
command: "xcodegen".to_string(),
error,
})?;
if !ios_pods.is_empty() || !macos_pods.is_empty() {
duct::cmd(
"pod",
[
"install",
&format!("--project-directory={}", dest.display()),
],
)
.stdout_file(os_pipe::dup_stdout().unwrap())
.stderr_file(os_pipe::dup_stderr().unwrap())
.run()
.map_err(|error| Error::CommandFailed {
command: "pod install".to_string(),
error,
})?;
}
Ok(())
} | rust | github | https://github.com/tauri-apps/tauri | crates/tauri-cli/src/mobile/ios/project.rs |
"""Unsupervised nearest neighbors learner"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.base import _fit_context
from sklearn.neighbors._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin
class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):
    """Unsupervised learner for implementing neighbor searches.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    .. versionadded:: 0.9

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to use by default for :meth:`kneighbors` queries.

    radius : float, default=1.0
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

    p : float (positive), default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    effective_metric_ : str
        Metric used to compute distances to neighbors.

    effective_metric_params_ : dict
        Parameters for the metric used to compute distances to neighbors.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    KNeighborsClassifier : Classifier implementing the k-nearest neighbors
        vote.
    RadiusNeighborsClassifier : Classifier implementing a vote among neighbors
        within a given radius.
    KNeighborsRegressor : Regression based on k-nearest neighbors.
    RadiusNeighborsRegressor : Regression based on neighbors within a fixed
        radius.
    BallTree : Space partitioning data structure for organizing points in a
        multi-dimensional space, used for nearest neighbor search.

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neighbors import NearestNeighbors
    >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
    >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
    >>> neigh.fit(samples)
    NearestNeighbors(...)
    >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
    array([[2, 0]]...)
    >>> nbrs = neigh.radius_neighbors(
    ...    [[0, 0, 1.3]], 0.4, return_distance=False
    ... )
    >>> np.asarray(nbrs[0][0])
    array(2)
    """

    def __init__(
        self,
        *,
        n_neighbors=5,
        radius=1.0,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        # Hyper-parameters are stored verbatim by NeighborsBase; validation
        # is deferred to fit() per scikit-learn convention.
        super().__init__(
            n_neighbors=n_neighbors,
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )

    @_fit_context(
        # NearestNeighbors.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the nearest neighbors estimator from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : NearestNeighbors
            The fitted nearest neighbors estimator.
        """
        # All index-building logic (auto algorithm selection, sparse
        # handling) lives in NeighborsBase._fit.
        return self._fit(X)
#!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import pmt
import numpy
def make_tag(key, value, offset, srcid=None):
    """Build a gr.tag_t with PMT-converted key/value at the given stream offset.

    srcid, when provided, is also converted to a PMT and attached.
    """
    tag = gr.tag_t()
    tag.offset = offset
    tag.key = pmt.string_to_symbol(key)
    tag.value = pmt.to_pmt(value)
    if srcid is not None:
        tag.srcid = pmt.to_pmt(srcid)
    return tag
class test_skiphead(gr_unittest.TestCase):
    """QA for blocks.skiphead: exactly the first N stream items are dropped."""

    def setUp(self):
        self.tb = gr.top_block()
        self.src_data = [int(x) for x in range(65536)]

    def tearDown(self):
        self.tb = None

    def _run_skiphead(self, skip_cnt, tags=None):
        """Run vector_source -> skiphead(skip_cnt) -> vector_sink.

        Asserts that the sink received the source data minus its first
        skip_cnt items, and returns the sink for further inspection.
        """
        expected_result = tuple(self.src_data[skip_cnt:])
        if tags is None:
            src1 = blocks.vector_source_i(self.src_data)
        else:
            src1 = blocks.vector_source_i(self.src_data, tags=tags)
        op = blocks.skiphead(gr.sizeof_int, skip_cnt)
        dst1 = blocks.vector_sink_i()
        self.tb.connect(src1, op, dst1)
        self.tb.run()
        self.assertEqual(expected_result, dst1.data())
        return dst1

    def test_skip_0(self):
        self._run_skiphead(0)

    def test_skip_1(self):
        self._run_skiphead(1)

    def test_skip_1023(self):
        self._run_skiphead(1023)

    def test_skip_6339(self):
        self._run_skiphead(6339)

    def test_skip_12678(self):
        self._run_skiphead(12678)

    def test_skip_all(self):
        self._run_skiphead(len(self.src_data))

    def test_skip_tags(self):
        src_tags = tuple([make_tag('foo', 'bar', 1, 'src'),
                          make_tag('baz', 'qux', 50, 'src')])
        dst1 = self._run_skiphead(25, tags=src_tags)
        # Only the tag past the skipped region survives, shifted to offset 25.
        self.assertEqual(dst1.tags()[0].offset, 25, "Tag offset is incorrect")
        self.assertEqual(len(dst1.tags()), 1, "Wrong number of tags received")
        self.assertEqual(pmt.to_python(
            dst1.tags()[0].key), "baz", "Tag key is incorrect")
        self.assertEqual(pmt.to_python(
            dst1.tags()[0].value), "qux", "Tag value is incorrect")
if __name__ == '__main__':
    # Second argument names the XML results file used by GNU Radio's QA driver.
    gr_unittest.run(test_skiphead, "test_skiphead.xml")
name: JS Tests
on:
push:
branches:
- main
pull_request:
workflow_dispatch:
env:
FORCE_COLOR: 2
NODE: 22
permissions:
contents: read
jobs:
run:
permissions:
# allow coverallsapp/github-action to create new checks issues and fetch code
checks: write
contents: read
name: JS Tests
runs-on: ubuntu-latest
steps:
- name: Clone repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set up Node.js
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version: ${{ env.NODE }}
cache: npm
- name: Install npm dependencies
run: npm ci
- name: Run dist
run: npm run js
- name: Run JS tests
run: npm run js-test
- name: Run Coveralls
uses: coverallsapp/github-action@5cbfd81b66ca5d10c19b062c04de0199c215fb6e # v2.3.7
if: ${{ !github.event.repository.fork }}
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
path-to-lcov: "./js/coverage/lcov.info" | unknown | github | https://github.com/twbs/bootstrap | .github/workflows/js.yml |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static java.lang.Math.log;
import com.google.common.annotations.GwtIncompatible;
import java.util.Set;
import org.jspecify.annotations.NullUnmarked;
@GwtIncompatible
@NullUnmarked
/**
 * Hash-flooding resistance test for {@code CompactHashSet}: inserting many
 * colliding keys must stay within the declared worst-case cost bound.
 */
public class CompactHashSetFloodingTest extends AbstractHashFloodingTest<Set<Object>> {
  public CompactHashSetFloodingTest() {
    super(
        // How to build the collection under test from a list of elements.
        ImmutableList.of(Construction.setFromElements(CompactHashSet::create)),
        // Allowed total cost for n insertions under adversarial collisions.
        n -> n * log(n),
        // Operations whose worst-case behavior is exercised.
        ImmutableList.of(QueryOp.SET_CONTAINS));
  }
}
} | java | github | https://github.com/google/guava | guava-tests/test/com/google/common/collect/CompactHashSetFloodingTest.java |
from __future__ import absolute_import, division
import __builtin__
import math
import random
import time
def median(x, use_float=True):
    """Return the median of sequence x.

    For even-length input this is the mean of the two middle elements;
    with use_float=False that mean uses floor division instead.
    Raises ValueError on an empty sequence.
    """
    # There exist O(n) selection algorithms, but sorting is fine here.
    ordered = sorted(x)
    if not ordered:
        raise ValueError('empty sequence!')
    lo = (len(ordered) - 1)//2
    hi = len(ordered)//2
    pair_sum = ordered[lo] + ordered[hi]
    if use_float:
        return pair_sum/2
    return pair_sum//2
def mean(x):
    """Return the arithmetic mean of iterable x.

    Raises ValueError on an empty iterable (consistent with median),
    instead of the unhelpful ZeroDivisionError that bare total/count
    would produce.
    """
    total = 0
    count = 0
    for y in x:
        total += y
        count += 1
    if not count:
        raise ValueError('empty sequence!')
    return total/count
def shuffled(x):
    """Return a new list with the elements of x in random order; x is untouched."""
    result = list(x)
    random.shuffle(result)
    return result
def shift_left(n, m):
    """Shift n left by m bits; a negative m shifts right instead.

    Needed because Python raises ValueError on a negative shift count.
    """
    return n << m if m >= 0 else n >> -m
def clip(x, (low, high)):
    # Clamp x into the closed interval [low, high].
    # NOTE: the test order matters for a degenerate interval (low > high):
    # the x < low check wins, so `low` is returned in that case.
    if x < low:
        return low
    elif x > high:
        return high
    else:
        return x
add_to_range = lambda x, (low, high): (min(low, x), max(high, x))
def nth(i, n=0):
    """Return element n (0-based) of iterable i, consuming it.

    Raises StopIteration when i has fewer than n + 1 items.
    """
    it = iter(i)
    remaining = n
    while remaining > 0:
        it.next()
        remaining -= 1
    return it.next()
def geometric(p):
    """Sample a geometric distribution with success probability p.

    Returns the 1-based count of Bernoulli(p) trials up to and including
    the first success, via inverse-CDF sampling. Raises ValueError unless
    0 < p <= 1.
    """
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    # Inverse transform: floor(log(1-U)/log(1-p)) + 1, U uniform in [0, 1).
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
def add_dicts_ext(add_func=lambda a, b: a+b, zero=0):
    """Build a dict-summing function with (add_func, zero) as the monoid.

    The returned function merges any number of dicts, combining values of
    shared keys with add_func and dropping keys whose combined value
    equals zero.
    """
    def add_dicts(*dicts):
        combined = {}
        for mapping in dicts:
            for key, value in mapping.iteritems():
                combined[key] = add_func(combined.get(key, zero), value)
        return dict((key, value) for key, value in combined.iteritems() if value != zero)
    return add_dicts
# Plain numeric dict addition, e.g. add_dicts({'a': 1}, {'a': 2}) == {'a': 3}.
add_dicts = add_dicts_ext()
# Scale every value of dict x by the constant c.
mult_dict = lambda c, x: dict((k, c*v) for k, v in x.iteritems())
def format(x, add_space=False):
    """Format a nonnegative number with a metric prefix, e.g. 12345678 -> '12345k'.

    Divides by 1000 until the value drops below 100000 (keeping at least
    five significant digits), then appends the matching SI prefix.
    NOTE: shadows the built-in format() within this module.
    """
    prefixes = 'kMGTPEZY'
    steps = 0
    while x >= 100000 and steps < len(prefixes) - 2:
        x //= 1000
        steps += 1
    suffix = prefixes[steps - 1] if steps else ''
    if add_space and suffix:
        suffix = ' ' + suffix
    return '%i' % (x,) + suffix
def format_dt(dt):
    """Render a duration in seconds using the largest unit it exceeds, e.g. 90 -> '1.5 minutes'.

    Durations of at most one second fall through to '... seconds'.
    """
    units = [
        (365.2425*60*60*24, 'years'),
        (60*60*24, 'days'),
        (60*60, 'hours'),
        (60, 'minutes'),
        (1, 'seconds'),
    ]
    for size, name in units:
        if dt > size:
            break
    return '%.01f %s' % (dt/size, name)
perfect_round = lambda x: int(x + random.random())
def erf(x):
# save the sign of x
sign = 1
if x < 0:
sign = -1
x = abs(x)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def find_root(y_over_dy, start, steps=10, bounds=(None, None)):
    """Newton-style iteration: repeatedly subtract y_over_dy(guess), clamped to bounds.

    Stops early once the guess stops changing, or after `steps` updates.
    bounds is a (lower, upper) pair; either end may be None for unbounded.
    """
    lower, upper = bounds
    guess = start
    for _ in xrange(steps):
        previous = guess
        guess = guess - y_over_dy(guess)
        if lower is not None and guess < lower:
            guess = lower
        if upper is not None and guess > upper:
            guess = upper
        if guess == previous:
            break
    return guess
def ierf(z):
    """Inverse error function via Newton's method on erf(x) - z, started at 0."""
    def newton_step(x):
        # d/dx erf(x) = 2*e^(-x^2)/sqrt(pi)
        return (erf(x) - z)/(2*math.e**(-x**2)/math.sqrt(math.pi))
    return find_root(newton_step, 0)
def binomial_conf_interval(x, n, conf=0.95):
    """Wilson score interval for a binomial proportion (x successes in n trials).

    With n == 0 an interval of width `conf` is placed uniformly at random
    in [0, 1]. The result is widened to contain the point estimate x/n and
    clipped to [0, 1].
    """
    assert 0 <= x <= n and 0 <= conf < 1
    if n == 0:
        left = random.random()*(1 - conf)
        return left, left + conf
    # z is the two-sided standard-normal quantile for the requested confidence.
    z = math.sqrt(2)*ierf(conf)
    p = x/n
    center = p + z**2/2/n
    spread = z * math.sqrt(p*(1-p)/n + z**2/4/n**2)
    denom = 1 + z**2/n
    interval = [(center - spread)/denom, (center + spread)/denom]
    return [clip(v, (0, 1)) for v in add_to_range(x/n, interval)]
minmax = lambda x: (min(x), max(x))
def format_binomial_conf(x, n, conf=0.95, f=lambda x: x):
    """Human-readable Wilson interval, e.g. '~50.0% (31-69%)'; '???' when n == 0.

    f post-processes the proportion and both interval endpoints before display.
    """
    if n == 0:
        return '???'
    lo, hi = minmax([f(v) for v in binomial_conf_interval(x, n, conf)])
    return '~%.1f%% (%.f-%.f%%)' % (
        100*f(x/n), math.floor(100*lo), math.ceil(100*hi))
def reversed(x):
    """Like the built-in reversed(), but copies one-shot iterables first.

    NOTE: intentionally shadows the built-in within this module; the real
    implementation is reached through __builtin__.
    """
    try:
        return __builtin__.reversed(x)
    except TypeError:
        # x is not a sequence; materialize it and reverse the copy.
        return __builtin__.reversed(list(x))
class Object(object):
    """Generic attribute bag: Object(a=1).a == 1."""
    def __init__(self, **kwargs):
        # Equivalent to setattr() per keyword for a plain object subclass.
        self.__dict__.update(kwargs)
def add_tuples(res, *tuples):
    """Element-wise sum of res and each of the following tuples.

    Every tuple must match len(res); raises ValueError otherwise.
    """
    for extra in tuples:
        if len(extra) != len(res):
            raise ValueError('tuples must all be the same length')
        res = tuple(left + right for left, right in zip(res, extra))
    return res
def flatten_linked_list(x):
    """Yield the items of a (rest, item) cons-style linked list, last-added first.

    The list terminates with None, e.g. ((None, 1), 2) yields 2 then 1.
    """
    node = x
    while node is not None:
        node, item = node
        yield item
def weighted_choice(choices):
    """Pick an item with probability proportional to its weight.

    choices is an iterable of (item, weight) pairs with integer weights
    whose sum must be positive (random.randrange requires it).
    """
    pairs = [(item, weight) for item, weight in choices]
    target = random.randrange(sum(weight for _, weight in pairs))
    for item, weight in pairs:
        if weight > target:
            return item
        target -= weight
    raise AssertionError()
def natural_to_string(n, alphabet=None):
    """Encode the natural number n as a string.

    With alphabet=None, returns minimal big-endian bytes (even-length hex,
    decoded — Python 2 str codec). Otherwise writes n in base
    len(alphabet) with the given symbols; 0 encodes as ''. Raises
    TypeError for negative n.
    """
    if n < 0:
        raise TypeError('n must be a natural')
    if alphabet is None:
        digits = ('%x' % (n,)).lstrip('0')
        if len(digits) % 2:
            digits = '0' + digits
        return digits.decode('hex')
    assert len(set(alphabet)) == len(alphabet)
    symbols = []
    while n:
        n, index = divmod(n, len(alphabet))
        symbols.append(alphabet[index])
    symbols.reverse()
    return ''.join(symbols)
def string_to_natural(s, alphabet=None):
    """Inverse of natural_to_string.

    With alphabet=None, interprets s as big-endian bytes (Python 2 str
    hex codec); otherwise reads s as a base-len(alphabet) numeral using
    Horner's rule. Non-minimal (leading-zero) encodings are rejected.
    """
    if alphabet is None:
        assert not s.startswith('\x00')
        return int(s.encode('hex'), 16) if s else 0
    assert len(set(alphabet)) == len(alphabet)
    assert not s.startswith(alphabet[0])
    base = len(alphabet)
    total = 0
    for char in s:
        total = total*base + alphabet.index(char)
    return total
class RateMonitor(object):
    """Keep recent (timestamp, datum) samples for rate estimation.

    Samples older than max_lookback_time seconds are discarded. The very
    first datum only starts the clock (first_timestamp) and is not stored,
    since no elapsed time can be attributed to it.
    """
    def __init__(self, max_lookback_time):
        self.max_lookback_time = max_lookback_time
        self.datums = []
        self.first_timestamp = None

    def _prune(self):
        # Drop every sample that fell out of the lookback window.
        start_time = time.time() - self.max_lookback_time
        for i, (ts, datum) in enumerate(self.datums):
            if ts > start_time:
                self.datums[:] = self.datums[i:]
                return
        # Bug fix: previously, when *every* sample was stale the loop fell
        # through without removing anything, so the list grew without bound.
        self.datums[:] = []

    def get_datums_in_last(self, dt=None):
        """Return (datums within the last dt seconds, effective time span).

        dt defaults to (and must not exceed) max_lookback_time; the span
        is capped by how long this monitor has actually been collecting.
        """
        if dt is None:
            dt = self.max_lookback_time
        assert dt <= self.max_lookback_time
        self._prune()
        now = time.time()
        return [datum for ts, datum in self.datums if ts > now - dt], min(dt, now - self.first_timestamp) if self.first_timestamp is not None else 0

    def add_datum(self, datum):
        """Record datum at the current time; the first call only starts the clock."""
        self._prune()
        t = time.time()
        if self.first_timestamp is None:
            self.first_timestamp = t
        else:
            self.datums.append((t, datum))
def merge_dicts(*dicts):
    """Merge dicts left to right into a new dict; later values win on key clashes."""
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const memoize = require("./util/memoize");
/** @typedef {import("./ChunkGraph")} ChunkGraph */
/** @typedef {import("./DependenciesBlock")} DependenciesBlock */
/** @typedef {import("./Module")} Module */
/** @typedef {import("./ModuleGraph")} ModuleGraph */
/** @typedef {import("./ModuleGraphConnection")} ModuleGraphConnection */
/** @typedef {import("./ModuleGraphConnection").ConnectionState} ConnectionState */
/** @typedef {import("./RuntimeTemplate")} RuntimeTemplate */
/** @typedef {import("./WebpackError")} WebpackError */
/** @typedef {import("./serialization/ObjectMiddleware").ObjectDeserializerContext} ObjectDeserializerContext */
/** @typedef {import("./serialization/ObjectMiddleware").ObjectSerializerContext} ObjectSerializerContext */
/** @typedef {import("./util/Hash")} Hash */
/** @typedef {import("./util/runtime").RuntimeSpec} RuntimeSpec */
/** @typedef {import("./dependencies/ModuleDependency")} ModuleDependency */
/**
* @typedef {object} UpdateHashContext
* @property {ChunkGraph} chunkGraph
* @property {RuntimeSpec} runtime
* @property {RuntimeTemplate=} runtimeTemplate
*/
/**
* @typedef {object} SourcePosition
* @property {number} line
* @property {number=} column
*/
/**
* @typedef {object} RealDependencyLocation
* @property {SourcePosition} start
* @property {SourcePosition=} end
* @property {number=} index
*/
/**
* @typedef {object} SyntheticDependencyLocation
* @property {string} name
* @property {number=} index
*/
/** @typedef {SyntheticDependencyLocation | RealDependencyLocation} DependencyLocation */
/** @typedef {string} ExportInfoName */
/**
* @typedef {object} ExportSpec
* @property {ExportInfoName} name the name of the export
* @property {boolean=} canMangle can the export be renamed (defaults to true)
* @property {boolean=} terminalBinding is the export a terminal binding that should be checked for export star conflicts
* @property {(string | ExportSpec)[]=} exports nested exports
* @property {ModuleGraphConnection=} from when reexported: from which module
* @property {string[] | null=} export when reexported: from which export
* @property {number=} priority when reexported: with which priority
* @property {boolean=} hidden export is not visible, because another export blends over it
*/
/** @typedef {Set<string>} ExportsSpecExcludeExports */
/**
* @typedef {object} ExportsSpec
* @property {(string | ExportSpec)[] | true | null} exports exported names, true for unknown exports or null for no exports
* @property {ExportsSpecExcludeExports=} excludeExports when exports = true, list of unaffected exports
* @property {(Set<string> | null)=} hideExports list of maybe prior exposed, but now hidden exports
* @property {ModuleGraphConnection=} from when reexported: from which module
* @property {number=} priority when reexported: with which priority
* @property {boolean=} canMangle can the export be renamed (defaults to true)
* @property {boolean=} terminalBinding are the exports terminal bindings that should be checked for export star conflicts
* @property {Module[]=} dependencies module on which the result depends on
*/
/**
* @typedef {object} ReferencedExport
* @property {string[]} name name of the referenced export
* @property {boolean=} canMangle when false, referenced export can not be mangled, defaults to true
*/
/** @typedef {string[][]} RawReferencedExports */
/** @typedef {(string[] | ReferencedExport)[]} ReferencedExports */
/** @typedef {(moduleGraphConnection: ModuleGraphConnection, runtime: RuntimeSpec) => ConnectionState} GetConditionFn */
const TRANSITIVE = Symbol("transitive");
// Lazily-created singleton stand-in module used for ignored dependencies.
// require() is deferred into the memoized factory to avoid a circular
// dependency with RawModule at load time.
const getIgnoredModule = memoize(() => {
	const RawModule = require("./RawModule");
	// Local `module` intentionally shadows the CommonJS `module` here.
	const module = new RawModule("/* (ignored) */", "ignored", "(ignored)");
	module.factoryMeta = { sideEffectFree: true };
	return module;
});
class Dependency {
	constructor() {
		/** @type {Module | undefined} */
		this._parentModule = undefined;
		/** @type {DependenciesBlock | undefined} */
		this._parentDependenciesBlock = undefined;
		/** @type {number} */
		this._parentDependenciesBlockIndex = -1;
		// TODO check if this can be moved into ModuleDependency
		/** @type {boolean} */
		this.weak = false;
		// TODO check if this can be moved into ModuleDependency
		/** @type {boolean | undefined} */
		this.optional = false;
		// The source location is flattened into scalar fields (start/end
		// line+column) to keep per-instance memory low; the object form is
		// built lazily and cached in _loc.
		this._locSL = 0;
		this._locSC = 0;
		this._locEL = 0;
		this._locEC = 0;
		/** @type {undefined | number} */
		this._locI = undefined;
		/** @type {undefined | string} */
		this._locN = undefined;
		/** @type {undefined | DependencyLocation} */
		this._loc = undefined;
	}

	/**
	 * @returns {string} a display name for the type of dependency
	 */
	get type() {
		return "unknown";
	}

	/**
	 * @returns {string} a dependency category, typical categories are "commonjs", "amd", "esm"
	 */
	get category() {
		return "unknown";
	}

	/**
	 * @returns {DependencyLocation} location
	 */
	get loc() {
		// Reassemble the location object from the flattened fields and cache it.
		if (this._loc !== undefined) return this._loc;
		/** @type {SyntheticDependencyLocation & RealDependencyLocation} */
		const loc = {};
		if (this._locSL > 0) {
			loc.start = { line: this._locSL, column: this._locSC };
		}
		if (this._locEL > 0) {
			loc.end = { line: this._locEL, column: this._locEC };
		}
		if (this._locN !== undefined) {
			loc.name = this._locN;
		}
		if (this._locI !== undefined) {
			loc.index = this._locI;
		}
		return (this._loc = loc);
	}

	set loc(loc) {
		// Decompose the object form into the flattened fields; keep the
		// original object as the cache so `get loc` returns it unchanged.
		if ("start" in loc && typeof loc.start === "object") {
			this._locSL = loc.start.line || 0;
			this._locSC = loc.start.column || 0;
		} else {
			this._locSL = 0;
			this._locSC = 0;
		}
		if ("end" in loc && typeof loc.end === "object") {
			this._locEL = loc.end.line || 0;
			this._locEC = loc.end.column || 0;
		} else {
			this._locEL = 0;
			this._locEC = 0;
		}
		this._locI = "index" in loc ? loc.index : undefined;
		this._locN = "name" in loc ? loc.name : undefined;
		this._loc = loc;
	}

	/**
	 * Set the location directly from scalar coordinates (no object allocation).
	 * @param {number} startLine start line
	 * @param {number} startColumn start column
	 * @param {number} endLine end line
	 * @param {number} endColumn end column
	 */
	setLoc(startLine, startColumn, endLine, endColumn) {
		this._locSL = startLine;
		this._locSC = startColumn;
		this._locEL = endLine;
		this._locEC = endColumn;
		this._locI = undefined;
		this._locN = undefined;
		this._loc = undefined;
	}

	/**
	 * @returns {string | undefined} a request context
	 */
	getContext() {
		return undefined;
	}

	/**
	 * @returns {string | null} an identifier to merge equal requests
	 */
	getResourceIdentifier() {
		return null;
	}

	/**
	 * @returns {boolean | TRANSITIVE} true, when changes to the referenced module could affect the referencing module; TRANSITIVE, when changes to the referenced module could affect referencing modules of the referencing module
	 */
	couldAffectReferencingModule() {
		return TRANSITIVE;
	}

	/**
	 * Returns the referenced module and export
	 * @deprecated
	 * @param {ModuleGraph} moduleGraph module graph
	 * @returns {never} throws error
	 */
	getReference(moduleGraph) {
		throw new Error(
			"Dependency.getReference was removed in favor of Dependency.getReferencedExports, ModuleGraph.getModule and ModuleGraph.getConnection().active"
		);
	}

	/**
	 * Returns list of exports referenced by this dependency
	 * @param {ModuleGraph} moduleGraph module graph
	 * @param {RuntimeSpec} runtime the runtime for which the module is analysed
	 * @returns {ReferencedExports} referenced exports
	 */
	getReferencedExports(moduleGraph, runtime) {
		return Dependency.EXPORTS_OBJECT_REFERENCED;
	}

	/**
	 * @param {ModuleGraph} moduleGraph module graph
	 * @returns {null | false | GetConditionFn} function to determine if the connection is active
	 */
	getCondition(moduleGraph) {
		return null;
	}

	/**
	 * Returns the exported names
	 * @param {ModuleGraph} moduleGraph module graph
	 * @returns {ExportsSpec | undefined} export names
	 */
	getExports(moduleGraph) {
		return undefined;
	}

	/**
	 * Returns warnings
	 * @param {ModuleGraph} moduleGraph module graph
	 * @returns {WebpackError[] | null | undefined} warnings
	 */
	getWarnings(moduleGraph) {
		return null;
	}

	/**
	 * Returns errors
	 * @param {ModuleGraph} moduleGraph module graph
	 * @returns {WebpackError[] | null | undefined} errors
	 */
	getErrors(moduleGraph) {
		return null;
	}

	/**
	 * Update the hash (no-op in the base class; subclasses mix in state that
	 * affects generated code)
	 * @param {Hash} hash hash to be updated
	 * @param {UpdateHashContext} context context
	 * @returns {void}
	 */
	updateHash(hash, context) {}

	/**
	 * implement this method to allow the occurrence order plugin to count correctly
	 * @returns {number} count how often the id is used in this dependency
	 */
	getNumberOfIdOccurrences() {
		return 1;
	}

	/**
	 * @param {ModuleGraph} moduleGraph the module graph
	 * @returns {ConnectionState} how this dependency connects the module to referencing modules
	 */
	getModuleEvaluationSideEffectsState(moduleGraph) {
		return true;
	}

	/**
	 * @param {string} context context directory
	 * @returns {Module} ignored module
	 */
	createIgnoredModule(context) {
		return getIgnoredModule();
	}

	/**
	 * Note: the _loc cache is intentionally not serialized; it is rebuilt
	 * lazily from the scalar fields after deserialization.
	 * @param {ObjectSerializerContext} context context
	 */
	serialize({ write }) {
		write(this.weak);
		write(this.optional);
		write(this._locSL);
		write(this._locSC);
		write(this._locEL);
		write(this._locEC);
		write(this._locI);
		write(this._locN);
	}

	/**
	 * @param {ObjectDeserializerContext} context context
	 */
	deserialize({ read }) {
		this.weak = read();
		this.optional = read();
		this._locSL = read();
		this._locSC = read();
		this._locEL = read();
		this._locEC = read();
		this._locI = read();
		this._locN = read();
	}
}
// Shared sentinel: this dependency references nothing from the target module.
/** @type {RawReferencedExports} */
Dependency.NO_EXPORTS_REFERENCED = [];
// Shared sentinel: the whole exports object is referenced.
/** @type {RawReferencedExports} */
Dependency.EXPORTS_OBJECT_REFERENCED = [[]];

// TODO remove in webpack 6
Object.defineProperty(Dependency.prototype, "module", {
	/**
	 * @deprecated
	 * @returns {EXPECTED_ANY} throws
	 */
	get() {
		throw new Error(
			"module property was removed from Dependency (use compilation.moduleGraph.getModule(dependency) instead)"
		);
	},

	/**
	 * @deprecated
	 * @returns {never} throws
	 */
	set() {
		throw new Error(
			"module property was removed from Dependency (use compilation.moduleGraph.updateModule(dependency, module) instead)"
		);
	}
});

/**
 * @param {Dependency} dependency dep
 * @returns {boolean} true if the dependency is a low priority dependency
 */
Dependency.isLowPriorityDependency = (dependency) =>
	/** @type {ModuleDependency} */ (dependency).sourceOrder === Infinity;

// TODO remove in webpack 6
Object.defineProperty(Dependency.prototype, "disconnect", {
	/**
	 * @deprecated
	 * @returns {EXPECTED_ANY} throws
	 */
	get() {
		throw new Error(
			"disconnect was removed from Dependency (Dependency no longer carries graph specific information)"
		);
	}
});

Dependency.TRANSITIVE = TRANSITIVE;

module.exports = Dependency;
---
date: 2015-10-01
---
Here is the content. | unknown | github | https://github.com/jekyll/jekyll | test/source/_dates/date_without_time.md |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add a column to track the encryption state of the 'Extra' field in connection
Revision ID: bba5a7cfc896
Revises: bbc73705a13e
Create Date: 2016-01-29 15:10:32.656425
"""
# revision identifiers, used by Alembic.
revision = 'bba5a7cfc896'        # this migration's id
down_revision = 'bbc73705a13e'   # migration applied immediately before this one
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the boolean ``is_extra_encrypted`` column to ``connection``."""
    column = sa.Column('is_extra_encrypted', sa.Boolean, default=False)
    op.add_column('connection', column)
def downgrade():
    """Drop the ``is_extra_encrypted`` column from ``connection``."""
    op.drop_column('connection', 'is_extra_encrypted')
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_USE_PRIVATE_THREAD_POOL_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_USE_PRIVATE_THREAD_POOL_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
// This optimization creates a private thread pool for the input pipeline, so
// tf.data work does not compete with the default inter-op thread pool.
class UsePrivateThreadPool : public TFDataOptimizerBase {
 public:
  UsePrivateThreadPool() = default;
  ~UsePrivateThreadPool() override = default;

  // Name under which this rewrite is registered with the tf.data
  // optimization framework.
  std::string name() const override { return "use_private_thread_pool"; };

  // This rewrite does not read or modify the function library.
  bool UsesFunctionLibrary() const override { return false; }

  // No configuration is consumed; initialization always succeeds.
  absl::Status Init(
      const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
    return absl::OkStatus();
  }

  // Rewrites `item`'s graph into `output` and records rewrite statistics in
  // `stats`. Implementation lives in the corresponding .cc file.
  absl::Status OptimizeAndCollectStats(Cluster* cluster,
                                       const GrapplerItem& item,
                                       GraphDef* output,
                                       OptimizationStats* stats) override;
};
} // namespace grappler
} // namespace tensorflow
#endif // TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_USE_PRIVATE_THREAD_POOL_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/grappler/optimizers/data/use_private_thread_pool.h |
#ifndef INTERNAL_ENCODING_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_ENCODING_H
/**
* @author Ruby developers <ruby-core@ruby-lang.org>
* @copyright This file is a part of the programming language Ruby.
* Permission is hereby granted, to either redistribute and/or
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
* @brief Internal header for Encoding.
*/
#include "ruby/ruby.h" /* for ID */
#include "ruby/encoding.h" /* for rb_encoding */
#define rb_is_usascii_enc(enc) ((enc) == rb_usascii_encoding())
#define rb_is_ascii8bit_enc(enc) ((enc) == rb_ascii8bit_encoding())
#define rb_is_locale_enc(enc) ((enc) == rb_locale_encoding())
/* encoding.c */
ID rb_id_encoding(void);
const char * rb_enc_inspect_name(rb_encoding *enc);
rb_encoding *rb_enc_get_from_index(int index);
rb_encoding *rb_enc_check_str(VALUE str1, VALUE str2);
int rb_encdb_replicate(const char *alias, const char *orig);
int rb_encdb_alias(const char *alias, const char *orig);
int rb_enc_autoload(rb_encoding *enc);
bool rb_enc_autoload_p(rb_encoding *enc);
int rb_encdb_dummy(const char *name);
void rb_encdb_declare(const char *name);
void rb_enc_set_base(const char *name, const char *orig);
int rb_enc_set_dummy(int index);
void rb_enc_raw_set(VALUE obj, rb_encoding *enc);
int rb_enc_registered(const char *name);
PUREFUNC(int rb_data_is_encoding(VALUE obj));
/* vm.c */
void rb_free_global_enc_table(void);
#endif /* INTERNAL_ENCODING_H */ | c | github | https://github.com/ruby/ruby | internal/encoding.h |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2021-06-15 and added to Hugging Face Transformers on 2023-11-22.*
# UnivNet
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The UnivNet model was proposed in [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://huggingface.co/papers/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim.
The UnivNet model is a generative adversarial network (GAN) trained to synthesize high fidelity speech waveforms. The UnivNet model shared in `transformers` is the *generator*, which maps a conditioning log-mel spectrogram and optional noise sequence to a speech waveform (e.g. a vocoder). Only the generator is required for inference. The *discriminator* used to train the `generator` is not implemented.
The abstract from the paper is the following:
*Most neural vocoders employ band-limited mel-spectrograms to generate waveforms. If full-band spectral features are used as the input, the vocoder can be provided with as much acoustic information as possible. However, in some models employing full-band mel-spectrograms, an over-smoothing problem occurs as part of which non-sharp spectrograms are generated. To address this problem, we propose UnivNet, a neural vocoder that synthesizes high-fidelity waveforms in real time. Inspired by works in the field of voice activity detection, we added a multi-resolution spectrogram discriminator that employs multiple linear spectrogram magnitudes computed using various parameter sets. Using full-band mel-spectrograms as input, we expect to generate high-resolution signals by adding a discriminator that employs spectrograms of multiple resolutions as the input. In an evaluation on a dataset containing information on hundreds of speakers, UnivNet obtained the best objective and subjective results among competing models for both seen and unseen speakers. These results, including the best subjective score for text-to-speech, demonstrate the potential for fast adaptation to new speakers without a need for training from scratch.*
Tips:
- The `noise_sequence` argument for [`UnivNetModel.forward`] should be standard Gaussian noise (such as from `torch.randn`) of shape `([batch_size], noise_length, model.config.model_in_channels)`, where `noise_length` should match the length dimension (dimension 1) of the `input_features` argument. If not supplied, it will be randomly generated; a `torch.Generator` can be supplied to the `generator` argument so that the forward pass can be reproduced. (Note that [`UnivNetFeatureExtractor`] will return generated noise by default, so it shouldn't be necessary to generate `noise_sequence` manually.)
- Padding added by [`UnivNetFeatureExtractor`] can be removed from the [`UnivNetModel`] output through the [`UnivNetFeatureExtractor.batch_decode`] method, as shown in the usage example below.
- Padding the end of each waveform with silence can reduce artifacts at the end of the generated audio sample. This can be done by supplying `pad_end = True` to [`UnivNetFeatureExtractor.__call__`]. See [this issue](https://github.com/seungwonpark/melgan/issues/8) for more details.
Usage Example:
```python
import torch
from scipy.io.wavfile import write
from datasets import Audio, load_dataset
from transformers import UnivNetFeatureExtractor, UnivNetModel
model_id_or_path = "dg845/univnet-dev"
model = UnivNetModel.from_pretrained(model_id_or_path)
feature_extractor = UnivNetFeatureExtractor.from_pretrained(model_id_or_path)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# Resample the audio to the model and feature extractor's sampling rate.
ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
# Pad the end of the converted waveforms to reduce artifacts at the end of the output audio samples.
inputs = feature_extractor(
ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], pad_end=True, return_tensors="pt"
)
with torch.no_grad():
audio = model(**inputs)
# Remove the extra padding at the end of the output.
audio = feature_extractor.batch_decode(**audio)[0]
# Convert to wav file
write("sample_audio.wav", feature_extractor.sampling_rate, audio)
```
This model was contributed by [dg845](https://huggingface.co/dg845).
To the best of my knowledge, there is no official code release, but an unofficial implementation can be found at [maum-ai/univnet](https://github.com/maum-ai/univnet) with pretrained checkpoints [here](https://github.com/maum-ai/univnet#pre-trained-model).
## UnivNetConfig
[[autodoc]] UnivNetConfig
## UnivNetFeatureExtractor
[[autodoc]] UnivNetFeatureExtractor
- __call__
## UnivNetModel
[[autodoc]] UnivNetModel
- forward | unknown | github | https://github.com/huggingface/transformers | docs/source/en/model_doc/univnet.md |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnszone(base_resource) :
""" Configuration for DNS zone resource. """
def __init__(self) :
    # Writable configuration attributes of the NITRO dnszone resource.
    self._zonename = ""        # name of the DNS zone (object identifier)
    self._proxymode = ""       # YES/NO: deploy the zone in proxy mode
    self._dnssecoffload = ""   # ENABLED/DISABLED: DNSSEC offload
    self._nsec = ""            # ENABLED/DISABLED: NSEC generation
    self._keyname = []         # DNS key pair name(s) used to sign the zone
    # Read-only attributes reported back by the appliance.
    self._type = ""            # zone type filter: ALL, ADNS or PROXY
    self._flags = 0            # display flags
    self.___count = 0          # result count returned by count() queries
# --- zonename: read/write, the NITRO object identifier for this resource ---
@property
def zonename(self) :
    ur"""Name of the zone to create.<br/>Minimum length = 1.
    """
    try :
        return self._zonename
    except Exception as e:
        raise e

@zonename.setter
def zonename(self, zonename) :
    ur"""Name of the zone to create.<br/>Minimum length = 1
    """
    try :
        self._zonename = zonename
    except Exception as e:
        raise e

# --- proxymode: read/write, YES/NO ---
@property
def proxymode(self) :
    ur"""Deploy the zone in proxy mode. Enable in the following scenarios:
    * The load balanced DNS servers are authoritative for the zone and all resource records that are part of the zone.
    * The load balanced DNS servers are authoritative for the zone, but the NetScaler appliance owns a subset of the resource records that belong to the zone (partial zone ownership configuration). Typically seen in global server load balancing (GSLB) configurations, in which the appliance responds authoritatively to queries for GSLB domain names but forwards queries for other domain names in the zone to the load balanced servers.
    In either scenario, do not create the zone's Start of Authority (SOA) and name server (NS) resource records on the appliance.
    Disable if the appliance is authoritative for the zone, but make sure that you have created the SOA and NS records on the appliance before you create the zone.<br/>Default value: ENABLED<br/>Possible values = YES, NO.
    """
    try :
        return self._proxymode
    except Exception as e:
        raise e

@proxymode.setter
def proxymode(self, proxymode) :
    ur"""Deploy the zone in proxy mode. Enable in the following scenarios:
    * The load balanced DNS servers are authoritative for the zone and all resource records that are part of the zone.
    * The load balanced DNS servers are authoritative for the zone, but the NetScaler appliance owns a subset of the resource records that belong to the zone (partial zone ownership configuration). Typically seen in global server load balancing (GSLB) configurations, in which the appliance responds authoritatively to queries for GSLB domain names but forwards queries for other domain names in the zone to the load balanced servers.
    In either scenario, do not create the zone's Start of Authority (SOA) and name server (NS) resource records on the appliance.
    Disable if the appliance is authoritative for the zone, but make sure that you have created the SOA and NS records on the appliance before you create the zone.<br/>Default value: ENABLED<br/>Possible values = YES, NO
    """
    try :
        self._proxymode = proxymode
    except Exception as e:
        raise e

# --- dnssecoffload: read/write, ENABLED/DISABLED ---
@property
def dnssecoffload(self) :
    ur"""Enable dnssec offload for this zone.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
    """
    try :
        return self._dnssecoffload
    except Exception as e:
        raise e

@dnssecoffload.setter
def dnssecoffload(self, dnssecoffload) :
    ur"""Enable dnssec offload for this zone.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
    """
    try :
        self._dnssecoffload = dnssecoffload
    except Exception as e:
        raise e

# --- nsec: read/write, ENABLED/DISABLED ---
@property
def nsec(self) :
    ur"""Enable nsec generation for dnssec offload.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
    """
    try :
        return self._nsec
    except Exception as e:
        raise e

@nsec.setter
def nsec(self, nsec) :
    ur"""Enable nsec generation for dnssec offload.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
    """
    try :
        self._nsec = nsec
    except Exception as e:
        raise e

# --- keyname: read/write, list of signing key names (used by sign/unsign) ---
@property
def keyname(self) :
    ur"""Name of the public/private DNS key pair with which to sign the zone. You can sign a zone with up to four keys.<br/>Minimum length = 1.
    """
    try :
        return self._keyname
    except Exception as e:
        raise e

@keyname.setter
def keyname(self, keyname) :
    ur"""Name of the public/private DNS key pair with which to sign the zone. You can sign a zone with up to four keys.<br/>Minimum length = 1
    """
    try :
        self._keyname = keyname
    except Exception as e:
        raise e

# --- type: read/write display filter (ALL/ADNS/PROXY) ---
@property
def type(self) :
    ur"""Type of zone to display. Mutually exclusive with the DNS Zone (zoneName) parameter. Available settings function as follows:
    * ADNS - Display all the zones for which the NetScaler appliance is authoritative.
    * PROXY - Display all the zones for which the NetScaler appliance is functioning as a proxy server.
    * ALL - Display all the zones configured on the appliance.<br/>Possible values = ALL, ADNS, PROXY.
    """
    try :
        return self._type
    except Exception as e:
        raise e

@type.setter
def type(self, type) :
    ur"""Type of zone to display. Mutually exclusive with the DNS Zone (zoneName) parameter. Available settings function as follows:
    * ADNS - Display all the zones for which the NetScaler appliance is authoritative.
    * PROXY - Display all the zones for which the NetScaler appliance is functioning as a proxy server.
    * ALL - Display all the zones configured on the appliance.<br/>Possible values = ALL, ADNS, PROXY
    """
    try :
        self._type = type
    except Exception as e:
        raise e

# --- flags: read-only display flags (no setter) ---
@property
def flags(self) :
    ur"""Flags controlling display.
    """
    try :
        return self._flags
    except Exception as e:
        raise e
def _get_nitro_response(self, service, response) :
    ur""" converts nitro response into object and returns the object array in case of get request.
    """
    try :
        result = service.payload_formatter.string_to_resource(dnszone_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # Error code 444 indicates an expired session; drop the cached
            # session before surfacing the error.
            if (result.errorcode == 444) :
                service.clear_session(self)
            # Only severity "ERROR" (or a missing severity) is fatal;
            # other severities (e.g. warnings) fall through to the return.
            if result.severity :
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.dnszone
    except Exception as e :
        raise e
def _get_object_name(self) :
    """Return the object-identifier value (the zone name), or None if unset."""
    try :
        if self.zonename is None :
            return None
        return str(self.zonename)
    except Exception as e :
        raise e
@classmethod
def add(cls, client, resource) :
    """Use this API to add dnszone.

    `resource` may be a single dnszone-like object or a list of them;
    list input is submitted as a single bulk request.
    """
    try :
        if type(resource) is list :
            if resource and len(resource) > 0 :
                payload = []
                for item in resource :
                    entry = dnszone()
                    entry.zonename = item.zonename
                    entry.proxymode = item.proxymode
                    entry.dnssecoffload = item.dnssecoffload
                    entry.nsec = item.nsec
                    payload.append(entry)
                return cls.add_bulk_request(client, payload)
        else :
            entry = dnszone()
            entry.zonename = resource.zonename
            entry.proxymode = resource.proxymode
            entry.dnssecoffload = resource.dnssecoffload
            entry.nsec = resource.nsec
            return entry.add_resource(client)
    except Exception as e :
        raise e
@classmethod
def update(cls, client, resource) :
    """Use this API to update dnszone.

    `resource` may be a single dnszone-like object or a list of them;
    list input is submitted as a single bulk request.
    """
    try :
        if type(resource) is list :
            if resource and len(resource) > 0 :
                payload = []
                for item in resource :
                    entry = dnszone()
                    entry.zonename = item.zonename
                    entry.proxymode = item.proxymode
                    entry.dnssecoffload = item.dnssecoffload
                    entry.nsec = item.nsec
                    payload.append(entry)
                return cls.update_bulk_request(client, payload)
        else :
            entry = dnszone()
            entry.zonename = resource.zonename
            entry.proxymode = resource.proxymode
            entry.dnssecoffload = resource.dnssecoffload
            entry.nsec = resource.nsec
            return entry.update_resource(client)
    except Exception as e :
        raise e
@classmethod
def unset(cls, client, resource, args) :
    ur""" Use this API to unset the properties of dnszone resource.
    Properties that need to be unset are specified in args array.
    """
    try :
        if type(resource) is not list :
            unsetresource = dnszone()
            # `resource` may be a bare zone name or a dnszone object.
            if type(resource) != type(unsetresource):
                unsetresource.zonename = resource
            else :
                unsetresource.zonename = resource.zonename
            return unsetresource.unset_resource(client, args)
        else :
            # Bulk form: a list of zone names, or a list of dnszone objects.
            if type(resource[0]) != cls :
                if (resource and len(resource) > 0) :
                    unsetresources = [ dnszone() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].zonename = resource[i]
            else :
                if (resource and len(resource) > 0) :
                    unsetresources = [ dnszone() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].zonename = resource[i].zonename
            result = cls.unset_bulk_request(client, unsetresources, args)
            return result
    except Exception as e :
        raise e
@classmethod
def delete(cls, client, resource) :
    ur""" Use this API to delete dnszone.
    """
    try :
        if type(resource) is not list :
            deleteresource = dnszone()
            # `resource` may be a bare zone name or a dnszone object.
            if type(resource) != type(deleteresource):
                deleteresource.zonename = resource
            else :
                deleteresource.zonename = resource.zonename
            return deleteresource.delete_resource(client)
        else :
            # Bulk form: a list of zone names, or a list of dnszone objects.
            if type(resource[0]) != cls :
                if (resource and len(resource) > 0) :
                    deleteresources = [ dnszone() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].zonename = resource[i]
            else :
                if (resource and len(resource) > 0) :
                    deleteresources = [ dnszone() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].zonename = resource[i].zonename
            result = cls.delete_bulk_request(client, deleteresources)
            return result
    except Exception as e :
        raise e
@classmethod
def sign(cls, client, resource) :
    """Use this API to sign dnszone with its configured DNS key pair(s)."""
    try :
        if type(resource) is not list :
            request = dnszone()
            request.zonename = resource.zonename
            request.keyname = resource.keyname
            return request.perform_operation(client, "sign")
        if resource and len(resource) > 0 :
            requests = []
            for item in resource :
                request = dnszone()
                request.zonename = item.zonename
                request.keyname = item.keyname
                requests.append(request)
            return cls.perform_operation_bulk_request(client, requests, "sign")
    except Exception as e :
        raise e
@classmethod
def unsign(cls, client, resource) :
    """Use this API to unsign dnszone (remove DNSSEC signing)."""
    try :
        if type(resource) is not list :
            request = dnszone()
            request.zonename = resource.zonename
            request.keyname = resource.keyname
            return request.perform_operation(client, "unsign")
        if resource and len(resource) > 0 :
            requests = []
            for item in resource :
                request = dnszone()
                request.zonename = item.zonename
                request.keyname = item.keyname
                requests.append(request)
            return cls.perform_operation_bulk_request(client, requests, "unsign")
    except Exception as e :
        raise e
@classmethod
def get(cls, client, name="", option_="") :
    ur""" Use this API to fetch all the dnszone resources that are configured on netscaler.
    """
    try :
        # No name given: fetch every configured dnszone resource.
        if not name :
            obj = dnszone()
            response = obj.get_resources(client, option_)
        else :
            # `name` is a zone name (or list of zone names), not a dnszone
            # object.
            if type(name) != cls :
                if type(name) is not list :
                    obj = dnszone()
                    obj.zonename = name
                    response = obj.get_resource(client, option_)
                else :
                    # One GET per zone name; responses keep input order.
                    if name and len(name) > 0 :
                        response = [dnszone() for _ in range(len(name))]
                        obj = [dnszone() for _ in range(len(name))]
                        for i in range(len(name)) :
                            obj[i] = dnszone()
                            obj[i].zonename = name[i]
                            response[i] = obj[i].get_resource(client, option_)
        return response
    except Exception as e :
        raise e
@classmethod
def get_args(cls, client, args) :
    """Fetch all dnszone resources, forwarding extra fetch arguments.

    Uses dnszone_args, which is a way to provide additional arguments
    while fetching the resources.
    """
    try :
        fetcher = dnszone()
        opts = options()
        opts.args = nitro_util.object_to_string_withoutquotes(args)
        return fetcher.get_resources(client, opts)
    except Exception as e :
        raise e
@classmethod
def get_filtered(cls, client, filter_) :
    """Fetch the subset of dnszone resources matching `filter_`.

    The filter string should be in JSON format, e.g.
    "port:80,servicetype:HTTP".
    """
    try :
        fetcher = dnszone()
        opts = options()
        opts.filter = filter_
        return fetcher.getfiltered(client, opts)
    except Exception as e :
        raise e
@classmethod
def count(cls, client) :
    """Return the number of dnszone resources configured on NetScaler."""
    try :
        fetcher = dnszone()
        opts = options()
        opts.count = True
        rows = fetcher.get_resources(client, opts)
        # The count is carried on the first row's name-mangled attribute.
        if rows :
            return rows[0].__dict__['___count']
        return 0
    except Exception as e :
        raise e
@classmethod
def count_filtered(cls, client, filter_) :
    """Return the number of dnszone resources matching `filter_`.

    The filter string should be in JSON format, e.g.
    "port:80,servicetype:HTTP".
    """
    try :
        fetcher = dnszone()
        opts = options()
        opts.count = True
        opts.filter = filter_
        rows = fetcher.getfiltered(client, opts)
        # The count is carried on the first row's name-mangled attribute.
        if rows :
            return rows[0].__dict__['___count']
        return 0
    except Exception as e :
        raise e
# Enumerations of the string constants accepted by the NITRO API for the
# corresponding dnszone attributes.
class Type:
    ALL = "ALL"
    ADNS = "ADNS"
    PROXY = "PROXY"

class Proxymode:
    YES = "YES"
    NO = "NO"

class Nsec:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Dnssecoffload:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class dnszone_response(base_response) :
    """Envelope for NITRO responses that carry dnszone payloads."""
    def __init__(self, length=1) :
        self.dnszone = []      # parsed dnszone resources
        self.errorcode = 0     # NITRO error code (0 = success)
        self.message = ""      # human-readable status message
        self.severity = ""     # e.g. "ERROR"; empty when not reported
        self.sessionid = ""    # NITRO session identifier
        # Pre-size the payload list to the expected number of resources.
        self.dnszone = [dnszone() for _ in range(length)]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-09 10:42
from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Align workflowlevel2 field definitions (auto-generated AlterFields).

    Adjusts help text/verbose names on actual_cost (including its
    historical-record mirror) and sets SET_NULL delete behaviour on several
    nullable foreign keys of workflowlevel2.
    """

    dependencies = [
        ('workflow', '0003_auto_20171101_0151'),
    ]

    operations = [
        # actual_cost on the django-simple-history mirror table.
        migrations.AlterField(
            model_name='historicalworkflowlevel2',
            name='actual_cost',
            field=models.DecimalField(blank=True, decimal_places=2, default=Decimal('0.00'), help_text='What was the actual final cost? This should match any financial documentation you have in the file. It should be completely documented and verifiable by finance and any potential audit', max_digits=20, verbose_name='Actual Cost'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='actual_cost',
            field=models.DecimalField(blank=True, decimal_places=2, default=Decimal('0.00'), help_text='What was the actual final cost? This should match any financial documentation you have in the file. It should be completely documented and verifiable by finance and any potential audit', max_digits=20, verbose_name='Actual Cost'),
        ),
        # Nullable FKs below use SET_NULL so deleting the target does not
        # cascade-delete workflowlevel2 rows.
        migrations.AlterField(
            model_name='workflowlevel2',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='workflowlevel2', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='donor_currency',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='donor_project', to='workflow.Currency'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='local_currency',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='local_project', to='workflow.Currency'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='milestone',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Milestone'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='office',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Office', verbose_name='Office'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='partners',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Partner', verbose_name='Partners'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='sector',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='workflow2_sector', to='workflow.Sector', verbose_name='Sector'),
        ),
    ]
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"path/filepath"
"regexp"
"sort"
"testing"
"github.com/hashicorp/terraform/internal/command/arguments"
"github.com/hashicorp/terraform/internal/states/statemgr"
)
// testStateBackups returns the backup files found in dir, ordered oldest
// first.
func testStateBackups(t *testing.T, dir string) []string {
	pattern := filepath.Join(dir, "*"+DefaultBackupExtension)
	backups, err := filepath.Glob(pattern)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Lexicographic order matches creation order for these names
	// (presumably because they embed increasing numeric timestamps).
	sort.Strings(backups)

	return backups
}
func TestStateDefaultBackupExtension(t *testing.T) {
	// Work in an empty temporary directory so the default state paths apply.
	tmp := t.TempDir()
	t.Chdir(tmp)

	view := arguments.ViewHuman
	s, err := (&StateMeta{}).State(view)
	if err != nil {
		t.Fatal(err)
	}

	// The default backup path should look like
	// "terraform.tfstate.<digits>.backup".
	backupPath := s.(*statemgr.Filesystem).BackupPath()
	match := regexp.MustCompile(`terraform\.tfstate\.\d+\.backup$`).MatchString
	if !match(backupPath) {
		t.Fatal("Bad backup path:", backupPath)
	}
}
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import Ollama
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Map of deprecated attribute name -> module that now provides it.
DEPRECATED_LOOKUP = {"Ollama": "langchain_community.llms"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): delegates to the importer
    created above so deprecated names resolve lazily, emitting the
    deprecation warning handled by ``create_importer``.
    """
    return _import_attribute(name)
__all__ = [
"Ollama",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/llms/ollama.py |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts Python data into data for Google Visualization API clients.
This library can be used to create a google.visualization.DataTable usable by
visualizations built on the Google Visualization API. Output formats are raw
JSON, JSON response, JavaScript, CSV, and HTML table.
See http://code.google.com/apis/visualization/ for documentation on the
Google Visualization API.
"""
__author__ = "Amit Weinstein, Misha Seltzer, Jacob Baskin"
import cgi
import cStringIO
import csv
import datetime
try:
import json
except ImportError:
import simplejson as json
import types
class DataTableException(Exception):
    """The general exception object thrown by DataTable.

    Raised when supplied data or a table description does not match the
    formats DataTable supports.
    """
    pass
class DataTableJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes date/time/datetime objects as the
    JavaScript ``Date(...)`` strings expected by the Google Visualization
    API (months are zero-based)."""

    def __init__(self):
        # Compact separators; keep non-ASCII characters unescaped.
        json.JSONEncoder.__init__(self, separators=(",", ":"), ensure_ascii=False)

    def default(self, o):
        # Check datetime before date: datetime is a subclass of date.
        if isinstance(o, datetime.datetime):
            fields = [o.year, o.month - 1, o.day, o.hour, o.minute, o.second]
            if o.microsecond:
                # Include milliseconds only when the time has sub-second
                # resolution, to keep the output smaller.
                fields.append(o.microsecond / 1000)
            return "Date(%s)" % ",".join("%d" % f for f in fields)
        if isinstance(o, datetime.date):
            return "Date(%d,%d,%d)" % (o.year, o.month - 1, o.day)
        if isinstance(o, datetime.time):
            return [o.hour, o.minute, o.second]
        return super(DataTableJSONEncoder, self).default(o)
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
"""Initialize the data table from a table schema and (optionally) data.
See the class documentation for more information on table schema and data
values.
Args:
table_description: A table schema, following one of the formats described
in TableDescriptionParser(). Schemas describe the
column names, data types, and labels. See
TableDescriptionParser() for acceptable formats.
data: Optional. If given, fills the table with the given data. The data
structure must be consistent with schema in table_description. See
the class documentation for more information on acceptable data. You
can add data later by calling AppendData().
custom_properties: Optional. A dictionary from string to string that
goes into the table's custom properties. This can be
later changed by changing self.custom_properties.
Raises:
DataTableException: Raised if the data and the description did not match,
or did not use the supported formats.
"""
self.__columns = self.TableDescriptionParser(table_description)
self.__data = []
self.custom_properties = {}
if custom_properties is not None:
self.custom_properties = custom_properties
if data:
self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
"""Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
"""
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], types.StringTypes + (types.NoneType,)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, (int, long, float)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, unicode):
return value
else:
return str(value).decode("utf-8")
elif value_type == "date":
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, unicode):
return value
elif isinstance(value, bool):
return str(value).lower()
else:
return str(value).decode("utf-8")
@staticmethod
def ColumnTypeParser(description):
"""Parses a single column description. Internal helper method.
Args:
description: a column description in the possible formats:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
Returns:
Dictionary with the following keys: id, label, type, and
custom_properties where:
- If label not given, it equals the id.
- If type not given, string is used by default.
- If custom properties are not given, an empty dictionary is used by
default.
Raises:
DataTableException: The column description did not match the RE, or
unsupported type was passed.
"""
if not description:
raise DataTableException("Description error: empty description given")
if not isinstance(description, (types.StringTypes, tuple)):
raise DataTableException("Description error: expected either string or "
"tuple, got %s." % type(description))
if isinstance(description, types.StringTypes):
description = (description,)
# According to the tuple's length, we fill the keys
# We verify everything is of type string
for elem in description[:3]:
if not isinstance(elem, types.StringTypes):
raise DataTableException("Description error: expected tuple of "
"strings, current element of type %s." %
type(elem))
desc_dict = {"id": description[0],
"label": description[0],
"type": "string",
"custom_properties": {}}
if len(description) > 1:
desc_dict["type"] = description[1].lower()
if len(description) > 2:
desc_dict["label"] = description[2]
if len(description) > 3:
if not isinstance(description[3], dict):
raise DataTableException("Description error: expected custom "
"properties of type dict, current element "
"of type %s." % type(description[3]))
desc_dict["custom_properties"] = description[3]
if len(description) > 4:
raise DataTableException("Description error: tuple of length > 4")
if desc_dict["type"] not in ["string", "number", "boolean",
"date", "datetime", "timeofday"]:
raise DataTableException(
"Description error: unsupported type '%s'" % desc_dict["type"])
return desc_dict
  @staticmethod
  def TableDescriptionParser(table_description, depth=0):
    """Parses the table_description object for internal use.

    Parses the user-submitted table description into an internal format used
    by the Python DataTable class. Returns the flat list of parsed columns.

    Args:
      table_description: A description of the table which should comply
          with one of the formats described below.
      depth: Optional. The depth of the first level in the current
          description. Used by recursive calls to this function.

    Returns:
      List of columns, where each column is represented by a dictionary with
      the keys id, label, type, depth, container, which mean the following:
      - id: the id of the column
      - label: the display name of the column
      - type: The datatype of the elements in this column. Allowed types are
        described in ColumnTypeParser().
      - depth: The depth of this column in the table description
      - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
      - custom_properties: The custom properties for this column.
      The returned description is flattened regardless of how it was given.

    Raises:
      DataTableException: Error in a column description or in the description
          structure.

    Examples:
      A column description can be of the following forms:
       'id'
       ('id',)
       ('id', 'type')
       ('id', 'type', 'label')
       ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
       or as a dictionary:
       'id': 'type'
       'id': ('type',)
       'id': ('type', 'label')
       'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
      If the type is not specified, we treat it as string.
      If no specific label is given, the label is simply the id.
      If no custom properties are given, we use an empty dictionary.

      input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
      output: [{'id': 'a', 'label': 'a', 'type': 'date',
                'depth': 0, 'container': 'iter', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'timeofday',
                'depth': 0, 'container': 'iter',
                'custom_properties': {'foo': 'bar'}}]

      input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
      output: [{'id': 'a', 'label': 'a', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'iter', 'custom_properties': {}},
               {'id': 'c', 'label': 'column c', 'type': 'string',
                'depth': 1, 'container': 'iter', 'custom_properties': {}}]

      input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'c', 'type': 'string',
                'depth': 1, 'container': 'dict', 'custom_properties': {}}]

      input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
      output: [{'id': 'w', 'label': 'word', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'count', 'type': 'number',
                'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

      input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
                'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
                'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
    """
    # For the recursion step, we check for a scalar object (string or tuple)
    if isinstance(table_description, (types.StringTypes, tuple)):
      parsed_col = DataTable.ColumnTypeParser(table_description)
      parsed_col["depth"] = depth
      parsed_col["container"] = "scalar"
      return [parsed_col]

    # Since it is not scalar, table_description must be iterable.
    if not hasattr(table_description, "__iter__"):
      raise DataTableException("Expected an iterable object, got %s" %
                               type(table_description))
    if not isinstance(table_description, dict):
      # We expect a non-dictionary iterable item: every element is a full
      # column description at the current depth.
      columns = []
      for desc in table_description:
        parsed_col = DataTable.ColumnTypeParser(desc)
        parsed_col["depth"] = depth
        parsed_col["container"] = "iter"
        columns.append(parsed_col)
      if not columns:
        raise DataTableException("Description iterable objects should not"
                                 " be empty.")
      return columns
    # The other case is a dictionary
    if not table_description:
      raise DataTableException("Empty dictionaries are not allowed inside"
                               " description")

    # To differentiate between the two cases of more levels below or this is
    # the most inner dictionary, we consider the number of keys (more than one
    # key is indication for most inner dictionary) and the type of the key and
    # value in case of only 1 key (if the type of key is string and the type of
    # the value is a tuple of 0-3 items, we assume this is the most inner
    # dictionary).
    # NOTE: this way of differentiating might create ambiguity. See docs.
    # NOTE(review): keys()[0] / values()[0] below rely on Python 2 dicts
    # returning plain lists from keys()/values().
    if (len(table_description) != 1 or
        (isinstance(table_description.keys()[0], types.StringTypes) and
         isinstance(table_description.values()[0], tuple) and
         len(table_description.values()[0]) < 4)):
      # This is the most inner dictionary. Parsing types.
      columns = []
      # We sort the items, equivalent to sort the keys since they are unique
      for key, value in sorted(table_description.items()):
        # We parse the column type as (key, type) or (key, type, label) using
        # ColumnTypeParser.
        if isinstance(value, tuple):
          parsed_col = DataTable.ColumnTypeParser((key,) + value)
        else:
          parsed_col = DataTable.ColumnTypeParser((key, value))
        parsed_col["depth"] = depth
        parsed_col["container"] = "dict"
        columns.append(parsed_col)
      return columns
    # This is an outer dictionary, must have at most one key. The single key
    # describes this level's column; the value is parsed recursively one
    # level deeper and appended after it.
    parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
    parsed_col["depth"] = depth
    parsed_col["container"] = "dict"
    return ([parsed_col] +
            DataTable.TableDescriptionParser(table_description.values()[0],
                                             depth=depth + 1))
  @property
  def columns(self):
    """The flat list of parsed column descriptions.

    Each element is a dictionary with keys id, label, type, depth,
    container and custom_properties, as produced by
    TableDescriptionParser() in __init__.
    """
    return self.__columns
  def NumberOfRows(self):
    """Returns the number of data rows currently stored in the table."""
    return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
  def _InnerAppendData(self, prev_col_values, data, col_index):
    """Inner function to assist LoadData.

    Recursively walks ``data`` level by level, accumulating cell values in
    prev_col_values and appending a finished (values, custom_properties)
    row tuple to self.__data whenever a leaf level is reached.

    Args:
      prev_col_values: Tuple of (dict mapping column id -> value gathered so
          far, row custom properties).
      data: The remaining (possibly nested) data for this level.
      col_index: Index into self.__columns of the column this level fills.

    Raises:
      DataTableException: The data does not match the table description.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
      raise DataTableException("The data does not match description, too deep")

    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
      prev_col_values[0][self.__columns[col_index]["id"]] = data
      self.__data.append(prev_col_values)
      return

    if self.__columns[col_index]["container"] == "iter":
      # Lists/tuples are accepted here, but not dictionaries (those belong
      # to "dict" containers below).
      if not hasattr(data, "__iter__") or isinstance(data, dict):
        raise DataTableException("Expected iterable object, got %s" %
                                 type(data))
      # We only need to insert the rest of the columns
      # If there are less items than expected, we only add what there is.
      for value in data:
        if col_index >= len(self.__columns):
          raise DataTableException("Too many elements given in data")
        prev_col_values[0][self.__columns[col_index]["id"]] = value
        col_index += 1
      self.__data.append(prev_col_values)
      return

    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
      raise DataTableException("Expected dictionary at current level, got %s" %
                               type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
      # We need to add the keys in the dictionary as they are; missing
      # columns are simply left out of the row.
      for col in self.__columns[col_index:]:
        if col["id"] in data:
          prev_col_values[0][col["id"]] = data[col["id"]]
      self.__data.append(prev_col_values)
      return

    # We have a dictionary in an inner depth level.
    if not data.keys():
      # In case this is an empty dictionary, we add a record with the columns
      # filled only until this point.
      self.__data.append(prev_col_values)
    else:
      # Each key becomes this column's value in its own copy of the row;
      # recursion continues into the corresponding value one column deeper.
      for key in sorted(data):
        col_values = dict(prev_col_values[0])
        col_values[self.__columns[col_index]["id"]] = key
        self._InnerAppendData((col_values, prev_col_values[1]),
                              data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
proper_sort_keys = []
if isinstance(order_by, types.StringTypes) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in order_by:
if isinstance(key, types.StringTypes):
proper_sort_keys.append((key, 1))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
proper_sort_keys.append((key[0], key[1].lower() == "asc" and 1 or -1))
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
def SortCmpFunc(row1, row2):
"""cmp function for sorted. Compares by keys and 'asc'/'desc' keywords."""
for key, asc_mult in proper_sort_keys:
cmp_result = asc_mult * cmp(row1[0].get(key), row2[0].get(key))
if cmp_result:
return cmp_result
return 0
return sorted(self.__data, cmp=SortCmpFunc)
  def ToJSCode(self, name, columns_order=None, order_by=()):
    """Writes the data table as a JS code string.

    This method writes a string of JS code that can be run to
    generate a DataTable with the specified data. Typically used for debugging
    only.

    Args:
      name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
      columns_order: Optional. Specifies the order of columns in the
          output table. Specify a list of all column IDs in the order
          in which you want the table created.
          Note that you must list all column IDs in this parameter,
          if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the given
      name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()

    if columns_order is None:
      columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
      jscode += "%s.setTableProperties(%s);\n" % (
          name, encoder.encode(self.custom_properties))

    # We add the columns to the table. addColumn's argument order is
    # (type, label, id).
    for i, col in enumerate(columns_order):
      jscode += "%s.addColumn(%s, %s, %s);\n" % (
          name,
          encoder.encode(col_dict[col]["type"]),
          encoder.encode(col_dict[col]["label"]),
          encoder.encode(col_dict[col]["id"]))
      if col_dict[col]["custom_properties"]:
        jscode += "%s.setColumnProperties(%d, %s);\n" % (
            name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
      # We add all the elements of this row by their order. Missing or None
      # cells are skipped entirely (addRows already created null cells).
      for (j, col) in enumerate(columns_order):
        if col not in row or row[col] is None:
          continue
        value = self.CoerceValue(row[col], col_dict[col]["type"])
        if isinstance(value, tuple):
          cell_cp = ""
          if len(value) == 3:
            cell_cp = ", %s" % encoder.encode(row[col][2])
          # We have a formatted value or custom property as well
          jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                     (name, i, j,
                      self.EscapeForJSCode(encoder, value[0]),
                      self.EscapeForJSCode(encoder, value[1]), cell_cp))
        else:
          jscode += "%s.setCell(%d, %d, %s);\n" % (
              name, i, j, self.EscapeForJSCode(encoder, value))
      if cp:
        jscode += "%s.setRowProperties(%d, %s);\n" % (
            name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
"""Writes the data table as an HTML table code string.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
An HTML table code string.
Example result (the result is without the newlines):
<html><body><table border="1">
<thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
<tbody>
<tr><td>1</td><td>"z"</td><td>2</td></tr>
<tr><td>"3$"</td><td>"w"</td><td></td></tr>
</tbody>
</table></body></html>
Raises:
DataTableException: The data does not match the type.
"""
table_template = "<html><body><table border=\"1\">%s</table></body></html>"
columns_template = "<thead><tr>%s</tr></thead>"
rows_template = "<tbody>%s</tbody>"
row_template = "<tr>%s</tr>"
header_cell_template = "<th>%s</th>"
cell_template = "<td>%s</td>"
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
columns_list = []
for col in columns_order:
columns_list.append(header_cell_template %
cgi.escape(col_dict[col]["label"]))
columns_html = columns_template % "".join(columns_list)
rows_list = []
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
# For empty string we want empty quotes ("").
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value and we're going to use it
cells_list.append(cell_template % cgi.escape(self.ToString(value[1])))
else:
cells_list.append(cell_template % cgi.escape(self.ToString(value)))
rows_list.append(row_template % "".join(cells_list))
rows_html = rows_template % "".join(rows_list)
return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
"""Writes the data table as a CSV string.
Output is encoded in UTF-8 because the Python "csv" module can't handle
Unicode properly according to its documentation.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
separator: Optional. The separator to use between the values.
Returns:
A CSV string representing the table.
Example result:
'a','b','c'
1,'z',2
3,'w',''
Raises:
DataTableException: The data does not match the type.
"""
csv_buffer = cStringIO.StringIO()
writer = csv.writer(csv_buffer, delimiter=separator)
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
writer.writerow([col_dict[col]["label"].encode("utf-8")
for col in columns_order])
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value. Using it only for date/time types.
if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
cells_list.append(self.ToString(value[1]).encode("utf-8"))
else:
cells_list.append(self.ToString(value[0]).encode("utf-8"))
else:
cells_list.append(self.ToString(value).encode("utf-8"))
writer.writerow(cells_list)
return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
"""Returns a file in tab-separated-format readable by MS Excel.
Returns a file in UTF-16 little endian encoding, with tabs separating the
values.
Args:
columns_order: Delegated to ToCsv.
order_by: Delegated to ToCsv.
Returns:
A tab-separated little endian UTF16 file representing the table.
"""
return (self.ToCsv(columns_order, order_by, separator="\t")
.decode("utf-8").encode("UTF-16LE"))
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj
def ToJSon(self, columns_order=None, order_by=()):
"""Returns a string that can be used in a JS DataTable constructor.
This method writes a JSON string that can be passed directly into a Google
Visualization API DataTable constructor. Use this output if you are
hosting the visualization HTML on your site, and want to code the data
table in Python. Pass this string into the
google.visualization.DataTable constructor, e.g,:
... on my page that hosts my visualization ...
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
myTable.draw(data);
}
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A JSon constructor string to generate a JS DataTable with the data
stored in the DataTable object.
Example result (the result is without the newlines):
{cols: [{id:"a",label:"a",type:"number"},
{id:"b",label:"b",type:"string"},
{id:"c",label:"c",type:"number"}],
rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}],
p: {'foo': 'bar'}}
Raises:
DataTableException: The data does not match the type.
"""
encoder = DataTableJSONEncoder()
return encoder.encode(
self._ToJSonObj(columns_order, order_by)).encode("utf-8")
  def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                     response_handler="google.visualization.Query.setResponse"):
    """Writes a table as a JSON response that can be returned as-is to a client.

    This method writes a JSON response to return to a client in response to a
    Google Visualization API query. This string can be processed by the calling
    page, and is used to deliver a data table to a visualization hosted on
    a different page.

    Args:
      columns_order: Optional. Passed straight to self.ToJSon().
      order_by: Optional. Passed straight to self.ToJSon().
      req_id: Optional. The response id, as retrieved by the request.
      response_handler: Optional. The response handler, as retrieved by the
          request.

    Returns:
      A JSON response string to be received by the JS visualization Query
      object. This response would be translated into a DataTable on the
      client side.

    Example result (newlines added for readability):
      google.visualization.Query.setResponse({
        'version':'0.6', 'reqId':'0', 'status':'ok',
        'table': {cols: [...], rows: [...]}});

    Note: The URL returning this string can be used as a data source by Google
    Visualization Gadgets or from JS code.
    """
    # The request id is echoed back as a string; the status emitted here is
    # the lowercase "ok" of the wire protocol.
    response_obj = {
        "version": "0.6",
        "reqId": str(req_id),
        "table": self._ToJSonObj(columns_order, order_by),
        "status": "ok"
    }
    encoder = DataTableJSONEncoder()
    return "%s(%s);" % (response_handler,
                        encoder.encode(response_obj).encode("utf-8"))
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
    """Renders the table in the format requested by the tqx string.

    Parses the "tqx" request string defined by the Google Visualization
    data source protocol (format "key1:value1;key2:value2...") and
    dispatches on its "out" key: ToJSonResponse() for "json" (the
    default), ToHtml() for "html", ToCsv() for "csv" and ToTsvExcel()
    for "tsv-excel", forwarding the relevant request keys.

    Args:
      columns_order: Optional. Passed as is to the chosen serializer.
      order_by: Optional. Passed as is to the chosen serializer.
      tqx: Optional. The request string as received by HTTP GET. Every
          key has a default, so an empty string simply calls
          ToJSonResponse() with no extra parameters.

    Returns:
      A response string, as returned by the chosen serializer.

    Raises:
      DataTableException: An unsupported version or output format was
          requested via tqx.
    """
    options = {}
    if tqx:
        options = dict(item.split(":") for item in tqx.split(";"))

    if options.get("version", "0.6") != "0.6":
        raise DataTableException(
            "Version (%s) passed by request is not supported."
            % options["version"])

    out = options.get("out", "json")
    if out == "json":
        handler = options.get("responseHandler",
                              "google.visualization.Query.setResponse")
        return self.ToJSonResponse(columns_order, order_by,
                                   req_id=options.get("reqId", 0),
                                   response_handler=handler)
    if out == "html":
        return self.ToHtml(columns_order, order_by)
    if out == "csv":
        return self.ToCsv(columns_order, order_by)
    if out == "tsv-excel":
        return self.ToTsvExcel(columns_order, order_by)
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % out)
# Author: Anthony Baxter
"""Class representing audio/* type MIME documents.
"""
import sndhdr
from cStringIO import StringIO
from email import Errors
from email import Encoders
from email.MIMENonMultipart import MIMENonMultipart
# Maps sndhdr.what() format names to audio/* MIME subtypes.
_sndhdr_MIMEmap = {'au'  : 'basic',
                   'wav' :'x-wav',
                   'aiff':'x-aiff',
                   'aifc':'x-aiff',
                   }
# There are others in sndhdr that don't have MIME types. :(
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
def _whatsnd(data):
    """Guess the audio MIME subtype for a blob of raw audio data.

    Runs the first 512 bytes of `data` through each of sndhdr's format
    detectors (sndhdr.what()'s interface is too cruddy to use directly,
    which is why the probing is re-done here) and maps the detected
    format name onto an audio/* MIME subtype.  Returns None when no
    detector matches or the detected format has no known MIME subtype.
    """
    header = data[:512]
    stream = StringIO(header)
    for probe in sndhdr.tests:
        outcome = probe(header, stream)
        if outcome is not None:
            return _sndhdr_MIMEmap.get(outcome[0])
    return None
class MIMEAudio(MIMENonMultipart):
    """Class for generating audio/* MIME documents."""

    def __init__(self, _audiodata, _subtype=None,
                 _encoder=Encoders.encode_base64, **_params):
        """Create an audio/* type MIME document.

        _audiodata is a string containing the raw audio data.  If this data
        can be decoded by the standard Python `sndhdr' module, then the
        subtype will be automatically included in the Content-Type header.
        Otherwise, you can specify the specific audio subtype via the
        _subtype parameter.  If _subtype is not given, and no subtype can be
        guessed, a TypeError is raised.

        _encoder is a function which will perform the actual encoding for
        transport of the image data.  It takes one argument, which is this
        Image instance.  It should use get_payload() and set_payload() to
        change the payload to the encoded form.  It should also add any
        Content-Transfer-Encoding or other headers to the message as
        necessary.  The default encoding is Base64.

        Any additional keyword arguments are passed to the base class
        constructor, which turns them into parameters on the Content-Type
        header.
        """
        # Try sniffing the subtype from the payload before giving up.
        if _subtype is None:
            _subtype = _whatsnd(_audiodata)
        if _subtype is None:
            # Python 2 raise syntax; no subtype given and none guessable.
            raise TypeError, 'Could not find audio MIME subtype'
        MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
        self.set_payload(_audiodata)
        # Encode the payload in place (default base64) and let the encoder
        # add the matching Content-Transfer-Encoding header.
        _encoder(self)
"""
mailthon.response
~~~~~~~~~~~~~~~~~
Response objects encapsulate responses returned
by SMTP servers.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
class Response(object):
    """
    Wraps the (status_code, message) pair an SMTP server returns,
    e.g. in reply to the ``NOOP`` command.

    :param pair: A (status_code, message) pair.
    """

    def __init__(self, pair):
        code, text = pair
        self.status_code = code
        self.message = text

    @property
    def ok(self):
        """
        True when the server replied with status code 250 —
        i.e. everything went well — and False otherwise.
        """
        return self.status_code == 250
class SendmailResponse(Response):
    """
    Wraps the (status_code, message) pair obtained from the NOOP and
    SENDMAIL commands, plus a mapping from each rejected email address
    to its own :class:`Response`.

    :param pair: The response pair.
    :param rejected: Dictionary of rejected addresses to
        status-code/message pairs.
    """

    def __init__(self, pair, rejected):
        Response.__init__(self, pair)
        self.rejected = dict(
            (addr, Response(reply)) for addr, reply in rejected.items()
        )

    @property
    def ok(self):
        """
        True only if no addresses were rejected and the status
        code is 250.
        """
        if self.rejected:
            return False
        return Response.ok.fget(self)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
# Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost
from __future__ import division
import numpy as np
np.set_printoptions(precision=3, suppress=True)
from vehicle_core.config import thrusters_config as tc
from vehicle_core.model import thruster_model as tm
from vehicle_core.model import throttle_model as th
import rospy
import roslib
roslib.load_manifest('vehicle_core')
from vehicle_interface.msg import ThrusterCommand, ThrusterFeedback, Vector6Stamped
# topics
TOPIC_MODEL = 'thrusters/model'
TOPIC_CMDS = 'thrusters/commands'
TOPIC_FORCES = 'forces/body'
class SimulatedThrusters(object):
    """Simulates the vehicle's six thrusters.

    Subscribes to throttle commands, runs each message through the
    throttle-prediction / rate-limiting / current / thrust model chain
    and publishes both the per-thruster feedback and the resulting
    body-frame forces.
    """

    def __init__(self, name, topic_input, topic_feedback, topic_forces, thruster_limit, **kwargs):
        self.name = name
        # data arrays: one row per thruster, LPF_WINDOW past requests kept
        self.throttle_request = np.zeros((6, tc.LPF_WINDOW))
        self.throttle_predicted = np.zeros(6)
        self.throttle_last = np.zeros(6)
        self.current_predicted = np.zeros(6)
        self.forces_predicted = np.zeros(6)
        self.body_forces = np.zeros(6)
        self.throttle_limit = thruster_limit
        # optional tuning pulled from the config dict
        self.limit_rate = bool(kwargs.get('limit_rate', False))
        self.rising_limit = float(kwargs.get('rising_limit', tc.THROTTLE_RISING_LIMIT))
        self.falling_limit = float(kwargs.get('falling_limit', tc.THROTTLE_FALLING_LIMIT))
        self.model_delay = int(kwargs.get('model_delay', 0))
        # clamp user-supplied tuning to sane ranges
        self.rising_limit = np.clip(self.rising_limit, 0, 100)
        self.falling_limit = np.clip(self.falling_limit, 0, 100)
        self.model_delay = np.clip(self.model_delay, 0, tc.LPF_WINDOW-1)
        # ROS plumbing: commands in, feedback and body forces out
        self.sub_cmd = rospy.Subscriber(topic_input, ThrusterCommand, self.handle_commands, tcp_nodelay=True, queue_size=10)
        self.pub_feedback = rospy.Publisher(topic_feedback, ThrusterFeedback, tcp_nodelay=True, queue_size=10)
        self.pub_forces = rospy.Publisher(topic_forces, Vector6Stamped, tcp_nodelay=True, queue_size=10)

    def handle_commands(self, data):
        """Runs one ThrusterCommand through the model chain and publishes."""
        # newest request goes into the last column of the rolling buffer
        self.throttle_request[:, -1] = np.array(data.throttle[0:6])
        # apply thruster model (current estimation)
        self.throttle_last = np.copy(self.throttle_predicted)
        self.throttle_predicted = th.predict_throttle(
            self.throttle_request, b=tc.LPF[0], a=tc.LPF[1], offset=self.model_delay, limit=self.throttle_limit
        )
        if self.limit_rate is True:
            # new thrusters filter: bound the throttle slew between callbacks
            self.throttle_predicted = th.rate_limiter(
                self.throttle_predicted, self.throttle_last, rising_limit=self.rising_limit, falling_limit=self.falling_limit
            )
        self.current_predicted = tm.estimate_current(self.throttle_predicted, tc.THROTTLE_TO_CURRENT)
        # apply thruster model (thrust estimation)
        self.forces_predicted = tm.estimate_forces( self.throttle_predicted, self.current_predicted, tc.CURRENT_TO_THRUST )
        # converting from thruster domain to body forces using the thruster allocation matrix
        self.forces_predicted = self.forces_predicted.reshape(6, 1)
        self.body_forces = np.dot(tc.TAM, self.forces_predicted)
        # shift the buffer and zero the newest slot so that missing messages
        # decay the request towards zero instead of holding the last value
        self.throttle_request = np.roll(self.throttle_request, -1, axis=1)
        self.throttle_request[:, -1] = np.zeros(6)
        # send thruster feedback
        msg = ThrusterFeedback()
        msg.header.stamp = rospy.Time.now()
        msg.throttle = self.throttle_predicted
        msg.current = self.current_predicted
        msg.temp = np.zeros(6)
        msg.status = np.zeros(6)
        msg.errors = np.zeros(6)
        self.pub_feedback.publish(msg)
        # send body forces
        msg = Vector6Stamped()
        msg.header.stamp = rospy.Time.now()
        msg.values = self.body_forces
        self.pub_forces.publish(msg)
if __name__ == '__main__':
    rospy.init_node('thrusters_model')
    # topic names are overridable via private node parameters
    topic_input = rospy.get_param('~topic_input', TOPIC_CMDS)
    topic_feedback = rospy.get_param('~topic_feedback', TOPIC_MODEL)
    topic_forces = rospy.get_param('~topic_forces', TOPIC_FORCES)
    # global throttle limit (percent), clamped to [0, 100]
    lim = int(rospy.get_param('thrusters/throttle_limit', tc.MAX_THROTTLE))
    lim = int(np.clip(lim, 0, 100).astype(int))
    # free-form simulator tuning dict (see SimulatedThrusters kwargs)
    config = rospy.get_param('thruster_model', dict())
    rospy.loginfo('%s: model init ... ', rospy.get_name())
    rospy.loginfo('%s: thrusters input topic: %s', rospy.get_name(), topic_input)
    rospy.loginfo('%s: thrusters feedback topic: %s', rospy.get_name(), topic_feedback)
    rospy.loginfo('%s: thrusters forces topic: %s', rospy.get_name(), topic_forces)
    rospy.loginfo('%s: thrusters throttle limit: %s', rospy.get_name(), lim)
    rospy.loginfo('%s: thrusters simulator config:\n%s', rospy.get_name(), config)
    thruster = SimulatedThrusters(rospy.get_name(), topic_input, topic_feedback, topic_forces, lim, **config)
    rospy.spin()
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import os
import re
from logging import getLogger
from M2Crypto import X509
# Error text used when the offending bundle's filesystem path is unknown.
EXMSG = \
"""
A bundle must contain both the private key and
certificate PEM text. The [%s] PEM text was not found.
"""
# Error text used when the bundle's filesystem path is known.
EXMSG_AT_PATH = \
"""
The bundle at: %s
must contain both the private key and certificate
PEM text. The [%s] PEM text was not found.
"""
log = getLogger(__name__)
class KeyNotFound(Exception):
    """
    Raised when a bundle is missing its private key PEM section.
    The `bundle` argument is accepted for interface compatibility but
    not included in the message.
    """

    def __init__(self, bundle, path=None):
        message = EXMSG_AT_PATH % (path, 'key') if path else EXMSG % 'key'
        Exception.__init__(self, message)
class CertNotFound(Exception):
    """
    Raised when a bundle is missing its certificate PEM section.
    The `bundle` argument is accepted for interface compatibility but
    not included in the message.
    """

    def __init__(self, bundle, path=None):
        message = EXMSG_AT_PATH % (path, 'certificate') if path \
            else EXMSG % 'certificate'
        Exception.__init__(self, message)
class Bundle:
    """
    Represents x509, pem encoded key & certificate bundles.
    """

    # PEM section delimiters; the BEGIN patterns tolerate leading newlines.
    KEY_BEGIN = re.compile(r'[\n]*[\-]{5}BEGIN( RSA| DSA)? PRIVATE KEY[\-]{5}')
    KEY_END = re.compile(r'[\-]{5}END( RSA| DSA)? PRIVATE KEY[\-]{5}')
    CRT_BEGIN = re.compile(r'[\n]*[\-]{5}BEGIN CERTIFICATE[\-]{5}')
    CRT_END = re.compile(r'[\-]{5}END CERTIFICATE[\-]{5}')

    @classmethod
    def haskey(cls, bundle):
        """
        Get whether the string contains a PEM encoded private key.
        @param bundle: A PEM string.
        @type bundle: str
        @return: True if contains a key.
        @rtype: bool
        """
        m = cls.KEY_BEGIN.search(bundle)
        return ( m is not None )

    @classmethod
    def hascrt(cls, bundle):
        """
        Get whether the string contains a PEM encoded certificate.
        @param bundle: A PEM string.
        @type bundle: str
        @return: True if contains a certificate.
        @rtype: bool
        """
        m = cls.CRT_BEGIN.search(bundle)
        return ( m is not None )

    @classmethod
    def hasboth(cls, bundle):
        """
        Get whether the string contains both
        a PEM encoded private key AND certificate.
        @param bundle: A PEM string.
        @type bundle: str
        @return: True if contains a key & cert.
        @rtype: bool
        """
        return ( cls.haskey(bundle) and cls.hascrt(bundle) )

    @classmethod
    def assertvalid(cls, bundle, path=None):
        """
        Validate that the bundle is valid.
        @param bundle: A bundle to validate.
        @type bundle: str
        @param path: Optional filesystem path reported in the exception.
        @type path: str
        @raise KeyNotFound: When key PEM is missing.
        @raise CertNotFound: When cert PEM is missing.
        """
        if not cls.haskey(bundle):
            raise KeyNotFound(bundle, path)
        if not cls.hascrt(bundle):
            raise CertNotFound(bundle, path)

    @classmethod
    def split(cls, bundle):
        """
        Split the bundle into key and certificate components.
        @param bundle: A bundle containing the key and certificate PEM.
        @type bundle: str
        @return: (key,crt)
        @rtype: tuple
        """
        # key
        begin = cls.KEY_BEGIN.search(bundle)
        end = cls.KEY_END.search(bundle)
        if not (begin and end):
            # Python 2 raise syntax.
            raise Exception, '%s, not valid' % bundle
        begin = begin.start(0)
        end = end.end(0)
        key = bundle[begin:end]
        # certificate
        begin = cls.CRT_BEGIN.search(bundle)
        end = cls.CRT_END.search(bundle)
        if not (begin and end):
            raise Exception, '%s, not valid' % bundle
        begin = begin.start(0)
        end = end.end(0)
        crt= bundle[begin:end]
        # strip surrounding whitespace from both PEM sections
        return (key.strip(), crt.strip())

    @classmethod
    def join(cls, key, crt):
        """
        Join the specified key and certificate not a bundle.
        @param key: A private key (PEM).
        @type key: str
        @param crt: A certificate (PEM).
        @type crt: str
        @return: A bundle containing the key and certifiate.
        @rtype: str
        """
        key = key.strip()
        crt = crt.strip()
        return '\n'.join((key, crt))

    def __init__(self, path):
        """
        @param path: The absolute path to the bundle represented.
        @type path: str
        """
        # expand a leading "~" so the path works for any user
        self.path = os.path.expanduser(path)

    def valid(self):
        """
        Validate the bundle.
        @return: True if exists & valid.
        @rtype: bool
        """
        if os.path.exists(self.path):
            s = self.read()
            return self.hasboth(s)
        else:
            return False

    def read(self):
        """
        Read and return the bundle contents.
        @return: A string containing the PEM encoded key & cert.
        @rtype: str
        @raise KeyNotFound: When key PEM is missing.
        @raise CertNotFound: When cert PEM is missing.
        """
        f = open(self.path)
        bundle = f.read()
        f.close()
        # validate after reading so callers never get a partial bundle
        self.assertvalid(bundle, self.path)
        return bundle

    def write(self, bundle):
        """
        Write the specified bundle content.
        @param bundle: The PEM text for the private key and certificate.
        @type bundle: str
        """
        self.mkdir()
        # validate before touching the file so invalid input never lands
        self.assertvalid(bundle)
        f = open(self.path, 'w')
        f.write(bundle)
        f.close()

    def delete(self):
        """
        Delete the certificate.
        """
        # NOTE(review): os.unlink raises OSError, not IOError, on Python 2;
        # this handler may never fire -- confirm the intended exception.
        try:
            if os.path.exists(self.path):
                os.unlink(self.path)
        except IOError:
            log.exception('unlink %s failed', self.path)

    def mkdir(self):
        """
        Ensure I{root} directory exists.
        """
        path = os.path.dirname(self.path)
        if not os.path.exists(path):
            os.makedirs(path)

    def cn(self):
        """
        Get the subject (CN) Common Name
        @return: The subject CN, or None when the bundle is invalid.
        @rtype: str
        """
        if self.valid():
            subject = self.subject()
            return subject.get('CN')

    def uid(self):
        """
        Get the subject (UID) userid.
        @return: The subject UID, or None when the bundle is invalid.
        @rtype: str
        """
        if self.valid():
            subject = self.subject()
            return subject.get('UID')

    def subject(self):
        """
        Get the certificate subject.
        note: Missing NID mapping for UID added to patch openssl.
        @return: A dictionary of subject fields.
        @rtype: dict
        """
        d = {}
        content = self.read()
        x509 = X509.load_cert_string(content)
        subject = x509.get_subject()
        # patch M2Crypto's NID table: UID is missing from it
        subject.nid['UID'] = 458
        for key, nid in subject.nid.items():
            entry = subject.get_entries_by_nid(nid)
            if len(entry):
                asn1 = entry[0].get_data()
                d[key] = str(asn1)
                # NOTE(review): this continue at the end of the loop body
                # is redundant.
                continue
        return d

    def __str__(self):
        return 'bundle: %s' % self.path
"""
Support for monitoring an OpenEVSE Charger.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.openevse/
"""
import logging
from requests import RequestException
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import TEMP_CELSIUS, CONF_HOST
from homeassistant.const import CONF_MONITORED_VARIABLES
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['openevsewifi==0.4']
# Sensor key -> [friendly name, unit of measurement].
# NOTE(review): "Termperature" is a user-visible typo; fixing it would
# rename the entity for existing users, so it is deliberately left as-is.
SENSOR_TYPES = {
    'status': ['Charging Status', None],
    'charge_time': ['Charge Time Elapsed', 'minutes'],
    'ambient_temp': ['Ambient Termperature', TEMP_CELSIUS],
    'ir_temp': ['IR Temperature', TEMP_CELSIUS],
    'rtc_temp': ['RTC Temperature', TEMP_CELSIUS],
    'usage_session': ['Usage this Session', 'kWh'],
    'usage_total': ['Total Usage', 'kWh']
}
# Host is required; monitored variables default to just the status sensor.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_MONITORED_VARIABLES, default=['status']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up one OpenEVSE sensor entity per configured variable."""
    import openevsewifi

    charger = openevsewifi.Charger(config.get(CONF_HOST))
    sensors = [OpenEVSESensor(variable, charger)
               for variable in config.get(CONF_MONITORED_VARIABLES)]
    add_devices(sensors)
class OpenEVSESensor(Entity):
    """A single OpenEVSE charger reading exposed as a sensor."""

    # pylint: disable=too-many-arguments
    def __init__(self, sensor_type, charger):
        """Initialize the sensor."""
        friendly_name, unit = SENSOR_TYPES[sensor_type]
        self._name = friendly_name
        self.type = sensor_type
        self._state = None
        self.charger = charger
        self._unit_of_measurement = unit

    @property
    def name(self):
        """The sensor's display name."""
        return self._name

    @property
    def state(self):
        """The most recently polled value."""
        return self._state

    @property
    def unit_of_measurement(self):
        """The unit the state is expressed in."""
        return self._unit_of_measurement

    def update(self):
        """Poll the charger for the value this sensor tracks."""
        # All readers are lazy lambdas so only the matching charger call
        # is made, exactly as the original if/elif chain did.
        readers = {
            'status': lambda: self.charger.getStatus(),
            'charge_time': lambda: self.charger.getChargeTimeElapsed() / 60,
            'ambient_temp': lambda: self.charger.getAmbientTemperature(),
            'ir_temp': lambda: self.charger.getIRTemperature(),
            'rtc_temp': lambda: self.charger.getRTCTemperature(),
            'usage_session': lambda: float(self.charger.getUsageSession()) / 1000,
            'usage_total': lambda: float(self.charger.getUsageTotal()) / 1000,
        }
        try:
            reader = readers.get(self.type)
            self._state = reader() if reader is not None else 'Unknown'
        except (RequestException, ValueError, KeyError):
            _LOGGER.warning("Could not update status for %s", self.name)
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Comunitea All Rights Reserved
# $Omar Castiñeira Saavedra <omar@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Flask middleware connector",
'version': '1.0',
'category': 'Connector',
'description': """Connect to Visiotech flask middleware using Odoo connector""",
'author': 'Comunitea',
'website': 'www.comunitea.com',
"depends": ['base', 'product', 'connector', 'stock', 'custom_partner', 'crm_claim_rma', 'product_virtual_stock_conservative'],
"data": ["views/middleware_view.xml", "views/product_view.xml", 'views/res_users.xml',
"views/product_brand.xml", "views/claim_line_view.xml", "security/ir.model.access.csv"],
"installable": True
} | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
########################################################################
# $HeadURL: svn+ssh://svn.cern.ch/reps/dirac/DIRAC/trunk/DIRAC/FrameworkSystem/scripts/dirac-proxy-upload.py $
# File : dirac-proxy-init.py
# Author : Adrian Casajus
###########################################################from DIRAC.Core.Base import Script#############
__RCSID__ = "$Id: dirac-proxy-upload.py 18161 2009-11-11 12:07:09Z acasajus $"
import sys
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import ProxyManagerClient
from DIRAC.Core.Security import CS, Properties
from DIRAC.Core.Security.ProxyInfo import *
# Username requested via -u/--user; False means "query the proxy owner".
userName = False
def setUser( arg ):
    # Switch callback for -u/--user: remember which user to query.
    global userName
    userName = arg
    return DIRAC.S_OK()
# Register the optional -u/--user switch, then parse the command line.
Script.registerSwitch( "u:", "user=", "User to query (by default oneself)", setUser )
Script.parseCommandLine()
# A valid local proxy is required to talk to the ProxyManager at all.
result = getProxyInfo()
if not result[ 'OK' ]:
    print "Do you have a valid proxy?"
    print result[ 'Message' ]
    sys.exit( 1 )
proxyProps = result[ 'Value' ]
# Default to querying the proxy owner's own uploads.
if not userName:
    userName = proxyProps[ 'username' ]
if userName in CS.getAllUsers():
    # Registered user: without PROXY_MANAGEMENT rights one may only query
    # oneself (matched either by username or by issuer DN).
    if Properties.PROXY_MANAGEMENT not in proxyProps[ 'groupProperties' ]:
        if userName != proxyProps[ 'username' ] and userName != proxyProps[ 'issuer' ]:
            print "You can only query info about yourself!"
            sys.exit( 1 )
    # Resolve the username to all of its registered DNs.
    result = CS.getDNForUsername( userName )
    if not result[ 'OK' ]:
        print "Oops %s" % result[ 'Message' ]
    dnList = result[ 'Value' ]
    if not dnList:
        print "User %s has no DN defined!" % userName
        sys.exit( 1 )
    userDNs = dnList
else:
    # Not a registered username: treat the argument as a raw DN.
    userDNs = [ userName ]
print "Checking for DNs %s" % " | ".join( userDNs )
pmc = ProxyManagerClient()
result = pmc.getDBContents( { 'UserDN' : userDNs } )
if not result[ 'OK' ]:
    print "Could not retrieve the proxy list: %s" % result[ 'Message' ]
    sys.exit( 1 )
# Render the result set as an ASCII table: first compute column widths
# over both the header names and every record value...
data = result[ 'Value' ]
colLengths = []
for pN in data[ 'ParameterNames' ]:
    colLengths.append( len( pN ) )
for row in data[ 'Records' ] :
    for i in range( len( row ) ):
        colLengths[ i ] = max( colLengths[i], len( str( row[i] ) ) )
# ...then build the padded header row...
lines = [""]
for i in range( len( data[ 'ParameterNames' ] ) ):
    pN = data[ 'ParameterNames' ][i]
    lines[0] += "| %s " % pN.ljust( colLengths[i] )
lines[0] += "|"
tL = len( lines[0] )
lines.insert( 0, "-"*tL )
lines.append( "-"*tL )
# ...and one padded line (followed by a separator) per record.
for row in data[ 'Records' ] :
    nL = ""
    for i in range( len( row ) ):
        nL += "| %s " % str( row[i] ).ljust( colLengths[i] )
    nL += "|"
    lines.append( nL )
    lines.append( "-"*tL )
print "\n".join( lines )
first:
id: 1 | unknown | github | https://github.com/rails/rails | activerecord/test/fixtures/fk_object_to_point_to.yml |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.playbook.block import Block
from ansible.playbook.task import Task
class TestBlock(unittest.TestCase):
    """Unit tests for loading and deserializing Block objects."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_construct_empty_block(self):
        # constructing with no arguments must not raise
        b = Block()

    def test_construct_block_with_role(self):
        pass

    def test_load_block_simple(self):
        b = Block.load({'block': [], 'rescue': [], 'always': []})
        self.assertEqual(b.block, [])
        self.assertEqual(b.rescue, [])
        self.assertEqual(b.always, [])
        # 'otherwise' is not currently used, so it is not exercised here

    def test_load_block_with_tasks(self):
        b = Block.load({
            'block': [{'action': 'block'}],
            'rescue': [{'action': 'rescue'}],
            'always': [{'action': 'always'}],
        })
        for section in (b.block, b.rescue, b.always):
            self.assertEqual(len(section), 1)
            self.assertIsInstance(section[0], Task)

    def test_load_implicit_block(self):
        # a bare task list is implicitly wrapped into a block
        b = Block.load([{'action': 'foo'}])
        self.assertEqual(len(b.block), 1)
        self.assertIsInstance(b.block[0], Task)

    def test_deserialize(self):
        ds = {
            'block': [{'action': 'block'}],
            'rescue': [{'action': 'rescue'}],
            'always': [{'action': 'always'}],
        }
        b = Block.load(ds)
        b.deserialize({'parent': ds, 'parent_type': 'Block'})
        self.assertIsInstance(b._parent, Block)
#!/usr/bin/python
"""
Generate the big keycodes table for virkeys.
It read keymaps.csv from stdin and put the generated code to stdout.
Please keep keymaps.csv be exactly the same as:
http://git.gnome.org/browse/gtk-vnc/plain/src/keymaps.csv.
If anything inconsistent happens, please change this file
instead of keymaps.csv which is a mirror.
"""
import sys
import re
# Column indices (0-based) in keymaps.csv holding key-name strings.
namecolums = (0,2,10)
# Column index of the XT keyboard keycode field.
xtkbdkey_index = 8
def quotestring(s):
    """Return *s* wrapped in double quotes unless it already starts with one.

    Fixes two issues in the original: the parameter shadowed the builtin
    ``str``, and indexing ``s[0]`` crashed with IndexError on an empty
    string (``startswith`` handles the empty string, returning '""').
    """
    if s.startswith('"'):
        return s
    return '"' + s + '"'
# Emit the C header preamble and open the keycodes table.
print '''
/* Generated file, DON'T edit it */
#ifndef VIRT_KEY_INTERNAL
# error do not use this; it is not a public header
#endif
struct keycode virKeycodes[] = {
'''
sys.stdin.readline() # eat the first (column-header) line.
for line in sys.stdin.xreadlines():
    # 14 comma-separated fields per row; line[0:-1] strips the newline.
    a = re.match("([^,]*)," * 13 + "([^,]*)$", line[0:-1]).groups()
    b = ""
    rfbkey = 0
    # name columns become quoted C strings (or NULL when empty)
    for i in namecolums:
        b = b + (a[i] and quotestring(a[i]) or 'NULL') + ','
    # the remaining numeric columns default to 0 when empty
    for i in [ x for x in range(12) if not x in namecolums ]:
        b = b + (a[i] or '0') + ','
        if i == xtkbdkey_index:
            # RFB keycodes are XT kbd keycodes with a slightly
            # different encoding of 0xe0 scan codes. RFB uses
            # the high bit of the first byte, instead of the low
            # bit of the second byte.
            rfbkey = int(a[i] or '0')
            rfbkey = (rfbkey & 0x100) >> 1 | (rfbkey & 0x7f)
    # Append RFB keycode as the last column
    b = b + str(rfbkey)
    print " { " + b + "},"
print '};'
# Natural Language Toolkit: Semantic Interpretation
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
#
# Copyright (C) 2001-2012 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility functions for batch-processing sentences: parsing and
extraction of the semantic representation of the root node of the the
syntax tree, followed by evaluation of the semantic representation in
a first-order model.
"""
import evaluate
import re
##############################################################
## Utility functions for connecting parse output to semantics
##############################################################
def batch_parse(inputs, grammar, trace=0):
    """
    Convert input sentences into syntactic trees.

    :param inputs: sentences to be parsed
    :type inputs: list of str
    :param grammar: ``FeatureGrammar`` or name of feature-based grammar
    :rtype: list
    :return: a list, parallel to ``inputs``, of lists of parse ``Tree``s
        (note: a list, not a dict)
    """
    # imports live inside the function to avoid circular dependencies
    from nltk.grammar import FeatureGrammar
    from nltk.parse import FeatureChartParser, load_parser

    if isinstance(grammar, FeatureGrammar):
        parser = FeatureChartParser(grammar)
    else:
        parser = load_parser(grammar, trace=trace)
    # plain whitespace tokenization; a real tokenizer may be preferable
    return [parser.nbest_parse(sent.split()) for sent in inputs]
def root_semrep(syntree, semkey='SEM'):
    """
    Find the semantic representation at the root of a tree.

    :param syntree: a parse ``Tree``
    :param semkey: the feature label to use for the root semantics in the tree
    :return: the semantic representation at the root of a ``Tree``
    :rtype: sem.Expression
    :raise KeyError: when the root node has no ``semkey`` feature
    """
    # local import to avoid a circular dependency with nltk.grammar
    from nltk.grammar import FeatStructNonterminal
    node = syntree.node
    assert isinstance(node, FeatStructNonterminal)
    try:
        return node[semkey]
    except KeyError:
        # Python 2 print statements: show which node lacked the feature
        # before re-raising for the caller.
        print node,
        print "has no specification for the feature %s" % semkey
        raise
def batch_interpret(inputs, grammar, semkey='SEM', trace=0):
    """
    Add the semantic representation to each syntactic parse tree
    of each input sentence.

    :param inputs: a list of sentences
    :param grammar: ``FeatureGrammar`` or name of feature-based grammar
    :return: for each sentence, a list of
        (parse-tree, semantic-representation) pairs
    :rtype: list
    """
    interpreted = []
    for trees in batch_parse(inputs, grammar, trace=trace):
        interpreted.append([(tree, root_semrep(tree, semkey)) for tree in trees])
    return interpreted
def batch_evaluate(inputs, grammar, model, assignment, trace=0):
    """
    Add the truth-in-a-model value to each semantic representation
    for each syntactic parse of each input sentence.

    :param inputs: a list of sentences
    :param grammar: ``FeatureGrammar`` or name of feature-based grammar
    :return: for each sentence, a list of triples
        (parse-tree, semantic-representation, evaluation-in-model)
    :rtype: list
    """
    # NOTE(review): trace only affects model evaluation here; the parsing
    # stage runs untraced -- confirm whether that is intentional.
    evaluated = []
    for interpretations in batch_interpret(inputs, grammar):
        triples = []
        for (syn, sem) in interpretations:
            triples.append((syn, sem, model.evaluate(str(sem), assignment, trace=trace)))
        evaluated.append(triples)
    return evaluated
##########################################
# REs used by the parse_valuation function
##########################################
# Splits "symbol => value" lines on the arrow.
_VAL_SPLIT_RE = re.compile(r'\s*=+>\s*')
# Splits comma-separated elements inside a set literal.
_ELEMENT_SPLIT_RE = re.compile(r'\s*,\s*')
# Matches one parenthesized tuple inside a set literal.
_TUPLES_RE = re.compile(r"""\s*
(\([^)]+\)) # tuple-expression
\s*""", re.VERBOSE)
def parse_valuation_line(s):
    """
    Parse one line of a valuation file into a (symbol, value) pair.

    Lines are expected to be of the form::

        noosa => n
        girl => {g1, g2}
        chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}

    :param s: input line
    :type s: str
    :return: a pair (symbol, value)
    :rtype: tuple
    """
    pieces = _VAL_SPLIT_RE.split(s)
    symbol, value = pieces[0], pieces[1]
    # a value wrapped in braces denotes a set (of atoms or of tuples)
    if value.startswith('{'):
        inner = value[1:-1]
        tuple_strings = _TUPLES_RE.findall(inner)
        if tuple_strings:
            # set of tuples: strip each "(...)" and split on commas
            elements = [tuple(_ELEMENT_SPLIT_RE.split(ts[1:-1]))
                        for ts in tuple_strings]
        else:
            # flat set of atoms
            elements = _ELEMENT_SPLIT_RE.split(inner)
        value = set(elements)
    return symbol, value
def parse_valuation(s):
    """
    Convert a valuation file into a valuation.

    Blank lines and lines starting with ``#`` are skipped; every other
    line is handed to ``parse_valuation_line()``.

    :param s: the contents of a valuation file
    :type s: str
    :return: a ``nltk.sem`` valuation
    :rtype: Valuation
    :raises ValueError: if any non-comment line cannot be parsed
    """
    statements = []
    for linenum, line in enumerate(s.splitlines()):
        line = line.strip()
        if line.startswith('#') or line == '':
            continue
        try:
            statements.append(parse_valuation_line(line))
        # IndexError added: a line with no '=>' makes parse_valuation_line
        # index past its single split piece, which the old except clause
        # let escape without the helpful message below.
        except (ValueError, IndexError):
            # Call-style raise: the old 'raise ValueError, msg' form is
            # Python-2-only syntax; this form works in both 2 and 3.
            raise ValueError('Unable to parse line %s: %s' % (linenum, line))
    val = evaluate.Valuation(statements)
    return val
def demo_model0():
    """
    Build the default demo model and variable assignment, binding them to
    the module-level globals ``m0`` and ``g0``.
    """
    global m0, g0
    # Valuation of the non-logical constants.
    pairs = [
        ('john', 'b1'),
        ('mary', 'g1'),
        ('suzie', 'g2'),
        ('fido', 'd1'),
        ('tess', 'd2'),
        ('noosa', 'n'),
        ('girl', set(['g1', 'g2'])),
        ('boy', set(['b1', 'b2'])),
        ('dog', set(['d1', 'd2'])),
        ('bark', set(['d1', 'd2'])),
        ('walk', set(['b1', 'g2', 'd1'])),
        ('chase', set([('b1', 'g1'), ('b2', 'g1'), ('g1', 'd1'), ('g2', 'd2')])),
        ('see', set([('b1', 'g1'), ('b2', 'd2'), ('g1', 'b1'), ('d2', 'b1'), ('g2', 'n')])),
        ('in', set([('b1', 'n'), ('b2', 'n'), ('d2', 'n')])),
        ('with', set([('b1', 'g1'), ('g1', 'b1'), ('d1', 'b1'), ('b1', 'd1')])),
    ]
    valuation = evaluate.Valuation(pairs)
    # Model and assignment share the valuation's domain.
    dom = valuation.domain
    m0 = evaluate.Model(dom, valuation)
    g0 = evaluate.Assignment(dom)
def read_sents(file):
    """
    Read a file of test sentences, one per line.

    Blank lines and lines whose first character is ``#`` are dropped.

    :param file: path to the sentence file (parameter name kept for
        backward compatibility although it shadows the builtin ``file``)
    :return: the remaining lines, right-stripped
    :rtype: list
    """
    # Context manager closes the handle; the original left the file open.
    with open(file) as fh:
        sents = [l.rstrip() for l in fh]
    # Drop blank lines and comment lines in a single pass.
    return [l for l in sents if l and not l.startswith('#')]
def demo_legacy_grammar():
"""
Check that batch_interpret() is compatible with legacy grammars that use
a lowercase 'sem' feature.
Define 'test.fcfg' to be the following
"""
from nltk.grammar import parse_fcfg
g = parse_fcfg("""
% start S
S[sem=<hello>] -> 'hello'
""")
print "Reading grammar: %s" % g
print "*" * 20
for reading in batch_interpret(['hello'], g, semkey='sem'):
syn, sem = reading[0]
print
print "output: ", sem
def demo():
    """
    Command-line demo: parse a batch of sentences with a feature grammar
    and (optionally) evaluate their semantics in the model built by
    demo_model0().
    """
    import sys
    from optparse import OptionParser
    description = \
    """
    Parse and evaluate some sentences.
    """

    opts = OptionParser(description=description)

    # Defaults: evaluate semantics, beta-reduce, no tracing.
    opts.set_defaults(evaluate=True, beta=True, syntrace=0,
                      semtrace=0, demo='default', grammar='', sentences='')

    opts.add_option("-d", "--demo", dest="demo",
                    help="choose demo D; omit this for the default demo, or specify 'chat80'", metavar="D")
    opts.add_option("-g", "--gram", dest="grammar",
                    help="read in grammar G", metavar="G")
    opts.add_option("-m", "--model", dest="model",
                    help="import model M (omit '.py' suffix)", metavar="M")
    opts.add_option("-s", "--sentences", dest="sentences",
                    help="read in a file of test sentences S", metavar="S")
    opts.add_option("-e", "--no-eval", action="store_false", dest="evaluate",
                    help="just do a syntactic analysis")
    opts.add_option("-b", "--no-beta-reduction", action="store_false",
                    dest="beta", help="don't carry out beta-reduction")
    opts.add_option("-t", "--syntrace", action="count", dest="syntrace",
                    help="set syntactic tracing on; requires '-e' option")
    opts.add_option("-T", "--semtrace", action="count", dest="semtrace",
                    help="set semantic tracing on")

    (options, args) = opts.parse_args()

    SPACER = '-' * 30

    # Populate the module-level globals m0 (model) and g0 (assignment).
    demo_model0()

    # Built-in default sentence set and grammar.
    sents = [
    'Fido sees a boy with Mary',
    'John sees Mary',
    'every girl chases a dog',
    'every boy chases a girl',
    'John walks with a girl in Noosa',
    'who walks']
    gramfile = 'grammars/sample_grammars/sem2.fcfg'

    if options.sentences:
        sentsfile = options.sentences
    if options.grammar:
        gramfile = options.grammar
    if options.model:
        # Python-2-only 'exec' statement; binds a local name 'model'.
        exec "import %s as model" % options.model

    # NOTE(review): 'sents' is always the list above and never None, so
    # this branch is dead and the -s/--sentences option is effectively
    # ignored.
    if sents is None:
        sents = read_sents(sentsfile)

    # Set model and assignment
    # NOTE(review): this unconditionally overwrites any model imported via
    # the -m option handled by the exec above.
    model = m0
    g = g0

    if options.evaluate:
        evaluations = \
            batch_evaluate(sents, gramfile, model, g, trace=options.semtrace)
    else:
        semreps = \
            batch_interpret(sents, gramfile, trace=options.syntrace)

    # Print one numbered reading (and, when evaluating, its value) per
    # interpretation of each sentence.
    for i, sent in enumerate(sents):
        n = 1
        print '\nSentence: %s' % sent
        print SPACER
        if options.evaluate:
            for (syntree, semrep, value) in evaluations[i]:
                if isinstance(value, dict):
                    # A dict-valued result is displayed as the set of its keys.
                    value = set(value.keys())
                print '%d: %s' % (n, semrep)
                print value
                n += 1
        else:
            for (syntree, semrep) in semreps[i]:
                print '%d: %s' % (n, semrep)
                n += 1
if __name__ == "__main__":
    # The full CLI demo is disabled; only the legacy-grammar check runs.
    #demo()
    demo_legacy_grammar()
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXPGEXP(gxapi_cy.WrapPGEXP):
    """
    GXPGEXP class.

    The `GXPGEXP <geosoft.gxapi.GXPGEXP>` class is similar to the `GXEXP <geosoft.gxapi.GXEXP>` class, but is used
    to apply math expressions to pagers (`GXPG <geosoft.gxapi.GXPG>` objects).
    It works only on PGs of the same dimensions.
    """
    # NOTE: generated code (see the surrounding block markers); edits here
    # will be lost when the wrapper is regenerated.

    def __init__(self, handle=0):
        # A handle of 0 denotes a null (undefined) instance; see null()/is_null().
        super(GXPGEXP, self).__init__(GXContext._get_tls_geo(), handle)

    @classmethod
    def null(cls):
        """
        A null (undefined) instance of `GXPGEXP <geosoft.gxapi.GXPGEXP>`

        :returns: A null `GXPGEXP <geosoft.gxapi.GXPGEXP>`
        :rtype: GXPGEXP
        """
        return GXPGEXP()

    def is_null(self):
        """
        Check if this is a null (undefined) instance

        :returns: True if this is a null (undefined) instance, False otherwise.
        :rtype: bool
        """
        # The wrapped handle is 0 exactly for instances created via null().
        return self._internal_handle() == 0

    # Miscellaneous

    def add_pager(self, pg, var):
        """
        This method adds an pager to the `GXPGEXP <geosoft.gxapi.GXPGEXP>` object with a
        variable name.

        :param pg:   Pager to add
        :param var:  Variable name
        :type  pg:   GXPG
        :type  var:  str

        .. versionadded:: 7.1

        **License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
        """
        # The variable name crosses the GX API boundary as bytes.
        self._add_pager(pg, var.encode())

    @classmethod
    def create(cls):
        """
        This method creates an `GXPGEXP <geosoft.gxapi.GXPGEXP>` object.

        :returns: `GXPGEXP <geosoft.gxapi.GXPGEXP>` Object
        :rtype: GXPGEXP

        .. versionadded:: 7.1

        **License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
        """
        ret_val = gxapi_cy.WrapPGEXP._create(GXContext._get_tls_geo())
        return GXPGEXP(ret_val)

    def do_formula(self, formula, unused):
        """
        This method runs a formula on the pagers.

        :param formula:  Formula
        :param unused:   Legacy parameter, no longer used.
        :type  formula:  str
        :type  unused:   int

        .. versionadded:: 7.1

        **License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
        """
        # The formula string crosses the GX API boundary as bytes.
        self._do_formula(formula.encode(), unused)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_gtm_datacenter import ApiParameters
from library.bigip_gtm_datacenter import ModuleParameters
from library.bigip_gtm_datacenter import ModuleManager
from library.bigip_gtm_datacenter import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_datacenter import ApiParameters
from ansible.modules.network.f5.bigip_gtm_datacenter import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_datacenter import ModuleManager
from ansible.modules.network.f5.bigip_gtm_datacenter import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Directory holding JSON fixtures, plus a process-wide cache keyed by path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """
    Load a fixture file from the fixtures directory, caching the result.

    The content is parsed as JSON when possible; otherwise the raw text
    is returned. Repeated calls for the same path hit the cache.

    :param name: fixture file name (joined onto ``fixture_path``)
    :return: parsed JSON object, or the raw file contents
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    # Context manager closes the handle; the original leaked it.
    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except Exception:
        # Deliberate best-effort: non-JSON fixtures are returned as text.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters(self):
        params = ModuleParameters(params=dict(
            state='present',
            contact='foo',
            description='bar',
            location='baz',
            name='datacenter',
        ))
        assert params.state == 'present'

    def test_api_parameters(self):
        fixture = load_fixture('load_gtm_datacenter_default.json')
        params = ApiParameters(params=fixture)
        assert params.name == 'asd'

    def test_module_parameters_state_present(self):
        params = ModuleParameters(params=dict(state='present'))
        assert params.state == 'present'
        assert params.enabled is True

    def test_module_parameters_state_absent(self):
        params = ModuleParameters(params=dict(state='absent'))
        assert params.state == 'absent'

    def test_module_parameters_state_enabled(self):
        params = ModuleParameters(params=dict(state='enabled'))
        assert params.state == 'enabled'
        assert params.enabled is True
        assert params.disabled is None

    def test_module_parameters_state_disabled(self):
        params = ModuleParameters(params=dict(state='disabled'))
        assert params.state == 'disabled'
        assert params.enabled is None
        assert params.disabled is True
class TestManager(unittest.TestCase):
    # Drives ModuleManager.exec_module() end to end with the device-facing
    # methods replaced by mocks, so no BIG-IP connection is needed.

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_datacenter(self, *args):
        set_module_args(dict(
            state='present',
            password='admin',
            server='localhost',
            user='admin',
            name='foo'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        # side_effect order matters: the first exists() call (False) selects
        # the create path, the second (True) confirms creation succeeded.
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is True
        assert results['state'] == 'present'

    def test_create_disabled_datacenter(self, *args):
        set_module_args(dict(
            state='disabled',
            password='admin',
            server='localhost',
            user='admin',
            name='foo'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is True
        # state='disabled' must surface as enabled=False / disabled=True.
        assert results['enabled'] is False
        assert results['disabled'] is True

    def test_create_enabled_datacenter(self, *args):
        set_module_args(dict(
            state='enabled',
            password='admin',
            server='localhost',
            user='admin',
            name='foo'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()
        assert results['changed'] is True
        # state='enabled' must surface as enabled=True / disabled=False.
        assert results['enabled'] is True
        assert results['disabled'] is False

    def test_idempotent_disable_datacenter(self, *args):
        set_module_args(dict(
            state='disabled',
            password='admin',
            server='localhost',
            user='admin',
            name='foo'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # The device already reports the datacenter as disabled, so the
        # manager should detect no difference and report changed=False.
        current = ApiParameters(params=load_fixture('load_gtm_datacenter_disabled.json'))
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(return_value=True)
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)

        results = mm.exec_module()
        assert results['changed'] is False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.