code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
**Example**
```esql
ROW date_string = "2022-05-06"
| EVAL date = DATE_PARSE("yyyy-MM-dd", date_string)
```
| date_string:keyword | date:date |
| --- | --- |
| 2022-05-06 | 2022-05-06T00:00:00.000Z | | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/esql/_snippets/functions/examples/date_parse.md |
# -*- coding: utf-8 -*-
"""Display help for a command in $STASH_ROOT/bin/ or a topic, or list all commands if no name is given.
"""
from __future__ import print_function
import argparse
import ast
import os
import sys
from stash.system.shcommon import _STASH_EXTENSION_BIN_PATH, _STASH_EXTENSION_MAN_PATH
# Python 2/3 compatibility: Python 3 removed raw_input, so alias it to input.
try:
    raw_input
except NameError:
    # py3
    raw_input = input

# StaSh injects the running shell instance into this script's globals.
_stash = globals()["_stash"]

# Result kinds produced by get_type().
TYPE_CMD = "command"
TYPE_PAGE = "page"
TYPE_NOTFOUND = "not found"
TYPE_LISTTOPICS = "list topics"

# Built-in command and man-page locations under $STASH_ROOT.
MAIN_BINPATH = os.path.join(os.environ["STASH_ROOT"], "bin")
MAIN_PAGEPATH = os.path.join(os.environ["STASH_ROOT"], "man")
# Search order: built-in paths first, then user extension paths.
BINPATHS = [MAIN_BINPATH, _STASH_EXTENSION_BIN_PATH]
PAGEPATHS = [MAIN_PAGEPATH, _STASH_EXTENSION_MAN_PATH]

# Ensure every search path exists so later os.listdir() calls cannot fail.
for p in BINPATHS + PAGEPATHS:
    if not os.path.exists(p):
        os.mkdir(p)
def all_commands():
    """Return the sorted names of every command script found in BINPATHS."""
    names = []
    for directory in BINPATHS:
        for entry in os.listdir(directory):
            # A command is a visible .py file (not a directory, not a dotfile).
            looks_like_cmd = entry.endswith(".py") and not entry.startswith(".")
            if looks_like_cmd and os.path.isfile(os.path.join(directory, entry)):
                names.append(entry[:-3])
    return sorted(names)
def get_type(search):
    """returns (type, path) for a given topic/command.

    type is one of TYPE_LISTTOPICS, TYPE_CMD, TYPE_PAGE or TYPE_NOTFOUND;
    path is the matching file path (or None when there is no file).
    A trailing "(N)" in *search* selects page number N of a multi-page topic.
    """
    # "topics" is a reserved pseudo-topic listing all help pages.
    if search == "topics":
        return (TYPE_LISTTOPICS, None)
    # Commands take priority over man pages of the same name.
    cmdpath = find_command(search)
    if cmdpath is not None:
        return (TYPE_CMD, cmdpath)
    # Parse an optional page number suffix, e.g. "git(2)" -> pn=2.
    if "(" in search and ")" in search:
        try:
            pn = int(search[search.index("(") + 1:search.index(")")])
        except:
            print(_stash.text_color("Invalid Pagenumber", "red"))
            sys.exit(1)
        search = search[:search.index("(")]
    else:
        pn = 1
    if "." in search:
        # FIXME: fix '.' in search should search only matching extensions
        # Example: 'man test.md' searches for 'test.md' instead of 'test'
        print(_stash.text_color("Searching for pages with '.' in the name is bugged and has been disabled.", "red"))
        sys.exit(1)
        # NOTE(review): everything below in this branch is unreachable after
        # sys.exit(1); kept for when the '.'-search above is re-enabled.
        to_search = search
        found = []
        for pp in PAGEPATHS:
            found += os.listdir(pp)
    else:
        # Compare against page names with their extensions stripped.
        to_search = search
        found = []
        for p in PAGEPATHS:
            found += [(fn[:fn.index(".")] if "." in fn else fn) for fn in os.listdir(p)]
    if to_search in found:
        # Collect (filename, directory) pairs for every page file.
        ppc = []
        for pp in PAGEPATHS:
            ppc += [(fn, pp) for fn in os.listdir(pp)]
        # Keep only files whose name is exactly "<topic>.<ext>".
        ffns = [(fn, pp) if fn.startswith(to_search + ".") else None for fn, pp in ppc]
        ffn = list(filter(None, ffns))
        if len(ffn) == 0:
            # isdir
            # No single file matched: the topic is a directory of numbered
            # pages, so look for "page_<pn>*" inside it.
            pname = "page_" + str(pn)
            for pp in PAGEPATHS:
                dirpath = os.path.join(pp, to_search)
                if not os.path.exists(dirpath):
                    continue
                for fn in os.listdir(dirpath):
                    if fn.startswith(pname):
                        fp = os.path.join(dirpath, fn)
                        if not os.path.exists(fp):
                            print(_stash.text_color("Page not found!", "red"))
                        return (TYPE_PAGE, fp)
            return (TYPE_NOTFOUND, None)
        # Use the first matching page file.
        path = os.path.join(ffn[0][1], ffn[0][0])
        return (TYPE_PAGE, path)
    else:
        return (TYPE_NOTFOUND, None)
def find_command(cmd):
    """Return the path of the script implementing *cmd*, or None if absent."""
    filename = cmd + ".py"
    for directory in BINPATHS:
        if os.path.exists(directory) and filename in os.listdir(directory):
            return os.path.join(directory, filename)
    return None
def get_docstring(filename):
    """Return the module-level docstring of a Python file.

    The file is parsed (not executed) with ast, so importing side effects
    are avoided.  Returns the string "UNKNOWN" when the file cannot be
    read or parsed; returns None when the file parses but has no docstring.
    """
    try:
        with open(filename) as f:
            tree = ast.parse(f.read(), os.path.basename(filename))
        return ast.get_docstring(tree)
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  OSError covers unreadable files; SyntaxError and
    # ValueError (incl. UnicodeDecodeError, null bytes) cover unparseable ones.
    except (OSError, SyntaxError, ValueError):
        return "UNKNOWN"
def get_summary(filename):
    """Return the first line of the file's docstring, or '' if it has none."""
    doc = get_docstring(filename)
    if not doc:
        return ''
    return doc.splitlines()[0]
def show_page(path):
    """shows the page at path.

    Dispatch on file extension: .txt pages are paged to the terminal,
    .url pages are opened in the web viewer (or followed locally for
    stash:// URLs), .html pages open in quicklook, anything else is
    treated as plain text.  Exits the script on missing or empty pages.
    """
    if not os.path.exists(path):
        print(_stash.text_color("Error: cannot find page!", "red"), )
        sys.exit(1)
    with open(path, "r") as fin:
        content = fin.read()
    # A page consisting only of newlines counts as empty.
    if len(content.replace("\n", "")) == 0:
        print(_stash.text_color("Error: help empty!", "red"))
        sys.exit(1)
    if path.endswith(".txt"):
        show_text(content)
    elif path.endswith(".url"):
        if content.startswith("stash://"):
            # local file
            # A stash:// URL points at a file under $STASH_ROOT; recurse to
            # display that file instead of opening a browser.
            path = os.path.join(os.getenv("STASH_ROOT"), content.replace("stash://", ""))
            show_page(path.replace("\n", ""))
            return
        print("Opening webviewer...")
        _stash("webviewer -n '{u}'".format(u=content.replace("\n", "")))
    elif path.endswith(".html"):
        print("Opening quicklook...")
        _stash("quicklook {p}".format(p=path))
    else:
        # Unknown extension: fall back to plain-text display.
        show_text(content)
def show_text(text):
    """Print *text* to the terminal, pausing after every 100 lines."""
    print(_stash.text_color("=" * 20, "yellow"))
    remaining = text.split("\n")
    # Page through the text in 100-line chunks, waiting for Return between
    # chunks, then print whatever is left (possibly nothing).
    while len(remaining) >= 100:
        print("\n".join(remaining[:100]))
        remaining = remaining[100:]
        raw_input(_stash.text_color("(Press Return to continue)", "yellow"))
        print("\n")
    print("\n".join(remaining))
def show_topics():
    """prints all available miscellaneous help topics."""
    print(_stash.text_color("Miscellaneous Topics:", "yellow"))
    for pagedir in PAGEPATHS:
        if not os.path.isdir(pagedir):
            continue
        for entry in os.listdir(pagedir):
            # Strip the file extension; directories without a dot print as-is.
            print(entry.split(".", 1)[0])
def main(args):
    """Entry point: show help for a command/topic, or list all commands.

    args: argv-style argument list (excluding the program name).
    """
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument("topic", nargs="?", help="the command/topic to get help for")
    ns = ap.parse_args(args)
    if not ns.topic:
        # No topic given: list every available command with its summary.
        cmds = all_commands()
        if len(cmds) > 100:
            # Large listings are confirmed interactively first.
            if raw_input("List all {} commands?".format(len(cmds))).strip().lower() not in ("y", "yes"):
                sys.exit(0)
        for cmd in cmds:
            print(_stash.text_bold('{:>11}: '.format(cmd)) + get_summary(find_command(cmd)))
        print("Type 'man topics' to see miscellaneous help topics")
        sys.exit(0)
    else:
        ft, path = get_type(ns.topic)
        if ft == TYPE_NOTFOUND:
            print(_stash.text_color("man: no help for '{}'".format(ns.topic), "red"))
            sys.exit(1)
        if ft == TYPE_LISTTOPICS:
            show_topics()
            sys.exit(0)
        elif ft == TYPE_CMD:
            # Command help is the command script's module docstring.
            try:
                docstring = get_docstring(path)
            except Exception as err:
                print(_stash.text_color("man: {}: {!s}".format(type(err).__name__, err), "red"), file=sys.stderr)
                sys.exit(1)
            if docstring:
                print("Docstring of command '{}':\n{}".format(ns.topic, docstring))
            else:
                print(_stash.text_color("man: command '{}' has no docstring".format(ns.topic), "red"))
            sys.exit(0)
        elif ft == TYPE_PAGE:
            show_page(path)
            sys.exit(0)


if __name__ == "__main__":
    main(sys.argv[1:])
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: panos_nat_rule
short_description: create a policy NAT rule
description: >
- Create a policy nat rule. Keep in mind that we can either end up configuring source NAT, destination NAT, or
both. Instead of splitting it into two we will make a fair attempt to determine which one the user wants.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPi U(https://pypi.org/project/pandevice/)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
rule_name:
description:
- name of the SNAT rule
required: true
source_zone:
description:
- list of source zones
required: true
destination_zone:
description:
- destination zone
required: true
source_ip:
description:
- list of source addresses
default: ["any"]
destination_ip:
description:
- list of destination addresses
default: ["any"]
service:
description:
- service
default: "any"
snat_type:
description:
- type of source translation
snat_address_type:
description:
- type of source translation. Supported values are I(interface-address)/I(translated-address).
default: 'translated-address'
snat_static_address:
description:
- Source NAT translated address. Used with Static-IP translation.
snat_dynamic_address:
description:
- Source NAT translated address. Used with Dynamic-IP and Dynamic-IP-and-Port.
snat_interface:
description:
- snat interface
snat_interface_address:
description:
- snat interface address
snat_bidirectional:
description:
- bidirectional flag
type: bool
default: 'no'
dnat_address:
description:
- dnat translated address
dnat_port:
description:
- dnat translated port
commit:
description:
- Commit configuration if changed.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Create a source and destination nat rule
- name: Create NAT SSH rule for 10.0.1.101
panos_nat_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_name: "Web SSH"
source_zone: ["external"]
destination_zone: "external"
source_ip: ["any"]
destination_ip: ["10.0.0.100"]
service: "service-tcp-221"
snat_type: "dynamic-ip-and-port"
snat_interface: "ethernet1/2"
dnat_address: "10.0.1.101"
dnat_port: "22"
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
# import pydevd
# pydevd.settrace('localhost', port=60374, stdoutToServer=True, stderrToServer=True)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
# The PAN SDKs are optional at import time: main() reports a clean failure
# via module.fail_json when HAS_LIB is False instead of crashing on import.
try:
    import pan.xapi
    from pan.xapi import PanXapiError
    import pandevice
    from pandevice import base
    from pandevice import firewall
    from pandevice import panorama
    from pandevice import objects
    from pandevice import policies
    import xmltodict
    import json
    HAS_LIB = True
except ImportError:
    HAS_LIB = False
def get_devicegroup(device, devicegroup):
    """Return the Panorama DeviceGroup named *devicegroup*, or False if absent."""
    for candidate in device.refresh_devices():
        is_group = isinstance(candidate, pandevice.panorama.DeviceGroup)
        if is_group and candidate.name == devicegroup:
            return candidate
    return False
def get_rulebase(device, devicegroup):
    """Return a refreshed NAT rulebase for *device*.

    A Firewall gets its local Rulebase; a Panorama gets the PreRulebase of
    *devicegroup*.  Returns False for any other device type.
    """
    # Build the rulebase
    if isinstance(device, pandevice.firewall.Firewall):
        rulebase = pandevice.policies.Rulebase()
        device.add(rulebase)
    elif isinstance(device, pandevice.panorama.Panorama):
        # On Panorama the NAT rules live in the device group's pre-rulebase.
        dg = panorama.DeviceGroup(devicegroup)
        device.add(dg)
        rulebase = policies.PreRulebase()
        dg.add(rulebase)
    else:
        return False
    # Populate the tree with the NAT rules currently on the device.
    policies.NatRule.refreshall(rulebase)
    return rulebase
def find_rule(rulebase, rule_name):
    """Look up *rule_name* in *rulebase*; return the rule object or False."""
    match = rulebase.find(rule_name)
    # Preserve the module's convention of returning False (not None) on a miss.
    return match if match else False
def create_nat_rule(**kwargs):
    """Build a policies.NatRule object from the module parameters.

    Returns the (not yet pushed) NatRule, or False when a dynamic-ip source
    translation is requested without any translated addresses.
    """
    nat_rule = policies.NatRule(
        name=kwargs['rule_name'],
        description=kwargs['description'],
        fromzone=kwargs['source_zone'],
        source=kwargs['source_ip'],
        tozone=kwargs['destination_zone'],
        destination=kwargs['destination_ip'],
        service=kwargs['service'],
        to_interface=kwargs['to_interface'],
        nat_type=kwargs['nat_type']
    )
    # Source translation: Static IP
    if kwargs['snat_type'] in ['static-ip'] and kwargs['snat_static_address']:
        nat_rule.source_translation_type = kwargs['snat_type']
        nat_rule.source_translation_static_translated_address = kwargs['snat_static_address']
        # Bi-directional flag set?
        if kwargs['snat_bidirectional']:
            nat_rule.source_translation_static_bi_directional = kwargs['snat_bidirectional']
    # Source translation: Dynamic IP and port
    elif kwargs['snat_type'] in ['dynamic-ip-and-port']:
        nat_rule.source_translation_type = kwargs['snat_type']
        nat_rule.source_translation_address_type = kwargs['snat_address_type']
        # Interface address?
        if kwargs['snat_interface']:
            nat_rule.source_translation_interface = kwargs['snat_interface']
            # Interface IP?
            if kwargs['snat_interface_address']:
                nat_rule.source_translation_ip_address = kwargs['snat_interface_address']
        else:
            nat_rule.source_translation_translated_addresses = kwargs['snat_dynamic_address']
    # Source translation: Dynamic IP
    elif kwargs['snat_type'] in ['dynamic-ip']:
        if kwargs['snat_dynamic_address']:
            nat_rule.source_translation_type = kwargs['snat_type']
            nat_rule.source_translation_translated_addresses = kwargs['snat_dynamic_address']
        else:
            # dynamic-ip requires translated addresses; signal failure.
            return False
    # Destination translation
    # NOTE(review): dnat_port is only applied when dnat_address is also set.
    if kwargs['dnat_address']:
        nat_rule.destination_translated_address = kwargs['dnat_address']
        if kwargs['dnat_port']:
            nat_rule.destination_translated_port = kwargs['dnat_port']
    # Any tags?
    if 'tag_name' in kwargs:
        nat_rule.tag = kwargs['tag_name']
    return nat_rule
def add_rule(rulebase, nat_rule):
    """Attach *nat_rule* to *rulebase* and create it on the device.

    Returns True on success, False when no rulebase was supplied.
    """
    if not rulebase:
        return False
    rulebase.add(nat_rule)
    nat_rule.create()
    return True
def update_rule(rulebase, nat_rule):
    """Attach *nat_rule* to *rulebase* and apply it over the existing rule.

    Returns True on success, False when no rulebase was supplied.
    """
    if not rulebase:
        return False
    rulebase.add(nat_rule)
    nat_rule.apply()
    return True
def main():
    """Module entry point: validate parameters and run the requested operation."""
    argument_spec = dict(
        ip_address=dict(required=True),
        username=dict(default='admin'),
        password=dict(required=True, no_log=True),
        api_key=dict(no_log=True),
        operation=dict(required=True, choices=['add', 'update', 'delete', 'find']),
        rule_name=dict(required=True),
        description=dict(),
        tag_name=dict(),
        source_zone=dict(type='list'),
        source_ip=dict(type='list', default=['any']),
        destination_zone=dict(),
        destination_ip=dict(type='list', default=['any']),
        service=dict(default='any'),
        to_interface=dict(default='any'),
        snat_type=dict(choices=['static-ip', 'dynamic-ip-and-port', 'dynamic-ip']),
        snat_address_type=dict(choices=['interface-address', 'translated-address'], default='interface-address'),
        snat_static_address=dict(),
        snat_dynamic_address=dict(type='list'),
        snat_interface=dict(),
        snat_interface_address=dict(),
        snat_bidirectional=dict(type='bool', default=False),
        dnat_address=dict(),
        dnat_port=dict(),
        devicegroup=dict(),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_one_of=[['api_key', 'password']])
    if not HAS_LIB:
        module.fail_json(msg='Missing required libraries.')
    # Unpack module parameters into locals for readability below.
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    operation = module.params['operation']
    rule_name = module.params['rule_name']
    description = module.params['description']
    tag_name = module.params['tag_name']
    source_zone = module.params['source_zone']
    source_ip = module.params['source_ip']
    destination_zone = module.params['destination_zone']
    destination_ip = module.params['destination_ip']
    service = module.params['service']
    to_interface = module.params['to_interface']
    nat_type = 'ipv4'  # this module only creates IPv4 NAT rules
    snat_type = module.params['snat_type']
    snat_address_type = module.params['snat_address_type']
    snat_static_address = module.params['snat_static_address']
    snat_dynamic_address = module.params['snat_dynamic_address']
    snat_interface = module.params['snat_interface']
    snat_interface_address = module.params['snat_interface_address']
    snat_bidirectional = module.params['snat_bidirectional']
    dnat_address = module.params['dnat_address']
    dnat_port = module.params['dnat_port']
    devicegroup = module.params['devicegroup']
    commit = module.params['commit']
    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
    # If Panorama, validate the devicegroup
    dev_group = None
    if devicegroup and isinstance(device, panorama.Panorama):
        dev_group = get_devicegroup(device, devicegroup)
        if dev_group:
            device.add(dev_group)
        else:
            module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
    # Get the rulebase
    rulebase = get_rulebase(device, dev_group)
    # Which action shall we take on the object?
    if operation == "find":
        # Search for the rule
        match = find_rule(rulebase, rule_name)
        # If found, format and return the result
        if match:
            match_dict = xmltodict.parse(match.element_str())
            module.exit_json(
                stdout_lines=json.dumps(match_dict, indent=2),
                msg='Rule matched'
            )
        else:
            module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
    elif operation == "delete":
        # Search for the object
        match = find_rule(rulebase, rule_name)
        # If found, delete it
        if match:
            try:
                match.delete()
                if commit:
                    device.commit(sync=True)
            except PanXapiError as exc:
                module.fail_json(msg=to_native(exc))
            module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted.' % rule_name)
        else:
            module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
    elif operation == "add":
        # Look for required parameters
        if source_zone and destination_zone and nat_type:
            pass
        else:
            module.fail_json(msg='Missing parameter. Required: source_zone, destination_zone, nat_type')
        # Search for the rule. Fail if found.
        match = find_rule(rulebase, rule_name)
        if match:
            module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name)
        else:
            try:
                new_rule = create_nat_rule(
                    rule_name=rule_name,
                    description=description,
                    tag_name=tag_name,
                    source_zone=source_zone,
                    destination_zone=destination_zone,
                    source_ip=source_ip,
                    destination_ip=destination_ip,
                    service=service,
                    to_interface=to_interface,
                    nat_type=nat_type,
                    snat_type=snat_type,
                    snat_address_type=snat_address_type,
                    snat_static_address=snat_static_address,
                    snat_dynamic_address=snat_dynamic_address,
                    snat_interface=snat_interface,
                    snat_interface_address=snat_interface_address,
                    snat_bidirectional=snat_bidirectional,
                    dnat_address=dnat_address,
                    dnat_port=dnat_port
                )
                changed = add_rule(rulebase, new_rule)
                if changed and commit:
                    device.commit(sync=True)
            except PanXapiError as exc:
                module.fail_json(msg=to_native(exc))
            module.exit_json(changed=changed, msg='Rule \'%s\' successfully added.' % rule_name)
    elif operation == 'update':
        # Search for the rule. Update if found.
        match = find_rule(rulebase, rule_name)
        if match:
            try:
                new_rule = create_nat_rule(
                    rule_name=rule_name,
                    description=description,
                    tag_name=tag_name,
                    source_zone=source_zone,
                    destination_zone=destination_zone,
                    source_ip=source_ip,
                    destination_ip=destination_ip,
                    service=service,
                    to_interface=to_interface,
                    nat_type=nat_type,
                    snat_type=snat_type,
                    snat_address_type=snat_address_type,
                    snat_static_address=snat_static_address,
                    snat_dynamic_address=snat_dynamic_address,
                    snat_interface=snat_interface,
                    snat_interface_address=snat_interface_address,
                    snat_bidirectional=snat_bidirectional,
                    dnat_address=dnat_address,
                    dnat_port=dnat_port
                )
                changed = update_rule(rulebase, new_rule)
                if changed and commit:
                    device.commit(sync=True)
            except PanXapiError as exc:
                module.fail_json(msg=to_native(exc))
            module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated.' % rule_name)
        else:
            module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name)


if __name__ == '__main__':
    main()
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testhelpers provides utilities for testing the Prometheus HTTP API.
// This file contains helper functions for creating test API instances and managing test lifecycles.
package testhelpers
import (
"context"
"log/slog"
"net/http"
"net/url"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/util/notifications"
)
// RulesRetriever provides a list of active rules and alerts.
type RulesRetriever interface {
	// RuleGroups returns all currently loaded rule groups.
	RuleGroups() []*rules.Group
	// AlertingRules returns all currently loaded alerting rules.
	AlertingRules() []*rules.AlertingRule
}
// TargetRetriever provides the list of active/dropped targets to scrape or not.
type TargetRetriever interface {
	// TargetsActive returns active targets keyed by scrape pool name.
	TargetsActive() map[string][]*scrape.Target
	// TargetsDropped returns dropped targets keyed by scrape pool name.
	TargetsDropped() map[string][]*scrape.Target
	// TargetsDroppedCounts returns the number of dropped targets per scrape pool.
	TargetsDroppedCounts() map[string]int
	// ScrapePoolConfig returns the scrape configuration for the named pool.
	ScrapePoolConfig(string) (*config.ScrapeConfig, error)
}
// ScrapePoolsRetriever provides the list of all scrape pools.
type ScrapePoolsRetriever interface {
	// ScrapePools returns the names of all configured scrape pools.
	ScrapePools() []string
}
// AlertmanagerRetriever provides a list of all/dropped AlertManager URLs.
type AlertmanagerRetriever interface {
	// Alertmanagers returns the URLs of the active Alertmanagers.
	Alertmanagers() []*url.URL
	// DroppedAlertmanagers returns the URLs of the dropped Alertmanagers.
	DroppedAlertmanagers() []*url.URL
}
// TSDBAdminStats provides TSDB admin statistics.
type TSDBAdminStats interface {
	// CleanTombstones cleans up deletion tombstones from the storage.
	CleanTombstones() error
	// Delete removes samples within [mint, maxt] matching the given matchers.
	Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error
	// Snapshot writes a snapshot of the storage to dir; withHead also
	// includes data from the head block.
	Snapshot(dir string, withHead bool) error
	// Stats returns storage statistics, aggregated by statsByLabelName and
	// limited to at most limit entries.
	Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
	// WALReplayStatus reports write-ahead-log replay progress.
	WALReplayStatus() (tsdb.WALReplayStatus, error)
	// BlockMetas returns metadata for the persisted blocks.
	BlockMetas() ([]tsdb.BlockMeta, error)
}
// APIConfig holds configuration for creating a test API instance.
// Any nil field is replaced with an empty/default implementation by PrepareAPI,
// so tests only need to populate what they exercise.
type APIConfig struct {
	// Core dependencies.
	QueryEngine       *LazyLoader[promql.QueryEngine]
	Queryable         *LazyLoader[storage.SampleAndChunkQueryable]
	ExemplarQueryable *LazyLoader[storage.ExemplarQueryable]
	// Retrievers.
	RulesRetriever        *LazyLoader[RulesRetriever]
	TargetRetriever       *LazyLoader[TargetRetriever]
	ScrapePoolsRetriever  *LazyLoader[ScrapePoolsRetriever]
	AlertmanagerRetriever *LazyLoader[AlertmanagerRetriever]
	// Admin.
	TSDBAdmin *LazyLoader[TSDBAdminStats]
	// DBDir is the TSDB directory; PrepareAPI defaults it to t.TempDir().
	DBDir string
	// Optional overrides.
	Config   func() config.Config
	FlagsMap map[string]string
	// Now is an optional clock override.
	// NOTE(review): Now is not consumed by PrepareAPI in this file — confirm
	// where (or whether) callers use it.
	Now func() time.Time
}
// APIWrapper wraps the API and provides a handler for testing.
type APIWrapper struct {
	// Handler serves the wrapped API's HTTP routes.
	Handler http.Handler
}
// PrometheusVersion contains build information about Prometheus.
// Field names follow the JSON tags used in API responses.
type PrometheusVersion struct {
	Version   string `json:"version"`
	Revision  string `json:"revision"`
	Branch    string `json:"branch"`
	BuildUser string `json:"buildUser"`
	BuildDate string `json:"buildDate"`
	GoVersion string `json:"goVersion"`
}
// RuntimeInfo contains runtime information about Prometheus.
// Field names follow the JSON tags used in API responses.
type RuntimeInfo struct {
	StartTime           time.Time `json:"startTime"`
	CWD                 string    `json:"CWD"`
	Hostname            string    `json:"hostname"`
	ServerTime          time.Time `json:"serverTime"`
	ReloadConfigSuccess bool      `json:"reloadConfigSuccess"`
	LastConfigTime      time.Time `json:"lastConfigTime"`
	CorruptionCount     int64     `json:"corruptionCount"`
	GoroutineCount      int       `json:"goroutineCount"`
	GOMAXPROCS          int       `json:"GOMAXPROCS"`
	GOMEMLIMIT          int64     `json:"GOMEMLIMIT"`
	GOGC                string    `json:"GOGC"`
	GODEBUG             string    `json:"GODEBUG"`
	StorageRetention    string    `json:"storageRetention"`
}
// NewAPIParams holds all the parameters needed to create a v1.API instance.
// The context-taking retriever fields allow a fresh retriever per request.
type NewAPIParams struct {
	QueryEngine           promql.QueryEngine
	Queryable             storage.SampleAndChunkQueryable
	ExemplarQueryable     storage.ExemplarQueryable
	ScrapePoolsRetriever  func(context.Context) ScrapePoolsRetriever
	TargetRetriever       func(context.Context) TargetRetriever
	AlertmanagerRetriever func(context.Context) AlertmanagerRetriever
	ConfigFunc            func() config.Config
	FlagsMap              map[string]string
	// ReadyFunc wraps handlers with a readiness check; tests use the identity.
	ReadyFunc func(http.HandlerFunc) http.HandlerFunc
	TSDBAdmin TSDBAdminStats
	DBDir     string
	Logger    *slog.Logger
	RulesRetriever      func(context.Context) RulesRetriever
	RuntimeInfoFunc     func() (RuntimeInfo, error)
	BuildInfo           *PrometheusVersion
	NotificationsGetter func() []notifications.Notification
	NotificationsSub    func() (<-chan notifications.Notification, func(), bool)
	Gatherer            prometheus.Gatherer
	Registerer          prometheus.Registerer
}
// PrepareAPI creates a NewAPIParams with sensible defaults for testing.
// Every nil field of cfg is filled with an empty/deterministic stand-in, so a
// test only needs to set the collaborators it actually exercises.
func PrepareAPI(t *testing.T, cfg APIConfig) NewAPIParams {
	t.Helper()
	// Create defaults for unset lazy loaders.
	if cfg.QueryEngine == nil {
		cfg.QueryEngine = NewLazyLoader(func() promql.QueryEngine {
			return promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{
				Logger:                   nil,
				Reg:                      nil,
				MaxSamples:               10000,
				Timeout:                  100 * time.Second,
				NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
				EnableAtModifier:         true,
				EnableNegativeOffset:     true,
				EnablePerStepStats:       true,
			})
		})
	}
	if cfg.Queryable == nil {
		cfg.Queryable = NewLazyLoader(NewEmptyQueryable)
	}
	if cfg.ExemplarQueryable == nil {
		cfg.ExemplarQueryable = NewLazyLoader(NewEmptyExemplarQueryable)
	}
	if cfg.RulesRetriever == nil {
		cfg.RulesRetriever = NewLazyLoader(func() RulesRetriever {
			return NewEmptyRulesRetriever()
		})
	}
	if cfg.TargetRetriever == nil {
		cfg.TargetRetriever = NewLazyLoader(func() TargetRetriever {
			return NewEmptyTargetRetriever()
		})
	}
	if cfg.ScrapePoolsRetriever == nil {
		cfg.ScrapePoolsRetriever = NewLazyLoader(func() ScrapePoolsRetriever {
			return NewEmptyScrapePoolsRetriever()
		})
	}
	if cfg.AlertmanagerRetriever == nil {
		cfg.AlertmanagerRetriever = NewLazyLoader(func() AlertmanagerRetriever {
			return NewEmptyAlertmanagerRetriever()
		})
	}
	if cfg.TSDBAdmin == nil {
		cfg.TSDBAdmin = NewLazyLoader(func() TSDBAdminStats {
			return NewEmptyTSDBAdminStats()
		})
	}
	if cfg.Config == nil {
		cfg.Config = func() config.Config { return config.Config{} }
	}
	if cfg.FlagsMap == nil {
		cfg.FlagsMap = map[string]string{}
	}
	if cfg.DBDir == "" {
		cfg.DBDir = t.TempDir()
	}
	// Resolve the lazy loaders; context-taking fields wrap the loaders so the
	// retrievers are only constructed when a request first needs them.
	return NewAPIParams{
		QueryEngine:           cfg.QueryEngine.Get(),
		Queryable:             cfg.Queryable.Get(),
		ExemplarQueryable:     cfg.ExemplarQueryable.Get(),
		ScrapePoolsRetriever:  func(context.Context) ScrapePoolsRetriever { return cfg.ScrapePoolsRetriever.Get() },
		TargetRetriever:       func(context.Context) TargetRetriever { return cfg.TargetRetriever.Get() },
		AlertmanagerRetriever: func(context.Context) AlertmanagerRetriever { return cfg.AlertmanagerRetriever.Get() },
		ConfigFunc:            cfg.Config,
		FlagsMap:              cfg.FlagsMap,
		// Readiness is a no-op in tests.
		ReadyFunc:       func(f http.HandlerFunc) http.HandlerFunc { return f },
		TSDBAdmin:       cfg.TSDBAdmin.Get(),
		DBDir:           cfg.DBDir,
		Logger:          promslog.NewNopLogger(),
		RulesRetriever:  func(context.Context) RulesRetriever { return cfg.RulesRetriever.Get() },
		RuntimeInfoFunc: func() (RuntimeInfo, error) { return RuntimeInfo{}, nil },
		BuildInfo:       &PrometheusVersion{},
		NotificationsGetter: func() []notifications.Notification { return nil },
		NotificationsSub:    func() (<-chan notifications.Notification, func(), bool) { return nil, func() {}, false },
		Gatherer:            prometheus.NewRegistry(),
		Registerer:          prometheus.NewRegistry(),
	}
}
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v0alpha1.timeline-demo.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"query": {
"kind": "datasource",
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-12": {
"kind": "Panel",
"spec": {
"id": 12,
"title": "State changes with nulls",
"description": "Should show gaps",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "true,false,true,true,true,true,false,false"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "false,true,false,true,true,false,false,false,true,true"
}
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "true,false,null,true,true"
}
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "false,null,null,false,true,true"
}
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "state-timeline",
"spec": {
"pluginVersion": "",
"options": {
"alignValue": "center",
"colWidth": 1,
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"mode": "changes",
"rowHeight": 0.98,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"mappings": [
{
"type": "special",
"options": {
"match": "true",
"result": {
"text": "ON",
"color": "semi-dark-green",
"index": 0
}
}
},
{
"type": "special",
"options": {
"match": "false",
"result": {
"text": "OFF",
"color": "red",
"index": 1
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"fillOpacity": 80,
"lineWidth": 1,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "State changes with boolean values",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"scenarioId": "csv_metric_values",
"stringInput": "true,false,true,true,true,true,false,false"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "false,true,false,true,true,false,false,false,true,true"
}
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "true,false,true,true"
}
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "false,true,false,true,true"
}
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "state-timeline",
"spec": {
"pluginVersion": "",
"options": {
"alignValue": "center",
"colWidth": 1,
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"mode": "changes",
"rowHeight": 0.98,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"mappings": [
{
"type": "special",
"options": {
"match": "true",
"result": {
"text": "ON",
"color": "semi-dark-green",
"index": 0
}
}
},
{
"type": "special",
"options": {
"match": "false",
"result": {
"text": "OFF",
"color": "red",
"index": 1
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 1,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Status map",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"max": 30,
"min": -10,
"noise": 2,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "random_walk",
"seriesCount": 4,
"spread": 15,
"startValue": 5,
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "status-history",
"spec": {
"pluginVersion": "7.5.0-pre",
"options": {
"alignValue": "center",
"colWidth": 0.96,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"rowHeight": 0.98,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "asc"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"decimals": 0,
"min": -10,
"max": 30,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd"
},
"custom": {
"fillOpacity": 96,
"lineWidth": 0
}
},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "State changes strings",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "SensorA",
"scenarioId": "csv_metric_values",
"stringInput": "LOW,HIGH,NORMAL,NORMAL,NORMAL,LOW,LOW,NORMAL,HIGH,CRITICAL"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "SensorB",
"scenarioId": "csv_metric_values",
"stringInput": "NORMAL,LOW,LOW,CRITICAL,CRITICAL,LOW,LOW,NORMAL,HIGH,CRITICAL"
}
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "SensorA",
"scenarioId": "csv_metric_values",
"stringInput": "NORMAL,NORMAL,NORMAL,NORMAL,CRITICAL,LOW,NORMAL,NORMAL,NORMAL,LOW"
}
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "state-timeline",
"spec": {
"pluginVersion": "7.5.0-pre",
"options": {
"alignValue": "center",
"colWidth": 0.9,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.98,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"mappings": [
{
"type": "value",
"options": {
"CRITICAL": {
"color": "red",
"index": 3
},
"HIGH": {
"color": "orange",
"index": 2
},
"LOW": {
"color": "blue",
"index": 0
},
"NORMAL": {
"color": "green",
"index": 1
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"lineWidth": 1,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 24,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 8,
"width": 13,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 13,
"y": 8,
"width": 11,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-12"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 19,
"width": 24,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"graph-ng",
"demo"
],
"timeSettings": {
"timezone": "utc",
"from": "now-1h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Timeline Demo",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-timeline/v0alpha1.timeline-demo.v42.v2alpha1.json |
# -*- coding: utf-8 -*-
# Python 2 module: imports EVE "dogma effects" YAML data into a SQL table.
import sys
import os
# NOTE(review): Python 2-only hack forcing UTF-8 as the default string
# encoding; reload() of sys and sys.setdefaultencoding do not exist on
# Python 3, so this module cannot run unchanged there.
reload(sys)
sys.setdefaultencoding("utf-8")
from sqlalchemy import Table
from yaml import load,dump
# Prefer the C-accelerated YAML loader when libyaml is available; fall back
# to the pure-Python SafeLoader otherwise (much slower on large dumps).
try:
    from yaml import CSafeLoader as SafeLoader
    print "Using CSafeLoader"
except ImportError:
    from yaml import SafeLoader
    print "Using Python SafeLoader"
# Maps the YAML 'distribution' strings onto their numeric DB codes.
distribution={'twosome':1,'bubble':2}
# NOTE(review): left empty, so effectcategory.get(...) in importyaml() always
# yields None -- confirm whether a category mapping was meant to go here.
effectcategory={}
def importyaml(connection,metadata,sourcePath,language='en'):
    """Load fsd/dogmaEffects.yaml from sourcePath and insert every effect
    into the dgmEffects table inside a single transaction.

    :param connection: SQLAlchemy connection used for the inserts.
    :param metadata: SQLAlchemy MetaData that describes dgmEffects.
    :param sourcePath: root directory of the data dump (contains fsd/).
    :param language: language key for the localised *ID lookup fields.
    """
    # print() with a single argument behaves identically on Python 2 and 3,
    # unlike the original bare print statements.
    print("Importing dogma effects")
    dgmEffects = Table('dgmEffects',metadata)
    print("opening Yaml")
    trans = connection.begin()
    # Fix: roll the transaction back on any failure instead of leaving it
    # dangling; re-raise so callers still see the original error.
    try:
        with open(os.path.join(sourcePath,'fsd','dogmaEffects.yaml'),'r') as yamlstream:
            print("importing")
            dogmaEffects=load(yamlstream,Loader=SafeLoader)
            print("Yaml Processed into memory")
            for dogmaEffectsid in dogmaEffects:
                effect=dogmaEffects[dogmaEffectsid]
                # Required fields are hard-indexed (KeyError aborts the
                # import); optional ones use .get() and insert NULL.
                connection.execute(dgmEffects.insert(),
                                   effectID=dogmaEffectsid,
                                   effectName=effect.get('effectName'),
                                   effectCategory=effectcategory.get(effect['effectCategory']),
                                   description=effect.get('descriptionID',{}).get(language),
                                   guid=effect.get('guid'),
                                   iconID=effect.get('iconID'),
                                   isOffensive=effect['isOffensive'],
                                   isAssistance=effect['isAssistance'],
                                   durationAttributeID=effect.get('durationAttributeID'),
                                   trackingSpeedAttributeID=effect.get('trackingSpeedAttributeID'),
                                   dischargeAttributeID=effect.get('dischargeAttributeID'),
                                   rangeAttributeID=effect.get('rangeAttributeID'),
                                   falloffAttributeID=effect.get('falloffAttributeID'),
                                   disallowAutoRepeat=effect.get('disallowAutoRepeat'),
                                   published=effect.get('published'),
                                   displayName=effect.get('displayNameID',{}).get(language),
                                   isWarpSafe=effect.get('isWarpSafe'),
                                   rangeChance=effect.get('rangeChance'),
                                   electronicChance=effect.get('electronicChance'),
                                   propulsionChance=effect.get('propulsionChance'),
                                   distribution=distribution.get(effect.get('distribution')),
                                   sfxName=effect.get('sfxName'),
                                   npcUsageChanceAttributeID=effect.get('npcUsageChanceAttributeID'),
                                   npcActivationChanceAttributeID=effect.get('npcActivationChanceAttributeID'),
                                   fittingUsageChanceAttributeID=effect.get('fittingUsageChanceAttributeID'),
                                   modifierInfo=dump(effect.get('modifierInfo'))
                                   )
        trans.commit()
    except BaseException:
        trans.rollback()
        raise
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Packaging script for pyaib. Prefer setuptools, but fall back to the
# stdlib distutils installer when setuptools is not available.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Pull version out of the module so the package version has a single source
# of truth (pyaib.__version__).
from pyaib import __version__

setup(name='pyaib',
      version=__version__,
      packages=['pyaib', 'pyaib.dbd', 'pyaib.util'],
      url='http://github.com/facebook/pyaib',
      license='Apache 2.0',
      author='Jason Fried, Facebook',
      author_email='fried@fb.com',
      description='Python Framework for writing IRC Bots using gevent',
      classifiers=[
          'License :: OSI Approved :: Apache Software License',
          'Topic :: Communications :: Chat :: Internet Relay Chat',
          'Programming Language :: Python :: 2.7',
          'Intended Audience :: Developers',
          'Development Status :: 5 - Production/Stable',
      ],
      # Runtime dependencies; gevent provides the cooperative concurrency
      # model the framework is built around.
      install_requires=[
          'pyOpenSSL >= 0.12',
          'gevent >= 1.0',
          'PyYAML >= 3.09',
      ])
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.tests.server.netty
import io.ktor.http.*
import io.ktor.server.engine.*
import io.ktor.server.netty.*
import io.ktor.server.response.*
import io.ktor.server.test.base.*
import io.netty.channel.*
import kotlin.test.*
/**
 * Base test verifying that a user-supplied Netty channel pipeline
 * configuration is actually invoked while the engine serves requests.
 * Subclasses install a handler (via [EngineTestBase.configure]) that
 * increments [counter] on every inbound read.
 */
abstract class NettyCustomChannelTest<TEngine : ApplicationEngine, TConfiguration : ApplicationEngine.Configuration>(
    hostFactory: ApplicationEngineFactory<TEngine, TConfiguration>
) : EngineTestBase<TEngine, TConfiguration>(hostFactory) {

    // Incremented by the custom handler installed by subclasses; non-zero
    // after a request proves the handler ran.
    // NOTE(review): written from Netty IO threads and read from the test
    // thread without synchronization -- tolerated for this test, confirm
    // if it ever turns flaky.
    var counter = 0

    @Test
    fun testCustomChannelHandlerInvoked() = runTest {
        createAndStartServer {
            handle {
                call.respondText("Hello")
            }
        }

        withUrl("/") {
            assertEquals(HttpStatusCode.OK.value, status.value)
            // The custom handler must have observed at least one read.
            assertNotEquals(0, counter)
        }
    }
}
/**
 * Concrete test for the Netty engine: installs a counting inbound handler
 * through [NettyApplicationEngine.Configuration.channelPipelineConfig].
 */
class NettyCustomChannelPipelineConfigurationTest :
    NettyCustomChannelTest<NettyApplicationEngine, NettyApplicationEngine.Configuration>(Netty) {

    override fun configure(configuration: NettyApplicationEngine.Configuration) {
        configuration.shareWorkGroup = true
        configuration.channelPipelineConfig = {
            // Count every inbound read, then let the event continue down
            // the pipeline untouched.
            val countingHandler = object : ChannelInboundHandlerAdapter() {
                override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
                    counter += 1
                    super.channelRead(ctx, msg)
                }
            }
            addLast("customHandler", countingHandler)
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* This package is based on the work done by Keiron Liddle, Aftex Software
* <keiron@aftexsw.com> to whom the Ant project is very grateful for his
* great code.
*/
package org.apache.hadoop.io.compress.bzip2;
/**
* A simple class the hold and calculate the CRC for sanity checking of the
* data.
*
*/
final class CRC {
static final int crc32Table[] = { 0x00000000, 0x04c11db7, 0x09823b6e,
0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64,
0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2,
0x52568b75, 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, 0x9823b6e0,
0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b,
0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef, 0xb7a96036,
0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c,
0xc3f706fb, 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f,
0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a,
0xec7dd02d, 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, 0x128e9dcf,
0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4,
0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069,
0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063,
0x495a2dd4, 0x44190b0d, 0x40d816ba, 0xaca5c697, 0xa864db20,
0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25,
0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7,
0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c,
0xfa325055, 0xfef34de2, 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31,
0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632,
0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4,
0x51435d53, 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, 0x0315d626,
0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d,
0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60,
0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a,
0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9,
0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc,
0xa379dd7b, 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, 0x5d8a9099,
0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f,
0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35,
0x065e2082, 0x0b1d065b, 0x0fdc1bec, 0x3793a651, 0x3352bbe6,
0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3,
0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1,
0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa,
0xf9278673, 0xfde69bc4, 0x89b8fd09, 0x8d79e0be, 0x803ac667,
0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d,
0xb8757bda, 0xb5365d03, 0xb1f740b4 };
CRC() {
initialiseCRC();
}
void initialiseCRC() {
globalCrc = 0xffffffff;
}
int getFinalCRC() {
return ~globalCrc;
}
int getGlobalCRC() {
return globalCrc;
}
void setGlobalCRC(int newCrc) {
globalCrc = newCrc;
}
void updateCRC(int inCh) {
int temp = (globalCrc >> 24) ^ inCh;
if (temp < 0) {
temp = 256 + temp;
}
globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp];
}
void updateCRC(int inCh, int repeat) {
int globalCrcShadow = this.globalCrc;
while (repeat-- > 0) {
int temp = (globalCrcShadow >> 24) ^ inCh;
globalCrcShadow = (globalCrcShadow << 8)
^ crc32Table[(temp >= 0) ? temp : (temp + 256)];
}
this.globalCrc = globalCrcShadow;
}
int globalCrc;
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CRC.java |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import shutil
import snapcraft
import snapcraft.sources
logger = logging.getLogger(__name__)
class TarContentPlugin(snapcraft.BasePlugin):
    """Deprecated plugin that unpacks a tarball into the snap install tree.

    Superseded by the 'copy' plugin; kept only for backward compatibility.
    """

    @classmethod
    def schema(cls):
        """Return the JSON schema describing this plugin's YAML options."""
        schema = {
            'properties': {
                'source': {'type': 'string'},
                'destination': {'type': 'string'},
            },
            'required': ['source'],
        }
        # Snapcraft re-runs the pull step when 'source' changes, and the
        # build step when 'destination' changes.
        schema['pull-properties'] = ['source']
        schema['build-properties'] = ['destination']
        return schema

    def __init__(self, name, options, project):
        super().__init__(name, options, project)
        logger.warning("DEPRECATED: The 'tar-content' plugin's functionality "
                       "has been replaced by the 'copy' plugin, and it will "
                       "soon be removed.")
        destination = self.options.destination
        # Absolute destinations would escape the snap's install tree.
        if destination and os.path.isabs(destination):
            raise ValueError('path {!r} must be relative'.format(destination))

    def enable_cross_compilation(self):
        """Nothing special is needed to cross-compile tar content."""
        pass

    def pull(self):
        """Fetch and unpack the tarball into the part's source dir."""
        source = snapcraft.sources.Tar(self.options.source, self.sourcedir)
        source.pull()

    def build(self):
        """Copy the unpacked content into the (optional) destination dir."""
        super().build()
        target = self.installdir
        if self.options.destination:
            target = os.path.join(self.installdir, self.options.destination)
        # Replace any stale copy from a previous build before copying.
        if os.path.exists(target):
            shutil.rmtree(target)
        shutil.copytree(self.builddir, target)
export async function foo() {
return 'data';
} | javascript | github | https://github.com/vercel/next.js | crates/next-custom-transforms/tests/errors/server-actions/server-graph/16/output.js |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import json
import os
import shutil
import tempfile
import uuid
from os.path import dirname, join
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_celeryext import FlaskCeleryExt
from flask.cli import ScriptInfo
from flask_oauthlib.client import OAuth as FlaskOAuth
from invenio_accounts import InvenioAccounts
from invenio_accounts.models import User
from invenio_communities import InvenioCommunities
from invenio_db import db as db_
from invenio_db import InvenioDB
from invenio_deposit import InvenioDeposit
from invenio_files_rest import InvenioFilesREST
from invenio_files_rest.models import Bucket, Location, ObjectVersion
from invenio_jsonschemas.ext import InvenioJSONSchemas
from invenio_oauthclient import InvenioOAuthClient
from invenio_pidstore import InvenioPIDStore
from invenio_pidstore.fetchers import FetchedPID
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
RecordIdentifier
from invenio_pidstore.resolver import Resolver
from invenio_records import InvenioRecords
from invenio_records.api import Record
from invenio_sipstore.ext import InvenioSIPStore
from six import BytesIO
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_migrator.ext import InvenioMigrator
from invenio_migrator.records import RecordDump
"""Pytest configuration."""
@pytest.yield_fixture(scope='session', autouse=True)
def app(request):
    """Flask application fixture.

    Session-scoped and autouse: every test in the suite runs inside this
    application's context.
    """
    app_ = Flask('testapp')
    app_.config.update(
        # Run Celery tasks eagerly and in-memory so tests need no broker.
        CELERY_ALWAYS_EAGER=True,
        CELERY_RESULT_BACKEND="cache",
        CELERY_CACHE_BACKEND="memory",
        CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
        TESTING=True,
        SQLALCHEMY_TRACK_MODIFICATIONS=True,
        # Default to an in-memory SQLite DB unless overridden via env var.
        SQLALCHEMY_DATABASE_URI=os.environ.get(
            'SQLALCHEMY_DATABASE_URI',
            'sqlite:///:memory:'),
        SECURITY_PASSWORD_SALT='TEST',
        SECRET_KEY='TEST',
    )
    # Initialise the Flask/Invenio extensions under test.
    # NOTE(review): initialisation order appears deliberate (DB before the
    # record/deposit extensions, OAuth before OAuthClient) -- keep as-is.
    FlaskCeleryExt(app_)
    InvenioDB(app_)
    InvenioRecords(app_)
    InvenioDeposit(app_)
    InvenioJSONSchemas(app_)
    InvenioAccounts(app_)
    InvenioCommunities(app_)
    InvenioPIDStore(app_)
    InvenioSIPStore(app_)
    Babel(app_)
    InvenioFilesREST(app_)
    InvenioMigrator(app_)
    FlaskOAuth(app_)
    InvenioOAuthClient(app_)
    with app_.app_context():
        yield app_
@pytest.fixture
def script_info(app):
    """Get ScriptInfo object for testing CLI."""
    def _create_app(info):
        return app
    return ScriptInfo(create_app=_create_app)
@pytest.yield_fixture()
def db(app):
    """Provide a fresh database, dropped again after the test."""
    url = str(db_.engine.url)
    if not database_exists(url):
        create_database(url)
    db_.create_all()
    yield db_
    # Teardown: detach the session and wipe the schema.
    db_.session.remove()
    db_.drop_all()
@pytest.yield_fixture()
def dummy_location(db):
    """Default file-system storage location backed by a temp directory."""
    workdir = tempfile.mkdtemp()
    location = Location(name='testloc', uri=workdir, default=True)
    db.session.add(location)
    db.session.commit()
    yield location
    # Remove the temporary storage tree once the test is done.
    shutil.rmtree(workdir)
@pytest.fixture()
def datadir():
    """Path of the test data directory (../data relative to this file)."""
    here = dirname(__file__)
    return join(here, '..', 'data')
@pytest.fixture()
def logos_dir(datadir):
    """Get the directory containing the community logo test files.

    Fix: the docstring previously said "Get data directory." -- copy-pasted
    from the datadir fixture above; this returns the logos subdirectory.
    """
    return join(datadir, 'community_logos')
@pytest.fixture()
def records_json(datadir):
    """Load the dumped test records from records.json."""
    with open(join(datadir, 'records.json')) as fp:
        return json.load(fp)
@pytest.fixture()
def communities_dump(datadir):
    """Load test data of dumped communities.

    :returns: Loaded dump of communities as a list of dict.
    :rtype: list
    """
    with open(join(datadir, 'communities.json')) as fp:
        return json.load(fp)
@pytest.fixture()
def deposit_user(db):
    """Test user for deposit loading (fixed id=1, referenced by dumps)."""
    user = User(id=1, email='user@invenio.org', password='change_me',
                active=True)
    db.session.add(user)
    db.session.commit()
    return user
@pytest.fixture()
def deposit_record_pid(db):
    """Registered 'recid' PID (value 10) pointing at a fresh test record."""
    record = Record.create({'title': 'Test'})
    pid = PersistentIdentifier.create(
        pid_type='recid',
        pid_value='10',
        object_type='rec',
        object_uuid=record.id,
        status=PIDStatus.REGISTERED,
    )
    db.session.commit()
    return pid
@pytest.fixture()
def deposit_dump(datadir):
    """Load test data of dumped deposits.

    :returns: Loaded dump of deposits as a list of dict.
    :rtype: list
    """
    with open(join(datadir, 'deposit.json')) as fp:
        return json.load(fp)
@pytest.fixture()
def featured_dump(datadir):
    """Load test data of dumped community featurings.

    :returns: Loaded dump of community featurings as a list of dict.
    :rtype: list
    """
    with open(join(datadir, 'featured.json')) as fp:
        return json.load(fp)
@pytest.fixture()
def users_dump(datadir):
    """Load test data of dumped users.

    :returns: Loaded dump of users as a list of dict.
    :rtype: list
    """
    with open(join(datadir, 'users.json')) as fp:
        return json.load(fp)
@pytest.fixture()
def record_db(db):
    """Create a test record with a registered recid (11782)."""
    record_uuid = uuid.uuid4()
    record = Record.create(
        {'title': 'Test record', 'recid': 11782}, id_=record_uuid)
    PersistentIdentifier.create(
        pid_type='recid',
        pid_value='11782',
        status=PIDStatus.REGISTERED,
        object_type='rec',
        object_uuid=record_uuid,
    )
    # Reserve the recid so the sequence does not hand it out again.
    RecordIdentifier.insert(11782)
    db.session.commit()
    return record
@pytest.fixture()
def record_pid(db, record_db):
    """Create a registered DOI PID attached to the test record."""
    doi_pid = PersistentIdentifier.create(
        pid_type='doi',
        pid_value='10.5281/zenodo.11782',
        status=PIDStatus.REGISTERED,
        object_type='rec',
        object_uuid=record_db.id,
    )
    db.session.commit()
    return doi_pid
@pytest.fixture()
def oauthclient_dump(datadir):
    """Load the oauthclient dump; returns (remoteaccounts, remotetokens)."""
    with open(join(datadir, 'oauthclient.json')) as fp:
        dump = json.load(fp)
    return dump['remoteaccounts'], dump['remotetokens']
@pytest.fixture()
def oauth2server_dump(datadir):
    """Load the oauth2server dump; returns (clients, tokens)."""
    with open(join(datadir, 'oauth2server.json')) as fp:
        dump = json.load(fp)
    return dump['clients'], dump['tokens']
@pytest.fixture()
def record_file(db, dummy_location):
    """Create a test file in a fresh bucket and return its descriptor.

    Fix: the docstring previously said "Create record pid." -- copy-pasted
    from the record_pid fixture; this fixture creates a file object and
    returns a dict with its bucket, key, size and checksum.
    """
    data = b'testfile'
    b = Bucket.create()
    obj = ObjectVersion.create(
        b, 'CERN_openlab_Parin_Porecha.pdf', stream=BytesIO(data))
    db.session.commit()
    return dict(
        bucket=str(obj.bucket_id),
        key=obj.key,
        size=obj.file.size,
        checksum=obj.file.checksum,
    )
@pytest.fixture()
def record_dump(records_json):
    """A record dump built from the first dumped record."""
    def doi_fetcher(record_uuid, data):
        # Only records carrying a 'doi' key yield a fetched PID.
        if 'doi' not in data:
            return None
        return FetchedPID(pid_type='doi', pid_value=data['doi'], provider=None)

    return RecordDump(
        records_json[0], source_type='json', pid_fetchers=[doi_fetcher])
@pytest.fixture()
def resolver():
    """Resolver turning a 'recid' PID value into its Record object."""
    return Resolver(pid_type='recid', object_type='rec',
                    getter=Record.get_record)
"""Track selection page"""
from bok_choy.page_object import PageObject
from common.test.acceptance.pages.lms import BASE_URL
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.pay_and_verify import PaymentAndVerificationFlow
class TrackSelectionPage(PageObject):
    """Interact with the track selection page.

    This page can be accessed at `/course_modes/choose/{course_id}/`.
    """

    def __init__(self, browser, course_id):
        """Initialize the page.

        Arguments:
            browser (Browser): The browser instance.
            course_id (unicode): The course in which the user is enrolling.
        """
        super(TrackSelectionPage, self).__init__(browser)
        self._course_id = course_id

    @property
    def url(self):
        """Return the URL corresponding to the track selection page."""
        return "{base}/course_modes/choose/{course_id}/".format(
            base=BASE_URL, course_id=self._course_id)

    def is_browser_on_page(self):
        """Check if the track selection page has loaded."""
        return self.q(css=".wrapper-register-choose").is_present()

    def enroll(self, mode="audit"):
        """Interact with one of the enrollment buttons on the page.

        Keyword Arguments:
            mode (str): Can be "audit" or "verified"

        Raises:
            ValueError
        """
        if mode == "verified":
            # Check the first contribution option, then click the enroll
            # button; this hands off to the payment/verification flow.
            self.q(css=".contribution-option > input").first.click()
            self.q(css="input[name='verified_mode']").click()
            return PaymentAndVerificationFlow(
                self.browser, self._course_id).wait_for_page()
        if mode == "audit":
            self.q(css="input[name='audit_mode']").click()
            return CourseHomePage(
                self.browser, self._course_id).wait_for_page()
        raise ValueError("Mode must be either 'audit' or 'verified'.")
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2beta1HorizontalPodAutoscalerCondition(object):
    """Swagger model for a HorizontalPodAutoscaler condition.

    NOTE: Originally generated by the swagger code generator; restyled by
    hand but behaviourally identical.
    """

    # Maps attribute name -> declared swagger type.
    swagger_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None,
                 status=None, type=None):
        """V2beta1HorizontalPodAutoscalerCondition - a model defined in Swagger.

        ``status`` and ``type`` are required and validated by their setters.
        """
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Optional fields are only routed through their setters when given.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        # Required fields always pass through the validating setters.
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """lastTransitionTime is the last time the condition transitioned
        from one status to another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set lastTransitionTime (datetime)."""
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """message is a human-readable explanation containing details about
        the transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set the human-readable transition message (str)."""
        self._message = message

    @property
    def reason(self):
        """reason is the reason for the condition's last transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the transition reason (str)."""
        self._reason = reason

    @property
    def status(self):
        """status is the status of the condition (True, False, Unknown).

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the condition status; required, must not be None."""
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")

        self._status = status

    @property
    def type(self):
        """type describes the current condition.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the condition type; required, must not be None."""
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")

        self._type = type

    def to_dict(self):
        """Return the model properties as a dict (recursing into models)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two conditions are equal when all their attributes match."""
        if not isinstance(other, V2beta1HorizontalPodAutoscalerCondition):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
# -*- coding: utf-8 -*-
"""Tests for the Category class."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
import pywikibot
import pywikibot.page
from tests.aspects import unittest, TestCase
class TestCategoryObject(TestCase):
"""Test Category object."""
family = 'wikipedia'
code = 'en'
cached = True
def test_init(self):
"""Test the category's __init__ for one condition that can't be dry."""
site = self.get_site()
self.assertRaises(ValueError, pywikibot.Category, site, 'Wikipedia:Test')
def test_is_empty(self):
"""Test if category is empty or not."""
site = self.get_site()
cat_empty = pywikibot.Category(site, 'Category:foooooo')
cat_not_empty = pywikibot.Category(site, 'Category:Wikipedia categories')
self.assertTrue(cat_empty.isEmptyCategory())
self.assertFalse(cat_not_empty.isEmptyCategory())
def test_is_hidden(self):
"""Test isHiddenCategory."""
site = self.get_site()
cat_hidden = pywikibot.Category(site, 'Category:Hidden categories')
cat_not_hidden = pywikibot.Category(site, 'Category:Wikipedia categories')
self.assertTrue(cat_hidden.isHiddenCategory())
self.assertFalse(cat_not_hidden.isHiddenCategory())
def test_categoryinfo(self):
"""Test the categoryinfo property."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Female Wikipedians')
categoryinfo = cat.categoryinfo
self.assertTrue(categoryinfo['files'] >= 0)
self.assertTrue(categoryinfo['pages'] >= 0)
self.assertTrue(categoryinfo['size'] > 0)
self.assertTrue(categoryinfo['subcats'] > 0)
members_sum = categoryinfo['files'] + categoryinfo['pages'] + categoryinfo['subcats']
self.assertEqual(members_sum, categoryinfo['size'])
cat_files = pywikibot.Category(site, 'Category:Files lacking an author')
categoryinfo2 = cat_files.categoryinfo
self.assertTrue(categoryinfo2['files'] > 0)
def test_members(self):
"""Test the members method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia legal policies')
p1 = pywikibot.Page(site, 'Category:Wikipedia disclaimers')
p2 = pywikibot.Page(site, 'Wikipedia:Terms of use')
p3 = pywikibot.Page(site, 'Wikipedia:Risk disclaimer')
members = list(cat.members())
self.assertIn(p1, members)
self.assertIn(p2, members)
self.assertNotIn(p3, members)
members_recurse = list(cat.members(recurse=True))
self.assertIn(p1, members_recurse)
self.assertIn(p2, members_recurse)
self.assertIn(p3, members_recurse)
members_namespace = list(cat.members(namespaces=14))
self.assertIn(p1, members_namespace)
self.assertNotIn(p2, members_namespace)
self.assertNotIn(p3, members_namespace)
members_total = list(cat.members(total=2))
self.assertEqual(len(members_total), 2)
def test_subcategories(self):
"""Test the subcategories method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedians by gender')
c1 = pywikibot.Category(site, 'Category:Female Wikipedians')
c2 = pywikibot.Category(site, 'Category:Lesbian Wikipedians')
subcategories = list(cat.subcategories())
self.assertIn(c1, subcategories)
self.assertNotIn(c2, subcategories)
subcategories_total = list(cat.subcategories(total=2))
self.assertEqual(len(subcategories_total), 2)
def test_subcategories_recurse(self):
"""Test the subcategories method with recurse=True."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedians by gender')
c1 = pywikibot.Category(site, 'Category:Female Wikipedians')
c2 = pywikibot.Category(site, 'Category:Lesbian Wikipedians')
subcategories_recurse = list(cat.subcategories(recurse=True))
self.assertIn(c1, subcategories_recurse)
self.assertIn(c2, subcategories_recurse)
def test_articles(self):
"""Test the articles method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia legal policies')
p1 = pywikibot.Page(site, 'Wikipedia:Terms of use')
p2 = pywikibot.Page(site, 'Wikipedia:Risk disclaimer')
articles = list(cat.articles())
self.assertIn(p1, articles)
self.assertNotIn(p2, articles)
articles_recurse = list(cat.articles(recurse=True))
self.assertIn(p1, articles_recurse)
self.assertIn(p2, articles_recurse)
articles_namespace = list(cat.articles(namespaces=1))
self.assertNotIn(p1, articles_namespace)
self.assertNotIn(p2, articles_namespace)
articles_total = list(cat.articles(total=2))
self.assertEqual(len(articles_total), 2)
def test_redirects(self):
"""Test the redirects method."""
site = self.get_site()
cat1 = pywikibot.Category(site, 'Category:Fonts')
cat2 = pywikibot.Category(site, 'Category:Typefaces')
self.assertTrue(cat1.isCategoryRedirect())
self.assertFalse(cat2.isCategoryRedirect())
# The correct target category if fetched.
tgt = cat1.getCategoryRedirectTarget()
self.assertEqual(tgt, cat2)
# Raise exception if target is fetched for non Category redirects.
self.assertRaises(pywikibot.IsNotRedirectPage,
cat2.getCategoryRedirectTarget)
class TestCategoryDryObject(TestCase):
"""Test the category object with dry tests."""
family = 'wikipedia'
code = 'en'
dry = True
def test_init_dry(self):
"""Test the category's __init__."""
site = self.get_site()
cat_normal = pywikibot.Category(site, 'Category:Foo')
self.assertEqual(cat_normal.title(withNamespace=False), 'Foo')
self.assertEqual(cat_normal.namespace(), 14)
cat_missing = pywikibot.Category(site, 'Foo')
self.assertEqual(cat_missing.title(withNamespace=False), 'Foo')
self.assertEqual(cat_missing.namespace(), 14)
cat_duplicate = pywikibot.Category(site, 'Category:Category:Foo')
self.assertEqual(cat_duplicate.title(withNamespace=False), 'Category:Foo')
self.assertEqual(cat_duplicate.namespace(), 14)
cat_dup_ns = pywikibot.Category(site, 'Category:Wikipedia:Test')
self.assertTrue(cat_dup_ns.title(withNamespace=False), 'Page:Foo')
self.assertTrue(cat_dup_ns.namespace(), 14)
self.assertRaises(ValueError, pywikibot.Category, site, 'Talk:Foo')
def test_section(self):
"""Test the section method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Foo#bar')
self.assertEqual(cat.section(), 'bar')
cat2 = pywikibot.Category(site, 'Category:Foo')
self.assertEqual(cat2.section(), None)
def test_aslink(self):
"""Test the title method with asLink=True."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia Categories')
self.assertEqual(cat.title(asLink=True, insite=cat.site),
u'[[Category:Wikipedia Categories]]')
cat_section = pywikibot.Category(site, 'Category:Wikipedia Categories#Foo')
self.assertEqual(cat_section.title(asLink=True, insite=cat_section.site),
u'[[Category:Wikipedia Categories#Foo]]')
cat_dup = pywikibot.Category(site, 'Category:Wikipedia:Test')
self.assertEqual(cat_dup.title(asLink=True, insite=cat_dup.site),
u'[[Category:Wikipedia:Test]]')
def test_sortkey(self):
"""Test the sortKey attribute."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia categories', 'Example')
self.assertEqual(cat.aslink(), '[[Category:Wikipedia categories|Example]]')
self.assertEqual(cat.aslink(sortKey='Foo'), '[[Category:Wikipedia categories|Foo]]')
class CategoryNewestPages(TestCase):
"""Test newest_pages feature on French Wikinews."""
family = 'wikinews'
code = 'fr'
cached = True
def test_newest_pages(self):
"""Test that the pages are getting older."""
cat = pywikibot.Category(self.get_site(), u'Catégorie:Yukon Quest 2015')
last = pywikibot.Timestamp.max
count = 0
for page in cat.newest_pages():
creation_stamp = page.oldest_revision.timestamp
self.assertLessEqual(creation_stamp, last)
last = creation_stamp
count += 1
self.assertEqual(count, cat.categoryinfo['size'])
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 1 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.adding_an_op import cuda_op
class AddOneTest(tf.test.TestCase):
def test(self):
if tf.test.is_built_with_cuda():
with self.test_session():
result = cuda_op.add_one([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [6, 5, 4, 3, 2])
if __name__ == '__main__':
tf.test.main() | unknown | codeparrot/codeparrot-clean | ||
import {identity} from 'shared-runtime';
function Component(props) {
let x;
[x] = props.value;
const foo = () => {
x = identity(props.value[0]);
};
foo();
return {x};
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{value: [42]}],
}; | javascript | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/destructure-array-assignment-to-context-var.js |
"""Tests for distutils.filelist."""
from os.path import join
import unittest
from test.test_support import captured_stdout, run_unittest
from distutils.filelist import glob_to_re, FileList
from distutils import debug
MANIFEST_IN = """\
include ok
include xo
exclude xo
include foo.tmp
global-include *.x
global-include *.txt
global-exclude *.tmp
recursive-include f *.oo
recursive-exclude global *.x
graft dir
prune dir3
"""
class FileListTestCase(unittest.TestCase):
def test_glob_to_re(self):
# simple cases
self.assertEqual(glob_to_re('foo*'), 'foo[^/]*\\Z(?ms)')
self.assertEqual(glob_to_re('foo?'), 'foo[^/]\\Z(?ms)')
self.assertEqual(glob_to_re('foo??'), 'foo[^/][^/]\\Z(?ms)')
# special cases
self.assertEqual(glob_to_re(r'foo\\*'), r'foo\\\\[^/]*\Z(?ms)')
self.assertEqual(glob_to_re(r'foo\\\*'), r'foo\\\\\\[^/]*\Z(?ms)')
self.assertEqual(glob_to_re('foo????'), r'foo[^/][^/][^/][^/]\Z(?ms)')
self.assertEqual(glob_to_re(r'foo\\??'), r'foo\\\\[^/][^/]\Z(?ms)')
def test_process_template_line(self):
# testing all MANIFEST.in template patterns
file_list = FileList()
# simulated file list
file_list.allfiles = ['foo.tmp', 'ok', 'xo', 'four.txt',
join('global', 'one.txt'),
join('global', 'two.txt'),
join('global', 'files.x'),
join('global', 'here.tmp'),
join('f', 'o', 'f.oo'),
join('dir', 'graft-one'),
join('dir', 'dir2', 'graft2'),
join('dir3', 'ok'),
join('dir3', 'sub', 'ok.txt')
]
for line in MANIFEST_IN.split('\n'):
if line.strip() == '':
continue
file_list.process_template_line(line)
wanted = ['ok', 'four.txt', join('global', 'one.txt'),
join('global', 'two.txt'), join('f', 'o', 'f.oo'),
join('dir', 'graft-one'), join('dir', 'dir2', 'graft2')]
self.assertEqual(file_list.files, wanted)
def test_debug_print(self):
file_list = FileList()
with captured_stdout() as stdout:
file_list.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), '')
debug.DEBUG = True
try:
with captured_stdout() as stdout:
file_list.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), 'xxx\n')
finally:
debug.DEBUG = False
def test_suite():
return unittest.makeSuite(FileListTestCase)
if __name__ == "__main__":
run_unittest(test_suite()) | unknown | codeparrot/codeparrot-clean | ||
"""
masked_reductions.py is for reduction algorithms using a mask-based approach
for missing values.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._libs import missing as libmissing
from pandas.core.nanops import check_below_min_count
if TYPE_CHECKING:
from collections.abc import Callable
from pandas._typing import (
AxisInt,
npt,
)
def _reductions(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
**kwargs,
):
"""
Sum, mean or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
Numpy array with the values (can be of any dtype that support the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any() or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values, axis=axis, **kwargs)
else:
if check_below_min_count(values.shape, mask, min_count) and (
axis is None or values.ndim == 1
):
return libmissing.NA
if values.dtype == np.dtype(object):
# object dtype does not support `where` without passing an initial
values = values[~mask]
return func(values, axis=axis, **kwargs)
return func(values, where=~mask, axis=axis, **kwargs)
def sum(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
):
return _reductions(
np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
)
def prod(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
min_count: int = 0,
axis: AxisInt | None = None,
):
return _reductions(
np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
)
def _minmax(
func: Callable,
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
"""
Reduction for 1D masked array.
Parameters
----------
func : np.min or np.max
values : np.ndarray
Numpy array with the values (can be of any dtype that support the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
axis : int, optional, default None
"""
if not skipna:
if mask.any() or not values.size:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
else:
return func(values, axis=axis)
else:
subset = values[~mask]
if subset.size:
return func(subset, axis=axis)
else:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
def min(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis)
def max(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis)
def mean(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
):
if not values.size or mask.all():
return libmissing.NA
return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis)
def var(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
ddof: int = 1,
):
if not values.size or mask.all():
return libmissing.NA
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return _reductions(
np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
)
def std(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: AxisInt | None = None,
ddof: int = 1,
):
if not values.size or mask.all():
return libmissing.NA
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return _reductions(
np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
) | python | github | https://github.com/pandas-dev/pandas | pandas/core/array_algos/masked_reductions.py |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.5,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
2000: 5e-04,
5000: 1e-04,
7000: 5e-05
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter,
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
}
]
)
def exp_a(name):
# 5 appliances
# avg valid cost = 1.1260980368
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy['appliances'] = [
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
]
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
# one pool layer
# avg valid cost = 1.2261329889
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
# BLSTM
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
# BLSTM 2x2x pool
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abcde')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=4000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
"""Implements Host Collection UI."""
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class HostCollection(Base):
"""Provides the CRUD functionality for Host Collection."""
is_katello = True
def navigate_to_entity(self):
"""Navigate to Host Collection entity page"""
Navigator(self.browser).go_to_host_collections()
def _search_locator(self):
"""Specify locator for Host Collection entity search procedure"""
return locators['hostcollection.select_name']
def create(self, name, limit=None, description=None):
"""Creates new Host Collection from UI."""
self.click(locators['hostcollection.new'])
if self.wait_until_element(common_locators['name']) is None:
raise UIError(
u'Could not create new host collection "{0}"'.format(name)
)
self.text_field_update(common_locators['name'], name)
if limit:
self.set_limit(limit)
if description:
self.text_field_update(
common_locators['description'], description)
self.click(common_locators['create'])
def update(self, name, new_name=None, description=None, limit=None):
"""Updates an existing Host Collection."""
element = self.search(name)
if element is None:
raise UIError(
u'Could not find host collection "{0}" to update'.format(name))
element.click()
self.wait_for_ajax()
if new_name:
self.edit_entity(
locators['hostcollection.edit_name'],
locators['hostcollection.edit_name_text'],
new_name,
locators['hostcollection.save_name'],
)
if description:
self.edit_entity(
locators['hostcollection.edit_description'],
locators['hostcollection.edit_description_text'],
description,
locators['hostcollection.save_description']
)
if limit:
self.click(locators['hostcollection.edit_limit'])
self.set_limit(limit)
if self.wait_until_element(
locators['hostcollection.save_limit']).is_enabled():
self.click(locators['hostcollection.save_limit'])
else:
raise ValueError(
'Please update content host limit with valid integer '
'value'
)
def validate_field_value(self, name, field_name, field_value):
"""Validate whether corresponding Host Collection field has expected
value
:param str name: Host Collection name
:param str field_name: Field to be validated (supported fields: 'name',
'description', 'limit')
:param str field_value: Expected field value
:return bool result: Return True in case field contains expected value
and False otherwise
"""
element = self.search(name)
if element is None:
raise UIError(
u'Could not find host collection "{0}" to verify'.format(name))
element.click()
self.wait_for_ajax()
strategy, value = locators[
'hostcollection.{0}_field'.format(field_name)]
return self.wait_until_element((strategy, value % field_value))
def delete(self, name, really=True):
"""Deletes an existing Host Collection entity."""
self.delete_entity(
name,
really,
locators['hostcollection.remove'],
)
def copy(self, name, new_name):
"""Copies an existing Host Collection entity"""
element = self.search(name)
if element is None:
raise UIError(
u'Could not find host collection "{0}" to copy'.format(name))
element.click()
self.wait_for_ajax()
self.edit_entity(
locators['hostcollection.copy'],
locators['hostcollection.copy_name'],
new_name,
locators['hostcollection.copy_create'],
)
def add_content_host(self, name, ch_name):
"""Add content host to existing Host Collection entity."""
# find host collection
host_collection = self.search(name)
if host_collection is None:
raise UIError(
u'Could not find host collection {0}'.format(name))
host_collection.click()
self.wait_for_ajax()
self.click(tab_locators['hostcollection.content_hosts'])
self.click(tab_locators['hostcollection.tab_ch_add'])
strategy, value = locators['hostcollection.select_ch']
self.click((strategy, value % ch_name))
self.click(locators['hostcollection.add_content_host'])
self.click(tab_locators['hostcollection.tab_ch_remove'])
element = self.wait_until_element(
(strategy, value % ch_name), timeout=8)
if element is None:
raise UIError(
"Adding content host {0} is failed".format(ch_name)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
    # Backfill reversed() for Python 2.3 and earlier, where it is missing.
    def reversed(seq):
        """Yield the items of *seq* from last to first."""
        index = len(seq)
        while index > 0:
            index -= 1
            yield seq[index]
if not hasattr(builtins, 'next'):
    # Backfill next() for Python 2.5 and earlier, where iterators only
    # exposed a .next() method and no next() builtin existed.
    def next(obj):
        """Return the next item from the iterator *obj*."""
        return obj.next()
# Access-specifier levels for class members (public/protected/private).
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
# Bit flags describing the modifiers attached to a parsed function.
# They are OR-ed together into a single modifiers value.
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08          # constructor
FUNCTION_DTOR = 0x10          # destructor
FUNCTION_ATTRIBUTE = 0x20     # __attribute__(...) annotation
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80         # throw(...) exception specification
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
    """Base class for all AST nodes; records the node's token span."""

    def __init__(self, start, end):
        # Token-stream offsets delimiting this node in the source.
        self.start = start
        self.end = end

    def IsDeclaration(self):
        """Returns bool if this node is a declaration."""
        return False

    def IsDefinition(self):
        """Returns bool if this node is a definition."""
        return False

    def IsExportable(self):
        """Returns bool if this node exportable from a header file."""
        return False

    def Requires(self, node):
        """Does this AST node require the definition of the node passed in?"""
        return False

    def XXX__str__(self):
        # Deliberately not named __str__; renaming it would enable printing.
        return self._StringHelper(self.__class__.__name__, '')

    def _StringHelper(self, name, suffix):
        # Token positions are shown only when debug output is enabled.
        if utils.DEBUG:
            return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
        return '%s(%s)' % (name, suffix)

    def __repr__(self):
        return str(self)
class Define(Node):
    """A preprocessor #define: a macro name plus its replacement text."""

    def __init__(self, start, end, name, definition):
        Node.__init__(self, start, end)
        self.name = name
        self.definition = definition

    def __str__(self):
        return self._StringHelper(self.__class__.__name__,
                                  '%s %s' % (self.name, self.definition))
class Include(Node):
    """An #include directive; `system` is true for <...> style includes."""

    def __init__(self, start, end, filename, system):
        Node.__init__(self, start, end)
        self.filename = filename
        self.system = system

    def __str__(self):
        # Render with the same bracket style the source used.
        fmt = '<%s>' if self.system else '"%s"'
        return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
    """A goto statement; `label` is the name of the jump target."""

    def __init__(self, start, end, label):
        Node.__init__(self, start, end)
        self.label = label

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
    """A generic expression statement; `expr` holds its token sequence."""

    def __init__(self, start, end, expr):
        Node.__init__(self, start, end)
        self.expr = expr

    def Requires(self, node):
        # TODO(nnorwitz): impl.
        return False

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
    """A return statement; the returned expression tokens are in `expr`."""
class Delete(Expr):
    """A delete expression statement; the operand tokens are in `expr`."""
class Friend(Expr):
    """A friend declaration; remembers the namespace it appeared in."""

    def __init__(self, start, end, expr, namespace):
        Expr.__init__(self, start, end, expr)
        # Snapshot the namespace stack so later pushes/pops don't affect us.
        self.namespace = list(namespace)
class Using(Node):
    """A using declaration/directive; `names` holds its name tokens."""

    def __init__(self, start, end, names):
        Node.__init__(self, start, end)
        self.names = names

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
    """One formal parameter: name, Type node, and optional default tokens."""

    def __init__(self, start, end, name, parameter_type, default):
        Node.__init__(self, start, end)
        self.name = name
        self.type = parameter_type
        self.default = default

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def __str__(self):
        suffix = '%s %s' % (self.type, self.name)
        if self.default:
            suffix += ' = ' + ''.join(d.name for d in self.default)
        return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
    """Shared base for declarations that carry a name and a namespace list."""

    def __init__(self, start, end, name, namespace):
        Node.__init__(self, start, end)
        self.name = name
        self.namespace = namespace[:]

    def FullName(self):
        """Return the name qualified by its enclosing namespaces."""
        if self.namespace and self.namespace[-1]:
            return '::'.join(self.namespace) + '::' + self.name
        return self.name

    def _TypeStringHelper(self, suffix):
        # Append the enclosing namespaces; None entries are anonymous.
        if self.namespace:
            names = [n or '<anonymous>' for n in self.namespace]
            suffix += ' in ' + '::'.join(names)
        return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
    """A variable declaration, e.g. ``int x = 3;``."""

    def __init__(self, start, end, name, var_type, initial_value, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.type = var_type
        self.initial_value = initial_value

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def ToString(self):
        """Return a string that tries to reconstitute the variable decl."""
        decl = '%s %s' % (self.type, self.name)
        if not self.initial_value:
            return decl
        return decl + ' = ' + self.initial_value

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
    """A typedef; ``alias`` holds the aliased type (tokens or an AST node)."""

    def __init__(self, start, end, name, alias, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.alias = alias

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return any(token is not None and token.name == node.name
                   for token in self.alias)

    def __str__(self):
        suffix = '%s, %s' % (self.name, self.alias)
        return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
    """Base for brace-delimited nested types (unions and enums)."""

    def __init__(self, start, end, name, fields, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.fields = fields  # tokens between the braces; None for fwd decls

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def __str__(self):
        suffix = '%s, {%s}' % (self.name, self.fields)
        return self._TypeStringHelper(suffix)
class Union(_NestedType):
    # A union definition; fields are the raw tokens between the braces.
    pass
class Enum(_NestedType):
    # An enum definition; fields are the raw tokens between the braces.
    pass
class Class(_GenericDeclaration):
    """A class declaration or definition, with bases and raw body tokens."""

    def __init__(self, start, end, name, bases, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.bases = bases
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        # A pure forward declaration has neither bases nor a body.
        return self.bases is None and self.body is None

    def IsDefinition(self):
        return not self.IsDeclaration()

    def IsExportable(self):
        return not self.IsDeclaration()

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        if self.bases:
            # TODO(nnorwitz): bases are tokens, do name comparison.
            for token_list in self.bases:
                if any(token.name == node.name for token in token_list):
                    return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        name = self.name
        if self.templated_types:
            name += '<%s>' % self.templated_types
        suffix = '%s, %s, %s' % (name, self.bases, self.body)
        return self._TypeStringHelper(suffix)
class Struct(Class):
    # Structs are handled identically to classes; only the keyword differs.
    pass
class Function(_GenericDeclaration):
    """A free function: return type, parameters, modifier bits, body tokens."""

    def __init__(self, start, end, name, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        conv = TypeConverter(namespace)
        self.return_type = conv.CreateReturnType(return_type)
        self.parameters = conv.ToParameters(parameters)
        self.modifiers = modifiers
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        return self.body is None

    def IsDefinition(self):
        return self.body is not None

    def IsExportable(self):
        # 'static' return-type modifier means internal linkage; a None
        # namespace entry marks an anonymous namespace.
        if self.return_type and 'static' in self.return_type.modifiers:
            return False
        return None not in self.namespace

    def Requires(self, node):
        # TODO(nnorwitz): parameters are tokens, do name comparison.
        if self.parameters and any(p.name == node.name for p in self.parameters):
            return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        # TODO(nnorwitz): add templated_types.
        suffix = ('%s %s(%s), 0x%02x, %s' %
                  (self.return_type, self.name, self.parameters,
                   self.modifiers, self.body))
        return self._TypeStringHelper(suffix)
class Method(Function):
    """A function declared inside a class; ``in_class`` names that class."""

    def __init__(self, start, end, name, in_class, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        Function.__init__(self, start, end, name, return_type, parameters,
                          modifiers, templated_types, body, namespace)
        # TODO(nnorwitz): in_class could also be a namespace which can
        # mess up finding functions properly.
        self.in_class = in_class
class Type(_GenericDeclaration):
    """Type used for any variable (eg class, primitive, struct, etc)."""

    def __init__(self, start, end, name, templated_types, modifiers,
                 reference, pointer, array):
        """
        Args:
          name: str name of main type
          templated_types: [Class (Type?)] template type info between <>
          modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
          reference, pointer, array: bools
        """
        _GenericDeclaration.__init__(self, start, end, name, [])
        self.templated_types = templated_types
        # A bare keyword like "unsigned" arrives with an empty name; the
        # last modifier is really the type name, so promote it.
        if not name and modifiers:
            self.name = modifiers.pop()
        self.modifiers = modifiers
        self.reference = reference
        self.pointer = pointer
        self.array = array

    def __str__(self):
        name = str(self.name)
        if self.templated_types:
            name += '<%s>' % self.templated_types
        suffix = name
        if self.modifiers:
            suffix = ' '.join(self.modifiers) + ' ' + suffix
        if self.reference:
            suffix += '&'
        if self.pointer:
            suffix += '*'
        if self.array:
            suffix += '[]'
        return self._TypeStringHelper(suffix)

    # By definition, Is* are always False.  A Type can only exist in
    # some sort of variable declaration, parameter, or return value.
    def IsDeclaration(self):
        return False

    def IsDefinition(self):
        return False

    def IsExportable(self):
        return False
class TypeConverter(object):
    """Converts raw token sequences into Type/Parameter AST nodes."""

    def __init__(self, namespace_stack):
        self.namespace_stack = namespace_stack

    def _GetTemplateEnd(self, tokens, start):
        """Find the '>' matching an already-consumed '<'.

        Returns (tokens strictly inside the brackets, index past the '>').
        """
        count = 1
        end = start
        while 1:
            token = tokens[end]
            end += 1
            if token.name == '<':
                count += 1
            elif token.name == '>':
                count -= 1
                if count == 0:
                    break
        return tokens[start:end-1], end

    def ToType(self, tokens):
        """Convert [Token,...] to [Class(...), ] useful for base classes.
        For example, code like class Foo : public Bar<x, y> { ... };
        the "Bar<x, y>" portion gets converted to an AST.

        Returns:
          [Class(...), ...]
        """
        result = []
        name_tokens = []
        reference = pointer = array = False

        def AddType(templated_types):
            # Partition tokens into name and modifier tokens.
            names = []
            modifiers = []
            for t in name_tokens:
                if keywords.IsKeyword(t.name):
                    modifiers.append(t.name)
                else:
                    names.append(t.name)
            name = ''.join(names)
            result.append(Type(name_tokens[0].start, name_tokens[-1].end,
                               name, templated_types, modifiers,
                               reference, pointer, array))
            del name_tokens[:]

        i = 0
        end = len(tokens)
        while i < end:
            token = tokens[i]
            if token.name == '<':
                new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
                AddType(self.ToType(new_tokens))
                # If there is a comma after the template, we need to consume
                # that here otherwise it becomes part of the name.
                i = new_end
                reference = pointer = array = False
            elif token.name == ',':
                AddType([])
                reference = pointer = array = False
            elif token.name == '*':
                pointer = True
            elif token.name == '&':
                reference = True
            elif token.name == '[':
                pointer = True
            elif token.name == ']':
                pass
            else:
                name_tokens.append(token)
            i += 1

        if name_tokens:
            # No '<' in the tokens, just a simple name and no template.
            AddType([])
        return result

    def DeclarationToParts(self, parts, needs_name_removed):
        """Split declaration tokens into their components.

        Returns a 6-tuple: (name, type name string, templated types,
        modifiers, default-value tokens, other tokens).
        """
        name = None
        default = []
        if needs_name_removed:
            # Handle default (initial) values properly.
            for i, t in enumerate(parts):
                if t.name == '=':
                    default = parts[i+1:]
                    name = parts[i-1].name
                    if name == ']' and parts[i-2].name == '[':
                        name = parts[i-3].name
                        i -= 1
                    parts = parts[:i-1]
                    break
            else:
                if parts[-1].token_type == tokenize.NAME:
                    name = parts.pop().name
                else:
                    # TODO(nnorwitz): this is a hack that happens for code like
                    # Register(Foo<T>); where it thinks this is a function call
                    # but it's actually a declaration.
                    name = '???'
        modifiers = []
        type_name = []
        other_tokens = []
        templated_types = []
        i = 0
        end = len(parts)
        while i < end:
            p = parts[i]
            if keywords.IsKeyword(p.name):
                modifiers.append(p.name)
            elif p.name == '<':
                templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
                templated_types = self.ToType(templated_tokens)
                i = new_end - 1
                # Don't add a spurious :: to data members being initialized.
                next_index = i + 1
                if next_index < end and parts[next_index].name == '::':
                    i += 1
            elif p.name in ('[', ']', '='):
                # These are handled elsewhere.
                other_tokens.append(p)
            elif p.name not in ('*', '&', '>'):
                # Ensure that names have a space between them.
                if (type_name and type_name[-1].token_type == tokenize.NAME and
                        p.token_type == tokenize.NAME):
                    type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
                type_name.append(p)
            else:
                other_tokens.append(p)
            i += 1
        type_name = ''.join([t.name for t in type_name])
        return name, type_name, templated_types, modifiers, default, other_tokens

    def ToParameters(self, tokens):
        """Convert a parameter-list token sequence into Parameter nodes."""
        if not tokens:
            return []

        result = []
        name = type_name = ''
        type_modifiers = []
        pointer = reference = array = False
        first_token = None
        default = []

        def AddParameter():
            # Reads the accumulator variables from the enclosing loop and
            # appends one Parameter to result.
            if default:
                del default[0]  # Remove flag.
            end = type_modifiers[-1].end
            parts = self.DeclarationToParts(type_modifiers, True)
            (name, type_name, templated_types, modifiers,
             unused_default, unused_other_tokens) = parts
            parameter_type = Type(first_token.start, first_token.end,
                                  type_name, templated_types, modifiers,
                                  reference, pointer, array)
            p = Parameter(first_token.start, end, name,
                          parameter_type, default)
            result.append(p)

        template_count = 0
        for s in tokens:
            if not first_token:
                first_token = s
            # Commas inside template brackets do not separate parameters.
            if s.name == '<':
                template_count += 1
            elif s.name == '>':
                template_count -= 1
            if template_count > 0:
                type_modifiers.append(s)
                continue

            if s.name == ',':
                AddParameter()
                name = type_name = ''
                type_modifiers = []
                pointer = reference = array = False
                first_token = None
                default = []
            elif s.name == '*':
                pointer = True
            elif s.name == '&':
                reference = True
            elif s.name == '[':
                array = True
            elif s.name == ']':
                pass  # Just don't add to type_modifiers.
            elif s.name == '=':
                # Got a default value.  Add any value (None) as a flag.
                default.append(None)
            elif default:
                default.append(s)
            else:
                type_modifiers.append(s)
        AddParameter()
        return result

    def CreateReturnType(self, return_type_seq):
        """Build a Type node for a return-type token sequence, or None."""
        if not return_type_seq:
            return None
        start = return_type_seq[0].start
        end = return_type_seq[-1].end
        _, name, templated_types, modifiers, default, other_tokens = \
            self.DeclarationToParts(return_type_seq, False)
        names = [n.name for n in other_tokens]
        reference = '&' in names
        pointer = '*' in names
        array = '[' in names
        return Type(start, end, name, templated_types, modifiers,
                    reference, pointer, array)

    def GetTemplateIndices(self, names):
        """Return (start, end) slice indices covering '<...>' in names."""
        # names is a list of strings.
        start = names.index('<')
        end = len(names) - 1
        while end > 0:
            if names[end] == '>':
                break
            end -= 1
        return start, end+1
class AstBuilder(object):
    """Builds AST nodes from a token stream for one translation unit."""

    def __init__(self, token_stream, filename, in_class='', visibility=None,
                 namespace_stack=None):
        """
        Args:
          token_stream: iterator yielding token objects.
          filename: name of the file being parsed (used in error reports).
          in_class: fully-qualified name of the enclosing class, '' or None.
          visibility: current member visibility, or None outside a class.
          namespace_stack: enclosing namespace names; copied, never aliased.
        """
        self.tokens = token_stream
        self.filename = filename
        # TODO(nnorwitz): use a better data structure (deque) for the queue.
        # Switching directions of the "queue" improved perf by about 25%.
        # Using a deque should be even better since we access from both sides.
        self.token_queue = []
        # Fix: the old signature used a mutable default argument
        # (namespace_stack=[]).  It was always copied before use, so a None
        # default with an explicit copy is behaviorally identical and safer.
        self.namespace_stack = list(namespace_stack) if namespace_stack else []
        self.in_class = in_class
        if in_class is None:
            self.in_class_name_only = None
        else:
            # self.in_class can be A::Name; keep the bare Name for dtor checks.
            self.in_class_name_only = in_class.split('::')[-1]
        self.visibility = visibility
        self.in_function = False
        self.current_token = None
        # Keep the state whether we are currently handling a typedef or not.
        self._handling_typedef = False
        self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
    """Report a parse problem to stderr along with recent queued tokens."""
    printable_queue = list(reversed(self.token_queue[-20:]))
    sys.stderr.write('Got %s in %s @ %s %s\n' %
                     (msg, self.filename, token, printable_queue))
def Generate(self):
    """Yield top-level AST nodes until the token stream is exhausted."""
    while 1:
        token = self._GetNextToken()
        if not token:
            break

        # Get the next token.
        self.current_token = token

        # Dispatch on the next token type.
        if token.token_type == _INTERNAL_TOKEN:
            if token.name == _NAMESPACE_POP:
                self.namespace_stack.pop()
            continue

        try:
            result = self._GenerateOne(token)
            if result is not None:
                yield result
        except:
            # Deliberately broad: attach file/token context to any failure,
            # then re-raise so the caller still sees the original exception.
            self.HandleError('exception', token)
            raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
                    ref_pointer_name_seq, templated_types, value=None):
    """Build a VariableDeclaration from already-separated declaration parts."""
    # The &/*/[ markers may appear anywhere in the ref/pointer sequence.
    reference = '&' in ref_pointer_name_seq
    pointer = '*' in ref_pointer_name_seq
    array = '[' in ref_pointer_name_seq
    var_type = Type(pos_token.start, pos_token.end, type_name,
                    templated_types, type_modifiers,
                    reference, pointer, array)
    return VariableDeclaration(pos_token.start, pos_token.end,
                               name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
    """Parse one construct starting at ``token``; may return None."""
    if token.token_type == tokenize.NAME:
        if (keywords.IsKeyword(token.name) and
                not keywords.IsBuiltinType(token.name)):
            # Dispatch keywords to the corresponding handle_<keyword> method.
            method = getattr(self, 'handle_' + token.name)
            return method()
        elif token.name == self.in_class_name_only:
            # The token name is the same as the class, must be a ctor if
            # there is a paren.  Otherwise, it's the return type.
            # Peek ahead to get the next token to figure out which.
            next = self._GetNextToken()
            self._AddBackToken(next)
            if next.token_type == tokenize.SYNTAX and next.name == '(':
                return self._GetMethod([token], FUNCTION_CTOR, None, True)
            # Fall through--handle like any other method.

        # Handle data or function declaration/definition.
        syntax = tokenize.SYNTAX
        temp_tokens, last_token = \
            self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
        temp_tokens.insert(0, token)
        if last_token.name == '(':
            # If there is an assignment before the paren,
            # this is an expression, not a method.
            expr = bool([e for e in temp_tokens if e.name == '='])
            if expr:
                new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
                temp_tokens.append(last_token)
                temp_tokens.extend(new_temp)
                last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)

        if last_token.name == '[':
            # Handle array, this isn't a method, unless it's an operator.
            # TODO(nnorwitz): keep the size somewhere.
            # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
            temp_tokens.append(last_token)
            if temp_tokens[-2].name == 'operator':
                temp_tokens.append(self._GetNextToken())
            else:
                temp_tokens2, last_token = \
                    self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
                temp_tokens.extend(temp_tokens2)

        if last_token.name == ';':
            # Handle data, this isn't a method.
            parts = self.converter.DeclarationToParts(temp_tokens, True)
            (name, type_name, templated_types, modifiers, default,
             unused_other_tokens) = parts

            t0 = temp_tokens[0]
            names = [t.name for t in temp_tokens]
            if templated_types:
                # Drop the '<...>' portion from the name sequence.
                start, end = self.converter.GetTemplateIndices(names)
                names = names[:start] + names[end:]
            default = ''.join([t.name for t in default])
            return self._CreateVariable(t0, name, type_name, modifiers,
                                        names, templated_types, default)
        if last_token.name == '{':
            self._AddBackTokens(temp_tokens[1:])
            self._AddBackToken(last_token)
            method_name = temp_tokens[0].name
            method = getattr(self, 'handle_' + method_name, None)
            if not method:
                # Must be declaring a variable.
                # TODO(nnorwitz): handle the declaration.
                return None
            return method()
        return self._GetMethod(temp_tokens, 0, None, False)
    elif token.token_type == tokenize.SYNTAX:
        if token.name == '~' and self.in_class:
            # Must be a dtor (probably not in method body).
            token = self._GetNextToken()
            # self.in_class can contain A::Name, but the dtor will only
            # be Name.  Make sure to compare against the right value.
            if (token.token_type == tokenize.NAME and
                    token.name == self.in_class_name_only):
                return self._GetMethod([token], FUNCTION_DTOR, None, True)
        # TODO(nnorwitz): handle a lot more syntax.
    elif token.token_type == tokenize.PREPROCESSOR:
        # TODO(nnorwitz): handle more preprocessor directives.
        # token starts with a #, so remove it and strip whitespace.
        name = token.name[1:].lstrip()
        if name.startswith('include'):
            # Remove "include".
            name = name[7:].strip()
            assert name
            # Handle #include \<newline> "header-on-second-line.h".
            if name.startswith('\\'):
                name = name[1:].strip()
            assert name[0] in '<"', token
            assert name[-1] in '>"', token
            system = name[0] == '<'
            filename = name[1:-1]
            return Include(token.start, token.end, filename, system)
        if name.startswith('define'):
            # Remove "define".
            name = name[6:].strip()
            assert name
            value = ''
            # Split the macro name from its value at the first whitespace.
            for i, c in enumerate(name):
                if c.isspace():
                    value = name[i:].lstrip()
                    name = name[:i]
                    break
            return Define(token.start, token.end, name, value)
        if name.startswith('if') and name[2:3].isspace():
            condition = name[3:].strip()
            if condition.startswith('0') or condition.startswith('(0)'):
                self._SkipIf0Blocks()
    return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo(), it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
    # Consume and discard tokens up to and including the terminator.
    unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
    """Discard tokens until the #endif matching the current '#if 0'."""
    depth = 1
    while depth:
        tok = self._GetNextToken()
        if tok.token_type != tokenize.PREPROCESSOR:
            continue
        directive = tok.name[1:].lstrip()
        if directive.startswith('endif'):
            depth -= 1
        elif directive.startswith('if'):
            # Nested conditional; its endif must not end the outer block.
            depth += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
    """Yield tokens up to and including the matching close_paren.

    Handles nesting; the opening character must already be consumed.
    """
    if GetNextToken is None:
        GetNextToken = self._GetNextToken
    # Assumes the current token is open_paren and we will consume
    # and return up to the close_paren.
    count = 1
    token = GetNextToken()
    while 1:
        if token.token_type == tokenize.SYNTAX:
            if token.name == open_paren:
                count += 1
            elif token.name == close_paren:
                count -= 1
                if count == 0:
                    break
        yield token
        token = GetNextToken()
    # Also yield the final closing token.
    yield token
def _GetParameters(self):
    # Tokens of the current parameter list, including the trailing ')'.
    return self._GetMatchingChar('(', ')')
def GetScope(self):
    # Tokens of a brace-delimited scope, including the trailing '}'.
    return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
    """Push a single token back so a later _GetNextToken returns it."""
    # The queue pops from the end; stream-sourced tokens go to the front.
    if token.whence == tokenize.WHENCE_STREAM:
        token.whence = tokenize.WHENCE_QUEUE
        self.token_queue.insert(0, token)
    else:
        assert token.whence == tokenize.WHENCE_QUEUE, token
        self.token_queue.append(token)
def _AddBackTokens(self, tokens):
    """Push a sequence of tokens back onto the queue, preserving order."""
    if tokens:
        if tokens[-1].whence == tokenize.WHENCE_STREAM:
            for token in tokens:
                token.whence = tokenize.WHENCE_QUEUE
            self.token_queue[:0] = reversed(tokens)
        else:
            assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
            self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
    """Returns ([tokens], next_token_info)."""
    # Read from the given sequence if provided, otherwise the live stream.
    GetNextToken = self._GetNextToken
    if seq is not None:
        it = iter(seq)
        GetNextToken = lambda: next(it)
    next_token = GetNextToken()
    tokens = []
    last_token_was_name = False
    while (next_token.token_type == tokenize.NAME or
           (next_token.token_type == tokenize.SYNTAX and
            next_token.name in ('::', '<'))):
        # Two NAMEs in a row means the identifier should terminate.
        # It's probably some sort of variable declaration.
        if last_token_was_name and next_token.token_type == tokenize.NAME:
            break
        last_token_was_name = next_token.token_type == tokenize.NAME
        tokens.append(next_token)
        # Handle templated names.
        if next_token.name == '<':
            tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
            last_token_was_name = True
        next_token = GetNextToken()
    return tokens, next_token
def GetMethod(self, modifiers, templated_types):
    """Parse a method whose return type/name precede the next '('."""
    return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
    assert len(return_type_and_name) >= 1
    return self._GetMethod(return_type_and_name, modifiers, templated_types,
                           False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
               get_paren):
    """Parse a function/method declaration or definition.

    return_type_and_name holds the tokens seen so far; modifiers is a
    bitmask of FUNCTION_* flags; get_paren says whether the opening '('
    still needs to be consumed from the stream.
    """
    template_portion = None
    if get_paren:
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        if token.name == '<':
            # Handle templatized dtors.
            template_portion = [token]
            template_portion.extend(self._GetMatchingChar('<', '>'))
            token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '(', token

    name = return_type_and_name.pop()
    # Handle templatized ctors.
    if name.name == '>':
        index = 1
        while return_type_and_name[index].name != '<':
            index += 1
        template_portion = return_type_and_name[index:] + [name]
        del return_type_and_name[index:]
        name = return_type_and_name.pop()
    elif name.name == ']':
        rt = return_type_and_name
        assert rt[-1].name == '[', return_type_and_name
        assert rt[-2].name == 'operator', return_type_and_name
        name_seq = return_type_and_name[-2:]
        del return_type_and_name[-2:]
        # Rebuild the two tokens 'operator' + '[]' into a single name token.
        name = tokenize.Token(tokenize.NAME, 'operator[]',
                              name_seq[0].start, name.end)
        # Get the open paren so _GetParameters() below works.
        unused_open_paren = self._GetNextToken()

    # TODO(nnorwitz): store template_portion.
    return_type = return_type_and_name
    indices = name
    if return_type:
        indices = return_type[0]

    # Force ctor for templatized ctors.
    if name.name == self.in_class and not modifiers:
        modifiers |= FUNCTION_CTOR
    parameters = list(self._GetParameters())
    del parameters[-1]  # Remove trailing ')'.

    # Handling operator() is especially weird.
    if name.name == 'operator' and not parameters:
        token = self._GetNextToken()
        assert token.name == '(', token
        parameters = list(self._GetParameters())
        del parameters[-1]  # Remove trailing ')'.

    # Consume trailing modifiers: const, throw(...), __attribute__((...)),
    # and all-caps annotation macros.
    token = self._GetNextToken()
    while token.token_type == tokenize.NAME:
        modifier_token = token
        token = self._GetNextToken()
        if modifier_token.name == 'const':
            modifiers |= FUNCTION_CONST
        elif modifier_token.name == '__attribute__':
            # TODO(nnorwitz): handle more __attribute__ details.
            modifiers |= FUNCTION_ATTRIBUTE
            assert token.name == '(', token
            # Consume everything between the (parens).
            unused_tokens = list(self._GetMatchingChar('(', ')'))
            token = self._GetNextToken()
        elif modifier_token.name == 'throw':
            modifiers |= FUNCTION_THROW
            assert token.name == '(', token
            # Consume everything between the (parens).
            unused_tokens = list(self._GetMatchingChar('(', ')'))
            token = self._GetNextToken()
        elif modifier_token.name == modifier_token.name.upper():
            # HACK(nnorwitz): assume that all upper-case names
            # are some macro we aren't expanding.
            modifiers |= FUNCTION_UNKNOWN_ANNOTATION
        else:
            self.HandleError('unexpected token', modifier_token)

    assert token.token_type == tokenize.SYNTAX, token
    # Handle ctor initializers.
    if token.name == ':':
        # TODO(nnorwitz): anything else to handle for initializer list?
        while token.name != ';' and token.name != '{':
            token = self._GetNextToken()

    # Handle pointer to functions that are really data but look
    # like method declarations.
    if token.name == '(':
        if parameters[0].name == '*':
            # name contains the return type.
            name = parameters.pop()
            # parameters contains the name of the data.
            modifiers = [p.name for p in parameters]
            # Already at the ( to open the parameter list.
            function_parameters = list(self._GetMatchingChar('(', ')'))
            del function_parameters[-1]  # Remove trailing ')'.
            # TODO(nnorwitz): store the function_parameters.
            token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == ';', token
            return self._CreateVariable(indices, name.name, indices.name,
                                        modifiers, '', None)
        # At this point, we got something like:
        #  return_type (type::*name_)(params);
        # This is a data member called name_ that is a function pointer.
        # With this code: void (sq_type::*field_)(string&);
        # We get: name=void return_type=[] parameters=sq_type ... field_
        # TODO(nnorwitz): is return_type always empty?
        # TODO(nnorwitz): this isn't even close to being correct.
        # Just put in something so we don't crash and can move on.
        real_name = parameters[-1]
        modifiers = [p.name for p in self._GetParameters()]
        del modifiers[-1]  # Remove trailing ')'.
        return self._CreateVariable(indices, real_name.name, indices.name,
                                    modifiers, '', None)

    if token.name == '{':
        body = list(self.GetScope())
        del body[-1]  # Remove trailing '}'.
    else:
        body = None
        if token.name == '=':
            # Pure virtual: '= 0'.
            token = self._GetNextToken()
            assert token.token_type == tokenize.CONSTANT, token
            assert token.name == '0', token
            modifiers |= FUNCTION_PURE_VIRTUAL
            token = self._GetNextToken()

        if token.name == '[':
            # TODO(nnorwitz): store tokens and improve parsing.
            # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
            tokens = list(self._GetMatchingChar('[', ']'))
            token = self._GetNextToken()

        assert token.name == ';', (token, return_type_and_name, parameters)

    # Looks like we got a method, not a function.
    if len(return_type) > 2 and return_type[-1].name == '::':
        return_type, in_class = \
            self._GetReturnTypeAndClassName(return_type)
        return Method(indices.start, indices.end, name.name, in_class,
                      return_type, parameters, modifiers, templated_types,
                      body, self.namespace_stack)
    return Function(indices.start, indices.end, name.name, return_type,
                    parameters, modifiers, templated_types, body,
                    self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
    """Split tokens like A::B::Method into (return-type tokens, class name)."""
    # Splitting the return type from the class name in a method
    # can be tricky.  For example, Return::Type::Is::Hard::To::Find().
    # Where is the return type and where is the class name?
    # The heuristic used is to pull the last name as the class name.
    # This includes all the templated type info.
    # TODO(nnorwitz): if there is only One name like in the
    # example above, punt and assume the last bit is the class name.

    # Ignore a :: prefix, if exists so we can find the first real name.
    i = 0
    if token_seq[0].name == '::':
        i = 1
    # Ignore a :: suffix, if exists.
    end = len(token_seq) - 1
    if token_seq[end-1].name == '::':
        end -= 1

    # Make a copy of the sequence so we can append a sentinel value.
    # This is required because GetName needs some terminating condition
    # beyond the last name.
    seq_copy = token_seq[i:end]
    seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
    names = []
    while i < end:
        # Iterate through the sequence parsing out each name.
        new_name, next = self.GetName(seq_copy[i:])
        assert new_name, 'Got empty new_name, next=%s' % next
        # We got a pointer or ref.  Add it to the name.
        if next and next.token_type == tokenize.SYNTAX:
            new_name.append(next)
        names.append(new_name)
        i += len(new_name)

    # Now that we have the names, it's time to undo what we did.

    # Remove the sentinel value.
    names[-1].pop()
    # Flatten the token sequence for the return type.
    return_type = [e for seq in names[:-1] for e in seq]
    # The class name is the last name.
    class_name = names[-1]
    return return_type, class_name
# No-op handlers for builtin type keywords; declarations that start with
# these are parsed by the generic declaration path instead.
def handle_bool(self):
    pass

def handle_char(self):
    pass

def handle_int(self):
    pass

def handle_long(self):
    pass

def handle_short(self):
    pass

def handle_double(self):
    pass

def handle_float(self):
    pass

def handle_void(self):
    pass

def handle_wchar_t(self):
    pass

def handle_unsigned(self):
    pass

def handle_signed(self):
    pass
def _GetNestedType(self, ctor):
    """Parse a union/enum body; returns the node or a variable declaration."""
    name = None
    name_tokens, token = self.GetName()
    if name_tokens:
        name = ''.join([t.name for t in name_tokens])

    # Handle forward declarations.
    if token.token_type == tokenize.SYNTAX and token.name == ';':
        return ctor(token.start, token.end, name, None,
                    self.namespace_stack)

    if token.token_type == tokenize.NAME and self._handling_typedef:
        self._AddBackToken(token)
        return ctor(token.start, token.end, name, None,
                    self.namespace_stack)

    # Must be the type declaration.
    fields = list(self._GetMatchingChar('{', '}'))
    del fields[-1]  # Remove trailing '}'.
    if token.token_type == tokenize.SYNTAX and token.name == '{':
        next = self._GetNextToken()
        new_type = ctor(token.start, token.end, name, fields,
                        self.namespace_stack)
        # A name means this is an anonymous type and the name
        # is the variable declaration.
        if next.token_type != tokenize.NAME:
            return new_type
        name = new_type
        token = next

    # Must be variable declaration using the type prefixed with keyword.
    assert token.token_type == tokenize.NAME, token
    return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
    """Parse a struct: definition, variable decl, or struct-returning method."""
    # Special case the handling typedef/aliasing of structs here.
    # It would be a pain to handle in the class code.
    name_tokens, var_token = self.GetName()
    if name_tokens:
        next_token = self._GetNextToken()
        is_syntax = (var_token.token_type == tokenize.SYNTAX and
                     var_token.name[0] in '*&')
        is_variable = (var_token.token_type == tokenize.NAME and
                       next_token.name == ';')
        variable = var_token
        if is_syntax and not is_variable:
            variable = next_token
            temp = self._GetNextToken()
            if temp.token_type == tokenize.SYNTAX and temp.name == '(':
                # Handle methods declared to return a struct.
                t0 = name_tokens[0]
                # Synthesize a 'struct' token just before the name tokens.
                struct = tokenize.Token(tokenize.NAME, 'struct',
                                        t0.start-7, t0.start-2)
                type_and_name = [struct]
                type_and_name.extend(name_tokens)
                type_and_name.extend((var_token, next_token))
                return self._GetMethod(type_and_name, 0, None, False)
            assert temp.name == ';', (temp, name_tokens, var_token)
        if is_syntax or (is_variable and not self._handling_typedef):
            modifiers = ['struct']
            type_name = ''.join([t.name for t in name_tokens])
            position = name_tokens[0]
            return self._CreateVariable(position, variable.name, type_name,
                                        modifiers, var_token.name, None)
        name_tokens.extend((var_token, next_token))
        self._AddBackTokens(name_tokens)
    else:
        self._AddBackToken(var_token)
    return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
    # Unions and enums share the nested-type parsing path.
    return self._GetNestedType(Union)

def handle_enum(self):
    return self._GetNestedType(Enum)
def handle_auto(self):
    # TODO(nnorwitz): warn about using auto?  Probably not since it
    # will be reclaimed and useful for C++0x.
    pass

# Storage-class and cv keywords carry no structure of their own here.
def handle_register(self):
    pass

def handle_const(self):
    pass

def handle_inline(self):
    pass

def handle_extern(self):
    pass

def handle_static(self):
    pass
def handle_virtual(self):
    """Parse a virtual method declaration/definition."""
    # What follows must be a method.
    token = token2 = self._GetNextToken()
    if token.name == 'inline':
        # HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
        token2 = self._GetNextToken()
    if token2.token_type == tokenize.SYNTAX and token2.name == '~':
        return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
    assert token.token_type == tokenize.NAME or token.name == '::', token
    return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
    # Restore the token(s) consumed above to the front of the sequence.
    return_type_and_name.insert(0, token)
    if token2 is not token:
        return_type_and_name.insert(1, token2)
    return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
                           None, False)
def handle_volatile(self):
    pass

def handle_mutable(self):
    pass

# Access specifiers just update the builder's current visibility state.
def handle_public(self):
    assert self.in_class
    self.visibility = VISIBILITY_PUBLIC

def handle_protected(self):
    assert self.in_class
    self.visibility = VISIBILITY_PROTECTED

def handle_private(self):
    assert self.in_class
    self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
    """Parse a friend declaration up to the semicolon."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    t0 = tokens[0]
    return Friend(t0.start, t0.end, tokens, self.namespace_stack)
# Cast keywords and 'new' need no top-level handling.
def handle_static_cast(self):
    pass

def handle_const_cast(self):
    pass

def handle_dynamic_cast(self):
    pass

def handle_reinterpret_cast(self):
    pass

def handle_new(self):
    pass
def handle_delete(self):
    """Parse a delete statement up to the semicolon."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
    """Parse a typedef declaration and return a Typedef node.

    Handles plain typedefs, typedefs of struct/enum/union/class
    definitions, and (via hacks) pointers-to-functions and arrays.
    """
    token = self._GetNextToken()
    if (token.token_type == tokenize.NAME and
            keywords.IsKeyword(token.name)):
        # Token must be struct/enum/union/class: let the matching
        # handler parse the full type definition first.
        method = getattr(self, 'handle_' + token.name)
        self._handling_typedef = True
        tokens = [method()]
        self._handling_typedef = False
    else:
        tokens = [token]

    # Get the remainder of the typedef up to the semi-colon.
    tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))

    # TODO(nnorwitz): clean all this up.
    assert tokens
    # The last token is normally the new type's name; 'indices' is the
    # token whose source position is reported for the resulting node.
    name = tokens.pop()
    indices = name
    if tokens:
        indices = tokens[0]
    if not indices:
        indices = token
    if name.name == ')':
        # HACK(nnorwitz): Handle pointers to functions "properly".
        # e.g. typedef int (*fn)(void); -- the real name is tokens[3].
        if (len(tokens) >= 4 and
                tokens[1].name == '(' and tokens[2].name == '*'):
            tokens.append(name)
            name = tokens[3]
    elif name.name == ']':
        # HACK(nnorwitz): Handle arrays properly.
        # e.g. typedef int arr[10]; -- the real name is tokens[1].
        if len(tokens) >= 2:
            tokens.append(name)
            name = tokens[1]
    new_type = tokens
    if tokens and isinstance(tokens[0], tokenize.Token):
        # Convert raw tokens into an AST type node.
        new_type = self.converter.ToType(tokens)[0]
    return Typedef(indices.start, indices.end, name.name,
                   new_type, self.namespace_stack)
def handle_typeid(self):
    """Ignore 'typeid'; not needed yet."""

def handle_typename(self):
    """Ignore 'typename'; not needed yet."""
def _GetTemplatedTypes(self):
    """Parse the parameter list of a 'template <...>' clause.

    Returns:
        dict mapping template-parameter name to a
        (type_name_token_or_None, default_value_tokens_or_None) pair.
    """
    result = {}
    tokens = list(self._GetMatchingChar('<', '>'))
    len_tokens = len(tokens) - 1     # Ignore trailing '>'.
    i = 0
    while i < len_tokens:
        key = tokens[i].name
        i += 1
        # Skip 'typename'/'class' keywords and separators.
        if keywords.IsKeyword(key) or key == ',':
            continue
        type_name = default = None
        if i < len_tokens:
            i += 1
            if tokens[i-1].name == '=':
                # Parameter with a default: template <class T = int>.
                assert i < len_tokens, '%s %s' % (i, tokens)
                default, unused_next_token = self.GetName(tokens[i:])
                i += len(default)
            else:
                if tokens[i-1].name != ',':
                    # We got something like: Type variable.
                    # Re-adjust the key (variable) and type_name (Type).
                    key = tokens[i-1].name
                    type_name = tokens[i-2]
        result[key] = (type_name, default)
    return result
def handle_template(self):
    """Parse a 'template' declaration: class, struct, friend, or method."""
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX, token
    assert token.name == '<', token
    templated_types = self._GetTemplatedTypes()
    # TODO(nnorwitz): for now, just ignore the template params.
    token = self._GetNextToken()
    if token.token_type == tokenize.NAME:
        if token.name == 'class':
            return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
        elif token.name == 'struct':
            return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
        elif token.name == 'friend':
            return self.handle_friend()
    self._AddBackToken(token)
    # Not a class template: scan ahead to see whether a '(' (function)
    # or ';' (variable) comes first, then rewind the token stream.
    tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
    tokens.append(last)
    self._AddBackTokens(tokens)
    if last.name == '(':
        return self.GetMethod(FUNCTION_NONE, templated_types)
    # Must be a variable definition.
    return None
def handle_true(self):
    """'true' literal -- nothing to do."""

def handle_false(self):
    """'false' literal -- nothing to do."""

def handle_asm(self):
    """Ignore 'asm' blocks; not needed yet."""

def handle_class(self):
    """Parse a 'class' definition; members default to private."""
    return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
    """Parse a class' base-class list (the part after ':').

    Returns:
        (bases, token): bases is a list of AST type nodes, token is the
        '{' token that opens the class body.
    """
    # Get base classes.
    bases = []
    while 1:
        token = self._GetNextToken()
        assert token.token_type == tokenize.NAME, token
        # TODO(nnorwitz): store kind of inheritance...maybe.
        if token.name not in ('public', 'protected', 'private'):
            # If inheritance type is not specified, it is private.
            # Just put the token back so we can form a name.
            # TODO(nnorwitz): it would be good to warn about this.
            self._AddBackToken(token)
        else:
            # Check for virtual inheritance.
            token = self._GetNextToken()
            if token.name != 'virtual':
                self._AddBackToken(token)
            else:
                # TODO(nnorwitz): store that we got virtual for this base.
                pass
        base, next_token = self.GetName()
        bases_ast = self.converter.ToType(base)
        assert len(bases_ast) == 1, bases_ast
        bases.append(bases_ast[0])
        assert next_token.token_type == tokenize.SYNTAX, next_token
        if next_token.name == '{':
            # End of the base list; hand the '{' back to the caller.
            token = next_token
            break
        # Support multiple inheritance.
        assert next_token.name == ',', next_token
    return bases, token
def _GetClass(self, class_type, visibility, templated_types):
    """Parse a class/struct after its keyword token has been consumed.

    Args:
        class_type: AST node class to instantiate (Class or Struct).
        visibility: default member visibility for the body.
        templated_types: template parameters dict, or None.

    Returns a class_type node, or a variable/method node for the
    'class Foo* x;' style inline declarations.
    """
    class_name = None
    class_token = self._GetNextToken()
    if class_token.token_type != tokenize.NAME:
        # Anonymous class/struct: the next token is already syntax.
        assert class_token.token_type == tokenize.SYNTAX, class_token
        token = class_token
    else:
        # Skip any macro (e.g. storage class specifiers) after the
        # 'class' keyword.
        next_token = self._GetNextToken()
        if next_token.token_type == tokenize.NAME:
            self._AddBackToken(next_token)
        else:
            self._AddBackTokens([class_token, next_token])
        name_tokens, token = self.GetName()
        class_name = ''.join([t.name for t in name_tokens])
    bases = None
    if token.token_type == tokenize.SYNTAX:
        if token.name == ';':
            # Forward declaration.
            return class_type(class_token.start, class_token.end,
                              class_name, None, templated_types, None,
                              self.namespace_stack)
        if token.name in '*&':
            # Inline forward declaration.  Could be method or data.
            name_token = self._GetNextToken()
            next_token = self._GetNextToken()
            if next_token.name == ';':
                # Handle data
                modifiers = ['class']
                return self._CreateVariable(class_token, name_token.name,
                                            class_name,
                                            modifiers, token.name, None)
            else:
                # Assume this is a method.
                tokens = (class_token, token, name_token, next_token)
                self._AddBackTokens(tokens)
                return self.GetMethod(FUNCTION_NONE, None)
        if token.name == ':':
            bases, token = self._GetBases()
    body = None
    if token.token_type == tokenize.SYNTAX and token.name == '{':
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '{', token
        # Parse the class body with a nested builder.
        ast = AstBuilder(self.GetScope(), self.filename, class_name,
                         visibility, self.namespace_stack)
        body = list(ast.Generate())
        if not self._handling_typedef:
            token = self._GetNextToken()
            if token.token_type != tokenize.NAME:
                assert token.token_type == tokenize.SYNTAX, token
                assert token.name == ';', token
            else:
                # 'class Foo { ... } instance_name;' -- wrap the class in
                # a variable declaration.
                new_class = class_type(class_token.start, class_token.end,
                                       class_name, bases, None,
                                       body, self.namespace_stack)
                modifiers = []
                return self._CreateVariable(class_token,
                                            token.name, new_class,
                                            modifiers, token.name, None)
    else:
        if not self._handling_typedef:
            self.HandleError('non-typedef token', token)
        self._AddBackToken(token)
    return class_type(class_token.start, class_token.end, class_name,
                      bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
    """Parse a namespace declaration (named, anonymous, or alias)."""
    token = self._GetNextToken()
    # Support anonymous namespaces.
    name = None
    if token.token_type == tokenize.NAME:
        name = token.name
        token = self._GetNextToken()
    self.namespace_stack.append(name)
    assert token.token_type == tokenize.SYNTAX, token
    # Create an internal token that denotes when the namespace is complete.
    internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
                                    None, None)
    internal_token.whence = token.whence
    if token.name == '=':
        # TODO(nnorwitz): handle aliasing namespaces.
        name, next_token = self.GetName()
        assert next_token.name == ';', next_token
        self._AddBackToken(internal_token)
    else:
        assert token.name == '{', token
        tokens = list(self.GetScope())
        # Replace the trailing } with the internal namespace pop token.
        tokens[-1] = internal_token
        # Handle namespace with nothing in it.
        self._AddBackTokens(tokens)
    return None
def handle_using(self):
    """Consume a using declaration/directive up to ';'."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    first = tokens[0]
    return Using(first.start, first.end, tokens)
def handle_explicit(self):
    """Parse an 'explicit' constructor; only valid inside a class."""
    assert self.in_class
    # Nothing much to do.
    # TODO(nnorwitz): maybe verify the method name == class name.
    # This must be a ctor.
    return self.GetMethod(FUNCTION_CTOR, None)

def handle_this(self):
    """'this' keyword -- nothing to do."""
def handle_operator(self):
    """Ignore 'operator'.

    TODO: pull off the next token(s?) and make that part of the
    method name.
    """

def handle_sizeof(self):
    """Ignore 'sizeof'."""

def handle_case(self):
    """Ignore 'case' labels."""

def handle_switch(self):
    """Ignore 'switch' statements."""

def handle_default(self):
    """Consume the ':' that follows a 'default' label."""
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX
    assert token.name == ':'

def handle_if(self):
    """Ignore 'if' statements."""

def handle_else(self):
    """Ignore 'else' clauses."""
def handle_return(self):
    """Consume a return statement up to ';' and wrap it in a Return node."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    if tokens:
        return Return(tokens[0].start, tokens[0].end, tokens)
    # Bare 'return;' -- anchor the node at the current token.
    return Return(self.current_token.start, self.current_token.end, None)
def handle_goto(self):
    """Consume a goto statement; expects exactly one label token."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert len(tokens) == 1, str(tokens)
    label = tokens[0]
    return Goto(label.start, label.end, label.name)
def handle_try(self):
    """Ignore 'try'; not needed yet."""

def handle_catch(self):
    """Ignore 'catch'; not needed yet."""

def handle_throw(self):
    """Ignore 'throw'; not needed yet."""

def handle_while(self):
    """Ignore 'while' statements."""

def handle_do(self):
    """Ignore 'do' statements."""

def handle_for(self):
    """Ignore 'for' statements."""

def handle_break(self):
    """Discard the remainder of a break statement."""
    self._IgnoreUpTo(tokenize.SYNTAX, ';')

def handle_continue(self):
    """Discard the remainder of a continue statement."""
    self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
    """Return an AstBuilder over the tokens of the given C++ source.

    Args:
        source: C++ source code as a string.
        filename: name of the file the source came from.

    Returns:
        AstBuilder
    """
    return AstBuilder(tokenize.GetTokens(source), filename)
def PrintIndentifiers(filename, should_print):
    """Print all identifiers for a C++ source file.

    NOTE(review): the name is misspelled ("Indentifiers") but is kept
    unchanged for backward compatibility with existing callers.

    Args:
        filename: path of the file to process.
        should_print: predicate with signature: bool Function(node).
    """
    source = utils.ReadFile(filename, False)
    if source is None:
        sys.stderr.write('Unable to find: %s\n' % filename)
        return

    #print('Processing %s' % actual_filename)
    builder = BuilderFromSource(source, filename)
    try:
        for node in builder.Generate():
            if should_print(node):
                print(node.name)
    except KeyboardInterrupt:
        return
    except Exception:
        # Parse errors are expected on some inputs; skip the rest of this
        # file but keep going.  Narrowed from a bare 'except:' which also
        # swallowed SystemExit and other non-Exception interrupts.
        pass
def PrintAllIndentifiers(filenames, should_print):
    """Print identifiers for each C++ source file in filenames.

    Args:
        filenames: list of file paths, e.g. ['file1', 'file2', ...].
        should_print: predicate with signature: bool Function(node).
    """
    for path in filenames:
        PrintIndentifiers(path, should_print)
def main(argv):
    """Parse each file named on the command line and optionally dump its AST.

    Args:
        argv: command-line arguments; argv[1:] are treated as file names.
    """
    for filename in argv[1:]:
        source = utils.ReadFile(filename)
        if source is None:
            continue

        print('Processing %s' % filename)
        builder = BuilderFromSource(source, filename)
        try:
            # BUGFIX: materialize eagerly.  On Python 3 filter() is lazy,
            # so without list() parse errors from Generate() would surface
            # during iteration below -- *outside* this try block.
            entire_ast = list(filter(None, builder.Generate()))
        except KeyboardInterrupt:
            return
        except Exception:
            # Already printed a warning, print the traceback and continue.
            # (Narrowed from a bare 'except:' which also caught SystemExit.)
            traceback.print_exc()
        else:
            if utils.DEBUG:
                for ast in entire_ast:
                    print(ast)


if __name__ == '__main__':
    main(sys.argv)
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import sys

from sphinx import apidoc

# Directory containing this conf.py, resolved independently of whatever
# working directory sphinx-build is invoked from.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))

output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../foobar")
# Kept for reference; no longer split and executed (see below).
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
# BUGFIX: pass argv as an explicit list instead of cmd_line.split(" ") --
# splitting on spaces broke the call whenever either directory path
# contained a space.
apidoc.main(["sphinx-apidoc", "-f", "-o", output_dir, module_dir])
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): 'sphinx.ext.pngmath' was removed in Sphinx 1.8 in favour of
# 'sphinx.ext.imgmath' -- update this entry when upgrading Sphinx.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'foobar'
copyright = u'2014, Martin Hunt'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''  # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = ''  # Is set by calling `setup.py docs`

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# NOTE(review): the try/import below is unrelated to the comment above.  It
# overrides the placeholder |release| with the installed package's
# __version__ when the package is importable.
try:
    from foobar import __version__ as version
except ImportError:
    pass
else:
    release = version

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'foobar-doc'
# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'user_guide.tex', u'foobar Documentation',
     u'Martin Hunt', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------

# The running interpreter's X.Y version, used to pick the matching Python docs.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    # NOTE(review): URLs updated to the projects' current documentation
    # hosts; the previous http:// inventory URLs (e.g. sphinx.pocoo.org,
    # matplotlib.sourceforge.net) are dead or redirect.
    'sphinx': ('https://www.sphinx-doc.org/en/master', None),
    'python': ('https://docs.python.org/' + python_version, None),
    'matplotlib': ('https://matplotlib.org/stable', None),
    'numpy': ('https://numpy.org/doc/stable', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'pandas': ('https://pandas.pydata.org/docs', None),
    'scipy': ('https://docs.scipy.org/doc/scipy', None),
}
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
class StubOutForTesting:
    """Utility to temporarily replace ("stub out") attributes during tests.

    Sample Usage:

    You want os.path.exists() to always return true during testing.

       stubs = StubOutForTesting()
       stubs.Set(os.path, 'exists', lambda x: 1)
         ...
       stubs.UnsetAll()

    The above changes os.path.exists into a lambda that returns 1.  Once
    the ... part of the code finishes, the UnsetAll() looks up the old value
    of os.path.exists and restores it.
    """

    def __init__(self):
        self.cache = []   # (parent, old_child, child_name) entries saved by Set().
        self.stubs = []   # (orig_obj, attr_name, orig_attr) entries saved by SmartSet().

    def __del__(self):
        # Best effort: restore everything if the owner forgot to.
        self.SmartUnsetAll()
        self.UnsetAll()

    def SmartSet(self, obj, attr_name, new_attr):
        """Replace obj.attr_name with new_attr. This method is smart and works
        at the module, class, and instance level while preserving proper
        inheritance. It will not stub out C types however unless that has been
        explicitly allowed by the type.

        This method supports the case where attr_name is a staticmethod or a
        classmethod of obj.

        Notes:
          - If obj is an instance, then it is its class that will actually be
            stubbed. Note that the method Set() does not do that: if obj is
            an instance, it (and not its class) will be stubbed.
          - The stubbing is using the builtin getattr and setattr. So, the
            __get__ and __set__ will be called when stubbing (TODO: A better
            idea would probably be to manipulate obj.__dict__ instead of
            getattr() and setattr()).

        Raises AttributeError if the attribute cannot be found.
        """
        # BUGFIX(review): 'inspect' was used here but never imported anywhere
        # in this file, so every SmartSet() call raised NameError.  Imported
        # locally to keep the fix self-contained.
        import inspect
        # BUGFIX-adjacent: 'attr_name in obj.__dict__' replaces the original
        # obj.__dict__.has_key(attr_name), which does not exist on Python 3
        # ('in' is equivalent on Python 2 as well).
        if (inspect.ismodule(obj) or
                (not inspect.isclass(obj) and attr_name in obj.__dict__)):
            orig_obj = obj
            orig_attr = getattr(obj, attr_name)
        else:
            if not inspect.isclass(obj):
                mro = list(inspect.getmro(obj.__class__))
            else:
                mro = list(inspect.getmro(obj))
            mro.reverse()
            orig_attr = None
            # Walk base-most to derived-most so orig_obj ends up as the most
            # derived class that actually defines/inherits the attribute.
            for cls in mro:
                try:
                    orig_obj = cls
                    orig_attr = getattr(obj, attr_name)
                except AttributeError:
                    continue
            if orig_attr is None:
                raise AttributeError("Attribute not found.")

        # Calling getattr() on a staticmethod transforms it to a 'normal'
        # function.  We need to ensure that we put it back as a staticmethod.
        old_attribute = obj.__dict__.get(attr_name)
        if old_attribute is not None and isinstance(old_attribute, staticmethod):
            orig_attr = staticmethod(orig_attr)

        self.stubs.append((orig_obj, attr_name, orig_attr))
        setattr(orig_obj, attr_name, new_attr)

    def SmartUnsetAll(self):
        """Reverses all the SmartSet() calls, restoring things to their
        original definition.  It's okay to call SmartUnsetAll() repeatedly, as
        later calls have no effect if no SmartSet() calls have been made.
        """
        self.stubs.reverse()
        for args in self.stubs:
            setattr(*args)
        self.stubs = []

    def Set(self, parent, child_name, new_child):
        """Replace child_name's old definition with new_child, in the context
        of the given parent. The parent could be a module when the child is a
        function at module scope. Or the parent could be a class when a class'
        method is being replaced. The named child is set to new_child, while
        the prior definition is saved away for later, when UnsetAll() is
        called.

        This method supports the case where child_name is a staticmethod or a
        classmethod of parent.
        """
        old_child = getattr(parent, child_name)
        old_attribute = parent.__dict__.get(child_name)
        if old_attribute is not None and isinstance(old_attribute, staticmethod):
            old_child = staticmethod(old_child)
        self.cache.append((parent, old_child, child_name))
        setattr(parent, child_name, new_child)

    def UnsetAll(self):
        """Reverses all the Set() calls, restoring things to their original
        definition.  It's okay to call UnsetAll() repeatedly, as later calls
        have no effect if no Set() calls have been made.
        """
        # Undo calls to Set() in reverse order, in case Set() was called on
        # the same arguments repeatedly (want the original call to be last
        # one undone).
        self.cache.reverse()
        for (parent, old_child, child_name) in self.cache:
            setattr(parent, child_name, old_child)
        self.cache = []
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.migrations import TERM_YELLOW, TERM_RESET
from askbot.utils.console import ProgressBar
class Migration(DataMigration):
def forwards(self, orm):
    """Copy accepted-answer data from Question rows onto their Threads,
    then re-scan everything to verify the copied data is consistent.

    NOTE: Python 2 syntax (print statement) -- this is a South-era
    migration and must stay py2-compatible.
    """
    message = "Adding accepted answers to threads"
    num_questions = orm.Question.objects.count()
    for question in ProgressBar(orm.Question.objects.iterator(), num_questions, message):
        thread = question.thread
        if question.answer_accepted:
            try:
                accepted_answer = question.answers.get(accepted=True)
            except orm.Answer.DoesNotExist:
                # Unfortunately there's probably nothing we can do in this case,
                # there's no record of which answer has been accepted (HMM, MAYBE IN ACTIVITIES???)
                print TERM_YELLOW, "Found a thread with question.answer_accepted, " \
                    "but no answer actually marked as accepted, question.id=%d." \
                    "This happens so there's no need to panic." % question.id, TERM_RESET
            else:
                thread.accepted_answer = accepted_answer
                thread.answer_accepted_at = accepted_answer.accepted_at
                thread.save()

    # Verify data integrity
    message = "Checking correctness of accepted answer data"
    for question in ProgressBar(orm.Question.objects.iterator(), num_questions, message):
        accepted_answers = question.answers.filter(accepted=True)
        num_accepted_answers = len(accepted_answers)
        # A thread is inconsistent if the accepted flags on its answers
        # disagree with what was just stored on the thread (presence,
        # identity, or timestamp), or if more than one answer is accepted.
        if( (num_accepted_answers == 0 and question.thread.accepted_answer) or
            (num_accepted_answers == 1 and not question.thread.accepted_answer) or
            (num_accepted_answers == 1 and question.thread.accepted_answer != accepted_answers[0]) or
            (num_accepted_answers == 1 and question.thread.answer_accepted_at != accepted_answers[0].accepted_at) or
            (num_accepted_answers > 1) ):
            raise ValueError("There are Thread instances for which data doesn't match Question/Answers!")
def backwards(self, orm):
"Write your backwards methods here."
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Answer']", 'null': 'True', 'blank': 'True'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot'] | unknown | codeparrot/codeparrot-clean | ||
//===- bolt/Passes/CacheMetrics.h - Instruction cache metrics ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functions to show metrics of cache lines.
//
//===----------------------------------------------------------------------===//
#ifndef BOLT_PASSES_CACHEMETRICS_H
#define BOLT_PASSES_CACHEMETRICS_H
#include "bolt/Core/BinaryContext.h"
#include <vector>
namespace llvm {
class raw_ostream;
namespace bolt {
namespace CacheMetrics {
/// Calculate and print various metrics related to instruction cache performance
/// \param OS              output stream the report is written to
/// \param BinaryFunctions functions whose cache behavior is evaluated
void printAll(raw_ostream &OS, const BinaryFunctionListType &BinaryFunctions);
} // namespace CacheMetrics
} // namespace bolt
} // namespace llvm
#endif // BOLT_PASSES_CACHEMETRICS_H | c | github | https://github.com/llvm/llvm-project | bolt/include/bolt/Passes/CacheMetrics.h |
//! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls_023 as rustls;
use std::{
io,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
use tokio_util::codec::Encoder;
#[actix_rt::main]
async fn main() -> io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"))  ;

    // Serve the same HTTP/1.1 WebSocket handler on two listeners:
    // plaintext TCP on :8080 and rustls-backed TLS on :8443.
    Server::build()
        .bind("tcp", ("127.0.0.1", 8080), || {
            HttpService::build().h1(handler).tcp()
        })?
        .bind("tls", ("127.0.0.1", 8443), || {
            HttpService::build()
                .finish(handler)
                .rustls_0_23(tls_config())
        })?
        .run()
        .await
}
/// Perform the WebSocket handshake on the incoming request and respond with
/// a body that streams heartbeat frames (see `Heartbeat`).
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
    tracing::info!("handshaking");
    let mut res = ws::handshake(req.head())?;

    // handshake will always fail under HTTP/2
    tracing::info!("responding");
    res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))
}
/// Stream of encoded WebSocket frames: one text frame per interval tick.
struct Heartbeat {
    // Encodes `ws::Message` values into wire bytes.
    codec: ws::Codec,
    // Tick source driving the stream (4 seconds; see `Heartbeat::new`).
    interval: Interval,
}
impl Heartbeat {
fn new(codec: ws::Codec) -> Self {
Self {
codec,
interval: interval(Duration::from_secs(4)),
}
}
}
impl Stream for Heartbeat {
    type Item = Result<Bytes, Error>;

    /// Stay pending until the interval ticks, then yield one encoded
    /// "hello world" text frame.  Never returns `Ready(None)`.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        tracing::trace!("poll");

        // `ready!` returns `Poll::Pending` early if the tick is not due yet.
        ready!(self.as_mut().interval.poll_tick(cx));

        let mut buffer = BytesMut::new();

        self.as_mut()
            .codec
            .encode(
                ws::Message::Text(ByteString::from_static("hello world")),
                &mut buffer,
            )
            .unwrap();

        Poll::Ready(Some(Ok(buffer.freeze())))
    }
}
/// Build a rustls server config using a freshly generated self-signed
/// certificate for "localhost", advertising both HTTP/1.1 and h2 via ALPN.
fn tls_config() -> rustls::ServerConfig {
    // Generate a throwaway certificate + key pair for this process.
    let rcgen::CertifiedKey { cert, key_pair } =
        rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap();

    let cert_chain = vec![cert.der().clone()];
    let key_der = rustls_pki_types::PrivateKeyDer::Pkcs8(
        rustls_pki_types::PrivatePkcs8KeyDer::from(key_pair.serialize_der()),
    );

    let mut config = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(cert_chain, key_der)
        .unwrap();

    config.alpn_protocols.push(b"http/1.1".to_vec());
    config.alpn_protocols.push(b"h2".to_vec());

    config
}
import unittest
import os
import shutil
import numpy as np
from esp import specUtils
class testSpecUtils(unittest.TestCase):
    """Unit tests for specUtils spectrum loading and flux normalization."""

    @classmethod
    def setUpClass(cls):
        # Fabricate a 2 x 50 (wavelength, flux) spectrum and write it to a
        # fresh scratch directory in the whitespace-delimited format that
        # load_spectra reads.
        wavelengths = np.arange(50)
        fluxes = np.random.uniform(size=50)
        cls.sample_spec = np.array([wavelengths, fluxes])

        if os.path.exists('scratch_spec'):
            shutil.rmtree('scratch_spec')
        os.mkdir('scratch_spec')

        np.savetxt('scratch_spec/sample.dat', cls.sample_spec.T,
                   header='Lambda Flux', delimiter=' ')

    def test_load_spectra(self):
        # Loading the scratch directory should round-trip the wavelength
        # grid, the flux values, and the file name.
        loaded = specUtils().load_spectra('scratch_spec')[0]

        np.testing.assert_array_equal(self.sample_spec[0], loaded.wavelen)
        np.testing.assert_array_equal(self.sample_spec[1], loaded.flambda)
        self.assertEqual('sample.dat', loaded.name)

    def test_scale_spectrum(self):
        # Scaling should normalize the flux so that it sums to one.
        helper = specUtils()
        spectrum = helper.load_spectra('scratch_spec')[0]
        rescaled = helper.scale_spectrum(spectrum.flambda)

        self.assertAlmostEqual(np.sum(rescaled), 1.0)

    @classmethod
    def tearDownClass(cls):
        # Remove the scratch directory created in setUpClass.
        if os.path.exists('scratch_spec'):
            shutil.rmtree('scratch_spec')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
{
"openapi": "3.0.1",
"info": {
"title": "Calculator Plugin",
"description": "A plugin that allows the user to perform basic arithmetic operations like addition, subtraction, multiplication, division, power, and square root using ChatGPT.",
"version": "v1"
},
"servers": [
{
"url": "https://chat-calculator-plugin.supportmirage.repl.co"
}
],
"paths": {
"/calculator/{operation}/{a}/{b}": {
"get": {
"operationId": "calculate",
"summary": "Perform a calculation",
"parameters": [
{
"in": "path",
"name": "operation",
"schema": {
"type": "string",
"enum": [
"add",
"subtract",
"multiply",
"divide",
"power"
]
},
"required": true,
"description": "The operation to perform."
},
{
"in": "path",
"name": "a",
"schema": {
"type": "number"
},
"required": true,
"description": "The first operand."
},
{
"in": "path",
"name": "b",
"schema": {
"type": "number"
},
"required": true,
"description": "The second operand."
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/calculateResponse"
}
}
}
}
}
}
},
"/calculator/sqrt/{a}": {
"get": {
"operationId": "sqrt",
"summary": "Find the square root of a number",
"parameters": [
{
"in": "path",
"name": "a",
"schema": {
"type": "number"
},
"required": true,
"description": "The number to find the square root of."
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/calculateResponse"
}
}
}
}
}
}
}
},
"components": {
"schemas": {
"calculateResponse": {
"type": "object",
"properties": {
"result": {
"type": "number",
"description": "The result of the calculation."
}
}
}
}
}
} | json | github | https://github.com/langchain-ai/langchain | libs/langchain/tests/unit_tests/examples/test_specs/calculator/apispec.json |
// Originally from narwhal.js (http://narwhaljs.org)
// Copyright (c) 2009 Thomas Robinson <280north.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the 'Software'), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
const {
ArrayPrototypeForEach,
ArrayPrototypeIndexOf,
ArrayPrototypeJoin,
ArrayPrototypePush,
ArrayPrototypeSlice,
Error,
FunctionPrototypeCall,
NumberIsNaN,
ObjectAssign,
ObjectDefineProperty,
ObjectIs,
ObjectKeys,
ObjectPrototypeIsPrototypeOf,
RegExpPrototypeExec,
String,
StringPrototypeIndexOf,
StringPrototypeSlice,
StringPrototypeSplit,
Symbol,
} = primordials;
const {
codes: {
ERR_AMBIGUOUS_ARGUMENT,
ERR_CONSTRUCT_CALL_REQUIRED,
ERR_INVALID_ARG_TYPE,
ERR_INVALID_ARG_VALUE,
ERR_INVALID_RETURN_VALUE,
ERR_MISSING_ARGS,
},
} = require('internal/errors');
const AssertionError = require('internal/assert/assertion_error');
const { inspect } = require('internal/util/inspect');
const {
isPromise,
isRegExp,
} = require('internal/util/types');
const { isError, setOwnProperty } = require('internal/util');
const { innerOk, innerFail } = require('internal/assert/utils');
const {
validateFunction,
validateOneOf,
} = require('internal/validators');
// Private symbol key under which an Assert instance stores its options.
const kOptions = Symbol('options');

// Comparison helpers, populated lazily by lazyLoadComparison().
let isDeepEqual;
let isDeepStrictEqual;
let isPartialStrictEqual;
function lazyLoadComparison() {
const comparison = require('internal/util/comparisons');
isDeepEqual = comparison.isDeepEqual;
isDeepStrictEqual = comparison.isDeepStrictEqual;
isPartialStrictEqual = comparison.isPartialStrictEqual;
}
// The assert module provides functions that throw
// AssertionError's when particular conditions are not met. The
// assert module must conform to the following interface.
module.exports = assert;
// Unique marker returned by getActual()/waitForActual() when the checked
// function completed without throwing/rejecting; always compared by identity.
const NO_EXCEPTION_SENTINEL = {};
/**
* Assert options.
* @typedef {object} AssertOptions
* @property {'full'|'simple'} [diff='simple'] - If set to 'full', shows the full diff in assertion errors.
* @property {boolean} [strict=true] - If set to true, non-strict methods behave like their corresponding
* strict methods.
* @property {boolean} [skipPrototype=false] - If set to true, skips comparing prototypes
* in deep equality checks.
*/
/**
* @class Assert
* @param {AssertOptions} [options] - Optional configuration for assertions.
* @throws {ERR_CONSTRUCT_CALL_REQUIRED} If not called with `new`.
*/
function Assert(options) {
  if (!new.target) {
    throw new ERR_CONSTRUCT_CALL_REQUIRED('Assert');
  }
  // Merge caller options over the defaults on a null-prototype object.
  const resolved = ObjectAssign(
    { __proto__: null, strict: true, skipPrototype: false },
    options,
  );
  if (resolved.diff !== undefined) {
    validateOneOf(resolved.diff, 'options.diff', ['simple', 'full']);
  }
  this.AssertionError = AssertionError;
  // Stash the options on a symbol key so they stay out of enumeration and
  // cannot be reassigned after construction.
  ObjectDefineProperty(this, kOptions, {
    __proto__: null,
    value: resolved,
    enumerable: false,
    configurable: false,
    writable: false,
  });
  // In strict mode the coercive methods alias their strict counterparts.
  if (resolved.strict) {
    this.equal = this.strictEqual;
    this.deepEqual = this.deepStrictEqual;
    this.notEqual = this.notStrictEqual;
    this.notDeepEqual = this.notDeepStrictEqual;
  }
}
// All of the following functions must throw an AssertionError
// when a corresponding condition is not met, with a message that
// may be undefined if not provided. All assertion methods provide
// both the actual and expected values to the assertion error for
// display purposes.
// DESTRUCTURING WARNING: All Assert.prototype methods use optional chaining
// (this?.[kOptions]) to safely access instance configuration. When methods are
// destructured from an Assert instance (e.g., const {strictEqual} = myAssert),
// they lose their `this` context and will use default behavior instead of the
// instance's custom options.
/**
* Throws an AssertionError with the given message.
* @param {any | Error} [message]
*/
Assert.prototype.fail = function fail(message) {
  // An Error instance passed as the message is re-thrown untouched.
  if (isError(message)) throw message;
  const generated = message === undefined;
  if (generated) {
    message = 'Failed';
  }
  // Optional chaining keeps this working when `fail` is destructured off an
  // instance and `this` is therefore undefined (defaults then apply).
  const err = new AssertionError({
    operator: 'fail',
    stackStartFn: fail,
    message,
    diff: this?.[kOptions]?.diff,
  });
  if (generated) {
    err.generatedMessage = true;
  }
  throw err;
};
// The AssertionError is defined in internal/error.
// Re-exported so callers can instanceof-check failures thrown by assert.
assert.AssertionError = AssertionError;
/**
* Pure assertion tests whether a value is truthy, as determined
* by !!value.
* @param {...any} args
* @returns {void}
*/
function assert(...args) {
  // Delegate to innerOk so the generated error's stack starts at the caller.
  innerOk(assert, ...args);
}
/**
* Pure assertion tests whether a value is truthy, as determined
* by !!value.
* Duplicated as the other `ok` function is supercharged and exposed as default export.
* @param {...any} args
* @returns {void}
*/
Assert.prototype.ok = function ok(...args) {
  // Same as the module-level assert(); kept separate so stackStartFn is `ok`.
  innerOk(ok, ...args);
};
/**
* The equality assertion tests shallow, coercive equality with ==.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.equal = function equal(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  // NaN is deliberately treated as equal to NaN, unlike the == operator.
  const bothNaN = NumberIsNaN(actual) && NumberIsNaN(expected);
  // eslint-disable-next-line eqeqeq
  if (actual == expected || bothNaN) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: '==',
    stackStartFn: equal,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The non-equality assertion tests for whether two objects are not
* equal with !=.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.notEqual = function notEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  // Two NaNs count as equal here, mirroring equal()'s special case.
  const bothNaN = NumberIsNaN(actual) && NumberIsNaN(expected);
  // eslint-disable-next-line eqeqeq
  if (actual != expected && !bothNaN) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: '!=',
    stackStartFn: notEqual,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The deep equivalence assertion tests a deep equality relation.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.deepEqual = function deepEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  if (isDeepEqual === undefined) lazyLoadComparison();
  // Coercive deep comparison; bail out early on success.
  if (isDeepEqual(actual, expected)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'deepEqual',
    stackStartFn: deepEqual,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The deep non-equivalence assertion tests for any deep inequality.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.notDeepEqual = function notDeepEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  if (isDeepEqual === undefined) lazyLoadComparison();
  // Success means the values differ somewhere in the deep comparison.
  if (!isDeepEqual(actual, expected)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'notDeepEqual',
    stackStartFn: notDeepEqual,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The deep strict equivalence assertion tests a deep strict equality
* relation.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.deepStrictEqual = function deepStrictEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  if (isDeepEqual === undefined) lazyLoadComparison();
  // skipPrototype comes from the instance options; undefined (the default)
  // when the method was destructured and `this` is lost.
  if (isDeepStrictEqual(actual, expected, this?.[kOptions]?.skipPrototype)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'deepStrictEqual',
    stackStartFn: deepStrictEqual,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The deep strict non-equivalence assertion tests for any deep strict
* inequality.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
// Declared as a hoisted function so the prototype assignment can precede it.
Assert.prototype.notDeepStrictEqual = notDeepStrictEqual;
function notDeepStrictEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  if (isDeepEqual === undefined) lazyLoadComparison();
  if (!isDeepStrictEqual(actual, expected, this?.[kOptions]?.skipPrototype)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'notDeepStrictEqual',
    stackStartFn: notDeepStrictEqual,
    diff: this?.[kOptions]?.diff,
  });
}
/**
* The strict equivalence assertion tests a strict equality relation.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.strictEqual = function strictEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  // ObjectIs (SameValue): like === but NaN equals NaN and +0 differs from -0.
  if (ObjectIs(actual, expected)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'strictEqual',
    stackStartFn: strictEqual,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The strict non-equivalence assertion tests for any strict inequality.
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.notStrictEqual = function notStrictEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  // SameValue inequality is the success case here.
  if (!ObjectIs(actual, expected)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'notStrictEqual',
    stackStartFn: notStrictEqual,
    diff: this?.[kOptions]?.diff,
  });
};
/**
* The strict equivalence assertion test between two objects
* @param {any} actual
* @param {any} expected
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.partialDeepStrictEqual = function partialDeepStrictEqual(actual, expected, ...message) {
  if (arguments.length < 2) {
    throw new ERR_MISSING_ARGS('actual', 'expected');
  }
  if (isDeepEqual === undefined) lazyLoadComparison();
  // Passes when `expected` is a (deep, strict) subset of `actual`.
  if (isPartialStrictEqual(actual, expected)) {
    return;
  }
  innerFail({
    actual,
    expected,
    message,
    operator: 'partialDeepStrictEqual',
    stackStartFn: partialDeepStrictEqual,
    diff: this?.[kOptions]?.diff,
  });
};
class Comparison {
  // Builds a plain object mirroring `obj` restricted to `keys`; used purely
  // to render a readable diff. When `actual` is supplied and the expected
  // value for a key is a RegExp that matches the actual string, the actual
  // string is displayed instead so the regexp itself isn't shown as a
  // mismatch.
  constructor(obj, keys, actual) {
    for (const key of keys) {
      if (!(key in obj)) {
        continue;
      }
      const expectedValue = obj[key];
      const regExpMatches = actual !== undefined &&
        typeof actual[key] === 'string' &&
        isRegExp(expectedValue) &&
        RegExpPrototypeExec(expectedValue, actual[key]) !== null;
      this[key] = regExpMatches ? actual[key] : expectedValue;
    }
  }
}
// Compares one property of a caught error against the validation object's
// expectation and throws an AssertionError on mismatch.
// NOTE(review): this is invoked as a bare function (see expectedException),
// so `this` is undefined here and the instance diff option never applies —
// confirm whether that is intentional.
function compareExceptionKey(actual, expected, key, message, keys, fn) {
  if (!(key in actual) || !isDeepStrictEqual(actual[key], expected[key])) {
    if (!message) {
      // Create placeholder objects to create a nice output.
      const a = new Comparison(actual, keys);
      const b = new Comparison(expected, keys, actual);
      const err = new AssertionError({
        actual: a,
        expected: b,
        operator: 'deepStrictEqual',
        stackStartFn: fn,
        diff: this?.[kOptions]?.diff,
      });
      // Restore the real values/operator after the constructor rendered the
      // trimmed placeholders into the message.
      err.actual = actual;
      err.expected = expected;
      err.operator = fn.name;
      throw err;
    }
    innerFail({
      actual,
      expected,
      message: [message],
      operator: fn.name,
      stackStartFn: fn,
      diff: this?.[kOptions]?.diff,
    });
  }
}
// Validates a caught value against the `expected` argument of throws()/
// rejects(). `expected` may be a RegExp (matched against String(actual)),
// a validation object / Error instance (key-by-key comparison), an Error
// class (instanceof check), or a validation function (must return true).
// Returns silently on success; throws AssertionError otherwise.
function expectedException(actual, expected, message, fn) {
  let generatedMessage = false;
  let throwError = false;
  if (typeof expected !== 'function') {
    // Handle regular expressions.
    if (isRegExp(expected)) {
      const str = String(actual);
      if (RegExpPrototypeExec(expected, str) !== null)
        return;
      if (!message) {
        generatedMessage = true;
        message = 'The input did not match the regular expression ' +
          `${inspect(expected)}. Input:\n\n${inspect(str)}\n`;
      }
      throwError = true;
      // Handle primitives properly.
    } else if (typeof actual !== 'object' || actual === null) {
      // A primitive was thrown but an object/regexp was expected: fail with
      // a deepStrictEqual-style error carrying the validator's name.
      const err = new AssertionError({
        actual,
        expected,
        message,
        operator: 'deepStrictEqual',
        stackStartFn: fn,
        diff: this?.[kOptions]?.diff,
      });
      err.operator = fn.name;
      throw err;
    } else {
      // Handle validation objects.
      const keys = ObjectKeys(expected);
      // Special handle errors to make sure the name and the message are
      // compared as well.
      if (expected instanceof Error) {
        ArrayPrototypePush(keys, 'name', 'message');
      } else if (keys.length === 0) {
        throw new ERR_INVALID_ARG_VALUE('error',
                                        expected, 'may not be an empty object');
      }
      if (isDeepEqual === undefined) lazyLoadComparison();
      for (const key of keys) {
        // A RegExp expectation that matches the actual string property
        // counts as a pass for that key.
        if (typeof actual[key] === 'string' &&
            isRegExp(expected[key]) &&
            RegExpPrototypeExec(expected[key], actual[key]) !== null) {
          continue;
        }
        compareExceptionKey(actual, expected, key, message, keys, fn);
      }
      return;
    }
    // Guard instanceof against arrow functions as they don't have a prototype.
    // Check for matching Error classes.
  } else if (expected.prototype !== undefined && actual instanceof expected) {
    return;
  } else if (ObjectPrototypeIsPrototypeOf(Error, expected)) {
    // `expected` is an Error class but `actual` is not an instance of it.
    if (!message) {
      generatedMessage = true;
      message = 'The error is expected to be an instance of ' +
        `"${expected.name}". Received `;
      if (isError(actual)) {
        const name = (actual.constructor?.name) ||
          actual.name;
        if (expected.name === name) {
          message += 'an error with identical name but a different prototype.';
        } else {
          message += `"${name}"`;
        }
        if (actual.message) {
          message += `\n\nError message:\n\n${actual.message}`;
        }
      } else {
        message += `"${inspect(actual, { depth: -1 })}"`;
      }
    }
    throwError = true;
  } else {
    // Check validation functions return value.
    // Called with an empty object as receiver so the validator cannot
    // observe or mutate assert's internals via `this`.
    const res = FunctionPrototypeCall(expected, {}, actual);
    if (res !== true) {
      if (!message) {
        generatedMessage = true;
        const name = expected.name ? `"${expected.name}" ` : '';
        message = `The ${name}validation function is expected to return` +
          ` "true". Received ${inspect(res)}`;
        if (isError(actual)) {
          message += `\n\nCaught error:\n\n${actual}`;
        }
      }
      throwError = true;
    }
  }
  if (throwError) {
    const err = new AssertionError({
      actual,
      expected,
      message,
      operator: fn.name,
      stackStartFn: fn,
      diff: this?.[kOptions]?.diff,
    });
    err.generatedMessage = generatedMessage;
    throw err;
  }
}
// Runs `fn` and captures whatever it throws; returns the identity-compared
// NO_EXCEPTION_SENTINEL when it completes normally.
function getActual(fn) {
  validateFunction(fn, 'fn');
  try {
    fn();
  } catch (err) {
    return err;
  }
  return NO_EXCEPTION_SENTINEL;
}
function checkIsPromise(obj) {
  // Native promises pass immediately.
  if (isPromise(obj)) {
    return true;
  }
  // Otherwise accept promise-like objects, but only real objects that carry
  // both `then` and `catch` — bare thenable functions are rejected.
  return obj !== null &&
    typeof obj === 'object' &&
    typeof obj.then === 'function' &&
    typeof obj.catch === 'function';
}
// Resolves the argument of rejects()/doesNotReject() to either the rejection
// reason or NO_EXCEPTION_SENTINEL. Accepts a promise directly or a function
// that must return one.
async function waitForActual(promiseFn) {
  let resultPromise;
  // Promises (and promise-likes) are never callable per checkIsPromise, so
  // testing them first cannot shadow the function branch.
  if (checkIsPromise(promiseFn)) {
    resultPromise = promiseFn;
  } else if (typeof promiseFn === 'function') {
    // A synchronous throw here rejects this async function, which is the
    // desired behavior for the callers.
    resultPromise = promiseFn();
    // Fail in case no promise is returned.
    if (!checkIsPromise(resultPromise)) {
      throw new ERR_INVALID_RETURN_VALUE('instance of Promise',
                                         'promiseFn', resultPromise);
    }
  } else {
    throw new ERR_INVALID_ARG_TYPE(
      'promiseFn', ['Function', 'Promise'], promiseFn);
  }
  try {
    await resultPromise;
  } catch (e) {
    return e;
  }
  return NO_EXCEPTION_SENTINEL;
}
// Shared implementation of throws()/rejects(). `actual` is the value caught
// by getActual()/waitForActual(); `error` is the optional expectation and
// `message` an optional custom message.
// NOTE(review): the prototype wrappers call this without a receiver, so
// `this` is undefined and the instance diff option does not propagate on
// this path — confirm whether that is intentional.
function expectsError(stackStartFn, actual, error, message) {
  if (typeof error === 'string') {
    // A lone string third argument is the message, not the expectation —
    // but a fourth argument makes the intent ambiguous, so reject it.
    if (arguments.length === 4) {
      throw new ERR_INVALID_ARG_TYPE('error',
                                     ['Object', 'Error', 'Function', 'RegExp'],
                                     error);
    }
    // Guard against the message accidentally matching the caught error,
    // which would hide a misuse of the API.
    if (typeof actual === 'object' && actual !== null) {
      if (actual.message === error) {
        throw new ERR_AMBIGUOUS_ARGUMENT(
          'error/message',
          `The error message "${actual.message}" is identical to the message.`,
        );
      }
    } else if (actual === error) {
      throw new ERR_AMBIGUOUS_ARGUMENT(
        'error/message',
        `The error "${actual}" is identical to the message.`,
      );
    }
    message = error;
    error = undefined;
  } else if (error != null &&
             typeof error !== 'object' &&
             typeof error !== 'function') {
    throw new ERR_INVALID_ARG_TYPE('error',
                                   ['Object', 'Error', 'Function', 'RegExp'],
                                   error);
  }
  if (actual === NO_EXCEPTION_SENTINEL) {
    // Nothing was thrown/rejected at all.
    let details = '';
    if (error?.name) {
      details += ` (${error.name})`;
    }
    details += message ? `: ${message}` : '.';
    const fnType = stackStartFn === Assert.prototype.rejects ? 'rejection' : 'exception';
    innerFail({
      actual: undefined,
      expected: error,
      operator: stackStartFn.name,
      message: [`Missing expected ${fnType}${details}`],
      stackStartFn,
      diff: this?.[kOptions]?.diff,
    });
  }
  // No expectation given: any thrown value passes.
  if (!error)
    return;
  expectedException.call(this, actual, error, message, stackStartFn);
}
// Returns true when the caught value matches the expectation used by
// doesNotThrow()/doesNotReject(): a RegExp (matched against the stringified
// value), an Error class (instanceof), or a validation function.
function hasMatchingError(actual, expected) {
  if (typeof expected !== 'function') {
    if (!isRegExp(expected)) {
      throw new ERR_INVALID_ARG_TYPE(
        'expected', ['Function', 'RegExp'], expected,
      );
    }
    return RegExpPrototypeExec(expected, String(actual)) !== null;
  }
  // Guard instanceof against arrow functions as they don't have a prototype.
  if (expected.prototype !== undefined && actual instanceof expected) {
    return true;
  }
  // Error classes are never invoked as plain validation functions.
  if (ObjectPrototypeIsPrototypeOf(Error, expected)) {
    return false;
  }
  // Validator called with an empty receiver so it cannot poke at internals.
  return FunctionPrototypeCall(expected, {}, actual) === true;
}
// Shared implementation of doesNotThrow()/doesNotReject(). Silently returns
// when nothing was thrown; fails when the caught value matches `error` (or
// when no expectation was given); re-throws a non-matching caught value.
function expectsNoError(stackStartFn, actual, error, message) {
  if (actual === NO_EXCEPTION_SENTINEL)
    return;
  if (typeof error === 'string') {
    // Third argument was actually the message.
    message = error;
    error = undefined;
  }
  if (!error || hasMatchingError(actual, error)) {
    const details = message ? `: ${message}` : '.';
    const fnType = stackStartFn === Assert.prototype.doesNotReject ?
      'rejection' : 'exception';
    innerFail({
      actual,
      expected: error,
      operator: stackStartFn.name,
      message: [`Got unwanted ${fnType}${details}\n` +
                `Actual message: "${actual?.message}"`],
      stackStartFn,
      diff: this?.[kOptions]?.diff,
    });
  }
  // Only reached when an expectation was given and did not match:
  // the caught value is someone else's problem, so propagate it
  // (innerFail above always throws).
  throw actual;
}
/**
* Expects the function `promiseFn` to throw an error.
* @param {() => any} promiseFn
* @param {...any} [args]
* @returns {void}
*/
Assert.prototype.throws = function throws(promiseFn, ...args) {
  // getActual() yields the thrown value or NO_EXCEPTION_SENTINEL.
  expectsError(throws, getActual(promiseFn), ...args);
};
/**
* Expects `promiseFn` function or its value to reject.
* @param {() => Promise<any>} promiseFn
* @param {...any} [args]
* @returns {Promise<void>}
*/
Assert.prototype.rejects = async function rejects(promiseFn, ...args) {
  // waitForActual() yields the rejection reason or NO_EXCEPTION_SENTINEL.
  expectsError(rejects, await waitForActual(promiseFn), ...args);
};
/**
* Asserts that the function `fn` does not throw an error.
* @param {() => any} fn
* @param {...any} [args]
* @returns {void}
*/
Assert.prototype.doesNotThrow = function doesNotThrow(fn, ...args) {
  // Fails if fn throws a value matching args (or anything, without args).
  expectsNoError(doesNotThrow, getActual(fn), ...args);
};
/**
* Expects `fn` or its value to not reject.
* @param {() => Promise<any>} fn
* @param {...any} [args]
* @returns {Promise<void>}
*/
Assert.prototype.doesNotReject = async function doesNotReject(fn, ...args) {
  // Fails if fn (or the passed promise) rejects with a matching value.
  expectsNoError(doesNotReject, await waitForActual(fn), ...args);
};
/**
* Throws `AssertionError` if the value is not `null` or `undefined`.
* @param {any} err
* @returns {void}
*/
Assert.prototype.ifError = function ifError(err) {
  if (err !== null && err !== undefined) {
    let message = 'ifError got unwanted exception: ';
    // Prefer the error's own message; fall back to its constructor name for
    // empty messages, and to inspect() for non-error values.
    if (typeof err === 'object' && typeof err.message === 'string') {
      if (err.message.length === 0 && err.constructor) {
        message += err.constructor.name;
      } else {
        message += err.message;
      }
    } else {
      message += inspect(err);
    }
    const newErr = new AssertionError({
      actual: err,
      expected: null,
      operator: 'ifError',
      message,
      stackStartFn: ifError,
      diff: this?.[kOptions]?.diff,
    });
    // Make sure we actually have a stack trace!
    const origStack = err.stack;
    if (typeof origStack === 'string') {
      // This will remove any duplicated frames from the error frames taken
      // from within `ifError` and add the original error frames to the newly
      // created ones.
      const origStackStart = StringPrototypeIndexOf(origStack, '\n    at');
      if (origStackStart !== -1) {
        const originalFrames = StringPrototypeSplit(
          StringPrototypeSlice(origStack, origStackStart + 1),
          '\n',
        );
        // Filter all frames existing in err.stack.
        let newFrames = StringPrototypeSplit(newErr.stack, '\n');
        for (const errFrame of originalFrames) {
          // Find the first occurrence of the frame.
          const pos = ArrayPrototypeIndexOf(newFrames, errFrame);
          if (pos !== -1) {
            // Only keep new frames.
            newFrames = ArrayPrototypeSlice(newFrames, 0, pos);
            break;
          }
        }
        // Splice the kept new frames onto the original error's frames so the
        // reported stack points at the real origin of `err`.
        const stackStart = ArrayPrototypeJoin(newFrames, '\n');
        const stackEnd = ArrayPrototypeJoin(originalFrames, '\n');
        newErr.stack = `${stackStart}\n${stackEnd}`;
      }
    }
    throw newErr;
  }
};
// Shared implementation of match()/doesNotMatch(). `fn` selects the polarity:
// identity with Assert.prototype.match means "must match".
function internalMatch(string, regexp, message, fn) {
  if (!isRegExp(regexp)) {
    throw new ERR_INVALID_ARG_TYPE(
      'regexp', 'RegExp', regexp,
    );
  }
  const match = fn === Assert.prototype.match;
  // Parses as ((exec(...) !== null) !== match): fail when the actual match
  // outcome differs from the polarity `fn` expects, or when the input is not
  // a string at all.
  if (typeof string !== 'string' ||
      RegExpPrototypeExec(regexp, string) !== null !== match) {
    const generatedMessage = message.length === 0;
    // 'The input was expected to not match the regular expression ' +
    message[0] ||= (typeof string !== 'string' ?
      'The "string" argument must be of type string. Received type ' +
        `${typeof string} (${inspect(string)})` :
      (match ?
        'The input did not match the regular expression ' :
        'The input was expected to not match the regular expression ') +
        `${inspect(regexp)}. Input:\n\n${inspect(string)}\n`);
    innerFail({
      actual: string,
      expected: regexp,
      message,
      operator: fn.name,
      stackStartFn: fn,
      diff: this?.[kOptions]?.diff,
      generatedMessage: generatedMessage,
    });
  }
}
/**
* Expects the `string` input to match the regular expression.
* @param {string} string
* @param {RegExp} regexp
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.match = function match(string, regexp, ...message) {
  // NOTE(review): internalMatch is called without a receiver, so the
  // instance diff option does not propagate here — confirm intended.
  internalMatch(string, regexp, message, match);
};
/**
* Expects the `string` input not to match the regular expression.
* @param {string} string
* @param {RegExp} regexp
* @param {string | Error | MessageFactory} [message]
* @returns {void}
*/
Assert.prototype.doesNotMatch = function doesNotMatch(string, regexp, ...message) {
  // NOTE(review): called without a receiver — instance diff option is lost.
  internalMatch(string, regexp, message, doesNotMatch);
};
/**
* Expose a strict only variant of assert.
* @param {...any} args
* @returns {void}
*/
function strict(...args) {
  // Callable entry point for assert.strict; same truthiness check as ok().
  innerOk(strict, ...args);
}
ArrayPrototypeForEach([
'ok', 'fail', 'equal', 'notEqual', 'deepEqual', 'notDeepEqual',
'deepStrictEqual', 'notDeepStrictEqual', 'strictEqual',
'notStrictEqual', 'partialDeepStrictEqual', 'match', 'doesNotMatch',
'throws', 'rejects', 'doesNotThrow', 'doesNotReject', 'ifError',
], (name) => {
setOwnProperty(assert, name, Assert.prototype[name]);
});
assert.strict = ObjectAssign(strict, assert, {
equal: assert.strictEqual,
deepEqual: assert.deepStrictEqual,
notEqual: assert.notStrictEqual,
notDeepEqual: assert.notDeepStrictEqual,
});
assert.strict.Assert = Assert;
assert.strict.strict = assert.strict;
assert.Assert = Assert; | javascript | github | https://github.com/nodejs/node | lib/assert.js |
lazy from __future__ import annotations | python | github | https://github.com/python/cpython | Lib/test/test_import/data/lazy_imports/lazy_future_import.py |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import os
import sys
import shutil
from subprocess import check_call
from glob import glob
# Bail out before doing any work on unsupported interpreters. The check runs
# at import time so even `pip install` from source fails with a clear error.
v = sys.version_info
if v[:2] < (3,3):
    error = "ERROR: Jupyter Hub requires Python version 3.3 or above."
    print(error, file=sys.stderr)
    sys.exit(1)
def mtime(path):
    """Shorthand: modification time of *path* in seconds since the epoch."""
    return os.path.getmtime(path)
if os.name in ('nt', 'dos'):
    error = "ERROR: Windows is not supported"
    print(error, file=sys.stderr)
    # NOTE(review): unlike the version check above this only warns and does
    # not exit — confirm whether installation should proceed on Windows.
# At least we're on the python version we need, move on.
from distutils.core import setup
# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# the setuptools equivalents when the supported interpreter range allows.
pjoin = os.path.join
here = os.path.abspath(os.path.dirname(__file__))
from distutils.cmd import Command
from distutils.command.build_py import build_py
from distutils.command.sdist import sdist
# PATH with the project's locally-installed node binaries (bower, lessc)
# prepended so subprocesses pick them up before any global install.
npm_path = ':'.join([
    pjoin(here, 'node_modules', '.bin'),
    os.environ.get("PATH", os.defpath),
])
# NOTE(review): `here` is recomputed identically a second time — harmless
# duplication, one of the two assignments could be dropped.
here = os.path.abspath(os.path.dirname(__file__))
share = pjoin(here, 'share')
static = pjoin(share, 'static')
class BaseCommand(Command):
    """Dumb empty command because Command needs subclasses to override too much"""
    # distutils.Command requires subclasses to define these hooks even when
    # there is nothing to configure, so stub them out once here.
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def get_inputs(self):
        return []
    def get_outputs(self):
        return []
class Bower(BaseCommand):
    """Fetch static client-side components with bower (``setup.py js``)."""
    description = "fetch static client-side components with bower"
    user_options = []
    # Output directory of `bower install`; freshness is judged against it.
    bower_dir = pjoin(static, 'components')
    node_modules = pjoin(here, 'node_modules')
    def should_run(self):
        # Re-run when components are missing or older than bower.json.
        if not os.path.exists(self.bower_dir):
            return True
        return mtime(self.bower_dir) < mtime(pjoin(here, 'bower.json'))
    def should_run_npm(self):
        # npm is only needed to install the local build tools themselves.
        if not shutil.which('npm'):
            print("npm unavailable", file=sys.stderr)
            return False
        if not os.path.exists(self.node_modules):
            return True
        return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))
    def run(self):
        if not self.should_run():
            print("bower dependencies up to date")
            return
        if self.should_run_npm():
            print("installing build dependencies with npm")
            check_call(['npm', 'install'], cwd=here)
            # Touch node_modules so should_run_npm() sees it as fresh.
            os.utime(self.node_modules)
        env = os.environ.copy()
        # Use the PATH with node_modules/.bin prepended (see npm_path above).
        env['PATH'] = npm_path
        try:
            check_call(
                ['bower', 'install', '--allow-root', '--config.interactive=false'],
                cwd=here,
                env=env,
            )
        except OSError as e:
            print("Failed to run bower: %s" % e, file=sys.stderr)
            print("You can install js dependencies with `npm install`", file=sys.stderr)
            raise
        os.utime(self.bower_dir)
        # update data-files in case this created new files
        self.distribution.data_files = get_data_files()
class CSS(BaseCommand):
    """Compile style.min.css from the LESS sources (``setup.py css``)."""
    description = "compile CSS from LESS"
    def should_run(self):
        """Return True if any .less source is newer than the generated CSS."""
        # from IPython.html.tasks.py
        css_targets = [pjoin(static, 'css', 'style.min.css')]
        css_maps = [t + '.map' for t in css_targets]
        targets = css_targets + css_maps
        if not all(os.path.exists(t) for t in targets):
            # some generated files don't exist
            return True
        # min() instead of sorted(...)[0]: same result, no throwaway sort.
        earliest_target = min(mtime(t) for t in targets)
        # check if any .less files are newer than the generated targets
        for (dirpath, dirnames, filenames) in os.walk(static):
            for f in filenames:
                if f.endswith('.less'):
                    # dirpath already contains the walk root; the previous
                    # pjoin(static, dirpath, f) only worked because os.path.join
                    # discards earlier parts before an absolute component.
                    path = pjoin(dirpath, f)
                    timestamp = mtime(path)
                    if timestamp > earliest_target:
                        return True
        return False
    def run(self):
        if not self.should_run():
            print("CSS up-to-date")
            return
        # LESS sources may reference bower components, so fetch them first.
        self.run_command('js')
        style_less = pjoin(static, 'less', 'style.less')
        style_css = pjoin(static, 'css', 'style.min.css')
        sourcemap = style_css + '.map'
        env = os.environ.copy()
        env['PATH'] = npm_path
        try:
            check_call([
                'lessc', '--clean-css',
                '--source-map-basepath={}'.format(static),
                '--source-map={}'.format(sourcemap),
                '--source-map-rootpath=../',
                style_less, style_css,
            ], cwd=here, env=env)
        except OSError as e:
            print("Failed to run lessc: %s" % e, file=sys.stderr)
            print("You can install js dependencies with `npm install`", file=sys.stderr)
            raise
        # update data-files in case this created new files
        self.distribution.data_files = get_data_files()
def get_data_files():
    """Get data files in share/jupyter.

    Returns a list of ``(relative_dir, [absolute_file, ...])`` pairs suitable
    for distutils' ``data_files``, covering everything under ``static``.
    """
    ntrim = len(here) + 1
    return [
        (d[ntrim:], [pjoin(d, f) for f in filenames])
        for d, dirs, filenames in os.walk(static)
    ]
# Package metadata passed straight through to setup() in main().
setup_args = dict(
    name = 'everware',
    packages = ['everware'],
    scripts = glob(pjoin('scripts', '*')),
    version = '0.0.0',
    description = """Everware""",
    long_description = "",
    author = "",
    author_email = "",
    url = "",
    license = "BSD",
    platforms = "Linux, Mac OS X",
    keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
    classifiers = [
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
)
# Register the asset-building commands: `setup.py js` and `setup.py css`.
setup_args['cmdclass'] = {'js': Bower, 'css': CSS}
# setuptools requirements
if 'setuptools' in sys.modules:
    # Only setuptools understands install_requires; plain distutils would
    # just warn, so populate it conditionally from requirements.txt.
    setup_args['install_requires'] = install_requires = []
    with open('requirements.txt') as f:
        for line in f.readlines():
            req = line.strip()
            # Skip blanks, editable installs and comments.
            if not req or req.startswith(('-e', '#')):
                continue
            install_requires.append(req)
def main():
    """Invoke distutils/setuptools setup with the arguments built above."""
    setup(**setup_args)
if __name__ == '__main__':
    main()
//go:build windows
package libnetwork
import "fmt"
// DriverInfo returns a collection of driver operational data related to this endpoint retrieved from the driver.
func (ep *Endpoint) DriverInfo() (map[string]any, error) {
	// Re-read the endpoint so we operate on the latest persisted state.
	// Note: ep is rebound to the stored copy for the rest of the method.
	ep, err := ep.retrieveFromStore()
	if err != nil {
		return nil, err
	}
	// If the endpoint's sandbox is also attached to the gateway network via a
	// different endpoint, collect that endpoint's driver info too; it is
	// folded in under "GW_INFO" below (or returned alone if this endpoint's
	// driver reports nothing).
	var gwDriverInfo map[string]any
	if sb, ok := ep.getSandbox(); ok {
		if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() {
			gwDriverInfo, err = gwep.DriverInfo()
			if err != nil {
				return nil, err
			}
		}
	}
	n, err := ep.getNetworkFromStore()
	if err != nil {
		return nil, fmt.Errorf("could not find network in store for driver info: %v", err)
	}
	// NOTE(review): the boolean presumably asks driver() to load the driver
	// if it is not yet present — confirm against the driver() implementation.
	driver, err := n.driver(true)
	if err != nil {
		return nil, fmt.Errorf("failed to get driver info: %v", err)
	}
	epInfo, err := driver.EndpointOperInfo(n.ID(), ep.ID())
	if err != nil {
		return nil, err
	}
	if epInfo != nil {
		epInfo["GW_INFO"] = gwDriverInfo
		return epInfo, nil
	}
	return gwDriverInfo, nil
}
# -*- coding: utf-8 -*-
# Needed to allow import
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==============
Tk integration
==============
Simple components providing a basic way to integrate Tk windows as Kamaelia
components.
""" | unknown | codeparrot/codeparrot-clean | ||
"""Test script for unittest.
By Collin Winter <collinw at gmail.com>
Still need testing:
TestCase.{assert,fail}* methods (some are tested implicitly)
"""
from StringIO import StringIO
import __builtin__
import os
import re
import sys
import unittest
from unittest import TestCase, TestProgram
import types
from copy import deepcopy
from cStringIO import StringIO
import pickle
### Support code
################################################################
class LoggingResult(unittest.TestResult):
    """TestResult that appends the name of every event to a shared list.

    The caller-supplied ``log`` list lets tests assert on the exact sequence
    of callbacks (startTest/stopTest/add*) that a test run produced.
    """
    def __init__(self, log):
        self._events = log
        super(LoggingResult, self).__init__()
    def _record(self, name):
        # Note the event before delegating so the log mirrors the order in
        # which the framework invoked the callbacks.
        self._events.append(name)
    def startTest(self, test):
        self._record('startTest')
        super(LoggingResult, self).startTest(test)
    def startTestRun(self):
        self._record('startTestRun')
        super(LoggingResult, self).startTestRun()
    def stopTest(self, test):
        self._record('stopTest')
        super(LoggingResult, self).stopTest(test)
    def stopTestRun(self):
        self._record('stopTestRun')
        super(LoggingResult, self).stopTestRun()
    def addFailure(self, *args):
        self._record('addFailure')
        super(LoggingResult, self).addFailure(*args)
    def addSuccess(self, *args):
        self._record('addSuccess')
        super(LoggingResult, self).addSuccess(*args)
    def addError(self, *args):
        self._record('addError')
        super(LoggingResult, self).addError(*args)
    def addSkip(self, *args):
        self._record('addSkip')
        super(LoggingResult, self).addSkip(*args)
    def addExpectedFailure(self, *args):
        self._record('addExpectedFailure')
        super(LoggingResult, self).addExpectedFailure(*args)
    def addUnexpectedSuccess(self, *args):
        self._record('addUnexpectedSuccess')
        super(LoggingResult, self).addUnexpectedSuccess(*args)
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception, e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception, e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
# List subclass we can add attributes to.
class MyClassSuite(list):
    # A plain ``list`` rejects arbitrary attributes; tests use this subclass
    # as a stand-in suiteClass for TestLoader.
    def __init__(self, tests):
        super(MyClassSuite, self).__init__(tests)
################################################################
### /Support code
class Test_TestLoader(TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
    """loadTestsFromTestCase collects every test_* method into a suite."""
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    expected = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
    built = unittest.TestLoader().loadTestsFromTestCase(Foo)
    self.assertEqual(built, expected)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
    """A TestCase with no test_* methods yields an empty suite."""
    class Foo(unittest.TestCase):
        def foo_bar(self): pass
    built = unittest.TestLoader().loadTestsFromTestCase(Foo)
    self.assertEqual(built, unittest.TestSuite())
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
    """Passing a TestSuite subclass must raise TypeError."""
    class NotATestCase(unittest.TestSuite):
        pass
    checker = unittest.TestLoader()
    try:
        checker.loadTestsFromTestCase(NotATestCase)
    except TypeError:
        pass
    else:
        self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
    """The default 'runTest' method is found despite the test prefix."""
    class Foo(unittest.TestCase):
        def runTest(self):
            pass
    loader = unittest.TestLoader()
    # The whole point: 'runTest' must NOT match the configured prefix.
    self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
    built = loader.loadTestsFromTestCase(Foo)
    self.assertTrue(isinstance(built, loader.suiteClass))
    self.assertEqual(list(built), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
    """TestCase subclasses found in a module are collected into sub-suites."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromModule(module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [loader.suiteClass([MyTestCase('test')])])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
    """A module containing no TestCase classes produces an empty suite."""
    module = types.ModuleType('m')
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromModule(module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
    """A TestCase with no tests still contributes an (empty) sub-suite."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromModule(module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
    """Non-module objects carrying TestCase attributes are (still) accepted."""
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    class NotAModule(object):
        test_2 = MyTestCase
    loaded = unittest.TestLoader().loadTestsFromModule(NotAModule)
    expected = [unittest.TestSuite([MyTestCase('test')])]
    self.assertEqual(list(loaded), expected)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
    """loadTestsFromModule honors (or skips) a module-level load_tests hook."""
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testcase_1 = MyTestCase
    load_tests_args = []
    def load_tests(loader, tests, pattern):
        # Record exactly what the loader passed in.
        load_tests_args.extend((loader, tests, pattern))
        return tests
    m.load_tests = load_tests
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromModule(m)
    # Fixed: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(load_tests_args, [loader, suite, None])
    load_tests_args = []
    # With use_load_tests=False the hook must not be invoked at all.
    suite = loader.loadTestsFromModule(m, use_load_tests=False)
    self.assertEqual(load_tests_args, [])
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
    """An empty name raises ValueError('Empty module name')."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('')
    # Fixed: "except ValueError, e" is Python-2-only syntax.
    except ValueError as e:
        self.assertEqual(str(e), "Empty module name")
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
    """Names with invalid characters raise ValueError or ImportError."""
    loader = unittest.TestLoader()
    # XXX Should this raise ValueError or ImportError?
    try:
        loader.loadTestsFromName('abc () //')
    except (ValueError, ImportError):
        pass
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
    """A name that resolves to no module raises ImportError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('sdasfasfasdf')
    # Fixed: "except ImportError, e" is Python-2-only syntax.
    except ImportError as e:
        self.assertEqual(str(e), "No module named sdasfasfasdf")
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
    """The module resolves but the attribute doesn't: AttributeError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('unittest.sdasfasfasdf')
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
    """An unknown attribute relative to a module raises AttributeError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('sdasfasfasdf', unittest)
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
    """An empty name relative to a module currently raises AttributeError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('', unittest)
    # Fixed: Py2-only "except X, e" syntax; the bound exception was unused,
    # so the binding is simply dropped.
    except AttributeError:
        pass
    else:
        self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
    """A malformed relative name raises ValueError or AttributeError."""
    loader = unittest.TestLoader()
    # XXX Should this raise AttributeError or ValueError?
    try:
        loader.loadTestsFromName('abc () //', unittest)
    except (ValueError, AttributeError):
        pass
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignorning the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
    """A non-module 'module' argument is tolerated (legacy behaviour)."""
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    class NotAModule(object):
        test_2 = MyTestCase
    loaded = unittest.TestLoader().loadTestsFromName('test_2', NotAModule)
    self.assertEqual(list(loaded), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
    """A name resolving to a non-test object must raise TypeError."""
    module = types.ModuleType('m')
    module.testcase_1 = object()
    checker = unittest.TestLoader()
    try:
        checker.loadTestsFromName('testcase_1', module)
    except TypeError:
        pass
    else:
        self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
    """A relative name may resolve to a TestCase subclass."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromName('testcase_1', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
    """A relative name may resolve to a TestSuite instance."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testsuite = unittest.TestSuite([MyTestCase('test')])
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromName('testsuite', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
    """A dotted relative name may address one test method."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromName('testcase_1.test', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
    """A dotted name whose method part doesn't exist raises AttributeError."""
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('testcase_1.testfoo', m)
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
    else:
        self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
    """A name may resolve to a callable returning a TestSuite."""
    module = types.ModuleType('m')
    case_a = unittest.FunctionTestCase(lambda: None)
    case_b = unittest.FunctionTestCase(lambda: None)
    def return_TestSuite():
        return unittest.TestSuite([case_a, case_b])
    module.return_TestSuite = return_TestSuite
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromName('return_TestSuite', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [case_a, case_b])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
    """A name may resolve to a callable returning a single TestCase."""
    module = types.ModuleType('m')
    case = unittest.FunctionTestCase(lambda: None)
    def return_TestCase():
        return case
    module.return_TestCase = return_TestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromName('return_TestCase', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [case])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
    """The loader must wrap results in its (overridden) suiteClass."""
    class SubTestSuite(unittest.TestSuite):
        pass
    module = types.ModuleType('m')
    case = unittest.FunctionTestCase(lambda: None)
    def return_TestCase():
        return case
    module.return_TestCase = return_TestCase
    loader = unittest.TestLoader()
    loader.suiteClass = SubTestSuite
    loaded = loader.loadTestsFromName('return_TestCase', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [case])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
    """Resolving a single method must honor an overridden suiteClass."""
    class SubTestSuite(unittest.TestSuite):
        pass
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loader.suiteClass = SubTestSuite
    loaded = loader.loadTestsFromName('testcase_1.test', module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
    """A callable returning a non-test value must raise TypeError."""
    module = types.ModuleType('m')
    def return_wrong():
        return 6
    module.return_wrong = return_wrong
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromName('return_wrong', module)
    except TypeError:
        pass
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
    """Referencing a not-yet-imported module imports it as a side effect."""
    # Why pick audioop? It is rarely used, so there's a good chance it
    # isn't already imported when this test runs.
    module_name = 'audioop'
    import sys
    # Make sure it isn't loaded before we start.
    if module_name in sys.modules:
        del sys.modules[module_name]
    loader = unittest.TestLoader()
    try:
        loaded = loader.loadTestsFromName(module_name)
        self.assertTrue(isinstance(loaded, loader.suiteClass))
        self.assertEqual(list(loaded), [])
        # The import must have happened as a side effect of the call.
        self.assertTrue(module_name in sys.modules)
    finally:
        # Leave sys.modules the way we found it.
        if module_name in sys.modules:
            del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
    """An empty list of names yields an empty suite."""
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames([])
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
    """An empty name list with a module still yields an empty suite."""
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames([], unittest)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
    """An empty name in the list raises ValueError('Empty module name')."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames([''])
    # Fixed: "except ValueError, e" is Python-2-only syntax.
    except ValueError as e:
        self.assertEqual(str(e), "Empty module name")
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
    """An impossible module name raises ValueError or ImportError."""
    loader = unittest.TestLoader()
    # XXX Should this raise ValueError or ImportError?
    try:
        loader.loadTestsFromNames(['abc () //'])
    except (ValueError, ImportError):
        pass
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
    """A name resolving to no module raises ImportError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['sdasfasfasdf'])
    # Fixed: "except ImportError, e" is Python-2-only syntax.
    except ImportError as e:
        self.assertEqual(str(e), "No module named sdasfasfasdf")
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
    """The module resolves but the attribute doesn't: AttributeError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
    """An unknown attribute relative to a module raises AttributeError."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
    """An unknown name raises even when mixed with valid names."""
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
    else:
        self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
    """An empty relative name currently raises AttributeError."""
    loader = unittest.TestLoader()
    # XXX AttributeError today, though ValueError would be more apt.
    try:
        loader.loadTestsFromNames([''], unittest)
    except AttributeError:
        pass
    else:
        self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
    """A malformed relative name raises AttributeError or ValueError."""
    loader = unittest.TestLoader()
    # XXX Should this raise AttributeError or ValueError?
    try:
        loader.loadTestsFromNames(['abc () //'], unittest)
    except (AttributeError, ValueError):
        pass
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
    """A non-module 'module' argument is tolerated (legacy behaviour)."""
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    class NotAModule(object):
        test_2 = MyTestCase
    loaded = unittest.TestLoader().loadTestsFromNames(['test_2'], NotAModule)
    expected = [unittest.TestSuite([MyTestCase('test')])]
    self.assertEqual(list(loaded), expected)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
    """A name resolving to a non-test object must raise TypeError."""
    module = types.ModuleType('m')
    module.testcase_1 = object()
    checker = unittest.TestLoader()
    try:
        checker.loadTestsFromNames(['testcase_1'], module)
    except TypeError:
        pass
    else:
        self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
    """A relative name may resolve to a TestCase subclass."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames(['testcase_1'], module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [loader.suiteClass([MyTestCase('test')])])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
    """A relative name may resolve to a TestSuite instance."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testsuite = unittest.TestSuite([MyTestCase('test')])
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames(['testsuite'], module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [module.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
    """A dotted relative name may address one test method."""
    module = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    module.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames(['testcase_1.test'], module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [unittest.TestSuite([MyTestCase('test')])])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
    """A dotted name whose method part doesn't exist raises AttributeError."""
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testcase_1 = MyTestCase
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['testcase_1.testfoo'], m)
    # Fixed: "except AttributeError, e" is Python-2-only syntax.
    except AttributeError as e:
        self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
    else:
        self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
    """A name may resolve to a callable returning a TestSuite."""
    module = types.ModuleType('m')
    case_a = unittest.FunctionTestCase(lambda: None)
    case_b = unittest.FunctionTestCase(lambda: None)
    def return_TestSuite():
        return unittest.TestSuite([case_a, case_b])
    module.return_TestSuite = return_TestSuite
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames(['return_TestSuite'], module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [unittest.TestSuite([case_a, case_b])])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
    """A name may resolve to a callable returning a single TestCase."""
    module = types.ModuleType('m')
    case = unittest.FunctionTestCase(lambda: None)
    def return_TestCase():
        return case
    module.return_TestCase = return_TestCase
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames(['return_TestCase'], module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [unittest.TestSuite([case])])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
    """Staticmethod callables are resolved and invoked correctly."""
    module = types.ModuleType('m')
    class Test1(unittest.TestCase):
        def test(self):
            pass
    testcase_1 = Test1('test')
    class Foo(unittest.TestCase):
        @staticmethod
        def foo():
            return testcase_1
    module.Foo = Foo
    loader = unittest.TestLoader()
    loaded = loader.loadTestsFromNames(['Foo.foo'], module)
    self.assertTrue(isinstance(loaded, loader.suiteClass))
    self.assertEqual(list(loaded), [unittest.TestSuite([testcase_1])])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
    """A callable returning a non-test value must raise TypeError."""
    module = types.ModuleType('m')
    def return_wrong():
        return 6
    module.return_wrong = return_wrong
    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['return_wrong'], module)
    except TypeError:
        pass
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
    """Referencing a not-yet-imported module imports it as a side effect."""
    # Why pick audioop? It is rarely used, so there's a good chance it
    # isn't already imported when this test runs.
    module_name = 'audioop'
    import sys
    # Make sure it isn't loaded before we start.
    if module_name in sys.modules:
        del sys.modules[module_name]
    loader = unittest.TestLoader()
    try:
        loaded = loader.loadTestsFromNames([module_name])
        self.assertTrue(isinstance(loaded, loader.suiteClass))
        self.assertEqual(list(loaded), [unittest.TestSuite()])
        # The import must have happened as a side effect of the call.
        self.assertTrue(module_name in sys.modules)
    finally:
        # Leave sys.modules the way we found it.
        if module_name in sys.modules:
            del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
    """getTestCaseNames returns the sorted test_* method names only."""
    class Test(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foobar(self): pass
    names = unittest.TestLoader().getTestCaseNames(Test)
    self.assertEqual(names, ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
    """A TestCase with no matching methods yields an empty name list."""
    class Test(unittest.TestCase):
        def foobar(self): pass
    names = unittest.TestLoader().getTestCaseNames(Test)
    self.assertEqual(names, [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
    """Non-TestCase classes are (currently) handled without error."""
    # XXX This should arguably raise TypeError instead of returning names.
    class BadCase(int):
        def test_foo(self):
            pass
    collected = unittest.TestLoader().getTestCaseNames(BadCase)
    self.assertEqual(collected, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
    """Inherited test methods are included (and deduplicated) in the names."""
    class TestP(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foobar(self): pass
    class TestC(TestP):
        def test_1(self): pass
        def test_3(self): pass
    collected = unittest.TestLoader().getTestCaseNames(TestC)
    self.assertEqual(collected, ['test_1', 'test_2', 'test_3'])
################################################################
### /Tests for TestLoader.getTestCaseNames()

### Tests for TestLoader.testMethodPrefix
################################################################

# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass

    tests_1 = unittest.TestSuite([Foo('foo_bar')])
    tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])

    # The same loader is reused with two prefixes to show the attribute
    # is consulted on every call, not captured at construction time.
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # loadTestsFromModule() wraps each class's tests in a per-class
    # suite, hence the extra list level around the expectations.
    tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
    tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]

    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # Resolving the name 'Foo' relative to module m must honour whatever
    # prefix is set at call time.
    tests_1 = unittest.TestSuite([Foo('foo_bar')])
    tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])

    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # loadTestsFromNames() returns a suite of suites — one inner suite
    # per name looked up — hence the double wrapping below.
    tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
    tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
    tests_2 = unittest.TestSuite([tests_2])

    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.testMethodPrefix == 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix

### Tests for TestLoader.sortTestMethodsUsing
################################################################

# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
    # reversed_cmp inverts the (Python 2) builtin cmp(), so the loader
    # should yield the test methods in reverse alphabetical order.
    def reversed_cmp(x, y):
        return -cmp(x, y)

    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
    self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
    # Same inverted comparator, routed through loadTestsFromModule();
    # note the extra list level added by the module-level wrapping suite.
    def reversed_cmp(x, y):
        return -cmp(x, y)

    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
    m.Foo = Foo

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
    self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
    # Same inverted comparator, routed through loadTestsFromName().
    def reversed_cmp(x, y):
        return -cmp(x, y)

    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
    m.Foo = Foo

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
    # Same inverted comparator, routed through loadTestsFromNames().
    def reversed_cmp(x, y):
        return -cmp(x, y)

    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
    m.Foo = Foo

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
    self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
    # With the inverted comparator the names must come back in reverse
    # alphabetical order.
    def reversed_cmp(x, y):
        return -cmp(x, y)

    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    test_names = ['test_2', 'test_1']
    self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
def test_sortTestMethodsUsing__default_value(self):
    # Identity (not equality) check: the default comparator must be the
    # cmp builtin itself (Python 2 only; cmp does not exist on Python 3).
    loader = unittest.TestLoader()
    self.assertTrue(loader.sortTestMethodsUsing is cmp)
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # With suiteClass = list, both the module wrapper and the per-class
    # suite become plain lists, hence the nested-list expectation.
    tests = [[Foo('test_1'), Foo('test_2')]]

    loader = unittest.TestLoader()
    loader.suiteClass = list
    self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # A single name resolves to one class, so only one list level here.
    tests = [Foo('test_1'), Foo('test_2')]

    loader = unittest.TestLoader()
    loader.suiteClass = list
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # loadTestsFromNames() wraps the per-name results in an outer
    # suiteClass call as well, hence the nested lists.
    tests = [[Foo('test_1'), Foo('test_2')]]

    loader = unittest.TestLoader()
    loader.suiteClass = list
    self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
    # Identity (not mere equality) check against unittest.TestSuite.
    loader = unittest.TestLoader()
    self.assertTrue(loader.suiteClass is unittest.TestSuite)
################################################################
### /Tests for TestLoader.suiteClass

### Support code for Test_TestSuite
################################################################

# Minimal TestCase used as raw material by _mk_TestSuite and by the
# eq_pairs/ne_pairs fixtures of Test_TestSuite below.  The bodies are
# intentionally empty: only identity/equality of the cases matters.
class Foo(unittest.TestCase):
    def test_1(self): pass
    def test_2(self): pass
    def test_3(self): pass
    def runTest(self): pass
def _mk_TestSuite(*names):
    # Build a suite containing one Foo instance per requested method name.
    cases = [Foo(name) for name in names]
    return unittest.TestSuite(cases)
################################################################
### /Support code for Test_TestSuite

class Test_TestSuite(TestCase, TestEquality):
    """Behavioural tests for unittest.TestSuite: construction, iteration,
    countTestCases(), run(), and the addTest()/addTests() API.  Equality
    contract checks are inherited from TestEquality, driven by the
    eq_pairs/ne_pairs fixtures below."""

    ### Set up attributes needed by inherited tests
    ################################################################

    # Used by TestEquality.test_eq
    eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
               ,(unittest.TestSuite(), unittest.TestSuite([]))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]

    # Used by TestEquality.test_ne
    ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
               ,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
               ,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]

    ################################################################
    ### /Set up attributes needed by inherited tests

    ### Tests for TestSuite.__init__
    ################################################################

    # "class TestSuite([tests])"
    #
    # The tests iterable should be optional
    def test_init__tests_optional(self):
        suite = unittest.TestSuite()
        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # TestSuite should deal with empty tests iterables by allowing the
    # creation of an empty suite
    def test_init__empty_tests(self):
        suite = unittest.TestSuite([])
        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # TestSuite should allow any iterable to provide tests
    def test_init__tests_from_any_iterable(self):
        def tests():
            yield unittest.FunctionTestCase(lambda: None)
            yield unittest.FunctionTestCase(lambda: None)

        # generator, another TestSuite and a set are all accepted sources
        suite_1 = unittest.TestSuite(tests())
        self.assertEqual(suite_1.countTestCases(), 2)

        suite_2 = unittest.TestSuite(suite_1)
        self.assertEqual(suite_2.countTestCases(), 2)

        suite_3 = unittest.TestSuite(set(suite_1))
        self.assertEqual(suite_3.countTestCases(), 2)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # Does TestSuite() also allow other TestSuite() instances to be present
    # in the tests iterable?
    def test_init__TestSuite_instances_in_tests(self):
        def tests():
            ftc = unittest.FunctionTestCase(lambda: None)
            yield unittest.TestSuite([ftc])
            yield unittest.FunctionTestCase(lambda: None)

        suite = unittest.TestSuite(tests())
        self.assertEqual(suite.countTestCases(), 2)

    ################################################################
    ### /Tests for TestSuite.__init__

    # Container types should support the iter protocol
    def test_iter(self):
        test1 = unittest.FunctionTestCase(lambda: None)
        test2 = unittest.FunctionTestCase(lambda: None)
        suite = unittest.TestSuite((test1, test2))

        self.assertEqual(list(suite), [test1, test2])

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Presumably an empty TestSuite returns 0?
    def test_countTestCases_zero_simple(self):
        suite = unittest.TestSuite()
        self.assertEqual(suite.countTestCases(), 0)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Presumably an empty TestSuite (even if it contains other empty
    # TestSuite instances) returns 0?
    def test_countTestCases_zero_nested(self):
        class Test1(unittest.TestCase):
            def test(self):
                pass

        suite = unittest.TestSuite([unittest.TestSuite()])
        self.assertEqual(suite.countTestCases(), 0)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    def test_countTestCases_simple(self):
        test1 = unittest.FunctionTestCase(lambda: None)
        test2 = unittest.FunctionTestCase(lambda: None)
        suite = unittest.TestSuite((test1, test2))

        self.assertEqual(suite.countTestCases(), 2)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Make sure this holds for nested TestSuite instances, too
    def test_countTestCases_nested(self):
        class Test1(unittest.TestCase):
            def test1(self): pass
            def test2(self): pass

        test2 = unittest.FunctionTestCase(lambda: None)
        test3 = unittest.FunctionTestCase(lambda: None)
        child = unittest.TestSuite((Test1('test2'), test2))
        parent = unittest.TestSuite((test3, child, Test1('test1')))

        self.assertEqual(parent.countTestCases(), 4)

    # "Run the tests associated with this suite, collecting the result into
    # the test result object passed as result."
    #
    # And if there are no tests? What then?
    def test_run__empty_suite(self):
        events = []
        result = LoggingResult(events)

        suite = unittest.TestSuite()

        suite.run(result)

        self.assertEqual(events, [])

    # "Note that unlike TestCase.run(), TestSuite.run() requires the
    # "result object to be passed in."
    def test_run__requires_result(self):
        suite = unittest.TestSuite()

        try:
            suite.run()
        except TypeError:
            pass
        else:
            self.fail("Failed to raise TypeError")

    # "Run the tests associated with this suite, collecting the result into
    # the test result object passed as result."
    def test_run(self):
        events = []
        result = LoggingResult(events)

        # LoggingCase bypasses the normal run machinery and just records
        # which method the suite asked it to run, in order.
        class LoggingCase(unittest.TestCase):
            def run(self, result):
                events.append('run %s' % self._testMethodName)

            def test1(self): pass
            def test2(self): pass

        tests = [LoggingCase('test1'), LoggingCase('test2')]

        unittest.TestSuite(tests).run(result)

        self.assertEqual(events, ['run test1', 'run test2'])

    # "Add a TestCase ... to the suite"
    def test_addTest__TestCase(self):
        class Foo(unittest.TestCase):
            def test(self): pass

        test = Foo('test')
        suite = unittest.TestSuite()

        suite.addTest(test)

        self.assertEqual(suite.countTestCases(), 1)
        self.assertEqual(list(suite), [test])

    # "Add a ... TestSuite to the suite"
    def test_addTest__TestSuite(self):
        class Foo(unittest.TestCase):
            def test(self): pass

        suite_2 = unittest.TestSuite([Foo('test')])

        suite = unittest.TestSuite()
        suite.addTest(suite_2)

        # the nested suite is kept as-is, not flattened
        self.assertEqual(suite.countTestCases(), 1)
        self.assertEqual(list(suite), [suite_2])

    # "Add all the tests from an iterable of TestCase and TestSuite
    # instances to this test suite."
    #
    # "This is equivalent to iterating over tests, calling addTest() for
    # each element"
    def test_addTests(self):
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass

        test_1 = Foo('test_1')
        test_2 = Foo('test_2')
        inner_suite = unittest.TestSuite([test_2])

        def gen():
            yield test_1
            yield test_2
            yield inner_suite

        suite_1 = unittest.TestSuite()
        suite_1.addTests(gen())

        self.assertEqual(list(suite_1), list(gen()))

        # "This is equivalent to iterating over tests, calling addTest() for
        # each element"
        suite_2 = unittest.TestSuite()
        for t in gen():
            suite_2.addTest(t)

        self.assertEqual(suite_1, suite_2)

    # "Add all the tests from an iterable of TestCase and TestSuite
    # instances to this test suite."
    #
    # What happens if it doesn't get an iterable?
    def test_addTest__noniterable(self):
        suite = unittest.TestSuite()

        try:
            suite.addTests(5)
        except TypeError:
            pass
        else:
            self.fail("Failed to raise TypeError")

    def test_addTest__noncallable(self):
        # cases must be callable; a bare int is rejected
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTest, 5)

    def test_addTest__casesuiteclass(self):
        # classes (as opposed to instances) must be rejected
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
        self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)

    def test_addTests__string(self):
        # a string is iterable, but its characters are not tests
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTests, "foo")
class Test_FunctionTestCase(TestCase):
    """Behavioural tests for unittest.FunctionTestCase: setUp/test/tearDown
    call ordering under errors and failures, plus id() and
    shortDescription()."""

    # "Return the number of tests represented by the this test object. For
    # TestCase instances, this will always be 1"
    def test_countTestCases(self):
        test = unittest.FunctionTestCase(lambda: None)
        self.assertEqual(test.countTestCases(), 1)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if setUp() raises
    # an exception.
    def test_run_call_order__error_in_setUp(self):
        events = []
        result = LoggingResult(events)

        def setUp():
            events.append('setUp')
            raise RuntimeError('raised by setUp')

        def test():
            events.append('test')

        def tearDown():
            events.append('tearDown')

        # when setUp blows up, neither test nor tearDown may run
        expected = ['startTest', 'setUp', 'addError', 'stopTest']
        unittest.FunctionTestCase(test, setUp, tearDown).run(result)
        self.assertEqual(events, expected)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if the test raises
    # an error (as opposed to a failure).
    def test_run_call_order__error_in_test(self):
        events = []
        result = LoggingResult(events)

        def setUp():
            events.append('setUp')

        def test():
            events.append('test')
            raise RuntimeError('raised by test')

        def tearDown():
            events.append('tearDown')

        expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
                    'stopTest']
        unittest.FunctionTestCase(test, setUp, tearDown).run(result)
        self.assertEqual(events, expected)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if the test signals
    # a failure (as opposed to an error).
    def test_run_call_order__failure_in_test(self):
        events = []
        result = LoggingResult(events)

        def setUp():
            events.append('setUp')

        def test():
            events.append('test')
            self.fail('raised by test')

        def tearDown():
            events.append('tearDown')

        expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
                    'stopTest']
        unittest.FunctionTestCase(test, setUp, tearDown).run(result)
        self.assertEqual(events, expected)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if tearDown() raises
    # an exception.
    def test_run_call_order__error_in_tearDown(self):
        events = []
        result = LoggingResult(events)

        def setUp():
            events.append('setUp')

        def test():
            events.append('test')

        def tearDown():
            events.append('tearDown')
            raise RuntimeError('raised by tearDown')

        expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
                    'stopTest']
        unittest.FunctionTestCase(test, setUp, tearDown).run(result)
        self.assertEqual(events, expected)

    # "Return a string identifying the specific test case."
    #
    # Because of the vague nature of the docs, I'm not going to lock this
    # test down too much. Really all that can be asserted is that the id()
    # will be a string (either 8-byte or unicode -- again, because the docs
    # just say "string")
    def test_id(self):
        test = unittest.FunctionTestCase(lambda: None)
        self.assertTrue(isinstance(test.id(), basestring))

    # "Returns a one-line description of the test, or None if no description
    # has been provided. The default implementation of this method returns
    # the first line of the test method's docstring, if available, or None."
    def test_shortDescription__no_docstring(self):
        test = unittest.FunctionTestCase(lambda: None)
        self.assertEqual(test.shortDescription(), None)

    # "Returns a one-line description of the test, or None if no description
    # has been provided. The default implementation of this method returns
    # the first line of the test method's docstring, if available, or None."
    def test_shortDescription__singleline_docstring(self):
        desc = "this tests foo"
        test = unittest.FunctionTestCase(lambda: None, description=desc)
        self.assertEqual(test.shortDescription(), "this tests foo")
class Test_TestResult(TestCase):
    """Behavioural tests for unittest.TestResult: initial state, stop(),
    the start/stop hooks, and the addSuccess/addFailure/addError
    bookkeeping."""

    # Note: there are not separate tests for TestResult.wasSuccessful(),
    # TestResult.errors, TestResult.failures, TestResult.testsRun or
    # TestResult.shouldStop because these only have meaning in terms of
    # other TestResult methods.
    #
    # Accordingly, tests for the aforenamed attributes are incorporated
    # in with the tests for the defining methods.
    ################################################################

    def test_init(self):
        # a fresh result is empty and successful
        result = unittest.TestResult()

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(result.shouldStop, False)

    # "This method can be called to signal that the set of tests being
    # run should be aborted by setting the TestResult's shouldStop
    # attribute to True."
    def test_stop(self):
        result = unittest.TestResult()

        result.stop()

        self.assertEqual(result.shouldStop, True)

    # "Called when the test case test is about to be run. The default
    # implementation simply increments the instance's testsRun counter."
    def test_startTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        result = unittest.TestResult()

        result.startTest(test)

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        result.stopTest(test)

    # "Called after the test case test has been executed, regardless of
    # the outcome. The default implementation does nothing."
    def test_stopTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        result = unittest.TestResult()

        result.startTest(test)

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        result.stopTest(test)

        # Same tests as above; make sure nothing has changed
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

    # "Called before and after tests are run. The default implementation does nothing."
    def test_startTestRun_stopTestRun(self):
        result = unittest.TestResult()
        result.startTestRun()
        result.stopTestRun()

    # "addSuccess(test)"
    # ...
    # "Called when the test case test succeeds"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addSuccess(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        result = unittest.TestResult()

        result.startTest(test)
        result.addSuccess(test)
        result.stopTest(test)

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

    # "addFailure(test, err)"
    # ...
    # "Called when the test case test signals a failure. err is a tuple of
    # the form returned by sys.exc_info(): (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addFailure(self):
        import sys

        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        # a real exc_info tuple is required; fabricate one via fail()
        try:
            test.fail("foo")
        except:
            exc_info_tuple = sys.exc_info()

        result = unittest.TestResult()

        result.startTest(test)
        result.addFailure(test, exc_info_tuple)
        result.stopTest(test)

        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 1)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        test_case, formatted_exc = result.failures[0]
        self.assertTrue(test_case is test)
        self.assertTrue(isinstance(formatted_exc, str))

    # "addError(test, err)"
    # ...
    # "Called when the test case test raises an unexpected exception err
    # is a tuple of the form returned by sys.exc_info():
    # (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addError(self):
        import sys

        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        # a real exc_info tuple is required; fabricate one by raising
        try:
            raise TypeError()
        except:
            exc_info_tuple = sys.exc_info()

        result = unittest.TestResult()

        result.startTest(test)
        result.addError(test, exc_info_tuple)
        result.stopTest(test)

        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        test_case, formatted_exc = result.errors[0]
        self.assertTrue(test_case is test)
        self.assertTrue(isinstance(formatted_exc, str))
### Support code for Test_TestCase
################################################################

# Minimal case used by Test_TestCase's equality/hash fixtures; Bar below
# derives from it to provide a distinct-but-related type.
class Foo(unittest.TestCase):
    def runTest(self): pass
    def test1(self): pass
# Subclass of Foo with an extra test method, for the ne_pairs fixtures.
class Bar(Foo):
    def test2(self): pass
class LoggingTestCase(unittest.TestCase):
    """A test case which logs its calls."""

    def __init__(self, events):
        super(LoggingTestCase, self).__init__('test')
        # shared list into which setUp/test/tearDown append their names
        self.events = events

    def setUp(self):
        if self.__class__ is LoggingTestCase:
            # evade test discovery
            raise unittest.SkipTest
        self.events.append('setUp')

    def test(self):
        self.events.append('test')

    def tearDown(self):
        self.events.append('tearDown')
class ResultWithNoStartTestRunStopTestRun(object):
    """An object honouring TestResult before startTestRun/stopTestRun."""

    def __init__(self):
        # mirror the public attributes of a fresh, empty TestResult
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False

    # The hooks below are deliberate no-ops: this stand-in models the
    # TestResult API as it existed before startTestRun/stopTestRun were
    # added, so run() must cope with their absence.
    def startTest(self, test):
        pass

    def stopTest(self, test):
        pass

    def addError(self, test):
        pass

    def addFailure(self, test):
        pass

    def addSuccess(self, test):
        pass

    def wasSuccessful(self):
        return True
################################################################
### /Support code for Test_TestCase

class Test_TestCase(TestCase, TestEquality, TestHashing):
    # Behavioural tests for unittest.TestCase; equality and hashing
    # contract checks are inherited from TestEquality/TestHashing and
    # driven by the fixtures below.

    ### Set up attributes used by inherited tests
    ################################################################

    # Used by TestHashing.test_hash and TestEquality.test_eq
    eq_pairs = [(Foo('test1'), Foo('test1'))]

    # Used by TestEquality.test_ne
    ne_pairs = [(Foo('test1'), Foo('runTest'))
               ,(Foo('test1'), Bar('test1'))
               ,(Foo('test1'), Bar('test2'))]

    ################################################################
    ### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
    class Test(unittest.TestCase):
        def runTest(self): raise MyException()
        def test(self): pass

    # Only the id()'s trailing '.Test.runTest' matters; the leading
    # module part varies with how this file was imported.
    self.assertEqual(Test().id()[-13:], '.Test.runTest')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
    class Test(unittest.TestCase):
        def runTest(self): raise MyException()
        def test(self): pass

    # Constructing with an unknown method name must fail eagerly.
    try:
        Test('testfoo')
    except ValueError:
        pass
    else:
        self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
    """countTestCases() is always 1 for a plain TestCase instance."""
    class Foo(unittest.TestCase):
        def test(self): pass
    self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
    """defaultTestResult() must return a plain unittest.TestResult."""
    class Foo(unittest.TestCase):
        def runTest(self):
            pass
    result = Foo().defaultTestResult()
    # exact type, not isinstance: subclasses are expected to override
    self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
    """A setUp() error must yield startTest/setUp/addError/stopTest."""
    events = []
    result = LoggingResult(events)
    class Foo(LoggingTestCase):
        def setUp(self):
            super(Foo, self).setUp()
            raise RuntimeError('raised by Foo.setUp')
    Foo(events).run(result)
    # neither the test body nor tearDown may run after a setUp error
    expected = ['startTest', 'setUp', 'addError', 'stopTest']
    self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors."
def test_run_call_order__error_in_setUp_default_result(self):
    """Same as above, but run() with no result must also bracket the
    sequence with startTestRun/stopTestRun on the temporary result."""
    events = []
    class Foo(LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)
        def setUp(self):
            super(Foo, self).setUp()
            raise RuntimeError('raised by Foo.setUp')
    Foo(events).run()
    expected = ['startTestRun', 'startTest', 'setUp', 'addError',
                'stopTest', 'stopTestRun']
    self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
    """An error in the test body still runs tearDown, and is reported
    via addError (not addFailure)."""
    events = []
    result = LoggingResult(events)
    class Foo(LoggingTestCase):
        def test(self):
            super(Foo, self).test()
            raise RuntimeError('raised by Foo.test')
    expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
                'stopTest']
    Foo(events).run(result)
    self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
    """Same as above with a temporary result: startTestRun/stopTestRun
    must bracket the sequence."""
    events = []
    class Foo(LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)
        def test(self):
            super(Foo, self).test()
            raise RuntimeError('raised by Foo.test')
    expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addError',
                'tearDown', 'stopTest', 'stopTestRun']
    Foo(events).run()
    self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
    """self.fail() in the test body is reported via addFailure and still
    runs tearDown."""
    events = []
    result = LoggingResult(events)
    class Foo(LoggingTestCase):
        def test(self):
            super(Foo, self).test()
            self.fail('raised by Foo.test')
    expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
                'stopTest']
    Foo(events).run(result)
    self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
    """Same failure scenario with a temporary result: the sequence is
    bracketed by startTestRun/stopTestRun."""
    class Foo(LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)
        def test(self):
            super(Foo, self).test()
            self.fail('raised by Foo.test')
    expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addFailure',
                'tearDown', 'stopTest', 'stopTestRun']
    events = []
    Foo(events).run()
    self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
    """A tearDown() error is reported via addError after tearDown ran."""
    events = []
    result = LoggingResult(events)
    class Foo(LoggingTestCase):
        def tearDown(self):
            super(Foo, self).tearDown()
            raise RuntimeError('raised by Foo.tearDown')
    Foo(events).run(result)
    expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
                'stopTest']
    self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
    """Same tearDown-error scenario with a temporary result: bracketed
    by startTestRun/stopTestRun."""
    class Foo(LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)
        def tearDown(self):
            super(Foo, self).tearDown()
            raise RuntimeError('raised by Foo.tearDown')
    events = []
    Foo(events).run()
    expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
                'addError', 'stopTest', 'stopTestRun']
    self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun."
def test_run_call_order_default_result(self):
    """run() must not blow up when the default result object lacks the
    optional startTestRun/stopTestRun hooks."""
    class Foo(unittest.TestCase):
        def defaultTestResult(self):
            return ResultWithNoStartTestRunStopTestRun()
        def test(self):
            pass
    # no assertion needed: simply completing without error is the test
    Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
    """failureException defaults to AssertionError."""
    class Foo(unittest.TestCase):
        def test(self):
            pass
    self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
    """Raising the designated failureException directly must be recorded
    as a failure, not an error."""
    events = []
    result = LoggingResult(events)
    class Foo(unittest.TestCase):
        def test(self):
            raise RuntimeError()
        failureException = RuntimeError
    self.assertTrue(Foo('test').failureException is RuntimeError)
    Foo('test').run(result)
    expected = ['startTest', 'addFailure', 'stopTest']
    self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
    """self.fail() raises the designated failureException, which must be
    recorded as a failure."""
    events = []
    result = LoggingResult(events)
    class Foo(unittest.TestCase):
        def test(self):
            self.fail("foo")
        failureException = RuntimeError
    self.assertTrue(Foo('test').failureException is RuntimeError)
    Foo('test').run(result)
    expected = ['startTest', 'addFailure', 'stopTest']
    self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
    """The base-class setUp() is a no-op and must not raise."""
    class Foo(unittest.TestCase):
        def runTest(self):
            pass
    # ... and nothing should happen
    Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
    """The base-class tearDown() is a no-op and must not raise."""
    class Foo(unittest.TestCase):
        def runTest(self):
            pass
    # ... and nothing should happen
    Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
    """id() must return some kind of string (str or unicode on py2)."""
    class Foo(unittest.TestCase):
        def runTest(self):
            pass
    # basestring: this file targets Python 2
    self.assertTrue(isinstance(Foo().id(), basestring))
# "If result is omitted or None, a temporary result object is created
# and used, but is not made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called."
def test_run__uses_defaultTestResult(self):
    """run() with no result argument must obtain one via
    defaultTestResult() and drive the full startTestRun..stopTestRun
    sequence on it."""
    events = []
    class Foo(unittest.TestCase):
        def test(self):
            events.append('test')
        def defaultTestResult(self):
            return LoggingResult(events)
    # Make run() find a result object on its own
    Foo('test').run()
    expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
                'stopTest', 'stopTestRun']
    self.assertEqual(events, expected)
# NOTE: the docstrings (and deliberate lack of one below) are the test
# fixtures here -- shortDescription() is derived from them, so they must
# not be edited.
def testShortDescriptionWithoutDocstring(self):
    # no docstring: shortDescription() falls back to 'method (module.Class)'
    self.assertEqual(
            self.shortDescription(),
            'testShortDescriptionWithoutDocstring (' + __name__ +
            '.Test_TestCase)')
def testShortDescriptionWithOneLineDocstring(self):
    """Tests shortDescription() for a method with a docstring."""
    # a one-line docstring is appended after the id on its own line
    self.assertEqual(
            self.shortDescription(),
            ('testShortDescriptionWithOneLineDocstring '
             '(' + __name__ + '.Test_TestCase)\n'
             'Tests shortDescription() for a method with a docstring.'))
def testShortDescriptionWithMultiLineDocstring(self):
    """Tests shortDescription() for a method with a longer docstring.
    This method ensures that only the first line of a docstring is
    returned used in the short description, no matter how long the
    whole thing is.
    """
    # only the first docstring line may appear in the description
    self.assertEqual(
            self.shortDescription(),
            ('testShortDescriptionWithMultiLineDocstring '
             '(' + __name__ + '.Test_TestCase)\n'
             'Tests shortDescription() for a method with a longer '
             'docstring.'))
def testAddTypeEqualityFunc(self):
    """addTypeEqualityFunc must make assertEqual use the registered
    comparison for objects of the registered type."""
    class SadSnake(object):
        """Dummy class for test_addTypeEqualityFunc."""
    s1, s2 = SadSnake(), SadSnake()
    # default object equality: distinct instances are unequal
    self.assertFalse(s1 == s2)
    def AllSnakesCreatedEqual(a, b, msg=None):
        return type(a) == type(b) == SadSnake
    self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
    # now assertEqual delegates to AllSnakesCreatedEqual and passes
    self.assertEqual(s1, s2)
    # No this doesn't clean up and remove the SadSnake equality func
    # from this TestCase instance but since its a local nothing else
    # will ever notice that.
def testAssertIs(self):
    """assertIs checks identity, not equality."""
    thing = object()
    self.assertIs(thing, thing)
    self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
    """assertIsNot fails only when both operands are the same object."""
    thing = object()
    self.assertIsNot(thing, object())
    self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
    """assertIsInstance passes/fails per isinstance()."""
    thing = []
    self.assertIsInstance(thing, list)
    self.assertRaises(self.failureException, self.assertIsInstance,
                      thing, dict)
def testAssertNotIsInstance(self):
    """assertNotIsInstance is the negation of assertIsInstance."""
    thing = []
    self.assertNotIsInstance(thing, dict)
    self.assertRaises(self.failureException, self.assertNotIsInstance,
                      thing, list)
def testAssertIn(self):
    """assertIn/assertNotIn work on strings, lists and dict keys."""
    animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
    self.assertIn('a', 'abc')
    self.assertIn(2, [1, 2, 3])
    self.assertIn('monkey', animals)   # dict membership is by key
    self.assertNotIn('d', 'abc')
    self.assertNotIn(0, [1, 2, 3])
    self.assertNotIn('otter', animals)
    self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
    self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
    self.assertRaises(self.failureException, self.assertIn, 'elephant',
                      animals)
    self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
    self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
    self.assertRaises(self.failureException, self.assertNotIn, 'cow',
                      animals)
def testAssertDictContainsSubset(self):
    """assertDictContainsSubset passes when every (key, value) of the
    first dict appears in the second, fails on missing keys or
    mismatched values."""
    self.assertDictContainsSubset({}, {})
    self.assertDictContainsSubset({}, {'a': 1})
    self.assertDictContainsSubset({'a': 1}, {'a': 1})
    self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
    self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
    # NOTE(review): the third positional argument of assertRaises is
    # passed to the callee as its msg, not used as a regexp -- these
    # '.*Missing.*' strings look like they were meant for
    # assertRaisesRegexp; behaviour is unaffected.
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertDictContainsSubset, {'a': 2}, {'a': 1},
                      '.*Mismatched values:.*')
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertDictContainsSubset, {'c': 1}, {'a': 1},
                      '.*Missing:.*')
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertDictContainsSubset, {'a': 1, 'c': 1},
                      {'a': 1}, '.*Missing:.*')
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertDictContainsSubset, {'a': 1, 'c': 1},
                      {'a': 1}, '.*Missing:.*Mismatched values:.*')
def testAssertEqual(self):
    """assertEqual passes (with all three msg spellings) on equal pairs
    and raises failureException on unequal pairs."""
    equal_pairs = [
            ((), ()),
            ({}, {}),
            ([], []),
            (set(), set()),
            (frozenset(), frozenset())]
    for a, b in equal_pairs:
        # This mess of try excepts is to test the assertEqual behavior
        # itself.
        try:
            self.assertEqual(a, b)
        except self.failureException:
            self.fail('assertEqual(%r, %r) failed' % (a, b))
        try:
            self.assertEqual(a, b, msg='foo')
        except self.failureException:
            self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
        try:
            self.assertEqual(a, b, 'foo')
        except self.failureException:
            self.fail('assertEqual(%r, %r) with third parameter failed' %
                      (a, b))
    unequal_pairs = [
            ((), []),
            ({}, set()),
            (set([4,1]), frozenset([4,2])),
            (frozenset([4,5]), set([2,3])),
            (set([3,4]), set([5,4]))]
    for a, b in unequal_pairs:
        self.assertRaises(self.failureException, self.assertEqual, a, b)
        self.assertRaises(self.failureException, self.assertEqual, a, b,
                          'foo')
        self.assertRaises(self.failureException, self.assertEqual, a, b,
                          msg='foo')
def testEquality(self):
    """Exercises the container-equality asserts: assertListEqual,
    assertTupleEqual, assertSequenceEqual, assertDictEqual and
    assertSameElements, including their type-strictness."""
    self.assertListEqual([], [])
    self.assertTupleEqual((), ())
    self.assertSequenceEqual([], ())
    a = [0, 'a', []]
    b = []
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertListEqual, a, b)
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertListEqual, tuple(a), tuple(b))
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertSequenceEqual, a, tuple(b))
    b.extend(a)
    self.assertListEqual(a, b)
    self.assertTupleEqual(tuple(a), tuple(b))
    # assertSequenceEqual ignores the concrete sequence type ...
    self.assertSequenceEqual(a, tuple(b))
    self.assertSequenceEqual(tuple(a), b)
    # ... but assertListEqual/assertTupleEqual are type-strict
    self.assertRaises(self.failureException, self.assertListEqual,
                      a, tuple(b))
    self.assertRaises(self.failureException, self.assertTupleEqual,
                      tuple(a), b)
    self.assertRaises(self.failureException, self.assertListEqual, None, b)
    self.assertRaises(self.failureException, self.assertTupleEqual, None,
                      tuple(b))
    self.assertRaises(self.failureException, self.assertSequenceEqual,
                      None, tuple(b))
    self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
    self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
    self.assertRaises(self.failureException, self.assertSequenceEqual,
                      1, 1)
    self.assertDictEqual({}, {})
    c = { 'x': 1 }
    d = {}
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertDictEqual, c, d)
    d.update(c)
    self.assertDictEqual(c, d)
    d['x'] = 0
    self.assertRaises(unittest.TestCase.failureException,
                      self.assertDictEqual, c, d, 'These are unequal')
    self.assertRaises(self.failureException, self.assertDictEqual, None, d)
    self.assertRaises(self.failureException, self.assertDictEqual, [], d)
    self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
    # assertSameElements compares contents ignoring order, with
    # multiplicity
    self.assertSameElements([1, 2, 3], [3, 2, 1])
    self.assertSameElements([1, 2] + [3] * 100, [1] * 100 + [2, 3])
    self.assertSameElements(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
    self.assertRaises(self.failureException, self.assertSameElements,
                      [10], [10, 11])
    self.assertRaises(self.failureException, self.assertSameElements,
                      [10, 11], [10])
    # Test that sequences of unhashable objects can be tested for sameness:
    self.assertSameElements([[1, 2], [3, 4]], [[3, 4], [1, 2]])
    self.assertSameElements([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
    self.assertRaises(self.failureException, self.assertSameElements,
                      [[1]], [[2]])
def testAssertSetEqual(self):
    """assertSetEqual accepts set/frozenset mixes with equal contents,
    rejects non-set operands, and must not crash formatting tuple
    elements in its failure message."""
    set1 = set()
    set2 = set()
    self.assertSetEqual(set1, set2)
    self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
    self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
    self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
    self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
    set1 = set(['a'])
    set2 = set()
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    set1 = set(['a'])
    set2 = set(['a'])
    self.assertSetEqual(set1, set2)
    set1 = set(['a'])
    set2 = set(['a', 'b'])
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    set1 = set(['a'])
    set2 = frozenset(['a', 'b'])
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    set1 = set(['a', 'b'])
    set2 = frozenset(['a', 'b'])
    # set vs frozenset with equal contents is accepted
    self.assertSetEqual(set1, set2)
    set1 = set()
    set2 = "foo"
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
    # make sure any string formatting is tuple-safe
    set1 = set([(0, 1), (2, 3)])
    set2 = set([(4, 5)])
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
    """assertGreater/GreaterEqual/Less/LessEqual across ints, floats,
    byte strings, unicode strings, and byte/unicode mixes."""
    # Try ints
    self.assertGreater(2, 1)
    self.assertGreaterEqual(2, 1)
    self.assertGreaterEqual(1, 1)
    self.assertLess(1, 2)
    self.assertLessEqual(1, 2)
    self.assertLessEqual(1, 1)
    self.assertRaises(self.failureException, self.assertGreater, 1, 2)
    self.assertRaises(self.failureException, self.assertGreater, 1, 1)
    self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
    self.assertRaises(self.failureException, self.assertLess, 2, 1)
    self.assertRaises(self.failureException, self.assertLess, 1, 1)
    self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
    # Try Floats
    self.assertGreater(1.1, 1.0)
    self.assertGreaterEqual(1.1, 1.0)
    self.assertGreaterEqual(1.0, 1.0)
    self.assertLess(1.0, 1.1)
    self.assertLessEqual(1.0, 1.1)
    self.assertLessEqual(1.0, 1.0)
    self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
    self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
    self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
    self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
    self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
    self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
    # Try Strings
    self.assertGreater('bug', 'ant')
    self.assertGreaterEqual('bug', 'ant')
    self.assertGreaterEqual('ant', 'ant')
    self.assertLess('ant', 'bug')
    self.assertLessEqual('ant', 'bug')
    self.assertLessEqual('ant', 'ant')
    self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
    self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
    self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
    self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
    self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
    self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
    # Try Unicode
    self.assertGreater(u'bug', u'ant')
    self.assertGreaterEqual(u'bug', u'ant')
    self.assertGreaterEqual(u'ant', u'ant')
    self.assertLess(u'ant', u'bug')
    self.assertLessEqual(u'ant', u'bug')
    self.assertLessEqual(u'ant', u'ant')
    self.assertRaises(self.failureException, self.assertGreater, u'ant', u'bug')
    self.assertRaises(self.failureException, self.assertGreater, u'ant', u'ant')
    self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
                      u'bug')
    self.assertRaises(self.failureException, self.assertLess, u'bug', u'ant')
    self.assertRaises(self.failureException, self.assertLess, u'ant', u'ant')
    self.assertRaises(self.failureException, self.assertLessEqual, u'bug', u'ant')
    # Try Mixed String/Unicode (py2 coerces str to unicode when compared)
    self.assertGreater('bug', u'ant')
    self.assertGreater(u'bug', 'ant')
    self.assertGreaterEqual('bug', u'ant')
    self.assertGreaterEqual(u'bug', 'ant')
    self.assertGreaterEqual('ant', u'ant')
    self.assertGreaterEqual(u'ant', 'ant')
    self.assertLess('ant', u'bug')
    self.assertLess(u'ant', 'bug')
    self.assertLessEqual('ant', u'bug')
    self.assertLessEqual(u'ant', 'bug')
    self.assertLessEqual('ant', u'ant')
    self.assertLessEqual(u'ant', 'ant')
    self.assertRaises(self.failureException, self.assertGreater, 'ant', u'bug')
    self.assertRaises(self.failureException, self.assertGreater, u'ant', 'bug')
    self.assertRaises(self.failureException, self.assertGreater, 'ant', u'ant')
    self.assertRaises(self.failureException, self.assertGreater, u'ant', 'ant')
    self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant',
                      u'bug')
    self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
                      'bug')
    self.assertRaises(self.failureException, self.assertLess, 'bug', u'ant')
    self.assertRaises(self.failureException, self.assertLess, u'bug', 'ant')
    self.assertRaises(self.failureException, self.assertLess, 'ant', u'ant')
    self.assertRaises(self.failureException, self.assertLess, u'ant', 'ant')
    self.assertRaises(self.failureException, self.assertLessEqual, 'bug', u'ant')
    self.assertRaises(self.failureException, self.assertLessEqual, u'bug', 'ant')
def testAssertMultiLineEqual(self):
    """assertMultiLineEqual must report a unified diff of the two texts.

    Runs once with byte strings and once with unicode (via .decode);
    the raised failure message is checked with assertEqual, since it
    would be unfair to verify assertMultiLineEqual with itself.
    """
    # The string bodies intentionally start at column 0: they take part
    # in an exact diff comparison below.
    sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...]
"""
    revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...] You may provide your
    own implementation that does not subclass from TestCase, of course.
"""
    sample_text_error = """
- http://www.python.org/doc/2.3/lib/module-unittest.html
?                             ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
?                             ^^^
  test case
-     A test case is the smallest unit of testing. [...]
+     A test case is the smallest unit of testing. [...] You may provide your
?                                                       +++++++++++++++++++++
+     own implementation that does not subclass from TestCase, of course.
"""
    for type_changer in (lambda x: x, lambda x: x.decode('utf8')):
        try:
            self.assertMultiLineEqual(type_changer(sample_text),
                                      type_changer(revised_sample_text))
        # 'except E as e' (PEP 3110) instead of the py2-only
        # 'except E, e' comma form; valid from Python 2.6 on.
        except self.failureException as e:
            # no fair testing ourself with ourself, use assertEqual..
            self.assertEqual(sample_text_error, str(e).encode('utf8'))
def testAssertIsNone(self):
    """assertIsNone/assertIsNotNone check identity with None (False is
    falsy but is not None, so it must fail assertIsNone)."""
    self.assertIsNone(None)
    self.assertRaises(self.failureException, self.assertIsNone, False)
    self.assertIsNotNone('DjZoPloGears on Rails')
    self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegexpMatches(self):
    """assertRegexpMatches searches (not anchors) the pattern in the text."""
    self.assertRegexpMatches('asdfabasdf', r'ab+')
    self.assertRaises(self.failureException, self.assertRegexpMatches,
                      'saaas', r'aaaa')
def testAssertRaisesRegexp(self):
    """assertRaisesRegexp accepts a compiled pattern, a str pattern, or a
    unicode pattern, matched against the raised exception's message."""
    class ExceptionMock(Exception):
        pass
    def Stub():
        raise ExceptionMock('We expect')
    self.assertRaisesRegexp(ExceptionMock, re.compile('expect$'), Stub)
    self.assertRaisesRegexp(ExceptionMock, 'expect$', Stub)
    self.assertRaisesRegexp(ExceptionMock, u'expect$', Stub)
def testAssertNotRaisesRegexp(self):
    """When the callable raises nothing, assertRaisesRegexp must fail
    with 'Exception not raised' -- checked for all three pattern forms."""
    self.assertRaisesRegexp(
            self.failureException, '^Exception not raised$',
            self.assertRaisesRegexp, Exception, re.compile('x'),
            lambda: None)
    self.assertRaisesRegexp(
            self.failureException, '^Exception not raised$',
            self.assertRaisesRegexp, Exception, 'x',
            lambda: None)
    self.assertRaisesRegexp(
            self.failureException, '^Exception not raised$',
            self.assertRaisesRegexp, Exception, u'x',
            lambda: None)
def testAssertRaisesRegexpMismatch(self):
    """When the exception is raised but its message does not match the
    pattern, the failure message must quote both pattern and message."""
    def Stub():
        raise Exception('Unexpected')
    self.assertRaisesRegexp(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegexp, Exception, '^Expected$',
            Stub)
    self.assertRaisesRegexp(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegexp, Exception, u'^Expected$',
            Stub)
    self.assertRaisesRegexp(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegexp, Exception,
            re.compile('^Expected$'), Stub)
# Disabled: exercises the context-manager form of assertRaises
# (ctx.exc_value), which this unittest port does not support.
# def testAssertRaisesExcValue(self):
#     class ExceptionMock(Exception):
#         pass
#     def Stub(foo):
#         raise ExceptionMock(foo)
#     v = "particular value"
#     ctx = self.assertRaises(ExceptionMock)
#     with ctx:
#         Stub(v)
#     e = ctx.exc_value
#     self.assertTrue(isinstance(e, ExceptionMock))
#     self.assertEqual(e.args[0], v)
def testSynonymAssertMethodNames(self):
    """Test undocumented method name synonyms.
    Please do not use these methods names in your own code.
    This test confirms their continued existence and functionality
    in order to avoid breaking existing code.
    """
    self.assertNotEquals(3, 5)
    self.assertEquals(3, 3)
    self.assertAlmostEquals(2.0, 2.0)
    self.assertNotAlmostEquals(3.0, 5.0)
    self.assert_(True)
def testPendingDeprecationMethodNames(self):
    """Test fail* methods pending deprecation, they will warn in 3.2.
    Do not use these methods. They will go away in 3.3.
    """
    self.failIfEqual(3, 5)
    self.failUnlessEqual(3, 3)
    self.failUnlessAlmostEqual(2.0, 2.0)
    self.failIfAlmostEqual(3.0, 5.0)
    self.failUnless(True)
    # adding bytes and unicode raises TypeError on py2 -> exercised here
    self.failUnlessRaises(TypeError, lambda _: 3.14 + u'spam')
    self.failIf(False)
# not sure why this is broken, don't care
# def testDeepcopy(self):
#     # Issue: 5660
#     class TestableTest(TestCase):
#         def testNothing(self):
#             pass
#     test = TestableTest('testNothing')
#     # This shouldn't blow up
#     deepcopy(test)
class Test_TestSkipping(TestCase):
    """Tests for skipTest(), the skip decorators, skipping whole classes,
    and the expectedFailure / unexpected-success machinery."""

    def test_skipping(self):
        """skipTest() from the body or from setUp reports addSkip with the
        given reason, and the test still counts as run."""
        class Foo(unittest.TestCase):
            def test_skip_me(self):
                self.skipTest("skip")
        events = []
        result = LoggingResult(events)
        test = Foo("test_skip_me")
        test.run(result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "skip")])
        # Try letting setUp skip the test now.
        class Foo(unittest.TestCase):
            def setUp(self):
                self.skipTest("testing")
            def test_nothing(self): pass
        events = []
        result = LoggingResult(events)
        test = Foo("test_nothing")
        test.run(result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(result.testsRun, 1)

    def test_skipping_decorators(self):
        """skipUnless(False)/skipIf(True) must skip; the opposite truth
        values must let the test run and succeed."""
        # (decorator, condition that skips, condition that doesn't)
        op_table = ((unittest.skipUnless, False, True),
                    (unittest.skipIf, True, False))
        for deco, do_skip, dont_skip in op_table:
            class Foo(unittest.TestCase):
                @deco(do_skip, "testing")
                def test_skip(self): pass
                @deco(dont_skip, "testing")
                def test_dont_skip(self): pass
            test_do_skip = Foo("test_skip")
            test_dont_skip = Foo("test_dont_skip")
            suite = unittest.TestSuite([test_do_skip, test_dont_skip])
            events = []
            result = LoggingResult(events)
            suite.run(result)
            self.assertEqual(len(result.skipped), 1)
            expected = ['startTest', 'addSkip', 'stopTest',
                        'startTest', 'addSuccess', 'stopTest']
            self.assertEqual(events, expected)
            self.assertEqual(result.testsRun, 2)
            self.assertEqual(result.skipped, [(test_do_skip, "testing")])
            self.assertTrue(result.wasSuccessful())

    def test_skip_class(self):
        """unittest.skip applied to a class skips its tests and prevents
        their bodies from executing."""
        class Foo(unittest.TestCase):
            def test_1(self):
                record.append(1)
        Foo = unittest.skip("testing")(Foo)
        record = []
        result = unittest.TestResult()
        test = Foo("test_1")
        suite = unittest.TestSuite([test])
        suite.run(result)
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(record, [])  # body never ran

    def test_expected_failure(self):
        """A failing @expectedFailure test is reported via
        addExpectedFailure and the run is still successful."""
        class Foo(unittest.TestCase):
            @unittest.expectedFailure
            def test_die(self):
                self.fail("help me!")
        events = []
        result = LoggingResult(events)
        test = Foo("test_die")
        test.run(result)
        self.assertEqual(events,
                         ['startTest', 'addExpectedFailure', 'stopTest'])
        self.assertEqual(result.expectedFailures[0][0], test)
        self.assertTrue(result.wasSuccessful())

    def test_unexpected_success(self):
        """A passing @expectedFailure test is reported via
        addUnexpectedSuccess, not as a failure."""
        class Foo(unittest.TestCase):
            @unittest.expectedFailure
            def test_die(self):
                pass
        events = []
        result = LoggingResult(events)
        test = Foo("test_die")
        test.run(result)
        self.assertEqual(events,
                         ['startTest', 'addUnexpectedSuccess', 'stopTest'])
        self.assertFalse(result.failures)
        self.assertEqual(result.unexpectedSuccesses, [test])
        self.assertTrue(result.wasSuccessful())
class Test_Assertions(TestCase):
    """Tests for assertAlmostEqual/assertNotAlmostEqual and assertRaises."""

    def test_AlmostEqual(self):
        """Rounding behaviour of assertAlmostEqual, including the places
        argument, complex values, and the inf == inf special case."""
        self.assertAlmostEqual(1.00000001, 1.0)
        self.assertNotAlmostEqual(1.0000001, 1.0)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 1.0000001, 1.0)
        self.assertRaises(self.failureException,
                          self.assertNotAlmostEqual, 1.00000001, 1.0)
        self.assertAlmostEqual(1.1, 1.0, places=0)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 1.1, 1.0, places=1)
        # works with complex operands too
        self.assertAlmostEqual(0, .1+.1j, places=0)
        self.assertNotAlmostEqual(0, .1+.1j, places=1)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 0, .1+.1j, places=1)
        self.assertRaises(self.failureException,
                          self.assertNotAlmostEqual, 0, .1+.1j, places=0)
        # inf - inf is nan, but identical infinities must compare equal
        self.assertAlmostEqual(float('inf'), float('inf'))
        self.assertRaises(self.failureException, self.assertNotAlmostEqual,
                          float('inf'), float('inf'))

    def test_assertRaises(self):
        """assertRaises passes on the expected exception (class or
        instance spelling), fails when nothing is raised, and lets an
        unexpected exception type propagate."""
        def _raise(e):
            raise e
        self.assertRaises(KeyError, _raise, KeyError)
        self.assertRaises(KeyError, _raise, KeyError("key"))
        try:
            self.assertRaises(KeyError, lambda: None)
        # 'as e' (PEP 3110) instead of the py2-only 'except E, e' comma
        # form; valid from Python 2.6 on.
        except self.failureException as e:
            # membership test on the message text; the original py2-only
            # 'in e' relied on exceptions being iterable over their args
            self.assert_("KeyError not raised" in str(e), str(e))
        else:
            self.fail("assertRaises() didn't fail")
        try:
            self.assertRaises(KeyError, _raise, ValueError)
        except ValueError:
            pass
        else:
            self.fail("assertRaises() didn't let exception pass through")
        # Disabled: context-manager form of assertRaises is unsupported
        # in this unittest port.
        # with self.assertRaises(KeyError):
        #     raise KeyError
        # with self.assertRaises(KeyError):
        #     raise KeyError("key")
        # try:
        #     with self.assertRaises(KeyError):
        #         pass
        # except self.failureException as e:
        #     self.assert_("KeyError not raised" in e, str(e))
        # else:
        #     self.fail("assertRaises() didn't fail")
        # try:
        #     with self.assertRaises(KeyError):
        #         raise ValueError
        # except ValueError:
        #     pass
        # else:
        #     self.fail("assertRaises() didn't let exception pass through")
class TestLongMessage(TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
    """Build one testable TestCase with longMessage=False and one with
    longMessage=True, sharing this test's failureException so that
    assertRaisesRegexp below can catch their failures."""
    class TestableTestFalse(TestCase):
        longMessage = False
        failureException = self.failureException
        def testTest(self):
            pass
    class TestableTestTrue(TestCase):
        longMessage = True
        failureException = self.failureException
        def testTest(self):
            pass
    self.testableTrue = TestableTestTrue('testTest')
    self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
    """longMessage defaults to False on TestCase."""
    self.assertFalse(TestCase.longMessage)
def test_formatMsg(self):
    """_formatMessage ignores the explicit msg when longMessage is False,
    and appends it ('standard : msg' order) when True."""
    self.assertEquals(self.testableFalse._formatMessage(None, "foo"), "foo")
    self.assertEquals(self.testableFalse._formatMessage("foo", "bar"), "foo")
    self.assertEquals(self.testableTrue._formatMessage(None, "foo"), "foo")
    self.assertEquals(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
def assertMessages(self, methodName, args, errors):
    """Run methodName(*args) four times and match each failure message.

    errors holds four regexps, in order:
      0: longMessage=False, no msg    2: longMessage=True, no msg
      1: longMessage=False, msg=oops  3: longMessage=True, msg=oops
    """
    def getMethod(i):
        # first two iterations use the longMessage=False instance
        useTestableFalse = i < 2
        if useTestableFalse:
            test = self.testableFalse
        else:
            test = self.testableTrue
        return getattr(test, methodName)
    for i, expected_regexp in enumerate(errors):
        testMethod = getMethod(i)
        kwargs = {}
        withMsg = i % 2  # odd iterations pass an explicit msg
        if withMsg:
            kwargs = {"msg": "oops"}
        self.assertRaisesRegexp(self.failureException, expected_regexp,
                                lambda: testMethod(*args, **kwargs))
# Each test below feeds assertMessages the four expected failure-message
# regexps for one assert method (see assertMessages for the ordering).
def testAssertTrue(self):
    self.assertMessages('assertTrue', (False,),
                        ["^False is not True$", "^oops$", "^False is not True$",
                         "^False is not True : oops$"])
def testAssertFalse(self):
    self.assertMessages('assertFalse', (True,),
                        ["^True is not False$", "^oops$", "^True is not False$",
                         "^True is not False : oops$"])
def testNotEqual(self):
    self.assertMessages('assertNotEqual', (1, 1),
                        ["^1 == 1$", "^oops$", "^1 == 1$",
                         "^1 == 1 : oops$"])
def testAlmostEqual(self):
    self.assertMessages('assertAlmostEqual', (1, 2),
                        ["^1 != 2 within 7 places$", "^oops$",
                         "^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
    self.assertMessages('assertNotAlmostEqual', (1, 1),
                        ["^1 == 1 within 7 places$", "^oops$",
                         "^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
    self.assertMessages('_baseAssertEqual', (1, 2),
                        ["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
    # Error messages are multiline so not testing on full message
    # assertTupleEqual and assertListEqual delegate to this method
    self.assertMessages('assertSequenceEqual', ([], [None]),
                        ["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
                         r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
    self.assertMessages('assertSetEqual', (set(), set([None])),
                        ["None$", "^oops$", "None$",
                         "None : oops$"])
def testAssertIn(self):
    self.assertMessages('assertIn', (None, []),
                        ['^None not found in \[\]$', "^oops$",
                         '^None not found in \[\]$',
                         '^None not found in \[\] : oops$'])
def testAssertNotIn(self):
    self.assertMessages('assertNotIn', (None, [None]),
                        ['^None unexpectedly found in \[None\]$', "^oops$",
                         '^None unexpectedly found in \[None\]$',
                         '^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
    self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
                        [r"\+ \{'key': 'value'\}$", "^oops$",
                         "\+ \{'key': 'value'\}$",
                         "\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
    self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
                        ["^Missing: 'key'$", "^oops$",
                         "^Missing: 'key'$",
                         "^Missing: 'key' : oops$"])
def testAssertSameElements(self):
    self.assertMessages('assertSameElements', ([], [None]),
                        [r"\[None\]$", "^oops$",
                         r"\[None\]$",
                         r"\[None\] : oops$"])
def testAssertMultiLineEqual(self):
    self.assertMessages('assertMultiLineEqual', ("", "foo"),
                        [r"\+ foo$", "^oops$",
                         r"\+ foo$",
                         r"\+ foo : oops$"])
def testAssertLess(self):
    """Failure messages produced by assertLess."""
    expected = ["^2 not less than 1$", "^oops$",
                "^2 not less than 1$", "^2 not less than 1 : oops$"]
    self.assertMessages('assertLess', (2, 1), expected)

def testAssertLessEqual(self):
    """Failure messages produced by assertLessEqual."""
    expected = ["^2 not less than or equal to 1$", "^oops$",
                "^2 not less than or equal to 1$",
                "^2 not less than or equal to 1 : oops$"]
    self.assertMessages('assertLessEqual', (2, 1), expected)

def testAssertGreater(self):
    """Failure messages produced by assertGreater."""
    expected = ["^1 not greater than 2$", "^oops$",
                "^1 not greater than 2$",
                "^1 not greater than 2 : oops$"]
    self.assertMessages('assertGreater', (1, 2), expected)

def testAssertGreaterEqual(self):
    """Failure messages produced by assertGreaterEqual."""
    expected = ["^1 not greater than or equal to 2$", "^oops$",
                "^1 not greater than or equal to 2$",
                "^1 not greater than or equal to 2 : oops$"]
    self.assertMessages('assertGreaterEqual', (1, 2), expected)

def testAssertIsNone(self):
    """Failure messages produced by assertIsNone."""
    expected = ["^'not None' is not None$", "^oops$",
                "^'not None' is not None$",
                "^'not None' is not None : oops$"]
    self.assertMessages('assertIsNone', ('not None',), expected)

def testAssertIsNotNone(self):
    """Failure messages produced by assertIsNotNone."""
    expected = ["^unexpectedly None$", "^oops$",
                "^unexpectedly None$",
                "^unexpectedly None : oops$"]
    self.assertMessages('assertIsNotNone', (None,), expected)

def testAssertIs(self):
    """Failure messages produced by assertIs."""
    expected = ["^None is not 'foo'$", "^oops$",
                "^None is not 'foo'$",
                "^None is not 'foo' : oops$"]
    self.assertMessages('assertIs', (None, 'foo'), expected)

def testAssertIsNot(self):
    """Failure messages produced by assertIsNot."""
    expected = ["^unexpectedly identical: None$", "^oops$",
                "^unexpectedly identical: None$",
                "^unexpectedly identical: None : oops$"]
    self.assertMessages('assertIsNot', (None, None), expected)
class TestCleanUp(TestCase):
    """Tests for the TestCase.addCleanup()/doCleanups() machinery."""

    def testCleanUp(self):
        # Cleanups are stored with their args/kwargs and executed in
        # LIFO order by doCleanups(), which returns True on success.
        class TestableTest(TestCase):
            def testNothing(self):
                pass
        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])
        cleanups = []
        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))
        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))
        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)
        self.assertEqual(test._cleanups,
                         [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
                          (cleanup2, (), {})])
        result = test.doCleanups()
        self.assertTrue(result)
        # LIFO: cleanup2 (registered last) runs first.
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])

    def testCleanUpWithErrors(self):
        # Every exception raised by a cleanup is reported to the result
        # object via addError(), and doCleanups() returns False.
        class TestableTest(TestCase):
            def testNothing(self):
                pass
        class MockResult(object):
            errors = []
            def addError(self, test, exc_info):
                self.errors.append((test, exc_info))
        result = MockResult()
        test = TestableTest('testNothing')
        test._resultForDoCleanups = result
        exc1 = Exception('foo')
        exc2 = Exception('bar')
        def cleanup1():
            raise exc1
        def cleanup2():
            raise exc2
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)
        self.assertFalse(test.doCleanups())
        # Cleanups ran LIFO (exc2 first), so reversed() yields the error
        # entries back in registration order for the comparisons below.
        (test1, (Type1, instance1, _)), (test2, (Type2, instance2, _)) = reversed(MockResult.errors)
        self.assertEqual((test1, Type1, instance1), (test, Exception, exc1))
        self.assertEqual((test2, Type2, instance2), (test, Exception, exc2))

    def testCleanupInRun(self):
        # Cleanups run after tearDown during run(); when setUp raises,
        # tearDown and the test body are skipped but cleanups still run.
        blowUp = False
        ordering = []
        class TestableTest(TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')
        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
        def cleanup2():
            ordering.append('cleanup2')
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)
        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')
        result = unittest.TestResult()
        result.addSuccess = success
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
                                    'cleanup2', 'cleanup1', 'success'])
        # Second pass: make setUp fail and verify the cleanup still runs.
        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'cleanup1'])
class Test_TestProgram(TestCase):
    # Horrible white box test
    def testNoExit(self):
        # Patch TestProgram.parseArgs and .test so instantiation neither
        # parses argv nor loads real tests; the fake runner records what
        # it was asked to run.
        result = object()
        test = object()
        class FakeRunner(object):
            def run(self, test):
                self.test = test
                return result
        runner = FakeRunner()
        oldParseArgs = TestProgram.parseArgs
        def restoreParseArgs():
            TestProgram.parseArgs = oldParseArgs
        TestProgram.parseArgs = lambda *args: None
        self.addCleanup(restoreParseArgs)
        def removeTest():
            del TestProgram.test
        TestProgram.test = test
        self.addCleanup(removeTest)
        program = TestProgram(testRunner=runner, exit=False, verbosity=2)
        self.assertEqual(program.result, result)
        self.assertEqual(runner.test, test)
        self.assertEqual(program.verbosity, 2)

    class FooBar(unittest.TestCase):
        # One passing and one failing test for FooBarLoader below.
        def testPass(self):
            assert True
        def testFail(self):
            assert False

    class FooBarLoader(unittest.TestLoader):
        """Test loader that returns a suite containing FooBar."""
        def loadTestsFromModule(self, module):
            return self.suiteClass(
                [self.loadTestsFromTestCase(Test_TestProgram.FooBar)])

    def test_NonExit(self):
        # With exit=False, unittest.main() returns the program object
        # instead of raising SystemExit.
        program = unittest.main(exit=False,
                                argv=["foobar"],
                                testRunner=unittest.TextTestRunner(stream=StringIO()),
                                testLoader=self.FooBarLoader())
        self.assertTrue(hasattr(program, 'result'))

    def test_Exit(self):
        # With exit=True, main() raises SystemExit after the run.
        self.assertRaises(
            SystemExit,
            unittest.main,
            argv=["foobar"],
            testRunner=unittest.TextTestRunner(stream=StringIO()),
            exit=True,
            testLoader=self.FooBarLoader())

    def test_ExitAsDefault(self):
        # Exiting is the default when the exit argument is omitted.
        self.assertRaises(
            SystemExit,
            unittest.main,
            argv=["foobar"],
            testRunner=unittest.TextTestRunner(stream=StringIO()),
            testLoader=self.FooBarLoader())
class Test_TextTestRunner(TestCase):
    """Tests for TextTestRunner."""

    def test_works_with_result_without_startTestRun_stopTestRun(self):
        # The runner must tolerate legacy result classes that predate the
        # startTestRun()/stopTestRun() hooks.
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''
            def printErrors(self):
                pass
        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(StringIO())
            def _makeResult(self):
                return OldTextResult()
        runner = Runner()
        runner.run(unittest.TestSuite())

    def test_startTestRun_stopTestRun_called(self):
        # Exactly startTestRun and stopTestRun fire for an empty suite.
        class LoggingTextResult(LoggingResult):
            separator2 = ''
            def printErrors(self):
                pass
        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(StringIO())
                self._events = events
            def _makeResult(self):
                return LoggingTextResult(self._events)
        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)

    def test_pickle_unpickle(self):
        # Issue #7197: a TextTestRunner should be (un)pickleable. This is
        # required by test_multiprocessing under Windows (in verbose mode).
        import StringIO  # NOTE: Python 2 only module; this file targets Py2.
        # cStringIO objects are not pickleable, but StringIO objects are.
        stream = StringIO.StringIO("foo")
        runner = unittest.TextTestRunner(stream)
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol=protocol)
            obj = pickle.loads(s)
            # StringIO objects never compare equal, a cheap test instead.
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())
class TestDiscovery(TestCase):
    # Heavily mocked tests so I can avoid hitting the filesystem

    def test_get_name_from_path(self):
        # Paths under the top-level dir map to dotted module names; paths
        # outside it trip an assertion in the loader.
        loader = unittest.TestLoader()
        loader._top_level_dir = '/foo'
        name = loader._get_name_from_path('/foo/bar/baz.py')
        self.assertEqual(name, 'bar.baz')
        if not __debug__:
            # asserts are off
            return
        self.assertRaises(AssertionError,
                          loader._get_name_from_path, '/bar/baz.py')

    def test_find_tests(self):
        loader = unittest.TestLoader()
        # Save/restore hooks for the os functions patched below.
        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir
        # First listdir answer is for /foo, second for the test_dir package.
        path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
                       'test.foo', 'test-not-a-module.py', 'another_dir'],
                      ['test3.py', 'test4.py', ]]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)
        def isdir(path):
            return path.endswith('dir')
        os.path.isdir = isdir
        self.addCleanup(restore_isdir)
        def isfile(path):
            # another_dir is not a package and so shouldn't be recursed into
            return not path.endswith('dir') and not 'another_dir' in path
        os.path.isfile = isfile
        self.addCleanup(restore_isfile)
        # Stub module loading so results are just tagged strings.
        loader._get_module_from_name = lambda path: path + ' module'
        loader.loadTestsFromModule = lambda module: module + ' tests'
        loader._top_level_dir = '/foo'
        suite = list(loader._find_tests('/foo', 'test*.py'))
        expected = [name + ' module tests' for name in
                    ('test1', 'test2')]
        expected.extend([('test_dir.%s' % name) + ' module tests' for name in
                         ('test3', 'test4')])
        self.assertEqual(suite, expected)

    def test_find_tests_with_package(self):
        loader = unittest.TestLoader()
        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir
        directories = ['a_directory', 'test_directory', 'test_directory2']
        path_lists = [directories, [], [], []]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)
        os.path.isdir = lambda path: True
        self.addCleanup(restore_isdir)
        os.path.isfile = lambda path: os.path.basename(path) not in directories
        self.addCleanup(restore_isfile)
        class Module(object):
            # Records every constructed path and each load_tests call.
            paths = []
            load_tests_args = []
            def __init__(self, path):
                self.path = path
                self.paths.append(path)
                if os.path.basename(path) == 'test_directory':
                    def load_tests(loader, tests, pattern):
                        self.load_tests_args.append((loader, tests, pattern))
                        return 'load_tests'
                    self.load_tests = load_tests
            def __eq__(self, other):
                return self.path == other.path
        loader._get_module_from_name = lambda name: Module(name)
        def loadTestsFromModule(module, use_load_tests):
            if use_load_tests:
                raise self.failureException('use_load_tests should be False for packages')
            return module.path + ' module tests'
        loader.loadTestsFromModule = loadTestsFromModule
        loader._top_level_dir = '/foo'
        # this time no '.py' on the pattern so that it can match
        # a test package
        suite = list(loader._find_tests('/foo', 'test*'))
        # We should have loaded tests from the test_directory package by calling load_tests
        # and directly from the test_directory2 package
        self.assertEqual(suite,
                         ['load_tests', 'test_directory2' + ' module tests'])
        self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])
        # load_tests should have been called once with loader, tests and pattern
        self.assertEqual(Module.load_tests_args,
                         [(loader, 'test_directory' + ' module tests', 'test*')])

    def test_discover(self):
        loader = unittest.TestLoader()
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        os.path.isfile = lambda path: False
        self.addCleanup(restore_isfile)
        orig_sys_path = sys.path[:]
        def restore_path():
            sys.path[:] = orig_sys_path
        self.addCleanup(restore_path)
        full_path = os.path.abspath(os.path.normpath('/foo'))
        # With isfile -> False the start dir is not importable as a package.
        self.assertRaises(ImportError,
                          loader.discover, '/foo/bar', top_level_dir='/foo')
        self.assertEqual(loader._top_level_dir, full_path)
        self.assertIn(full_path, sys.path)
        os.path.isfile = lambda path: True
        _find_tests_args = []
        def test():
            pass
        tests = [test]
        def _find_tests(start_dir, pattern):
            _find_tests_args.append((start_dir, pattern))
            return [tests]
        loader._find_tests = _find_tests
        suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
        top_level_dir = os.path.abspath(os.path.normpath('/foo/bar'))
        start_dir = os.path.abspath(os.path.normpath('/foo/bar/baz'))
        self.assertEqual(list(suite), tests)
        self.assertEqual(loader._top_level_dir, top_level_dir)
        self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
        self.assertIn(top_level_dir, sys.path)

    def test_discover_with_modules_that_fail_to_import(self):
        loader = unittest.TestLoader()
        listdir = os.listdir
        os.listdir = lambda _: ['test_this_does_not_exist.py']
        isfile = os.path.isfile
        os.path.isfile = lambda _: True
        orig_sys_path = sys.path[:]
        def restore():
            os.path.isfile = isfile
            os.listdir = listdir
            sys.path[:] = orig_sys_path
        self.addCleanup(restore)
        suite = loader.discover('.')
        self.assertIn(os.getcwd(), sys.path)
        # The import failure is deferred into a single synthetic test case
        # that re-raises the ImportError when executed.
        self.assertEqual(suite.countTestCases(), 1)
        test = list(suite)[0] # extract test from suite
        self.assertRaises(ImportError, test.test_this_does_not_exist)

    def test_command_line_handling_parseArgs(self):
        # Haha - take that uninstantiable class
        program = object.__new__(TestProgram)
        args = []
        def do_discovery(argv):
            args.extend(argv)
        program._do_discovery = do_discovery
        program.parseArgs(['something', 'discover'])
        self.assertEqual(args, [])
        program.parseArgs(['something', 'discover', 'foo', 'bar'])
        self.assertEqual(args, ['foo', 'bar'])

    def test_command_line_handling_do_discovery_too_many_arguments(self):
        class Stop(Exception):
            pass
        def usageExit():
            raise Stop
        program = object.__new__(TestProgram)
        program.usageExit = usageExit
        # too many args
        self.assertRaises(
            Stop,
            lambda: program._do_discovery(['one', 'two', 'three', 'four']))

    def test_command_line_handling_do_discovery_calls_loader(self):
        program = object.__new__(TestProgram)
        class Loader(object):
            args = []
            def discover(self, start_dir, pattern, top_level_dir):
                self.args.append((start_dir, pattern, top_level_dir))
                return 'tests'
        # Each case below checks how positional args and the -s/-t/-p/-v
        # options map onto (start_dir, pattern, top_level_dir).
        program._do_discovery(['-v'], Loader=Loader)
        self.assertEqual(program.verbosity, 2)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['--verbose'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery([], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['fish', 'eggs'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['-s', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['-t', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['-p', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'fish', None)])
        Loader.args = []
        program = object.__new__(TestProgram)
        program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])
        self.assertEqual(program.verbosity, 2)
######################################################################
## Main
######################################################################
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * The result of the {@link Admin#listTransactions()} call.
 */
public class ListTransactionsResult {
    // Top-level future that completes once the brokers in the cluster are
    // known; it yields one pending future per broker id, each of which
    // later completes with that broker's transaction listings.
    private final KafkaFuture<Map<Integer, KafkaFutureImpl<Collection<TransactionListing>>>> future;

    ListTransactionsResult(KafkaFuture<Map<Integer, KafkaFutureImpl<Collection<TransactionListing>>>> future) {
        this.future = future;
    }

    /**
     * Get all transaction listings. If any of the underlying requests fail, then the future
     * returned from this method will also fail with the first encountered error.
     *
     * @return A future containing the collection of transaction listings. The future completes
     *         when all transaction listings are available and fails after any non-retriable error.
     */
    public KafkaFuture<Collection<TransactionListing>> all() {
        return allByBrokerId().thenApply(map -> {
            // Flatten the per-broker collections into a single list.
            List<TransactionListing> allListings = new ArrayList<>();
            for (Collection<TransactionListing> listings : map.values()) {
                allListings.addAll(listings);
            }
            return allListings;
        });
    }

    /**
     * Get a future which returns a map containing the underlying listing future for each broker
     * in the cluster. This is useful, for example, if a partial listing of transactions is
     * sufficient, or if you want more granular error details.
     *
     * @return A future containing a map of futures by broker which complete individually when
     *         their respective transaction listings are available. The top-level future returned
     *         from this method may fail if the admin client is unable to lookup the available
     *         brokers in the cluster.
     */
    public KafkaFuture<Map<Integer, KafkaFuture<Collection<TransactionListing>>>> byBrokerId() {
        KafkaFutureImpl<Map<Integer, KafkaFuture<Collection<TransactionListing>>>> result = new KafkaFutureImpl<>();
        future.whenComplete((brokerFutures, exception) -> {
            if (brokerFutures != null) {
                // Copy into a fresh map so callers cannot observe or mutate
                // the internal per-broker future map.
                Map<Integer, KafkaFuture<Collection<TransactionListing>>> brokerFuturesCopy =
                    new HashMap<>(brokerFutures.size());
                brokerFuturesCopy.putAll(brokerFutures);
                result.complete(brokerFuturesCopy);
            } else {
                result.completeExceptionally(exception);
            }
        });
        return result;
    }

    /**
     * Get all transaction listings in a map which is keyed by the ID of respective broker
     * that is currently managing them. If any of the underlying requests fail, then the future
     * returned from this method will also fail with the first encountered error.
     *
     * @return A future containing a map from the broker ID to the transactions hosted by that
     *         broker respectively. This future completes when all transaction listings are
     *         available and fails after any non-retriable error.
     */
    public KafkaFuture<Map<Integer, Collection<TransactionListing>>> allByBrokerId() {
        KafkaFutureImpl<Map<Integer, Collection<TransactionListing>>> allFuture = new KafkaFutureImpl<>();
        Map<Integer, Collection<TransactionListing>> allListingsMap = new HashMap<>();
        future.whenComplete((map, topLevelException) -> {
            if (topLevelException != null) {
                allFuture.completeExceptionally(topLevelException);
                return;
            }
            // Track which brokers have not responded yet; the combined
            // future completes successfully when this set becomes empty.
            Set<Integer> remainingResponses = new HashSet<>(map.keySet());
            map.forEach((brokerId, future) ->
                future.whenComplete((listings, brokerException) -> {
                    if (brokerException != null) {
                        // First broker-level failure wins.
                        allFuture.completeExceptionally(brokerException);
                    } else if (!allFuture.isDone()) {
                        // Ignore stragglers once the future has already been
                        // completed exceptionally by another broker.
                        allListingsMap.put(brokerId, listings);
                        remainingResponses.remove(brokerId);
                        if (remainingResponses.isEmpty()) {
                            allFuture.complete(allListingsMap);
                        }
                    }
                })
            );
        });
        return allFuture;
    }
}
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
def _determine_migration_type(migration):
if migration['old_instance_type_id'] != migration['new_instance_type_id']:
return 'resize'
else:
return 'migration'
# TODO(berrange): Remove NovaObjectDictCompat
class Migration(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    """Versioned object representing an instance migration record."""
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Added migration_type and hidden
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(),
        'source_compute': fields.StringField(nullable=True),
        'dest_compute': fields.StringField(nullable=True),
        'source_node': fields.StringField(nullable=True),
        'dest_node': fields.StringField(nullable=True),
        'dest_host': fields.StringField(nullable=True),
        'old_instance_type_id': fields.IntegerField(nullable=True),
        'new_instance_type_id': fields.IntegerField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'migration_type': fields.EnumField(['migration', 'resize',
                                            'live-migration', 'evacuate'],
                                           nullable=False),
        'hidden': fields.BooleanField(nullable=False, default=False),
    }

    @staticmethod
    def _from_db_object(context, migration, db_migration):
        """Populate *migration* from a DB row and return it.

        Rows written before the migration_type column existed hold None
        there; in that case the type is derived from the flavor ids.
        """
        for key in migration.fields:
            value = db_migration[key]
            if key == 'migration_type' and value is None:
                value = _determine_migration_type(db_migration)
            migration[key] = value
        migration._context = context
        migration.obj_reset_changes()
        return migration

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields unknown to peers older than object version 1.2."""
        super(Migration, self).obj_make_compatible(primitive, target_version)
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            if 'migration_type' in primitive:
                del primitive['migration_type']
                del primitive['hidden']

    def obj_load_attr(self, attrname):
        # Lazy-load defaults for fields an older sender may have omitted.
        if attrname == 'migration_type':
            # NOTE(danms): The only reason we'd need to load this is if
            # some older node sent us one. So, guess the type.
            self.migration_type = _determine_migration_type(self)
        elif attrname == 'hidden':
            self.hidden = False
        else:
            super(Migration, self).obj_load_attr(attrname)

    @base.remotable_classmethod
    def get_by_id(cls, context, migration_id):
        """Look a migration up by its primary key."""
        db_migration = db.migration_get(context, migration_id)
        return cls._from_db_object(context, cls(), db_migration)

    @base.remotable_classmethod
    def get_by_instance_and_status(cls, context, instance_uuid, status):
        """Look a migration up by instance UUID and status."""
        db_migration = db.migration_get_by_instance_and_status(
            context, instance_uuid, status)
        return cls._from_db_object(context, cls(), db_migration)

    @base.remotable
    def create(self):
        """Persist this migration; it must not already have an id."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        db_migration = db.migration_create(self._context, updates)
        self._from_db_object(self._context, self, db_migration)

    @base.remotable
    def save(self):
        """Write the changed fields (never the id) back to the database."""
        updates = self.obj_get_changes()
        updates.pop('id', None)
        db_migration = db.migration_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_migration)
        self.obj_reset_changes()

    @property
    def instance(self):
        # Fetched on demand from the DB; not a stored field of this object.
        return objects.Instance.get_by_uuid(self._context, self.instance_uuid)
class MigrationList(base.ObjectListBase, base.NovaObject):
    """A versioned list of Migration objects."""
    # Version 1.0: Initial version
    #              Migration <= 1.1
    # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute
    # Version 1.2: Migration version 1.2
    VERSION = '1.2'

    fields = {
        'objects': fields.ListOfObjectsField('Migration'),
    }
    # Maps each list version to the Migration (child) version it carries.
    child_versions = {
        '1.0': '1.1',
        # NOTE(danms): Migration was at 1.1 before we added this
        '1.1': '1.1',
        '1.2': '1.2',
    }

    @base.remotable_classmethod
    def get_unconfirmed_by_dest_compute(cls, context, confirm_window,
                                        dest_compute, use_slave=False):
        """List unconfirmed migrations targeting *dest_compute*."""
        db_migrations = db.migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute, use_slave=use_slave)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)

    @base.remotable_classmethod
    def get_in_progress_by_host_and_node(cls, context, host, node):
        """List in-progress migrations for a given host/node pair."""
        db_migrations = db.migration_get_in_progress_by_host_and_node(
            context, host, node)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)

    @base.remotable_classmethod
    def get_by_filters(cls, context, filters):
        """List migrations matching an arbitrary filter dict."""
        db_migrations = db.migration_get_all_by_filters(context, filters)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Delegate to kafka-run-class.sh next to this script. Quote the expanded
# paths so the launcher still works when the install directory contains
# spaces; "$@" forwards the user's arguments unchanged.
exec "$(dirname "$0")/kafka-run-class.sh" org.apache.kafka.tools.JmxTool "$@"
{
"gnirps":[
"boot",
"framework"
]
} | json | github | https://github.com/spring-projects/spring-boot | core/spring-boot-test/src/test/resources/org/springframework/boot/test/json/different.json |
#!/usr/bin/env python
#
# Tests for dakota_utils.models.hydrotrend.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.models.hydrotrend import HydroTrend
def setup_module():
    """Create a scratch directory, chdir into it, and build a shared model."""
    print('HydroTrend tests:')
    work_dir = tempfile.mkdtemp()
    os.environ['_test_hydrotrend_dir'] = work_dir
    os.chdir(work_dir)
    global h
    h = HydroTrend()

def teardown_module():
    """Remove the scratch directory created by setup_module()."""
    shutil.rmtree(os.environ['_test_hydrotrend_dir'])
def test_HydroTrend_no_arguments():
    """Default construction creates both the input and output directories."""
    for created_dir in (h.input_dir, h.output_dir):
        assert_true(os.path.exists(created_dir))

def test_HydroTrend_set_input_dir():
    """The input directory can be overridden on init."""
    os.chdir(os.environ['_test_hydrotrend_dir'])
    custom_dir = '__hydro_in'
    model = HydroTrend(custom_dir)
    assert_equal(model.input_dir, custom_dir)

def test_HydroTrend_get_input_dir():
    """The default input directory is named HYDRO_IN."""
    assert_equal(os.path.basename(h.input_dir), 'HYDRO_IN')

def test_HydroTrend_set_output_dir():
    """The output directory can be overridden on init."""
    os.chdir(os.environ['_test_hydrotrend_dir'])
    custom_dir = '__hydro_out'
    model = HydroTrend(None, custom_dir)
    assert_equal(model.output_dir, custom_dir)

def test_HydroTrend_get_output_dir():
    """The default output directory is named HYDRO_OUTPUT."""
    assert_equal(os.path.basename(h.output_dir), 'HYDRO_OUTPUT')
def test_HydroTrend_get_input_file():
    """The default input file is HYDRO.IN."""
    assert_equal(h.input_file, 'HYDRO.IN')

def test_HydroTrend_set_input_file():
    """The input file name is writable."""
    new_name = '__hydro.in'
    h.input_file = new_name
    assert_equal(h.input_file, new_name)

def test_HydroTrend_get_input_template():
    """The default input template is HYDRO.IN.template."""
    assert_equal(h.input_template, 'HYDRO.IN.template')

def test_HydroTrend_set_input_template():
    """The input template name is writable."""
    new_name = '__hydro.in.template'
    h.input_template = new_name
    assert_equal(h.input_template, new_name)

def test_HydroTrend_get_hypsometry_file():
    """The default hypsometry file is HYDRO0.HYPS."""
    assert_equal(h.hypsometry_file, 'HYDRO0.HYPS')

def test_HydroTrend_set_hypsometry_file():
    """The hypsometry file name is writable."""
    new_name = '__hydro0.hyps'
    h.hypsometry_file = new_name
    assert_equal(h.hypsometry_file, new_name)

def test_HydroTrend_get_output_files():
    """The default output files setting is HYDROASCII.QS."""
    # NOTE(review): ('HYDROASCII.QS') is a plain string, not a 1-tuple --
    # a trailing comma would be needed for a real tuple. Confirm which the
    # HydroTrend class actually stores before "fixing" either side.
    expected = ('HYDROASCII.QS')
    assert_equal(h.output_files, expected)

def test_HydroTrend_set_output_files():
    """The output files setting is writable."""
    new_names = ('foo', 'bar', 'baz')
    h.output_files = new_names
    assert_equal(h.output_files, new_names)
def test_get_response_statistic():
    """The default response statistic is 'mean'."""
    assert_equal(h.response_statistic, 'mean')

def test_set_response_statistic():
    """The response statistic is writable."""
    new_stat = 'sum'
    h.response_statistic = new_stat
    assert_equal(h.response_statistic, new_stat)

@raises(TypeError)
def test_load_zero_arguments():
    """load() with no argument raises TypeError."""
    h.load()

def test_load_does_not_exist():
    """load() returns None for a nonexistent output file."""
    assert_is_none(h.load('vfnqeubnuen.f'))
#!/bin/sh
#
# Copyright (c) 2007 Eric Wong
#
test_description='git svn useSvnsyncProps test'
. ./lib-git-svn.sh

# Import the prepared svnsync dump and fetch three projections of it
# (bar, dir, and dir/a/b/c/d/e) with svn.useSvnsyncProps enabled, so
# git-svn reads the svn:sync-from-* revision properties.
test_expect_success 'load svnsync repo' '
	svnadmin load -q "$rawsvnrepo" < "$TEST_DIRECTORY"/t9111/svnsync.dump &&
	git svn init --minimize-url -R arr -i bar "$svnrepo"/bar &&
	git svn init --minimize-url -R argh -i dir "$svnrepo"/dir &&
	git svn init --minimize-url -R argh -i e "$svnrepo"/dir/a/b/c/d/e &&
	git config svn.useSvnsyncProps true &&
	git svn fetch --all
	'

# UUID and URLs recorded in the dump; the git-svn-id trailers must
# reference the original (mayonaise) repository, not the local mirror.
uuid=161ce429-a9dd-4828-af4a-52023f968c89

bar_url=http://mayonaise/svnrepo/bar
test_expect_success 'verify metadata for /bar' "
	git cat-file commit refs/remotes/bar >actual &&
	grep '^git-svn-id: $bar_url@12 $uuid$' actual &&
	git cat-file commit refs/remotes/bar~1 >actual &&
	grep '^git-svn-id: $bar_url@11 $uuid$' actual &&
	git cat-file commit refs/remotes/bar~2 >actual &&
	grep '^git-svn-id: $bar_url@10 $uuid$' actual &&
	git cat-file commit refs/remotes/bar~3 >actual &&
	grep '^git-svn-id: $bar_url@9 $uuid$' actual &&
	git cat-file commit refs/remotes/bar~4 >actual &&
	grep '^git-svn-id: $bar_url@6 $uuid$' actual &&
	git cat-file commit refs/remotes/bar~5 >actual &&
	grep '^git-svn-id: $bar_url@1 $uuid$' actual
	"

e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
	git cat-file commit refs/remotes/e >actual &&
	grep '^git-svn-id: $e_url@1 $uuid$' actual
	"

dir_url=http://mayonaise/svnrepo/dir
test_expect_success 'verify metadata for /dir' "
	git cat-file commit refs/remotes/dir >actual &&
	grep '^git-svn-id: $dir_url@2 $uuid$' actual &&
	git cat-file commit refs/remotes/dir~1 >actual &&
	grep '^git-svn-id: $dir_url@1 $uuid$' actual
	"

test_done
#! /usr/bin/env python3
#----------------------------------------------------------------------------
# Name: CreateTideData.py
# Purpose: Wrapper for XTide to generate fbm boundary data
# Author: J Park
#----------------------------------------------------------------------------
#
import os
import argparse
#----------------------------------------------------------------------------
# Main module
#----------------------------------------------------------------------------
def main():
    '''Generate tide boundary data by wrapping the XTide `tide` client.

    See XTide http://www.flaterco.com/xtide/
    http://manpages.ubuntu.com/manpages/trusty/man1/xtide.1.html
    sudo apt-get install xtide

    tide options used below:
      -l  harmonic constants (station) name
      -b  begin: "1990-01-01 00:00"
      -e  end:   "1990-01-03 00:00"
      -mm output times as: 2002-02-06 4:56 PM EST
      -fc .csv output
      -um units meters
      -s  output interval for -mm or -mr modes as HH:MM
      -o  output filename (append mode)

    For each station a <Station>_<beginDate>_<endDate>.csv file is
    written to args.outputDirectory with "Time, WL.(m)" rows, optionally
    demeaned over the projection window.
    '''
    args = ParseCmdLine()

    for station in args.stationNames:
        # Output name: station text before the first comma, spaces -> '_',
        # plus the begin/end dates (text before the first space).
        outFile = args.outputDirectory +\
                  station[:station.find(',')].replace(' ', '_') +\
                  '_' + args.begin[:args.begin.find(' ')] +\
                  '_' + args.end[:args.end.find(' ')] + '.csv'
        tmpFile = outFile.replace('.csv', '.tmp')

        if args.DEBUG_ALL:
            print(outFile)

        command_line = 'tide -l "' + station + '" -b "' + args.begin +\
                       '" -e "' + args.end + '" -mm -fc -um -s ' +\
                       args.interval + ' -o ' + tmpFile

        if args.DEBUG:
            print(command_line)

        # Run XTide; it appends raw CSV rows to the temporary file.
        # (subprocess.run would be preferable, but the simple shell
        # invocation is kept for compatibility with existing usage.)
        os.system(command_line)

        # Read the raw XTide output, then delete the temporary file.
        # os.remove replaces the former os.system('rm ...'): portable
        # and safe for paths containing shell metacharacters.
        with open(tmpFile, 'r') as fd:
            lines = fd.readlines()
        os.remove(tmpFile)

        if not lines:
            # tide produced no output (bad station name, bad dates...);
            # skip instead of dividing by zero below.
            print('No data returned for station: ' + station)
            continue

        # Optionally compute the mean water level over the whole window
        # so it can be subtracted from every sample.
        mean = 0
        if args.removeMean:
            for line in lines:
                mean += float(line.split(',')[3])
            mean = mean / len(lines)
            print('Mean: ', mean)

        # Reformat: drop the station-name column (words[0]); keep
        # "date time, level" with levels rounded to 3 decimals (mm).
        buff = []
        for line in lines:
            words = line.split(',')
            DateTime = words[1] + ' ' + words[2]
            data = str(round(float(words[3]) - mean, 3))
            buff.append(DateTime + ', ' + data + '\n')

        with open(outFile, 'w') as fd:
            if args.removeMean:
                fd.write('Time, WL.(m).demeaned\n')
            else:
                fd.write('Time, WL.(m)\n')
            fd.writelines(buff)
#--------------------------------------------------------------
#
#--------------------------------------------------------------
def ParseCmdLine():
    '''Define and parse command line arguments for CreateTideData.

    Returns the argparse.Namespace.  Normalizes outputDirectory to end
    with '/' and raises Exception if the directory does not exist.
    '''
    # Default XTide station (harmonic constants) names.  Commented-out
    # entries are additional Florida Bay stations that can be enabled.
    StationNames = [
        #'Flamingo, Florida Bay, Florida',
        'Cape Sable, East Cape, Florida',
        'Long Key, western end, Florida',
        'Lignumvitae Key, NE side, Florida Bay, Florida',
        'Snake Creek, Hwy. 1 bridge, Windley Key, Florida',
        'Tavernier Creek, Hwy. 1 bridge, Hawk Channel, Florida',
        #'Point Charles, Key Largo, Florida',
        'Garden Cove, Key Largo, Florida',
        'Little Card Sound bridge, Florida' ] #,
        #'Main Key, Barnes Sound, Florida',
        #'Manatee Creek, Manatee Bay, Barnes Sound, Florida',
        #'Shell Key, northwest side, Lignumvitae Basin, Florida',
        #'Yacht Harbor, Cowpens Anchorage, Plantation Key, Florida',
        #'East Key, southern end, Florida Bay, Florida',
        #'Crane Keys, north side, Florida Bay, Florida' ]

    parser = argparse.ArgumentParser(description='CreateTideData')

    parser.add_argument('-b', '--begin',
                        dest='begin', type=str,
                        action='store',
                        default='1990-01-01 00:00',
                        help='start date time')

    parser.add_argument('-e', '--end',
                        dest='end', type=str,
                        action='store',
                        default='2021-01-02 00:00',
                        help='End date time')

    parser.add_argument('-i', '--interval',
                        dest='interval', type=str,
                        action='store',
                        default='01:00',
                        help='Projection interval HH:MM')

    parser.add_argument('-od', '--outputDirectory',
                        dest='outputDirectory', type=str,
                        action='store',
                        default='./',
                        help='Directory to write outputs.')

    # NOTE(review): -s from the command line yields a single string, yet
    # main() iterates stationNames -- a string would be iterated
    # character by character.  Only the list default works; confirm
    # intended usage before passing -s.
    parser.add_argument('-s', '--stationNames',
                        dest='stationNames', type=str,
                        action='store',
                        default=StationNames,
                        help='List of Xtide station names.')

    # NOTE(review): store_true combined with default=True means these
    # two flags can never be turned off from the command line.
    parser.add_argument('-rm', '--removeMean',
                        dest='removeMean',
                        action='store_true', default=True)

    parser.add_argument('-D', '--DEBUG',
                        dest='DEBUG',
                        action='store_true', default=True)

    parser.add_argument('-DA', '--DEBUG_ALL',
                        dest='DEBUG_ALL',
                        action='store_true', default=False)

    args = parser.parse_args()

    # Bug fix: this branch previously read the undefined bare name
    # `outputDirectory`, raising NameError whenever the user-supplied
    # path lacked a trailing '/'.
    if args.outputDirectory[-1] != '/':
        args.outputDirectory = args.outputDirectory + '/'

    if not os.path.exists(args.outputDirectory):
        raise Exception('Output directory not accessible: ',
                        args.outputDirectory)

    return args
#----------------------------------------------------------------------------
# Provide for cmd line invocation: not executed on import
#----------------------------------------------------------------------------
if __name__ == "__main__":
    main()
{
"format_version": "1.0",
"provider_schemas": {
"registry.terraform.io/hashicorp/test": {
"provider": {
"version": 0,
"block": {
"attributes": {
"region": {
"description_kind": "plain",
"optional": true,
"type": "string"
}
},
"description_kind": "plain"
}
},
"resource_schemas": {
"test_instance": {
"version": 0,
"block": {
"attributes": {
"ami": {
"type": "string",
"optional": true,
"description_kind": "plain"
},
"id": {
"type": "string",
"optional": true,
"computed": true,
"description_kind": "plain"
},
"volumes": {
"nested_type": {
"nesting_mode": "list",
"attributes": {
"size": {
"type": "string",
"required": true,
"description_kind": "plain"
},
"mount_point": {
"type": "string",
"required": true,
"description_kind": "plain"
}
}
},
"description_kind": "plain",
"optional": true
}
},
"description_kind": "plain"
}
}
}
}
}
} | json | github | https://github.com/hashicorp/terraform | internal/command/testdata/providers-schema/basic/output.json |
from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer, reactor
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
def get_web_root(wb, datadir_path, bitcoind_getinfo_var, stop_event=variable.Event()):
    """Build and return the twisted.web resource tree for p2pool's HTTP
    status interface.

    wb -- the work-bringer/worker-interface object (provides node, local
          rates, stale counts, share hashes, fee/donation settings)
    datadir_path -- directory for persisted state ('stats', 'graph_db')
    bitcoind_getinfo_var -- variable.Variable holding bitcoind getinfo
    stop_event -- event watched to cancel the periodic logging loops

    Legacy JSON endpoints hang directly off the root; newer ones live
    under /web; /static serves the bundled web UI.
    """
    node = wb.node
    start_time = time.time()

    web_root = resource.Resource()

    def get_users():
        # Map payout address -> fraction of the current payout weight,
        # over at most the last 720 shares of the chain.
        height, last = node.tracker.get_height_and_last(node.best_share_var.value)
        weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
        res = {}
        for script in sorted(weights, key=lambda s: weights[s]):
            res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
        return res

    def get_current_scaled_txouts(scale, trunc=0):
        # Scale the current payout map so the values sum to `scale`.
        # Payouts below `trunc` are pooled and awarded to one of the
        # small recipients chosen by weighted lottery, so dust is not
        # simply discarded.  Any rounding shortfall is added to a
        # weighted-random recipient at the end.
        txouts = node.get_current_txouts()
        total = sum(txouts.itervalues())
        results = dict((script, value*scale//total) for script, value in txouts.iteritems())
        if trunc > 0:
            total_random = 0
            random_set = set()
            for s in sorted(results, key=results.__getitem__):
                if results[s] >= trunc:
                    break
                total_random += results[s]
                random_set.add(s)
            if total_random:
                winner = math.weighted_choice((script, results[script]) for script in random_set)
                for script in random_set:
                    del results[script]
                results[winner] = total_random
        if sum(results.itervalues()) < int(scale):
            results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
        return results

    def get_patron_sendmany(total=None, trunc='0.01'):
        # JSON for bitcoind's `sendmany`: address -> BTC amount, with
        # `total` BTC distributed proportionally to current payouts.
        if total is None:
            return 'need total argument. go to patron_sendmany/<TOTAL>'
        total = int(float(total)*1e8)
        trunc = int(float(trunc)*1e8)
        return json.dumps(dict(
            (bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
            for script, value in get_current_scaled_txouts(total, trunc).iteritems()
            if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
        ))

    def get_global_stats():
        # Pool-wide statistics, averaged over last hour of shares.
        # Returns None until the share chain has at least 10 entries.
        if node.tracker.get_height(node.best_share_var.value) < 10:
            return None
        lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)

        nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
        stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
        diff = bitcoin_data.target_to_difficulty(wb.current_work.value['bits'].target)
        return dict(
            pool_nonstale_hash_rate=nonstale_hash_rate,
            pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
            pool_stale_prop=stale_prop,
            min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
            network_block_difficulty=diff,
            network_hashrate=(diff * 2**32 // node.net.PARENT.BLOCK_PERIOD),
        )

    def get_local_stats():
        # Statistics for this node's own miners over the last hour:
        # share counts by fate (good/orphan/doa), hash rates, efficiency
        # relative to pool-average staleness, peer counts, uptime.
        # Returns None until the share chain has at least 10 entries.
        if node.tracker.get_height(node.best_share_var.value) < 10:
            return None
        lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)

        global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)

        my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
        my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
        my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
        my_share_count = my_unstale_count + my_orphan_count + my_doa_count
        my_stale_count = my_orphan_count + my_doa_count

        my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None

        # Work attributable to our shares over the interval, divided by
        # the wall-clock span between the interval's end timestamps.
        my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
            for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
            if share.hash in wb.my_share_hashes)
        actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
            node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
        share_att_s = my_work / actual_time

        miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
        (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()

        miner_last_difficulties = {}
        for addr in wb.last_work_shares.value:
            miner_last_difficulties[addr] = bitcoin_data.target_to_difficulty(wb.last_work_shares.value[addr].target)

        return dict(
            my_hash_rates_in_last_hour=dict(
                note="DEPRECATED",
                nonstale=share_att_s,
                rewarded=share_att_s/(1 - global_stale_prop),
                actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
            ),
            my_share_counts_in_last_hour=dict(
                shares=my_share_count,
                unstale_shares=my_unstale_count,
                stale_shares=my_stale_count,
                orphan_stale_shares=my_orphan_count,
                doa_stale_shares=my_doa_count,
            ),
            my_stale_proportions_in_last_hour=dict(
                stale=my_stale_prop,
                orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
                dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
            ),
            miner_hash_rates=miner_hash_rates,
            miner_dead_hash_rates=miner_dead_hash_rates,
            miner_last_difficulties=miner_last_difficulties,
            efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
            efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
            peers=dict(
                incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
                outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
            ),
            shares=dict(
                total=shares,
                orphan=stale_orphan_shares,
                dead=stale_doa_shares,
            ),
            uptime=time.time() - start_time,
            attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
            attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
            block_value=node.bitcoind_work.value['subsidy']*1e-8,
            warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, bitcoind_getinfo_var.value, node.bitcoind_work.value),
            donation_proportion=wb.donation_percentage/100,
            version=p2pool.__version__,
            protocol_version=p2p.Protocol.VERSION,
            fee=wb.worker_fee,
        )

    class WebInterface(deferred_resource.DeferredResource):
        """Twisted resource wrapping a callable.  Extra URL path segments
        are accumulated via getChild() and passed as positional args;
        results are JSON-encoded unless another mime_type is given.
        CORS is opened to any origin."""
        def __init__(self, func, mime_type='application/json', args=()):
            deferred_resource.DeferredResource.__init__(self)
            self.func, self.mime_type, self.args = func, mime_type, args

        def getChild(self, child, request):
            # Each path segment becomes one more positional argument.
            return WebInterface(self.func, self.mime_type, self.args + (child,))

        @defer.inlineCallbacks
        def render_GET(self, request):
            request.setHeader('Content-Type', self.mime_type)
            request.setHeader('Access-Control-Allow-Origin', '*')
            res = yield self.func(*self.args)
            defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)

    def decent_height():
        # Clamp lookbehind to at most 720 shares for rate calculations.
        return min(node.tracker.get_height(node.best_share_var.value), 720)

    # --- legacy top-level JSON endpoints ---
    web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
    web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
    web_root.putChild('users', WebInterface(get_users))
    web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
        p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
    web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
    web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
    web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
    web_root.putChild('global_stats', WebInterface(get_global_stats))
    web_root.putChild('local_stats', WebInterface(get_local_stats))
    web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
    web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
    # Ping every peer 3 times and report the best round-trip in ms
    # (None for peers that failed to answer).
    web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
        dict([(a, (yield b)) for a, b in
            [(
                '%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
                defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
                    min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
                ))()
            ) for peer in list(node.p2p_node.peers.itervalues())]
        ])
    ))))
    web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
    web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
    # Shares from the last 24h whose proof-of-work also met the bitcoin
    # block target, i.e. blocks found by the pool.
    web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
        ts=s.timestamp,
        hash='%064x' % s.header_hash,
        number=pack.IntType(24).unpack(s.share_data['coinbase'][1:4]) if len(s.share_data['coinbase']) >= 4 else None,
        share='%064x' % s.hash,
    ) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
    web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
    web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))

    # --- newer endpoints live under /web ---
    new_root = resource.Resource()
    web_root.putChild('web', new_root)

    # Rolling 24h statistics log, persisted to <datadir>/stats and
    # appended every 5 minutes.
    stat_log = []
    if os.path.exists(os.path.join(datadir_path, 'stats')):
        try:
            with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
                stat_log = json.loads(f.read())
        except:
            log.err(None, 'Error loading stats:')
    def update_stat_log():
        # Drop entries older than 24h, append a fresh snapshot, persist.
        while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
            stat_log.pop(0)

        lookbehind = 3600//node.net.SHARE_PERIOD
        if node.tracker.get_height(node.best_share_var.value) < lookbehind:
            return None

        global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
        (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
        miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()

        stat_log.append(dict(
            time=time.time(),
            pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
            pool_stale_prop=global_stale_prop,
            local_hash_rates=miner_hash_rates,
            local_dead_hash_rates=miner_dead_hash_rates,
            shares=shares,
            stale_shares=stale_orphan_shares + stale_doa_shares,
            stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
            current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
            peers=dict(
                incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
                outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
            ),
            attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
            attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
            block_value=node.bitcoind_work.value['subsidy']*1e-8,
        ))

        with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
            f.write(json.dumps(stat_log))
    x = deferral.RobustLoopingCall(update_stat_log)
    x.start(5*60)
    stop_event.watch(x.stop)
    new_root.putChild('log', WebInterface(lambda: stat_log))

    def get_share(share_hash_str):
        # Full JSON description of one share, addressed by hex hash:
        # chain links, local bookkeeping, share payload, and the
        # embedded block header/gentx details.
        if int(share_hash_str, 16) not in node.tracker.items:
            return None
        share = node.tracker.items[int(share_hash_str, 16)]

        return dict(
            parent='%064x' % share.previous_hash,
            children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
            type_name=type(share).__name__,
            local=dict(
                verified=share.hash in node.tracker.verified.items,
                time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
                peer_first_received_from=share.peer_addr,
            ),
            share_data=dict(
                timestamp=share.timestamp,
                target=share.target,
                max_target=share.max_target,
                payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
                donation=share.share_data['donation']/65535,
                stale_info=share.share_data['stale_info'],
                nonce=share.share_data['nonce'],
                desired_version=share.share_data['desired_version'],
                absheight=share.absheight,
                abswork=share.abswork,
            ),
            block=dict(
                hash='%064x' % share.header_hash,
                header=dict(
                    version=share.header['version'],
                    previous_block='%064x' % share.header['previous_block'],
                    merkle_root='%064x' % share.header['merkle_root'],
                    timestamp=share.header['timestamp'],
                    target=share.header['bits'].target,
                    nonce=share.header['nonce'],
                ),
                gentx=dict(
                    hash='%064x' % share.gentx_hash,
                    coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
                    value=share.share_data['subsidy']*1e-8,
                    last_txout_nonce='%016x' % share.contents['last_txout_nonce'],
                ),
                other_transaction_hashes=['%064x' % x for x in share.get_other_tx_hashes(node.tracker)],
            ),
        )
    new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
    new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
    new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
    new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
    new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
    new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
    new_root.putChild('my_share_hashes', WebInterface(lambda: ['%064x' % my_share_hash for my_share_hash in wb.my_share_hashes]))
    def get_share_data(share_hash_str):
        # Raw packed bytes for one share ('' if unknown locally).
        if int(share_hash_str, 16) not in node.tracker.items:
            return ''
        share = node.tracker.items[int(share_hash_str, 16)]
        return p2pool_data.share_type.pack(share.as_share1a())
    new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
    new_root.putChild('currency_info', WebInterface(lambda: dict(
        symbol=node.net.PARENT.SYMBOL,
        block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
        address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
        tx_explorer_url_prefix=node.net.PARENT.TX_EXPLORER_URL_PREFIX,
    )))
    new_root.putChild('version', WebInterface(lambda: p2pool.__version__))

    # --- time-series graph database, persisted to <datadir>/graph_db ---
    hd_path = os.path.join(datadir_path, 'graph_db')
    hd_data = _atomic_read(hd_path)
    hd_obj = {}
    if hd_data is not None:
        try:
            hd_obj = json.loads(hd_data)
        except Exception:
            log.err(None, 'Error reading graph database:')
    dataview_descriptions = {
        'last_hour': graph.DataViewDescription(150, 60*60),
        'last_day': graph.DataViewDescription(300, 60*60*24),
        'last_week': graph.DataViewDescription(300, 60*60*24*7),
        'last_month': graph.DataViewDescription(300, 60*60*24*30),
        'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
    }
    # default_func entries migrate data persisted under older
    # single-value stream names into the newer multivalue streams.
    hd = graph.HistoryDatabase.from_obj({
        'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
        'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
        'local_share_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False,
            multivalues=True, multivalue_undefined_means_0=True,
            default_func=graph.make_multivalue_migrator(dict(good='local_share_hash_rate', dead='local_dead_share_hash_rate', orphan='local_orphan_share_hash_rate'),
                post_func=lambda bins: [dict((k, (v[0] - (sum(bin.get(rem_k, (0, 0))[0] for rem_k in ['dead', 'orphan']) if k == 'good' else 0), v[1])) for k, v in bin.iteritems()) for bin in bins])),
        'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
            multivalue_undefined_means_0=True),
        'current_payout': graph.DataStreamDescription(dataview_descriptions),
        'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
        'peers': graph.DataStreamDescription(dataview_descriptions, multivalues=True, default_func=graph.make_multivalue_migrator(dict(incoming='incoming_peers', outgoing='outgoing_peers'))),
        'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
        'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
        'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
            multivalue_undefined_means_0=True),
        'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
        'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
        'memory_usage': graph.DataStreamDescription(dataview_descriptions),
    }, hd_obj)
    x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
    x.start(100)
    stop_event.watch(x.stop)
    @wb.pseudoshare_received.watch
    def _(work, dead, user):
        # Record every pseudoshare (including dead ones) into the local
        # and, when attributed, per-miner hash-rate streams.
        t = time.time()
        hd.datastreams['local_hash_rate'].add_datum(t, work)
        if dead:
            hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
        if user is not None:
            hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
            if dead:
                hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
    @wb.share_received.watch
    def _(work, dead, share_hash):
        # Classify a submitted share immediately as good/dead, then
        # re-check after 200s and move its work between the good/dead/
        # orphan buckets once its fate in the share chain is known.
        t = time.time()
        if not dead:
            hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=work))
        else:
            hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=work))
        def later():
            res = node.tracker.is_child_of(share_hash, node.best_share_var.value)
            if res is None: res = False # share isn't connected to sharechain? assume orphaned
            if res and dead: # share was DOA, but is now in sharechain
                # move from dead to good
                hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=-work, good=work))
            elif not res and not dead: # share wasn't DOA, and isn't in sharechain
                # move from good to orphan
                hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=-work, orphan=work))
        reactor.callLater(200, later)
    @node.p2p_node.traffic_happened.watch
    def _(name, bytes):
        hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
    def add_point():
        # Periodic (5s) sampler feeding the pool-wide graph streams.
        if node.tracker.get_height(node.best_share_var.value) < 10:
            return None
        lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
        t = time.time()

        pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
        pool_total = sum(pool_rates.itervalues())
        hd.datastreams['pool_rates'].add_datum(t, pool_rates)

        current_txouts = node.get_current_txouts()
        hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
        miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
        current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
        hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))

        hd.datastreams['peers'].add_datum(t, dict(
            incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
            outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
        ))

        vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
        vs_total = sum(vs.itervalues())
        hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
        try:
            hd.datastreams['memory_usage'].add_datum(t, memory.resident())
        except:
            if p2pool.DEBUG:
                traceback.print_exc()
    x = deferral.RobustLoopingCall(add_point)
    x.start(5)
    stop_event.watch(x.stop)
    @node.bitcoind_work.changed.watch
    def _(new_work):
        hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
    new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))

    # Static web UI shipped alongside the entry-point script.
    web_root.putChild('static', static.File(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'web-static')))

    return web_root
#pylint: disable=C0111
#pylint: disable=W0621
from courseware.mock_youtube_server.mock_youtube_server import MockYoutubeServer
from lettuce import before, after, world
from django.conf import settings
import threading
from logging import getLogger
logger = getLogger(__name__)
@before.all
def setup_mock_youtube_server():
    """Start a MockYoutubeServer on 127.0.0.1:<settings.VIDEO_PORT> in a
    daemon thread before any lettuce scenario runs, and publish it as
    `world.youtube_server` so steps (and teardown) can reach it.
    """
    server_host = '127.0.0.1'

    server_port = settings.VIDEO_PORT

    address = (server_host, server_port)

    # Create the mock server instance
    server = MockYoutubeServer(address)
    logger.debug("Youtube server started at {} port".format(str(server_port)))
    # Artificial delay (seconds) added to responses -- presumably used by
    # scenarios exercising slow-video behavior; confirm against steps.
    server.time_to_response = 1  # seconds

    # Start the server running in a separate daemon thread
    # Because the thread is a daemon, it will terminate
    # when the main thread terminates.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    # Store the server instance in lettuce's world
    # so that other steps can access it
    # (and we can shut it down later)
    world.youtube_server = server
@after.all
def teardown_mock_youtube_server(total):
    """Shut down the mock YouTube server after all scenarios have run.
    `total` is lettuce's results summary (unused here).
    """
    # Stop the mock YouTube server and free up the port
    # (comment previously said "LTI server" -- a copy-paste leftover).
    world.youtube_server.shutdown()
"""
example_b.py
by Ted Morin
contains example code for model b from
10.1161/STROKEAHA.113.004506
2014 Intracranial Hemorrhage Among Patients With Atrial Fibrillation Anticoagulated With Warfarin or Rivaroxaban
"""
from model_b import model
# inputs: ['Age', 'Diastolic Blood Pressure', 'Platelets','Albumin','History of Coronary Heart Failure', 'History of Stroke or TIA', 'Asian Ethnicity', 'Black Ethnicity', 'Warfarin', 'Rivaroxaban']
# store patient data (provided by Susanna Stevens)
toscore = [
[66, 74, 322, 3.8, 1, 0, 0, 0, 1],
[85, 82, 205, 4.4, 0, 0, 0, 1, 0],
[65, 86, 176, 4.4, 0, 0, 0, 1, 0],
[69, 80, 346, 4.5, 1, 0, 0, 1, 0],
[81, 70, 200, 3.7, 1, 0, 0, 1, 0]
]
# score data
scores = []
for patient in toscore:
scores.append(model(*patient))
# show data
for i in range(len(scores)):
print "%.3f" % (float(scores[i])*100.)
"""
# correct beta values provided by Susanna Stevens
xbeta = [
-6.45853,
-5.03671,
-5.49558,
-6.56325,
-5.27846
]
# for checking differences against SAS provided values
print scores[0] - scores[1], xbeta[0] - xbeta[1]
print scores[1] - scores[2], xbeta[1] - xbeta[2]
print scores[2] - scores[3], xbeta[2] - xbeta[3]
print scores[3] - scores[4], xbeta[3] - xbeta[4]
print scores[4] - scores[0], xbeta[4] - xbeta[0]
print scores[1] - scores[4], xbeta[1] - xbeta[4]
# calculating xbar_sum
xbar_sum = 0
for i in range(len(xbeta)):
new_val = scores[i] - xbeta[i]
print new_val
xbar_sum += new_val
xbar_sum = xbar_sum/len(xbeta)
print xbar_sum
""" | unknown | codeparrot/codeparrot-clean | ||
"""
ESSArch is an open source archiving and digital preservation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-25 15:21
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the obsolete ``progress`` field from the ``ProcessStep`` model."""
    # Must run after the migration that added ProcessTask.attempt.
    dependencies = [
        ('WorkflowEngine', '0014_processtask_attempt'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='processstep',
            name='progress',
        ),
    ]
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in
order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict.::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
of `logging.statistics` for reporting, they first call
`extrapolate_statistics` (passing the whole `statistics` dict as the only
argument). This makes a deep copy of the statistics dict so that the
reporting tool can both iterate over it and even change it without harming
the original. But it also expands any functions in the dict by calling them.
For example, you might have a 'Current Time' entry in the namespace with the
value "lambda scope: time.time()". The "scope" parameter is the current
namespace dict (or record, if we're currently expanding one of those
instead), allowing you access to existing static entries. If you're truly
evil, you can even modify more than one entry at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
it all, and then transforms it to HTML for easy viewing. Each namespace gets
its own header and attribute table, plus an extra table for each collection.
This is NOT part of the statistics specification; other tools can format how
they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting
(such as '%.3f') to interpolate the value(s), or use a callable (such as
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
mentioned in the formatting dict is output unchanged.
Monitoring
----------
Although the HTML output takes pains to assign unique id's to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
"""
import logging
import os
import sys
import threading
import time
import cherrypy
from cherrypy._json import json
# ------------------------------- Statistics -------------------------------- #
# Create the shared stats repository on the logging module, preserving any
# dict another library may already have attached (see module docstring).
try:
    logging.statistics
except AttributeError:
    logging.statistics = {}
def extrapolate_statistics(scope):
    """Return an extrapolated copy of the given scope.

    Nested dicts are copied recursively, lists/tuples of records become
    lists of extrapolated record dicts, and any callable value is replaced
    by the result of calling it with the original scope dict.
    """
    expanded = {}
    for key, value in scope.copy().items():
        if isinstance(value, dict):
            expanded[key] = extrapolate_statistics(value)
        elif isinstance(value, (list, tuple)):
            expanded[key] = [extrapolate_statistics(item) for item in value]
        elif hasattr(value, '__call__'):
            expanded[key] = value(scope)
        else:
            expanded[key] = value
    return expanded
# -------------------- CherryPy Applications Statistics --------------------- #
# Application-wide namespace; reuse any dict another module already created.
appstats = logging.statistics.setdefault('CherryPy Applications', {})
appstats.update({
    'Enabled': True,
    # Derived values are lambdas taking the namespace dict; they are expanded
    # lazily by extrapolate_statistics() at report time.  The 'and/or' guards
    # avoid division by zero before the first request.
    'Bytes Read/Request': lambda s: (
        s['Total Requests'] and
        (s['Total Bytes Read'] / float(s['Total Requests'])) or
        0.0
    ),
    'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s),
    'Bytes Written/Request': lambda s: (
        s['Total Requests'] and
        (s['Total Bytes Written'] / float(s['Total Requests'])) or
        0.0
    ),
    'Bytes Written/Second': lambda s: (
        s['Total Bytes Written'] / s['Uptime'](s)
    ),
    'Current Time': lambda s: time.time(),
    'Current Requests': 0,
    'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s),
    'Server Version': cherrypy.__version__,
    'Start Time': time.time(),
    'Total Bytes Read': 0,
    'Total Bytes Written': 0,
    'Total Requests': 0,
    'Total Time': 0,
    # Note: 'Uptime' is itself called by the rate lambdas above as s['Uptime'](s).
    'Uptime': lambda s: time.time() - s['Start Time'],
    # Per-thread request records, maintained by StatsTool below.
    'Requests': {},
})
def proc_time(s):
    """Return seconds elapsed since the record's 'Start Time'."""
    started = s['Start Time']
    return time.time() - started
class ByteCountWrapper(object):
    """Wraps a file-like object, counting the number of bytes read.

    Every read method delegates to the wrapped ``rfile`` and adds the number
    of bytes actually returned to :attr:`bytes_read`.
    """

    def __init__(self, rfile):
        self.rfile = rfile
        # Running total of bytes handed to callers so far.
        self.bytes_read = 0

    def read(self, size=-1):
        data = self.rfile.read(size)
        self.bytes_read += len(data)
        return data

    def readline(self, size=-1):
        data = self.rfile.readline(size)
        self.bytes_read += len(data)
        return data

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        # Use the builtin next() so this works with any iterator, including
        # Python 3 file objects, which have no .next() method.  Previously
        # only a py2-style next() calling self.rfile.next() existed, which
        # broke iteration under Python 3.
        data = next(self.rfile)
        self.bytes_read += len(data)
        return data

    next = __next__  # Python 2 iterator-protocol alias
def average_uriset_time(s):
    """Mean processing time for a URI-set record; 0 when no samples yet."""
    if not s['Count']:
        return 0
    # Preserve the historical quirk: a zero mean is reported as int 0.
    return (s['Sum'] / s['Count']) or 0
def _get_threading_ident():
if sys.version_info >= (3, 3):
return threading.get_ident()
return threading._get_ident()
class StatsTool(cherrypy.Tool):
    """Record various information about the current request."""
    def __init__(self):
        # Attach record_stop() to the 'on_end_request' hook; record_start()
        # is invoked manually from _setup() below when the tool is enabled.
        cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)
    def _setup(self):
        """Hook this tool into cherrypy.request.
        The standard CherryPy request object will automatically call this
        method when the tool is "turned on" in config.

        Collection is skipped entirely when the namespace's 'Enabled' flag
        is off, so pausing stops new data without removing the tool.
        """
        if appstats.get('Enabled', False):
            cherrypy.Tool._setup(self)
            self.record_start()
    def record_start(self):
        """Record the beginning of a request."""
        request = cherrypy.serving.request
        if not hasattr(request.rfile, 'bytes_read'):
            # Wrap the input stream once so body bytes can be counted.
            request.rfile = ByteCountWrapper(request.rfile)
            request.body.fp = request.rfile
        r = request.remote
        appstats['Current Requests'] += 1
        appstats['Total Requests'] += 1
        # One record per worker thread, keyed by thread ident.
        appstats['Requests'][_get_threading_ident()] = {
            'Bytes Read': None,
            'Bytes Written': None,
            # Use a lambda so the ip gets updated by tools.proxy later
            'Client': lambda s: '%s:%s' % (r.ip, r.port),
            'End Time': None,
            # proc_time is a callable so in-flight requests show live elapsed time.
            'Processing Time': proc_time,
            'Request-Line': request.request_line,
            'Response Status': None,
            'Start Time': time.time(),
        }
    def record_stop(
            self, uriset=None, slow_queries=1.0, slow_queries_count=100,
            debug=False, **kwargs):
        """Record the end of a request.

        uriset: optional logical name grouping this URI for aggregate
            min/max/avg tracking in 'URI Set Tracking'.
        slow_queries: threshold (seconds) above which the request record is
            copied into the 'Slow Queries' list; falsy disables the list.
        slow_queries_count: maximum number of slow-query records kept.
        debug: when True, log each recorded stats dict.
        """
        resp = cherrypy.serving.response
        w = appstats['Requests'][_get_threading_ident()]
        r = cherrypy.request.rfile.bytes_read
        w['Bytes Read'] = r
        appstats['Total Bytes Read'] += r
        if resp.stream:
            # Streamed responses have no known length at this point.
            w['Bytes Written'] = 'chunked'
        else:
            cl = int(resp.headers.get('Content-Length', 0))
            w['Bytes Written'] = cl
            appstats['Total Bytes Written'] += cl
        # output_status is a byte string; decode it for the report.
        w['Response Status'] = \
            getattr(resp, 'output_status', resp.status).decode()
        w['End Time'] = time.time()
        p = w['End Time'] - w['Start Time']
        # Replace the live callable with the final elapsed time.
        w['Processing Time'] = p
        appstats['Total Time'] += p
        appstats['Current Requests'] -= 1
        if debug:
            cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')
        if uriset:
            # Aggregate min/max/count/sum per logical URI set; 'Avg' stays a
            # callable and is expanded at report time.
            rs = appstats.setdefault('URI Set Tracking', {})
            r = rs.setdefault(uriset, {
                'Min': None, 'Max': None, 'Count': 0, 'Sum': 0,
                'Avg': average_uriset_time})
            if r['Min'] is None or p < r['Min']:
                r['Min'] = p
            if r['Max'] is None or p > r['Max']:
                r['Max'] = p
            r['Count'] += 1
            r['Sum'] += p
        if slow_queries and p > slow_queries:
            # Bounded FIFO of the most recent slow requests.
            sq = appstats.setdefault('Slow Queries', [])
            sq.append(w.copy())
            if len(sq) > slow_queries_count:
                sq.pop(0)
# Register the tool so applications can enable it via 'tools.cpstats.on'.
cherrypy.tools.cpstats = StatsTool()
# ---------------------- CherryPy Statistics Reporting ---------------------- #
# Directory containing this module.
# NOTE(review): thisdir appears unused in this module -- confirm before removing.
thisdir = os.path.abspath(os.path.dirname(__file__))
# Sentinel distinguishing "no formatting entry" from an explicit None
# (None means "omit this column"; missing means "output unchanged").
missing = object()
def locale_date(v):
    """Format epoch seconds *v* as a locale-appropriate date/time (UTC)."""
    utc = time.gmtime(v)
    return time.strftime('%c', utc)
def iso_format(v):
    """Format epoch seconds *v* as 'YYYY-MM-DD HH:MM:SS' in UTC."""
    utc = time.gmtime(v)
    return time.strftime('%Y-%m-%d %H:%M:%S', utc)
def pause_resume(ns):
    """Return a renderer of Pause/Resume HTML forms for namespace *ns*.

    The returned callable takes the namespace's 'Enabled' flag and disables
    exactly one of the two buttons accordingly.
    """
    def _pause_resume(enabled):
        disabled = 'disabled="disabled" '
        if enabled:
            pause_disabled, resume_disabled = '', disabled
        else:
            pause_disabled, resume_disabled = disabled, ''
        return """
            <form action="pause" method="POST" style="display:inline">
            <input type="hidden" name="namespace" value="%s" />
            <input type="submit" value="Pause" %s/>
            </form>
            <form action="resume" method="POST" style="display:inline">
            <input type="hidden" name="namespace" value="%s" />
            <input type="submit" value="Resume" %s/>
            </form>
            """ % (ns, pause_disabled, ns, resume_disabled)
    return _pause_resume
class StatsPage(object):
    """Render logging.statistics as an HTML report (index) or JSON (data)."""

    # Mirrors the nesting of logging.statistics; values are %-format strings,
    # callables, or None (None means: omit that column/value entirely).
    formatting = {
        'CherryPy Applications': {
            'Enabled': pause_resume('CherryPy Applications'),
            'Bytes Read/Request': '%.3f',
            'Bytes Read/Second': '%.3f',
            'Bytes Written/Request': '%.3f',
            'Bytes Written/Second': '%.3f',
            'Current Time': iso_format,
            'Requests/Second': '%.3f',
            'Start Time': iso_format,
            'Total Time': '%.3f',
            'Uptime': '%.3f',
            'Slow Queries': {
                'End Time': None,
                'Processing Time': '%.3f',
                'Start Time': iso_format,
            },
            'URI Set Tracking': {
                'Avg': '%.3f',
                'Max': '%.3f',
                'Min': '%.3f',
                'Sum': '%.3f',
            },
            'Requests': {
                'Bytes Read': '%s',
                'Bytes Written': '%s',
                'End Time': None,
                'Processing Time': '%.3f',
                'Start Time': None,
            },
        },
        'CherryPy WSGIServer': {
            'Enabled': pause_resume('CherryPy WSGIServer'),
            'Connections/second': '%.3f',
            'Start time': iso_format,
        },
    }

    @cherrypy.expose
    def index(self):
        """Yield chunks of an HTML page reporting every namespace."""
        # Transform the raw data into pretty output for HTML
        yield """
<html>
<head>
    <title>Statistics</title>
<style>
th, td {
    padding: 0.25em 0.5em;
    border: 1px solid #666699;
}
table {
    border-collapse: collapse;
}
table.stats1 {
    width: 100%;
}
table.stats1 th {
    font-weight: bold;
    text-align: right;
    background-color: #CCD5DD;
}
table.stats2, h2 {
    margin-left: 50px;
}
table.stats2 th {
    font-weight: bold;
    text-align: center;
    background-color: #CCD5DD;
}
</style>
</head>
<body>
"""
        for title, scalars, collections in self.get_namespaces():
            yield """
<h1>%s</h1>
<table class='stats1'>
    <tbody>
""" % title
            # Scalars are laid out three (th, td) pairs per table row.
            # Start colnum at 2 so that a namespace with no scalar entries
            # neither raises NameError below nor emits a padding row.
            colnum = 2
            for i, (key, value) in enumerate(scalars):
                colnum = i % 3
                if colnum == 0:
                    yield """
        <tr>"""
                yield (
                    """
            <th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" %
                    vars()
                )
                if colnum == 2:
                    yield """
        </tr>"""
            # Pad the final, partially-filled row with empty cells.
            if colnum == 0:
                yield """
            <th></th><td></td>
            <th></th><td></td>
        </tr>"""
            elif colnum == 1:
                yield """
            <th></th><td></td>
        </tr>"""
            yield """
    </tbody>
</table>"""
            # One sub-table per collection (e.g. 'Requests', 'Slow Queries').
            for subtitle, headers, subrows in collections:
                yield """
<h2>%s</h2>
<table class='stats2'>
    <thead>
        <tr>""" % subtitle
                for key in headers:
                    yield """
            <th>%s</th>""" % key
                yield """
        </tr>
    </thead>
    <tbody>"""
                for subrow in subrows:
                    yield """
        <tr>"""
                    for value in subrow:
                        yield """
            <td>%s</td>""" % value
                    yield """
        </tr>"""
                yield """
    </tbody>
</table>"""
        yield """
</body>
</html>
"""

    def get_namespaces(self):
        """Yield (title, scalars, collections) for each namespace."""
        s = extrapolate_statistics(logging.statistics)
        for title, ns in sorted(s.items()):
            scalars = []
            collections = []
            ns_fmt = self.formatting.get(title, {})
            for k, v in sorted(ns.items()):
                fmt = ns_fmt.get(k, {})
                if isinstance(v, dict):
                    headers, subrows = self.get_dict_collection(v, fmt)
                    collections.append((k, ['ID'] + headers, subrows))
                elif isinstance(v, (list, tuple)):
                    headers, subrows = self.get_list_collection(v, fmt)
                    collections.append((k, headers, subrows))
                else:
                    format = ns_fmt.get(k, missing)
                    if format is None:
                        # Don't output this column.
                        continue
                    if hasattr(format, '__call__'):
                        v = format(v)
                    elif format is not missing:
                        v = format % v
                    scalars.append((k, v))
            yield title, scalars, collections

    def get_dict_collection(self, v, formatting):
        """Return ([headers], [rows]) for the given collection."""
        # E.g., the 'Requests' dict.
        headers = []
        vals = v.values()
        for record in vals:
            for k3 in record:
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if k3 not in headers:
                    headers.append(k3)
        headers.sort()
        subrows = []
        # One row per record, keyed and prefixed by the record id.
        for k2, record in sorted(v.items()):
            subrow = [k2]
            for k3 in headers:
                v3 = record.get(k3, '')
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if hasattr(format, '__call__'):
                    v3 = format(v3)
                elif format is not missing:
                    v3 = format % v3
                subrow.append(v3)
            subrows.append(subrow)
        return headers, subrows

    def get_list_collection(self, v, formatting):
        """Return ([headers], [subrows]) for the given collection."""
        # E.g., the 'Slow Queries' list.
        headers = []
        for record in v:
            for k3 in record:
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if k3 not in headers:
                    headers.append(k3)
        headers.sort()
        subrows = []
        for record in v:
            subrow = []
            for k3 in headers:
                v3 = record.get(k3, '')
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if hasattr(format, '__call__'):
                    v3 = format(v3)
                elif format is not missing:
                    v3 = format % v3
                subrow.append(v3)
            subrows.append(subrow)
        return headers, subrows

    # JSON endpoint is only exposed when cherrypy's json backend is available.
    if json is not None:
        @cherrypy.expose
        def data(self):
            """Return the extrapolated statistics dict serialized as JSON."""
            s = extrapolate_statistics(logging.statistics)
            cherrypy.response.headers['Content-Type'] = 'application/json'
            return json.dumps(s, sort_keys=True, indent=4).encode('utf-8')

    @cherrypy.expose
    def pause(self, namespace):
        """Stop data collection for *namespace* (POST only)."""
        logging.statistics.get(namespace, {})['Enabled'] = False
        raise cherrypy.HTTPRedirect('./')
    pause.cp_config = {'tools.allow.on': True,
                       'tools.allow.methods': ['POST']}

    @cherrypy.expose
    def resume(self, namespace):
        """Resume data collection for *namespace* (POST only)."""
        logging.statistics.get(namespace, {})['Enabled'] = True
        raise cherrypy.HTTPRedirect('./')
    resume.cp_config = {'tools.allow.on': True,
                        'tools.allow.methods': ['POST']}
package libnetwork
import (
"context"
"fmt"
"strconv"
"github.com/moby/moby/v2/daemon/internal/otelutil"
"github.com/moby/moby/v2/daemon/libnetwork/drivers/bridge"
"go.opentelemetry.io/otel/baggage"
)
// libnGWNetwork is the name of the bridge network created by
// createGWNetwork to provide external connectivity.
const libnGWNetwork = "docker_gwbridge"
// getPlatformOption returns the platform-specific endpoint option;
// on this platform there is none.
func getPlatformOption() EndpointOption {
	return nil
}
func (c *Controller) createGWNetwork() (*Network, error) {
ctx := baggage.ContextWithBaggage(context.TODO(), otelutil.MustNewBaggage(
otelutil.MustNewMemberRaw(otelutil.TriggerKey, "libnetwork.Controller.createGWNetwork"),
))
n, err := c.NewNetwork(ctx, "bridge", libnGWNetwork, "",
NetworkOptionDriverOpts(map[string]string{
bridge.BridgeName: libnGWNetwork,
bridge.EnableICC: strconv.FormatBool(false),
bridge.EnableIPMasquerade: strconv.FormatBool(true),
}),
NetworkOptionEnableIPv4(true),
NetworkOptionEnableIPv6(false),
)
if err != nil {
return nil, fmt.Errorf("error creating external connectivity network: %v", err)
}
return n, err
} | go | github | https://github.com/moby/moby | daemon/libnetwork/default_gateway_linux.go |
import { prettyDOM, render, screen } from "@testing-library/react";
import user from "@testing-library/user-event";
import * as React from "react";
import { Meta, Outlet, createRoutesStub } from "../../../index";
// Serialize the element's owning document (head + body) without ANSI colors,
// so it can be compared against inline snapshots.
const getDocumentHtmlForElement = (c: HTMLElement) => {
  return prettyDOM(c.ownerDocument, undefined, { highlight: false });
};
// Behavioral tests for the <Meta> component: how route `meta` exports (and
// their absence) translate into tags rendered into the document <head>.
describe("meta", () => {
  // A child route without a meta export inherits the nearest ancestor's meta.
  it("no meta export renders meta from nearest route meta in the tree", () => {
    let RoutesStub = createRoutesStub([
      {
        id: "root",
        path: "/",
        meta: ({ data }) => [
          { name: "description", content: data.description },
          { title: data.title },
        ],
        Component() {
          return (
            <>
              <Meta />
              <Outlet />
            </>
          );
        },
        children: [
          {
            index: true,
            Component() {
              return <div>Parent meta here!</div>;
            },
          },
        ],
      },
    ]);
    let { container } = render(
      <RoutesStub
        hydrationData={{
          loaderData: {
            root: {
              description: "This is a meta page",
              title: "Meta Page",
            },
          },
        }}
      />,
    );
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <meta
            content="This is a meta page"
            name="description"
          />
          <title>
            Meta Page
          </title>
        </head>
        <body>
          <div>
            <div>
              Parent meta here!
            </div>
          </div>
        </body>
      </html>"
    `);
  });
  // An explicit empty meta array must leave <head> untouched.
  it("empty meta array does not render a tag", () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: () => [],
        Component() {
          return (
            <>
              <Meta />
              <p>No meta here!</p>
            </>
          );
        },
      },
    ]);
    let { container } = render(<RoutesStub />);
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head />
        <body>
          <div>
            <p>
              No meta here!
            </p>
          </div>
        </body>
      </html>"
    `);
  });
  // A child meta() can read ancestor meta via the `matches` argument.
  it("meta from `matches` renders meta tags", () => {
    let RoutesStub = createRoutesStub([
      {
        id: "root",
        path: "/",
        meta: () => [{ charSet: "utf-8" }],
        Component() {
          return (
            <>
              <Meta />
              <Outlet />
            </>
          );
        },
        children: [
          {
            index: true,
            meta({ matches }) {
              let rootModule = matches.find((match) => match.id === "root");
              // @ts-expect-error
              let rootCharSet = rootModule?.meta.find((meta) => meta.charSet);
              return [rootCharSet, { title: "Child title" }];
            },
            Component() {
              return <p>Matches Meta</p>;
            },
          },
        ],
      },
    ]);
    let { container } = render(<RoutesStub />);
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <meta
            charset="utf-8"
          />
          <title>
            Child title
          </title>
        </head>
        <body>
          <div>
            <p>
              Matches Meta
            </p>
          </div>
        </body>
      </html>"
    `);
  });
  // The special { charSet } descriptor becomes <meta charset>.
  it("{ charSet } adds a <meta charset='utf-8' />", () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: () => [{ charSet: "utf-8" }],
        Component: Meta,
      },
    ]);
    let { container } = render(<RoutesStub />);
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <meta
            charset="utf-8"
          />
        </head>
        <body>
          <div />
        </body>
      </html>"
    `);
  });
  // The special { title } descriptor becomes a <title> element.
  it("{ title } adds a <title />", () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: () => [{ title: "Document Title" }],
        Component: Meta,
      },
    ]);
    let { container } = render(<RoutesStub />);
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <title>
            Document Title
          </title>
        </head>
        <body>
          <div />
        </body>
      </html>"
    `);
  });
  // OpenGraph descriptors render as <meta property>; undefined content is dropped.
  it("{ property: 'og:*', content: '*' } adds a <meta property='og:*' />", () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: () => [
          { property: "og:image", content: "https://picsum.photos/200/200" },
          { property: "og:type", content: undefined },
        ],
        Component: Meta,
      },
    ]);
    let { container } = render(<RoutesStub />);
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <meta
            content="https://picsum.photos/200/200"
            property="og:image"
          />
          <meta
            property="og:type"
          />
        </head>
        <body>
          <div />
        </body>
      </html>"
    `);
  });
  // JSON-LD descriptors render as a <script type="application/ld+json"> with
  // HTML-sensitive characters escaped as unicode sequences.
  it("{ 'script:ld+json': {} } adds a <script type='application/ld+json' />", () => {
    let jsonLd = {
      "@context": "http://schema.org",
      "@type": "Person",
      name: "Sonny Day",
      address: {
        "@type": "PostalAddress",
        streetAddress: "123 Sunset Cliffs Blvd",
        addressLocality: "San Diego",
        addressRegion: "CA",
        postalCode: "92107",
      },
      email: ["sonnyday@fancymail.com", "surfergal@veryprofessional.org"],
      bio: "A <b>surfer</b> & <em>coder</em>.",
    };
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: () => [
          {
            "script:ld+json": jsonLd,
          },
        ],
        Component: Meta,
      },
    ]);
    let { container } = render(<RoutesStub />);
    // For some reason, prettyDOM strips the script tag (maybe because of
    // dangerouslySetInnerHTML), so we just parse the HTML out into JSON and assert that way
    let scriptTagContents =
      container.querySelector('script[type="application/ld+json"]')
        ?.innerHTML || "{}";
    expect(JSON.parse(scriptTagContents)).toEqual(jsonLd);
    expect(scriptTagContents).toContain(
      "A \\u003cb\\u003esurfer\\u003c/b\\u003e \\u0026 \\u003cem\\u003ecoder\\u003c/em\\u003e.",
    );
  });
  // A { tagName: "link" } descriptor renders a <link> instead of <meta>.
  it("{ tagName: 'link' } adds a <link />", () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: () => [
          {
            tagName: "link",
            rel: "canonical",
            href: "https://website.com/authors/1",
          },
        ],
        Component: Meta,
      },
    ]);
    let { container } = render(<RoutesStub />);
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <link
            href="https://website.com/authors/1"
            rel="canonical"
          />
        </head>
        <body>
          <div />
        </body>
      </html>"
    `);
  });
  // Remounting <Meta> (via key change) must not mutate the loader-provided
  // meta descriptors; output stays identical across re-renders.
  it("does not mutate meta when using tagName", async () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        meta: ({ data }) => data?.meta,
        loader: () => ({
          meta: [
            {
              tagName: "link",
              rel: "canonical",
              href: "https://website.com/authors/1",
            },
          ],
        }),
        HydrateFallback: () => null,
        Component() {
          let [count, setCount] = React.useState(0);
          return (
            <>
              <button onClick={() => setCount(count + 1)}>
                {`Increment ${count}`}
              </button>
              <Meta key={count} />
            </>
          );
        },
      },
    ]);
    let { container } = render(<RoutesStub />);
    await screen.findByText("Increment 0");
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <link
            href="https://website.com/authors/1"
            rel="canonical"
          />
        </head>
        <body>
          <div>
            <button>
              Increment 0
            </button>
          </div>
        </body>
      </html>"
    `);
    user.click(screen.getByRole("button"));
    await screen.findByText("Increment 1");
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <link
            href="https://website.com/authors/1"
            rel="canonical"
          />
        </head>
        <body>
          <div>
            <button>
              Increment 1
            </button>
          </div>
        </body>
      </html>"
    `);
  });
  // When a route errors, meta() receives the error and can use it for the title.
  it("loader errors are passed to meta", () => {
    let RoutesStub = createRoutesStub([
      {
        path: "/",
        Component() {
          return (
            <>
              <Meta />
              <Outlet />
            </>
          );
        },
        children: [
          {
            id: "index",
            index: true,
            meta: ({ error }) => [
              {
                title: (error as Error)?.message || "Home",
              },
            ],
            Component() {
              return <h1>Page</h1>;
            },
            ErrorBoundary() {
              return <h1>Boundary</h1>;
            },
          },
        ],
      },
    ]);
    let { container } = render(
      <RoutesStub hydrationData={{ errors: { index: new Error("Oh no!") } }} />,
    );
    expect(getDocumentHtmlForElement(container)).toMatchInlineSnapshot(`
      "<html>
        <head>
          <title>
            Oh no!
          </title>
        </head>
        <body>
          <div>
            <h1>
              Boundary
            </h1>
          </div>
        </body>
      </html>"
    `);
  });
});
#!/usr/bin/env python3
import leveldb
import msgpack
import csv
from util.misc import Benchmark, open_file
# Every metadata row must provide at least these columns.
REQUIRED_KEYS = {'title', 'paper_id', 'date'}
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Creates a LevelDB of TSV metadata in infile")
    parser.add_argument('infile')
    parser.add_argument('leveldb_path')
    # How often Benchmark reports progress (rows between reports).
    parser.add_argument('--benchmark-freq', default=100000, type=int)
    parser.add_argument('--delimiter', '-d', default='\t')
    args = parser.parse_args()
    # Oversized write buffer / block cache to speed up bulk ingestion.
    db = leveldb.LevelDB(args.leveldb_path,
                         write_buffer_size=100 << 20, # 100MB
                         block_cache_size=400 << 20) # 400MB
    with open_file(args.infile) as ifs:
        b = Benchmark(args.benchmark_freq)
        reader = csv.DictReader(ifs, delimiter=args.delimiter)
        for row in reader:
            if not REQUIRED_KEYS.issubset(row.keys()):
                # Dump the offending row before aborting the ingest.
                print(row)
                raise KeyError("Not all required keys present")
            # Key by paper_id; value is the msgpack-serialized row dict.
            db.Put(row["paper_id"].encode(), msgpack.packb(row))
            b.increment()
            b.print_freq()
    print(db.GetStats())
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client Templates
'''
from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_RECORD
class BibSwordTemplate:
'''
This class contains attributes and methods that allows to display all
information used by the BibSword web user interface. Theses informations
are form, validation or error messages
'''
    def __init__(self):
        '''No state to initialize: methods only format HTML strings.'''
#---------------------------------------------------------------------------
# BibSword WebSubmit Interface
#---------------------------------------------------------------------------
    def tmpl_display_submit_ack(self, remote_id, link):
        '''
        Generate the html code that displays the acknowledgement
        message after the submission of a record.
        @param remote_id: id of the record given by arXiv
        @param link: links to modify or consult submission
        @return: string containing the html code
        '''
        # NOTE(review): the 'link' parameter is never used below -- confirm
        # whether the manage-submission URL should come from it instead of
        # being hard-coded.
        html = ''
        html += '''<h1>Success !</h1>'''
        html += '''<p>The record has been successfully pushed to arXiv ! <br />''' \
                '''You will get an email once it will be accepted by ''' \
                '''arXiv moderator.</p>'''
        html += '''<p>The arXiv id of the submission is: <b>%s</b></p>''' % \
                remote_id
        # NOTE(review): the href below lacks a scheme, so browsers resolve it
        # as a relative path -- probably intended as http://www.arxiv.org/user.
        html += '''<p><a href="www.arxiv.org/user">Manage your submission</a></p>'''
        return html
#---------------------------------------------------------------------------
# BibSword Administrator Interface
#---------------------------------------------------------------------------
def tmpl_display_admin_page(self, submissions, first_row, last_row,
total_rows, is_prev, is_last, offset,
error_messages=None):
'''
format the html code that display the submission table
@param submissions: list of all submissions and their status
@return: html code to be displayed
'''
if error_messages == None:
error_messages = []
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
%(error_message)s
<input type="hidden" name="status" value="display_submission"/>
<input type="hidden" name="first_row" value="%(first_row)s"/>
<input type="hidden" name="last_row" value="%(last_row)s"/>
<input type="hidden" name="total_rows" value="%(total_rows)s" />
<input type="submit" name="submit" value="New submission"/><br/>
<br />
<input type="submit" name="submit" value="Refresh all"/><br/>
<br />
Display
<select name="offset">
<option value="5" %(selected_1)s>5</option>
<option value="10" %(selected_2)s>10</option>
<option value="25" %(selected_3)s>25</option>
<option value="50" %(selected_4)s>50</option>
<option value=%(total_rows)s %(selected_5)s>all</option>
</select>
rows per page <input type="submit" name="submit" value="Select" /><br />
<br />
<input type="submit" name="submit" value="First" %(is_prev)s/>
<input type="submit" name="submit" value="Prev" %(is_prev)s/>
Pages %(first_row)s - %(last_row)s / %(total_rows)s
<input type="submit" name="submit" value="Next" %(is_last)s/>
<input type="submit" name="submit" value="Last" %(is_last)s/><br/>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Submission state</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record number</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
</form>''' % {
'error_message': \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'first_row' : first_row,
'last_row' : last_row,
'total_rows' : total_rows,
'is_prev' : is_prev,
'is_last' : is_last,
'selected_1' : offset[0],
'selected_2' : offset[1],
'selected_3' : offset[2],
'selected_4' : offset[3],
'selected_5' : offset[4],
'submissions' : self.fill_submission_table(submissions)
}
return body
    def tmpl_display_remote_server_info(self, server_info):
        '''
        Display a table listing every configured property of one remote
        SWORD server (one "label / value" row per property).
        @param server_info: dict with keys server_id, server_name,
            server_host, username, password, email, realm, url_base_record
            and url_servicedocument
        @return: html code for the table containing infos
        '''
        # NOTE(review): the password is rendered in clear text here - this
        # page is presumably admin-only; confirm before exposing more widely.
        body = '''<table width="%(table_width)s">\n''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">ID</td>\n''' \
               ''' <td>%(server_id)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Name</td>\n''' \
               ''' <td>%(server_name)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Host</td>\n''' \
               ''' <td>%(server_host)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Username</td>\n''' \
               ''' <td>%(username)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Password</td>\n''' \
               ''' <td>%(password)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Email</td>\n''' \
               ''' <td>%(email)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Realm</td>\n''' \
               ''' <td>%(realm)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Record URL</td>\n''' \
               ''' <td>%(url_base_record)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">URL Servicedocument</td>\n'''\
               ''' <td>%(url_servicedocument)s</td>\n''' \
               ''' </tr>\n ''' \
               '''</table>''' % {
            'table_width' : '50%',
            'server_id' : server_info['server_id'],
            'server_name' : server_info['server_name'],
            'server_host' : server_info['server_host'],
            'username' : server_info['username'],
            'password' : server_info['password'],
            'email' : server_info['email'],
            'realm' : server_info['realm'],
            'url_base_record' : server_info['url_base_record'],
            'url_servicedocument': server_info['url_servicedocument']
        }
        return body
    def tmpl_display_remote_servers(self, remote_servers, id_record,
                                    error_messages):
        '''
        format the html code that display a dropdown list containing the
        servers
        @param self: reference to the current instance of the class
        @param remote_servers: list of dicts containing server's infos
        @param id_record: report number to pre-fill in the text field
        @param error_messages: list of errors to show above the form
        @return: string containing html code
        '''
        # The form posts back to /bibsword with status=select_server; the
        # dropdown rows themselves come from fill_dropdown_remote_servers().
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <input type="hidden" name="status" value="select_server"/>
        %(error_message)s
        <input type="submit" name="submit" value="Cancel" />
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Forward a record</h2>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">
                    <p>Enter the number of the report to submit: </p>
                </td>
                <td align="left" width="%(row_width)s">
                    <input type="text" name="id_record" size="20"
                        value="%(id_record)s"/>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">
                    <p>Select a remote server: </p>
                </td>
                <td align="left" width="%(row_width)s">
                    <select name="id_remote_server" size="1">
                        <option value="0">-- select a remote server --</option>
                        %(remote_server)s
                    </select>
                </td>
            </tr>
            <tr>
                <td colspan="2" align="center">
                    <input type="submit" value="Select" name="submit"/>
                </td>
            </tr>
        </table>
        </form>''' % {
            'error_message': \
                self.display_error_message_row(error_messages),
            'table_width' : '100%',
            'row_width' : '50%',
            'id_record' : id_record,
            'remote_server': \
                self.fill_dropdown_remote_servers(remote_servers)
        }
        return body
    def tmpl_display_collections(self, selected_server, server_infos,
                                 collections, id_record, recid, error_messages):
        '''
        format the html code that display the selected server, the informations
        about the selected server and a dropdown list containing the server's
        collections
        @param self: reference to the current instance of the class
        @param selected_server: dict containing selected server name and id
        @param server_infos: dict containing infos about selected server
        @param collections: list containing server's collections
        @param id_record: report number carried through as a hidden field
        @param recid: record id carried through as a hidden field
        @param error_messages: list of errors to show above the form
        @return: string containing html code
        '''
        # Posts back with status=select_collection; the "Modify server"
        # button lets the user step back to the server-selection page.
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <input type="hidden" name="status" value="select_collection"/>
        <input type="hidden" name="id_remote_server" value="%(id_server)s"/>
        <input type="hidden" name="id_record" value="%(id_record)s"/>
        <input type="hidden" name="recid" value="%(recid)s"/>
        %(error_message)s
        <input type="submit" name="submit" value="Cancel" />
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Remote server</h2></td>
            </tr>
            <tr>
                <td align="center" rowspan="2" valign="center">
                    <h2>%(server_name)s</h2>
                </td>
                <td align="left">
                    SWORD version: %(server_version)s
                </td>
            </tr>
            <tr>
                <td align="left">
                    Max upload size [Kb]: %(server_maxUpload)s
                </td>
            </tr>
            <tr>
                <td align="left" colspan="2">
                    <input type="submit" value="Modify server" name="submit"/>
                </td>
            </tr>
        </table>
        <p> </p>
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa"><h2>Collection</h2>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">Select a collection: </td>
                <td align="left" width="%(row_width)s">
                    <select name="id_collection" size="1">
                        <option value="0">-- select a collection --</option>
                        %(collection)s
                    </select>
                </td>
            </tr>
            <tr>
                <td align="center" colspan="2">
                    <input type="submit" value="Select" name="submit"/>
                </td>
            </tr>
        </table>
        </form>''' % {
            'table_width' : '100%',
            'row_width' : '50%',
            'error_message' : \
                self.display_error_message_row(error_messages),
            'id_server' : selected_server['id'],
            'server_name' : selected_server['name'],
            'server_version' : server_infos['version'],
            'server_maxUpload': server_infos['maxUploadSize'],
            'collection' : \
                self.fill_dropdown_collections(collections),
            'id_record' : id_record,
            'recid' : recid
        }
        return body
def tmpl_display_categories(self, selected_server, server_infos,
selected_collection, collection_infos,
primary_categories, secondary_categories,
id_record, recid, error_messages):
'''
format the html code that display the selected server, the informations
about the selected server, the selected collections, the informations
about the collection and a dropdown list containing the server's
primary and secondary categories
@param self: reference to the current instance of the class
@param selected_server: tuple containing selected server name and id
@param server_infos: tuple containing infos about selected server
@param selected_collection: selected collection
@param collection_infos: tuple containing infos about selected col
@param primary_categories: list of mandated categories for the col
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_primary_category"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version: %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb]: %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Collection</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(collection_name)s</h2>
</td>
<td align="left">
URL: %(collection_url)s
</td>
</tr>
<tr>
<td align="left">
Accepted media types:
<ul>%(collection_accept)s</ul>
</td>
</tr>
<tr>
<td align="left" colspan=2>
<input type="submit" value="Modify collection" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Mandatory category</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a mandated category: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_primary" size="1">
<option value="0">-- select a category --</option>
%(primary_categories)s
</select>
</td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Optional categories</h2>
</td>
</tr>
<td align="right" width="%(row_width)s">
<p>Select optional categories: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_categories" size="10" multiple>
%(secondary_categories)s
</select>
</td>
</tr>
</table>
<p> </p>
<center>
<input type="submit" value="Select" name="submit"/>
</center>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : self.display_error_message_row(
error_messages),
# hidden input
'id_server' : selected_server['id'],
'id_collection' : selected_collection['id'],
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload' : server_infos['maxUploadSize'],
'collection_name' : selected_collection['label'],
'collection_accept': ''.join([
'''<li>%(name)s </li>''' % {
'name': accept
} for accept in collection_infos['accept'] ]),
'collection_url' : selected_collection['url'],
'primary_categories' : self.fill_dropdown_primary(
primary_categories),
'secondary_categories': self.fill_dropdown_secondary(
secondary_categories)
}
return body
def tmpl_display_metadata(self, user, server, collection, primary,
categories, medias, metadata, id_record, recid,
error_messages):
'''
format a string containing every informations before a submission
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="check_submission"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_primary" value="%(id_primary)s"/>
<input type="hidden" name="id_categories" value="%(id_categories)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Destination</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="3" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
Collection: %(collection_name)s ( %(collection_url)s )
</td>
</tr>
<tr>
<td align="left">
Primary category: %(primary_name)s ( %(primary_url)s )
</td>
</tr>
%(categories)s
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify destination" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa">
<h2>Submitter</h2>
</td>
</tr>
<tr>
<td width="%(row_width)s">Name:</td>
<td><input type="text" name="author_name" size="100"
value="%(user_name)s"/></td>
</tr>
<tr>
<td>Email:</td>
<td><input type="text" name="author_email" size="100"
value="%(user_email)s"/></td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa"><h2>Media</h2></td>
</tr>
<tr><td colspan="4">%(medias)s%(media_help)s</td></tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="3" bgcolor="#e6e6fa"><h2>Metadata</h2> <font color="red"><b>Warning:</b> modification(s) will not be saved on the %(CFG_SITE_NAME)s</font>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Report Number<span style="color:#f00">*</span>:</p></td>
<td><input type="text" name="id" size="100" value="%(id)s"/></td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Title<span style="color:#f00">*</span>:</p></td>
<td><input type="text" name="title" size="100" value="%(title)s"/>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Summary<span style="color:#f00">*</span>:</p></td>
<td>
<textarea name="summary" rows="4" cols="100">%(summary)s
</textarea>
</td>
</tr>
%(contributors)s
%(journal_refs)s
%(report_nos)s
</table>
<p><font color="red">The fields having a * are mandatory</font></p>
<center>
<input type="submit" value="Submit" name="submit"/>
</center>
<form>''' % {
'table_width' : '100%',
'row_width' : '25%',
'error_message' : \
self.display_error_message_row(error_messages),
'CFG_SITE_NAME': CFG_SITE_NAME,
# hidden input
'id_server' : server['id'],
'id_collection' : collection['id'],
'id_primary' : primary['id'],
'id_categories' : self.get_list_id_categories(categories),
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : server['name'],
'collection_name' : collection['label'],
'collection_url' : collection['url'],
'primary_name' : primary['label'],
'primary_url' : primary['url'],
'categories' : self.fill_optional_category_list(categories),
#user
'user_name' : user['nickname'],
'user_email' : user['email'],
# media
'medias' : self.fill_media_list(medias, server['id']),
'media_help' : self.fill_arxiv_help_message(),
# metadata
'id' : metadata['id'],
'title' : metadata['title'],
'summary' : metadata['summary'],
'contributors' : self.fill_contributors_list(
metadata['contributors']),
'journal_refs' : self.fill_journal_refs_list(
metadata['journal_refs']),
'report_nos' : self.fill_report_nos_list(
metadata['report_nos'])
}
return body
    def tmpl_display_list_submission(self, submissions):
        '''
        Display the data of submitted records: a confirmation header plus
        one table row per submission (rows come from fill_submission_table).
        @param submissions: list of submission status dicts
        @return: html code for the confirmation page
        '''
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="7" bgcolor="#e6e6fa">
                    <h2>Document successfully submitted !</h2>
                </td>
            </tr>
            <tr>
                <td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Record id</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
            </tr>
            %(submissions)s
        </table>
        <a href=%(CFG_SITE_URL)s/bibsword>Return</a>
        </form>''' % {
            'table_width' : '100%',
            'submissions' : self.fill_submission_table(submissions),
            'CFG_SITE_URL' : CFG_SITE_URL
        }
        return body
#***************************************************************************
# Private functions
#***************************************************************************
    def display_error_message_row(self, error_messages):
        '''
        Render the given error messages as a red bullet list inside a
        table row; the header wording adapts to singular/plural.
        @param error_messages: list of error_messages to display
        @return: html code that display list of errors ('' when empty)
        '''
        # if no errors, return nothing
        if len(error_messages) == 0:
            return ''
        if len(error_messages) == 1:
            # display a generic header message (singular wording)
            body = '''
                <tr>
                    <td align="left" colspan=2>
                        <font color='red'>
                        <p> The following error was found: </p>
                        <ul>
            '''
        else:
            # display a generic header message (plural wording)
            body = '''
                <tr>
                    <td align="left" colspan=2>
                        <font color='red'>
                        <p> Following errors were found: </p>
                        <ul>
            '''
        # insert each error lines
        for error_message in error_messages:
            body = body + '''
                        <li>%(error)s</li>''' % {
                'error': error_message
            }
        # close the list, font and row opened by either header branch
        body = body + '''
                        </ul>
                    </font>
                </td>
            </tr>'''
        return body
def fill_submission_table(self, submissions):
'''
This method return the body of the submission state table. each
submissions given in parameters has one row
@param submissions: submission status list
@return: html table body
'''
return ''.join([
''' <tr>
<td>%(id_server)s: <a href="%(server_infos)s">
%(server_name)s</a></td>
<td>%(user_name)s <br/> %(user_email)s</td
<td>%(id_bibrec)s: <a href="%(cfg_site_url)s/%(CFG_SITE_RECORD)s/%(id_bibrec)s"
target="_blank">%(no_bibrec)s</a></td>
<td><a href="%(url_base_remote)s/%(id_remote)s" target="_blank">
%(id_remote)s</a></td>
<td>%(status)s</td>
<td><b>submission: </b> %(submission_date)s <br/>
<b>publication: </b> %(publication_date)s <br/>
<b>removal: </b> %(removal_date)s </td>
<td><b>media: </b> <a href="%(media_link)s" target="_blank">
%(media_link)s</a> <br/>
<b>metadata: </b> <a href="%(metadata_link)s" target="_blank">
%(metadata_link)s</a> <br />
<b>status: </b> <a href="%(status_link)s" target="_blank">
%(status_link)s</a></td>
</tr>''' % {
'id_server' : str(submission['id_server']),
'server_infos' : "%s/bibsword/remoteserverinfos?id=%s" % \
(CFG_SITE_URL, submission['id_server']),
'server_name' : str(submission['server_name']),
'user_name' : str(submission['user_name']),
'user_email' : str(submission['user_email']),
'id_bibrec' : str(submission['id_record']),
'no_bibrec' : str(submission['report_no']),
'id_remote' : str(submission['id_remote']),
'status' : str(submission['status']),
'submission_date' : str(submission['submission_date']),
'publication_date' : str(submission['publication_date']),
'removal_date' : str(submission['removal_date']),
'media_link' : str(submission['link_medias']),
'metadata_link' : str(submission['link_metadata']),
'status_link' : str(submission['link_status']),
'url_base_remote' : str(submission['url_base_remote']),
'cfg_site_url' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD
} for submission in submissions])
def fill_dropdown_remote_servers(self, remote_servers):
'''
This method fill a dropdown list of remote servers.
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s - %(host)s</option>''' % {
'id': str(remote_server['id']),
'name': remote_server['name'],
'host': remote_server['host']
} for remote_server in remote_servers])
def fill_dropdown_collections(self, collections):
'''
This method fill a dropdown list of collection.
@param collections: list of all collections with name - url
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': str(collection['id']),
'name': collection['label']
} for collection in collections])
def fill_dropdown_primary(self, primary_categories):
'''
This method fill the primary dropdown list with the data given in
parameter
@param primary_categories: list of 'url' 'name' tuples
@return: html code generated to display the list
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': primary_categorie['id'],
'name': primary_categorie['label']
} for primary_categorie in primary_categories])
def fill_dropdown_secondary(self, categories):
'''
This method fill a category list. This list is allows the multi-selection
or items. To proced to select more than one categorie through a browser
ctrl + clic
@param categories: list of all categories in the format name - url
@return: the html code that display each dropdown list
'''
if len(categories) == '':
return ''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': category['id'],
'name': category['label']
} for category in categories])
def fill_optional_category_list(self, categories):
'''
This method fill a table row that contains name and url of the selected
optional categories
@param self: reference to the current instance of the class
@param categories: list of tuples containing selected categories
@return: html code generated to display the list
'''
if len(categories) == 0:
return ''
else:
body = '<tr><td>'
body = body + ''.join([
'''<p>Category: %(category_name)s ( %(category_url)s )</p>'''%{
'category_name' : category['label'],
'category_url' : category['url']
} for category in categories
])
body = body + '</td></tr>'
return body
def fill_media_list(self, medias, id_server, from_websubmit=False):
'''
Concatenate the string that contains all informations about the medias
'''
text = ''
if id_server == 1:
media_type = self.format_media_list_by_type(medias)
text = '''<h2>Please select files you would like to push to arXiv:</h2>'''
for mtype in media_type:
text += '''<h3><b>%s: </b></h3>''' % mtype['media_type']
text += '''<blockquote>'''
for media in mtype['media_list']:
text += '''<input type='checkbox' name="media" value="%s" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])
text += "</blockquote>"
text += '''<h3>Upload</h3>'''
text += '''<blockquote>'''
text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''
if from_websubmit == False:
text += '''<input type="file" name="new_media" size="60"/>'''
return text
def fill_arxiv_help_message(self):
text = '''</blockquote><h3>Help</h3>'''
text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see:'''\
'''<ul>'''\
'''<li><a href="http://arxiv.org/help/submit" target="_blank">'''\
'''arXiv submission process</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_tex" target="_blank">'''\
'''arXiv TeX submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_docx" target="_blank">'''\
'''arXiv Docx submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_pdf" target="_blank">'''\
'''arXiv PDF submission</a></li>'''\
'''</ul></blockquote>'''
return text
def fill_contributors_list(self, contributors):
'''
This method display each contributors in the format of an editable input
text. This allows the user to modifie it.
@param contributors: The list of all contributors of the document
@return: the html code that display each dropdown list
'''
output = ''
is_author = True
for author in contributors:
nb_rows = 2
author_name = \
'''<LABEL for="name">Name: </LABEL><input type = "text" ''' \
'''name = "contributor_name" size = "100" value = "%s" ''' \
'''id="name"/>''' % author['name']
author_email = \
'''<LABEL for = "email">Email: </LABEL>''' \
'''<input type = "text" name = "contributor_email" ''' \
'''size = "100" value = "%s" id = "email"/>''' % author['email']
author_affiliations = []
for affiliation in author['affiliation']:
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL> ''' \
'''<input type="text" name = "contributor_affiliation" ''' \
'''size = "100" value = "%s" id = "affiliation"/>''' % \
affiliation
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL>''' \
'''<input type = "text" name = "contributor_affiliation" ''' \
'''size = "100" id = "affiliation"/>'''
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
if is_author:
output += '''<tr><td rowspan = "%s">Author: </td>''' % nb_rows
is_author = False
else:
output += '''<tr><td rowspan = "%s">Contributor: </td>''' % \
nb_rows
output += '''<td>%s</td></tr>''' % author_name
if author_email != '':
output += '''<tr><td>%s</td></tr>''' % author_email
for affiliation in author_affiliations:
output += '''<tr><td>%s</td></tr>''' % affiliation
output += \
'''<input type = "hidden" name = "contributor_affiliation" ''' \
'''value = "next"/>'''
return output
def fill_journal_refs_list(self, journal_refs):
'''
This method display each journal references in the format of an editable
input text. This allows the user to modifie it.
@param journal_refs: The list of all journal references of the document
@return: the html code that display each dropdown list
'''
html = ''
if len(journal_refs) > 0:
html += '''
<tr>
<td align="left"><p>Journal references: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="journal_refs" size="100" ''' \
'''value="%(journal_ref)s"/></p>
''' % {
'journal_ref': journal_ref
} for journal_ref in journal_refs
])
html = html + '''
</td>
</tr>
'''
return html
def fill_report_nos_list(self, report_nos):
'''
Concatate a string containing the report number html table rows
'''
html = ''
if len(report_nos) > 0:
html = '''
<tr>
<td align="left"><p>Report numbers: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="report_nos" size="100" ''' \
'''value="%(report_no)s"/></p>''' % {
'report_no': report_no
} for report_no in report_nos
])
html = html + '''
</td>
</tr>
'''
return html
def get_list_id_categories(self, categories):
'''
gives the id of the categores tuple
'''
id_categories = []
for category in categories:
id_categories.append(category['id'])
return id_categories
def format_media_list_by_type(self, medias):
'''
This function format the media by type (Main, Uploaded, ...)
'''
#format media list by type of document
media_type = []
for media in medias:
# if it is the first media of this type, create a new type
is_type_in_media_type = False
for type in media_type:
if media['collection'] == type['media_type']:
is_type_in_media_type = True
if is_type_in_media_type == False:
type = {}
type['media_type'] = media['collection']
type['media_list'] = []
media_type.append(type)
# insert the media in the good media_type element
for type in media_type:
if type['media_type'] == media['collection']:
type['media_list'].append(media)
return media_type | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* An instantaneous value.
*/
public class Value implements MeasurableStat {
private double value = 0;
@Override
public double measure(MetricConfig config, long now) {
return value;
}
@Override
public void record(MetricConfig config, double value, long timeMs) {
this.value = value;
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/metrics/stats/Value.java |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'jlapenna@google.com (Joe LaPenna)'
import unittest
import gdata.projecthosting.client
import gdata.projecthosting.data
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.test_config as conf
# Register the extra command-line options the tests below read in setUp().
conf.options.register_option(conf.PROJECT_NAME_OPTION)
conf.options.register_option(conf.ISSUE_ASSIGNEE_OPTION)
class ProjectHostingClientTest(unittest.TestCase):
  """Live integration tests for ProjectHostingClient.

  Each test is a no-op unless the 'runlive' option is 'true'; responses may
  be served from/recorded to a cache via conf.configure_cache.
  """

  def setUp(self):
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.projecthosting.client.ProjectHostingClient()
      conf.configure_client(self.client, 'ProjectHostingClientTest', 'code')
      self.project_name = conf.options.get_value('project_name')
      self.assignee = conf.options.get_value('issue_assignee')
      self.owner = conf.options.get_value('username')

  def tearDown(self):
    conf.close_client(self.client)

  def create_issue(self):
    """Add a fresh issue and verify the server echoed our fields back."""
    # Add an issue
    created = self.client.add_issue(
        self.project_name,
        'my title',
        'my summary',
        self.owner,
        labels=['label0'])
    self.assertEqual(created.title.text, 'my title')
    self.assertEqual(created.content.text, 'my summary')
    self.assertEqual(len(created.label), 1)
    self.assertEqual(created.label[0].text, 'label0')
    return created

  def test_create_update_close(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_create_update_delete')

    # Create the issue:
    created = self.create_issue()

    # Change the issue we just added.
    issue_id = created.id.text.split('/')[-1]
    update_response = self.client.update_issue(
        self.project_name,
        issue_id,
        self.owner,
        comment='My comment here.',
        summary='New Summary',
        status='Accepted',
        owner=self.assignee,
        labels=['-label0', 'label1'],
        ccs=[self.owner])

    updates = update_response.updates

    # Make sure it changed our status, summary, and added the comment.
    self.assertEqual(update_response.content.text, 'My comment here.')
    self.assertEqual(updates.summary.text, 'New Summary')
    self.assertEqual(updates.status.text, 'Accepted')

    # Make sure it got all our label change requests.
    # NOTE: assertEquals/assert_ are deprecated unittest aliases (removed
    # in Python 3.12); the canonical assertEqual/assertTrue are used.
    self.assertEqual(len(updates.label), 2)
    self.assertEqual(updates.label[0].text, '-label0')
    self.assertEqual(updates.label[1].text, 'label1')

    # Be sure it saw our CC change. We can't check the specific values (yet)
    # because ccUpdate and ownerUpdate responses are mungled.
    self.assertEqual(len(updates.ccUpdate), 1)
    self.assertTrue(updates.ownerUpdate.text)

  def test_get_issues(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_create_update_delete')

    # Create an issue so we have something to look up.
    created = self.create_issue()

    # The fully qualified id is a url, we just want the number.
    issue_id = created.id.text.split('/')[-1]

    # Get the specific issue in our issues feed. You could use label,
    # canned_query and others just the same.
    query = gdata.projecthosting.client.Query(label='label0')
    feed = self.client.get_issues(self.project_name, query=query)

    # Make sure we at least find the entry we created with that label.
    self.assertTrue(len(feed.entry) > 0)
    for issue in feed.entry:
      label_texts = [label.text for label in issue.label]
      self.assertTrue('label0' in label_texts,
                      'Issue does not have label label0')

  def test_get_comments(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_create_update_delete')

    # Create an issue so we have something to look up.
    created = self.create_issue()

    # The fully qualified id is a url, we just want the number.
    issue_id = created.id.text.split('/')[-1]

    # Now lets add two comments to that issue.
    for i in range(2):
      update_response = self.client.update_issue(
          self.project_name,
          issue_id,
          self.owner,
          comment='My comment here %s' % i)

    # We have an issue that has several comments. Lets get them.
    comments_feed = self.client.get_comments(self.project_name, issue_id)

    # It has 2 comments.
    self.assertEqual(2, len(comments_feed.entry))
class ProjectHostingDocExamplesTest(unittest.TestCase):
    """Runs the documentation examples for the Project Hosting client."""

    def setUp(self):
        # Configuration values for live requests, shared by every example.
        self.owner = conf.options.get_value('username')
        self.password = conf.options.get_value('password')
        self.project_name = conf.options.get_value('project_name')
        self.assignee = conf.options.get_value('issue_assignee')

    def test_doc_examples(self):
        """Chain all of the documentation examples against a live server."""
        if conf.options.get_value('runlive') != 'true':
            return
        issues_client = gdata.projecthosting.client.ProjectHostingClient()
        self.authenticating_client(issues_client, self.owner, self.password)
        issue = self.creating_issues(issues_client, self.project_name, self.owner)
        issue_id = issue.id.text.split('/')[-1]
        self.retrieving_all_issues(issues_client, self.project_name)
        self.retrieving_issues_using_query_parameters(
            issues_client, self.project_name)
        self.modifying_an_issue_or_creating_issue_comments(
            issues_client, self.project_name, issue_id, self.owner,
            self.assignee)
        self.retrieving_issues_comments_for_an_issue(
            issues_client, self.project_name, issue_id)

    def authenticating_client(self, client, username, password):
        """Log the client in with ClientLogin for the 'code' service."""
        return client.client_login(
            username,
            password,
            source='your-client-name',
            service='code')

    def creating_issues(self, client, project_name, owner):
        """Create a single labelled issue and return the new entry."""
        return client.add_issue(
            project_name,
            'my title',
            'my summary',
            owner,
            labels=['label0'])

    def retrieving_all_issues(self, client, project_name):
        """Fetch every issue in the project and sanity-check the titles."""
        feed = client.get_issues(project_name)
        for entry in feed.entry:
            self.assert_(entry.title.text is not None)

    def retrieving_issues_using_query_parameters(self, client, project_name):
        """Fetch issues matching a label query and sanity-check the titles."""
        query = gdata.projecthosting.client.Query(label='label0', max_results=1000)
        feed = client.get_issues(project_name, query=query)
        for entry in feed.entry:
            self.assert_(entry.title.text is not None)
        return feed

    def retrieving_issues_comments_for_an_issue(self, client, project_name,
                                                issue_id):
        """Fetch all comments for one issue and sanity-check their content."""
        comments_feed = client.get_comments(project_name, issue_id)
        for comment in comments_feed.entry:
            self.assert_(comment.content is not None)
        return comments_feed

    def modifying_an_issue_or_creating_issue_comments(self, client, project_name,
                                                      issue_id, owner, assignee):
        """Comment on an issue while updating its metadata in a single call."""
        return client.update_issue(
            project_name,
            issue_id,
            owner,
            comment='My comment here.',
            summary='New Summary',
            status='Accepted',
            owner=assignee,
            labels=['-label0', 'label1'],
            ccs=[owner])
def suite():
    """Build the suite covering both Project Hosting test cases."""
    cases = [ProjectHostingClientTest, ProjectHostingDocExamplesTest]
    return conf.build_suite(cases)
# Run the suite directly when this module is executed as a script.
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
mininode_lock,
msg_block,
msg_getdata,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
    def __init__(self):
        """Initialize the P2P interface with test-specific state.

        The P2PInterface base class already keeps a counter per P2P message
        type and the last received message of each type, which is enough for
        most tests. Standard setup happens via super().__init__() first;
        custom state is added afterwards."""
        super().__init__()
        # Maps block sha256 -> number of times that block was received.
        self.block_receive_map = defaultdict(int)

    def on_block(self, message):
        """Override the standard on_block callback.

        Count every received block by its hash."""
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1

    def on_inv(self, message):
        """Override the standard on_inv callback to do nothing."""
def custom_function():
    """Placeholder for custom test behaviour.

    If behaviour like this proves generally useful to other tests, move it
    into a module under test_framework. Note that self.log is unavailable
    here because this is a free function, not a framework method."""
class ExampleTest(BitcoinTestFramework):
    # Each functional test is a subclass of the BitcoinTestFramework class.
    # Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
    # and setup_nodes() methods to customize the test setup as required.
    def set_test_params(self):
        """Override test parameters for your individual test.
        This method must be overridden and num_nodes must be explicitly set."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        # Use self.extra_args to change command-line arguments for the nodes
        self.extra_args = [[], ["-logips"], []]
        # self.log.info("I've finished set_test_params")  # Oops! Can't run self.log before run_test()
    def skip_test_if_missing_module(self):
        # This example needs wallet support; skip when built without it.
        self.skip_if_no_wallet()
    # Use add_options() to add specific command-line options for your test.
    # In practice this is not used very much, since the tests are mostly written
    # to be run in automated environments without command-line options.
    # def add_options()
    #     pass
    # Use setup_chain() to customize the node data directories. In practice
    # this is not used very much since the default behaviour is almost always
    # fine
    # def setup_chain():
    #     pass
    def setup_network(self):
        """Setup the test network topology
        Often you won't need to override this, since the standard network topology
        (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
        If you do override this method, remember to start the nodes, assign
        them to self.nodes, connect them and then sync."""
        self.setup_nodes()
        # In this test, we're not connecting node2 to node0 or node1. Calls to
        # sync_all() should not include node2, since we're not expecting it to
        # sync.
        connect_nodes(self.nodes[0], 1)
        self.sync_all([self.nodes[0:2]])
    # Use setup_nodes() to customize the node start behaviour (for example if
    # you don't want to start all nodes at the start of the test).
    # def setup_nodes():
    #     pass
    def custom_method(self):
        """Do some custom behaviour for this test
        Define it in a method here because you're going to use it repeatedly.
        If you think it's useful in general, consider moving it to the base
        BitcoinTestFramework class so other tests can use it."""
        self.log.info("Running custom_method")
    def run_test(self):
        """Main test logic"""
        # Creating a P2P connection waits for a verack to make sure the connection is fully up
        self.nodes[0].add_p2p_connection(BaseNode())
        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:2]])
        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.
        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")
        self.log.info("Calling a custom function")
        custom_function()
        self.log.info("Calling a custom method")
        self.custom_method()
        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
        height = 1
        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # send_message() sends a P2P message to the node over our P2PInterface
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1
        self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)
        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)
        self.log.info("Add P2P connection to node2")
        self.nodes[0].disconnect_p2ps()
        self.nodes[2].add_p2p_connection(BaseNode())
        self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        self.nodes[2].p2p.send_message(getdata_request)
        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # P2PInterface objects.
        wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                assert_equal(block, 1)
# Invoke the framework's main() entry point when run as a script.
if __name__ == '__main__':
    ExampleTest().main()
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.addons.hr_holidays.tests.common import TestHrHolidaysBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestHolidaysFlow(TestHrHolidaysBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_leave_request_flow(self):
""" Testing leave request flow """
cr, uid = self.cr, self.uid
        def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
            # Assert the four day counters computed on a holiday status:
            # ml = max_leaves, lt = leaves_taken, rl = remaining_leaves,
            # vrl = virtual_remaining_leaves.
            self.assertEqual(holiday_status.max_leaves, ml,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.leaves_taken, lt,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.remaining_leaves, rl,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
                             'hr_holidays: wrong type days computation')
# HrUser creates some holiday statuses -> crash because only HrManagers should do this
with self.assertRaises(AccessError):
self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, {
'name': 'UserCheats',
'limit': True,
})
# HrManager creates some holiday statuses
self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'WithMeetingType',
'limit': True,
'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}),
})
self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'NotLimited',
'limit': True,
})
self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'Limited',
'limit': False,
'double_validation': True,
})
# --------------------------------------------------
# Case1: unlimited type of leave request
# --------------------------------------------------
# Employee creates a leave request for another employee -> should crash
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol10',
'employee_id': self.employee_hruser_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
ids = self.hr_holidays.search(cr, uid, [('name', '=', 'Hol10')])
self.hr_holidays.unlink(cr, uid, ids)
# Employee creates a leave request in a no-limit category
hol1_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol11',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# Employee validates its leave request -> should not work
self.hr_holidays.signal_workflow(cr, self.user_employee_id, [hol1_id], 'validate')
hol1.refresh()
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')
# HrUser validates the employee leave request
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol1_id], 'validate')
hol1.refresh()
self.assertEqual(hol1.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
# --------------------------------------------------
# Case2: limited type of leave request
# --------------------------------------------------
# Employee creates a new leave request at the same time -> crash, avoid interlapping
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol21',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a limited category -> crash, not enough days left
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=1)),
'number_of_days_temp': 1,
})
# Clean transaction
self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))
# HrUser allocates some leaves to the employee
aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
'name': 'Days for limited category',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'type': 'add',
'number_of_days_temp': 2,
})
# HrUser validates the allocation request
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'validate')
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'second_validate')
# Checks Employee has effectively some days left
hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Employee creates a leave request in the limited category, now that he has some days left
hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=3)),
'number_of_days_temp': 1,
})
hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)
# Check left days: - 1 virtual remaining day
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)
# HrUser validates the first step
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'validate')
hol2.refresh()
self.assertEqual(hol2.state, 'validate1',
'hr_holidays: first validation should lead to validate1 state')
# HrUser validates the second step
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'second_validate')
hol2.refresh()
self.assertEqual(hol2.state, 'validate',
'hr_holidays: second validation should lead to validate state')
# Check left days: - 1 day taken
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)
# HrManager finds an error: he refuses the leave request
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'refuse')
hol2.refresh()
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: refuse should lead to refuse state')
# Check left days: 2 days left again
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Annoyed, HrUser tries to fix its error and tries to reset the leave request -> does not work, only HrManager
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'reset')
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: hr_user should not be able to reset a refused leave request')
# HrManager resets the request
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'reset')
hol2.refresh()
self.assertEqual(hol2.state, 'draft',
'hr_holidays: resetting should lead to draft state')
# HrManager changes the date and put too much days -> crash when confirming
self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=7)),
'number_of_days_temp': 4,
})
with self.assertRaises(except_orm):
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'confirm') | unknown | codeparrot/codeparrot-clean | ||
from django.conf import settings
from guardian.shortcuts import get_objects_for_user
import json
from time import strptime
from datetime import datetime
from pytz import UTC, timezone
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.serializers.json import DjangoJSONEncoder
from django.db import transaction
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from .forms import task_create_form, TaskCommandFormset, TaskParameterFormset
from .models import (
Application, Environment, Execution, ExecutionLiveLog, ExecutionParameter,
ParameterParser, ServerRole, Task)
@permission_required('task.view_task', (Task, 'id', 'task_id'))
def task_page(request, task_id=None):
    """Render the detail page for a single task."""
    task = get_object_or_404(Task, pk=task_id)
    return render(request, 'page/task.html', {'task': task})
@permission_required('task.execute_task', (Task, 'id', 'task_id'))
def task_execute_page(request, task_id, environment_id=None):
    """Show the execute form for a task and, on POST, start an execution.

    POST fields prefixed with 'parameter-' are collected as execution
    parameters; the special 'environment' parameter selects where to run.
    A new Execution is created atomically together with its parameters and
    pre-rendered commands, then started.
    """
    data = {}
    task = get_object_or_404(Task, pk=task_id)
    data['task'] = task
    form_errors = []
    if environment_id:
        environment = get_object_or_404(Environment, pk=int(environment_id))
        data['environment'] = environment
        # Guard against URLs that mix a task with a foreign environment.
        if task.application.id != environment.application.id:
            raise ValueError('task.application.id did not match with environment.application.id')
    if request.method == 'POST':
        # Collect user-supplied parameters, stripping the form prefix.
        parameter_prefix = 'parameter-'
        parameters = {}
        for name, value in request.POST.items():
            if name.startswith(parameter_prefix):
                name = name[len(parameter_prefix):]
                parameters[name] = value
        if 'environment' in parameters and parameters['environment']:
            environment = get_object_or_404(Environment, pk=int(parameters['environment']))
            if task.application.id != environment.application.id:
                raise ValueError('task.application.id did not match with environment.application.id')
            # Refuse to start if this task is already queued or running there.
            duplicateExecution = Execution.objects.filter(task=task, environment=environment,
                                                          status__in=[Execution.PENDING, Execution.RUNNING])
            if duplicateExecution.count():
                form_errors.append('Task %s is already running on %s environment.' %
                                   (task.name, environment.name))
            else:
                # Create the execution, its parameters and its rendered
                # commands in one transaction so a failure leaves no
                # half-created execution behind.
                with transaction.atomic():
                    execution = Execution(task=task, environment=environment, user=request.user)
                    execution.save()
                    for name, value in parameters.items():
                        if name != 'environment':
                            ExecutionParameter(execution=execution, name=name, value=value).save()
                    # Substitute parameter placeholders into each command text.
                    parameter_parser = ParameterParser(execution)
                    for command in execution.commands.all():
                        command.command = parameter_parser.process(command.command)
                        command.save()
                execution.start()
                return redirect(execution)
        else:
            form_errors.append('Environment is required')
    data['form_errors'] = form_errors
    # Offer only environments the user may execute on, for this application.
    data['environments'] = get_objects_for_user(request.user, 'core.execute_environment').filter(
        application=task.application)
    return render(request, 'page/task_execute.html', data)
def task_form_page(request, application_id=None, task_id=None):
    """Create or edit a task together with its parameter and command formsets.

    With task_id set, an existing task is edited; with application_id set, a
    new task is created under that application. The URLconf is expected to
    always supply exactly one of the two.
    """
    data = {}
    if task_id:
        task = get_object_or_404(Task, pk=task_id)
        application = task.application
        data['task'] = task
        args = {}
        if not request.user.has_perm('task.change_task', task):
            messages.error(request, 'Access denied')
            return redirect('index')
    elif application_id:
        application = get_object_or_404(Application, pk=application_id)
        args = {'application_id': application_id}
        if not request.user.has_perm('core.change_application', application):
            messages.error(request, 'Access denied')
            return redirect('index')
    # NOTE(review): if neither task_id nor application_id is given,
    # 'application' and 'args' are unbound (NameError) -- confirm the
    # URLconf guarantees one of them.
    form, form_parameters, form_commands = create_forms(request, task_id, args)
    if request.method == 'POST':
        form_is_valid = form.is_valid()
        form_parameters_is_valid = form_parameters.is_valid()
        form_commands_is_valid = form_commands.is_valid()
        if form_is_valid and form_parameters_is_valid and form_commands_is_valid:
            task = form.save(commit=False)
            task.save()
            data['task'] = task
            task_save_formset(form_parameters, task)
            task_save_formset(form_commands, task)
            if task_id is None:  # fixed: 'is None' instead of '== None' (PEP 8 E711)
                return redirect(task.get_absolute_url())
            # Re-render pristine (unbound) forms after a successful save;
            # temporarily flipping the method makes create_forms() build
            # them without POST data.
            request.method = 'GET'
            form, form_parameters, form_commands = create_forms(request, task_id, args)
            request.method = 'POST'
            messages.success(request, 'Saved')
    data['application'] = application
    data['is_new'] = task_id is None  # fixed: 'is None' instead of '== None'
    data['request'] = request
    data['form'] = form
    data['form_parameters'] = form_parameters
    data['form_commands'] = form_commands
    data['server_roles'] = ServerRole.objects.all()
    data['global_parameters'] = ParameterParser.global_parameters.items()
    return render(request, 'page/task_form.html', data)
def create_forms(request, task_id, args):
    """Build the task form plus its parameter and command formsets."""
    task_form = task_create_form('task', request, task_id, args)
    parameter_formset = create_formset(request, TaskParameterFormset, task_id)
    command_formset = create_formset(request, TaskCommandFormset, task_id)
    # Restrict the selectable roles to the user's current department.
    for command_form in command_formset.forms:
        command_form.fields['roles'].queryset = ServerRole.objects.filter(
            department_id=request.current_department_id)
    return task_form, parameter_formset, command_formset
def task_save_formset(formset, task):
    """Persist a task-related formset, wiring new rows to *task* with ordering."""
    formset.save(commit=False)
    # Newly created rows need their task foreign key set before saving.
    for new_instance in formset.new_objects:
        new_instance.task_id = task.id
    # Persist the user-chosen ordering on every form, new or existing.
    for ordered_form in formset.ordered_forms:
        ordered_form.instance.order = ordered_form.cleaned_data['ORDER']
        ordered_form.instance.save()
    formset.save_m2m()
def create_formset(request, formset, parent_id):
    """Instantiate *formset* (bound on POST) over the rows owned by *parent_id*.

    Both TaskParameter and TaskCommand rows are selected by task_id and
    returned in their stored order.
    """
    model = formset.model
    queryset = model.objects.filter(task_id=parent_id).order_by('order')
    prefix = model.__name__
    if request.method == "POST":
        return formset(request.POST, queryset=queryset, prefix=prefix)
    return formset(queryset=queryset, prefix=prefix)
@login_required
def log_page(request, model_name, id):
    """Show the execution log filtered by application, environment, task or user."""
    # TODO: add custom permission check
    # Map the URL segment to (model to look up, Execution filter field).
    dispatch = {
        'application': (Application, 'environment__application_id'),
        'environment': (Environment, 'environment_id'),
        'task': (Task, 'task_id'),
        'user': (get_user_model(), 'user_id'),
    }
    if model_name not in dispatch:
        raise Http404()
    model, filter_field = dispatch[model_name]
    related = get_object_or_404(model, pk=id)
    executions = Execution.objects.filter(**{filter_field: id})
    for related_model in ('task', 'user', 'environment', 'parameters'):
        executions = executions.prefetch_related(related_model)
    data = {
        'executions': executions.order_by('-time_created'),
        'model_name': model_name,
        'related': related,
    }
    return render(request, 'page/log.html', data)
@permission_required('task.view_task', (Task, 'executions__id', 'execution_id'))
def execution_page(request, execution_id):
    """Render the page for a single task execution."""
    execution = get_object_or_404(Execution, pk=execution_id)
    return render(request, 'page/execution.html', {'execution': execution})
@permission_required('task.change_task', (Task, 'id', 'task_id'))
def task_delete(request, task_id):
    """Delete a task (POST only) and return a JSON redirect to its application."""
    if request.method != 'POST':
        # Bug fix: the view previously did 'return Http404', handing Django
        # the exception CLASS instead of an HTTP response. Raising it
        # produces the intended 404 for non-POST requests.
        raise Http404()
    task = get_object_or_404(Task, pk=task_id)
    task.delete()
    data = {
        'status': True,
        'action': 'redirect',
        'target': task.application.get_absolute_url()
    }
    return HttpResponse(json.dumps(data), content_type="application/json")
@permission_required('task.view_task', (Task, 'executions__id', 'execution_id'))
def live_log(request, execution_id, last_id):
    """Return live-log events newer than *last_id* for an execution, as JSON.

    Each event's JSON payload is decoded and every 'time_*' field is
    converted to the requesting user's timezone for display.
    """
    data = ExecutionLiveLog.objects.filter(execution_id=execution_id, id__gt=last_id).order_by('id').values('id',
                                                                                                            'event',
                                                                                                            'data')
    for item in data:
        item['data'] = json.loads(item['data'])
        for key, value in item['data'].items():
            if key.startswith('time_'):
                # Parse the timestamp; sub-second precision is dropped by
                # taking only the first six struct_time fields.
                value = datetime(*strptime(value, "%Y-%m-%dT%H:%M:%S.%fZ")[:6])
                # NOTE(review): the value carries a literal 'Z' suffix but is
                # interpreted as settings.TIME_ZONE rather than UTC here --
                # confirm the producer writes server-local times.
                value = value.replace(tzinfo=timezone(settings.TIME_ZONE)).astimezone(request.user.timezone)
                value = request.user.timezone.normalize(value)
                item['data'][key] = value.strftime("%Y-%m-%d %H:%M")
    return HttpResponse(json.dumps(list(data), cls=DjangoJSONEncoder), content_type="application/json")
@permission_required('task.execute_task', (Task, 'executions__id', 'execution_id'))
def execution_abort(request, execution_id):
    """Abort a pending/running execution and report success as JSON.

    Marks the execution aborted, logs the completion event, revokes its
    celery task and terminates any per-server command tasks already started.
    """
    # TODO(review): consider rejecting non-POST requests (raise Http404),
    # matching task_delete above.
    execution = get_object_or_404(Execution, pk=execution_id)
    data = {}
    if execution.status in [Execution.PENDING, Execution.RUNNING]:
        execution.status = execution.ABORTED
        execution.save_end()
        ExecutionLiveLog.add(execution_id, 'execution_completed', status=Execution.ABORTED,
                             time=execution.time,
                             time_end=execution.time_end)
        # Imported lazily: celery is only needed on the abort path.
        import signal
        from celery.result import AsyncResult
        if execution.celery_task_id:
            AsyncResult(execution.celery_task_id).revoke()
        for commands in execution.commands.all():
            for server in commands.servers.all():
                if server.celery_task_id:
                    AsyncResult(server.celery_task_id).revoke(terminate=True, signal=signal.SIGALRM)
        data['status'] = True
    else:
        data['status'] = False
    # Bug fix: json.dumps(list(data)) serialized only the dict KEYS
    # (e.g. '["status"]'); serialize the mapping itself, consistent with
    # the JSON shape task_delete returns.
    return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type="application/json")
#ifndef Py_INTERNAL_PYMATH_H
#define Py_INTERNAL_PYMATH_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* _Py_ADJUST_ERANGE1(x)
* _Py_ADJUST_ERANGE2(x, y)
* Set errno to 0 before calling a libm function, and invoke one of these
* macros after, passing the function result(s) (_Py_ADJUST_ERANGE2 is useful
* for functions returning complex results). This makes two kinds of
* adjustments to errno: (A) If it looks like the platform libm set
* errno=ERANGE due to underflow, clear errno. (B) If it looks like the
* platform libm overflowed but didn't set errno, force errno to ERANGE. In
* effect, we're trying to force a useful implementation of C89 errno
* behavior.
* Caution:
* This isn't reliable. C99 no longer requires libm to set errno under
* any exceptional condition, but does require +- HUGE_VAL return
* values on overflow. A 754 box *probably* maps HUGE_VAL to a
* double infinity, and we're cool if that's so, unless the input
* was an infinity and an infinity is the expected result. A C89
* system sets errno to ERANGE, so we check for that too. We're
* out of luck if a C99 754 box doesn't map HUGE_VAL to +Inf, or
* if the returned result is a NaN, or if a C89 box returns HUGE_VAL
* in non-overflow cases.
*/
static inline void _Py_ADJUST_ERANGE1(double x)
{
    /* Normalize libm's errno behaviour for a single result: force
       errno=ERANGE on an overflow (+-Inf) the libm did not report, and
       clear a spurious ERANGE that merely signalled underflow to zero. */
    const int overflowed = (x == INFINITY || x == -INFINITY);
    if (errno == 0) {
        if (overflowed) {
            errno = ERANGE;
        }
    }
    else if (errno == ERANGE && x == 0.0) {
        errno = 0;
    }
}
static inline void _Py_ADJUST_ERANGE2(double x, double y)
{
    /* Same errno adjustment as _Py_ADJUST_ERANGE1, for functions that
       produce a pair of results (e.g. complex values): overflow in either
       part forces ERANGE; otherwise a stale ERANGE is cleared. */
    int overflowed = (x == INFINITY || x == -INFINITY);
    overflowed |= (y == INFINITY || y == -INFINITY);
    if (overflowed) {
        if (errno == 0) {
            errno = ERANGE;
        }
    }
    else if (errno == ERANGE) {
        errno = 0;
    }
}
//--- HAVE_PY_SET_53BIT_PRECISION macro ------------------------------------
//
// The functions _Py_dg_strtod() and _Py_dg_dtoa() in Python/dtoa.c (which are
// required to support the short float repr introduced in Python 3.1) require
// that the floating-point unit that's being used for arithmetic operations on
// C doubles is set to use 53-bit precision. It also requires that the FPU
// rounding mode is round-half-to-even, but that's less often an issue.
//
// If your FPU isn't already set to 53-bit precision/round-half-to-even, and
// you want to make use of _Py_dg_strtod() and _Py_dg_dtoa(), then you should:
//
// #define HAVE_PY_SET_53BIT_PRECISION 1
//
// and also give appropriate definitions for the following three macros:
//
// * _Py_SET_53BIT_PRECISION_HEADER: any variable declarations needed to
// use the two macros below.
// * _Py_SET_53BIT_PRECISION_START: store original FPU settings, and
// set FPU to 53-bit precision/round-half-to-even
// * _Py_SET_53BIT_PRECISION_END: restore original FPU settings
//
// The macros are designed to be used within a single C function: see
// Python/pystrtod.c for an example of their use.
// Get and set x87 control word for gcc/x86
#ifdef HAVE_GCC_ASM_FOR_X87
#define HAVE_PY_SET_53BIT_PRECISION 1
// Functions defined in Python/pymath.c
extern unsigned short _Py_get_387controlword(void);
extern void _Py_set_387controlword(unsigned short);
#define _Py_SET_53BIT_PRECISION_HEADER \
    unsigned short old_387controlword, new_387controlword
// Mask out the precision/rounding control bits (0x0f00) and select
// 53-bit precision (0x0200) -- the same fields the MSVC block below
// sets via _MCW_PC|_MCW_RC and _PC_53|_RC_NEAR. The control word is
// only rewritten when it actually changes.
#define _Py_SET_53BIT_PRECISION_START \
    do { \
        old_387controlword = _Py_get_387controlword(); \
        new_387controlword = (old_387controlword & ~0x0f00) | 0x0200; \
        if (new_387controlword != old_387controlword) { \
            _Py_set_387controlword(new_387controlword); \
        } \
    } while (0)
// Restore the saved control word, again only when it had been changed.
#define _Py_SET_53BIT_PRECISION_END \
    do { \
        if (new_387controlword != old_387controlword) { \
            _Py_set_387controlword(old_387controlword); \
        } \
    } while (0)
#endif
// Get and set x87 control word for VisualStudio/x86.
// x87 is not supported in 64-bit or ARM.
#if defined(_MSC_VER) && !defined(_WIN64) && !defined(_M_ARM)
#define HAVE_PY_SET_53BIT_PRECISION 1
#include <float.h>  // __control87_2()
#define _Py_SET_53BIT_PRECISION_HEADER \
    unsigned int old_387controlword, new_387controlword, out_387controlword
// We use the __control87_2 function to set only the x87 control word.
// The SSE control word is unaffected.
// Select 53-bit precision (_PC_53) and round-to-nearest (_RC_NEAR);
// the control word is only rewritten when it actually changes.
#define _Py_SET_53BIT_PRECISION_START \
    do { \
        __control87_2(0, 0, &old_387controlword, NULL); \
        new_387controlword = \
          (old_387controlword & ~(_MCW_PC | _MCW_RC)) | (_PC_53 | _RC_NEAR); \
        if (new_387controlword != old_387controlword) { \
            __control87_2(new_387controlword, _MCW_PC | _MCW_RC, \
                          &out_387controlword, NULL); \
        } \
    } while (0)
// Restore the saved control word, again only when it had been changed.
#define _Py_SET_53BIT_PRECISION_END \
    do { \
        if (new_387controlword != old_387controlword) { \
            __control87_2(old_387controlword, _MCW_PC | _MCW_RC, \
                          &out_387controlword, NULL); \
        } \
    } while (0)
#endif
// MC68881
#ifdef HAVE_GCC_ASM_FOR_MC68881
#define HAVE_PY_SET_53BIT_PRECISION 1
// Locals shared by the START/END macros below.
#define _Py_SET_53BIT_PRECISION_HEADER \
    unsigned int old_fpcr, new_fpcr
// Save the FP control register, then clear the 0xf0 field and set 0x80;
// only write it back if it changed.
#define _Py_SET_53BIT_PRECISION_START \
    do { \
        __asm__ ("fmove.l %%fpcr,%0" : "=dm" (old_fpcr)); \
        /* Set double precision / round to nearest. */ \
        new_fpcr = (old_fpcr & ~0xf0) | 0x80; \
        if (new_fpcr != old_fpcr) { \
            __asm__ volatile ("fmove.l %0,%%fpcr" : : "dm" (new_fpcr)); \
        } \
    } while (0)
// Restore the saved control register, but only if START actually changed it.
#define _Py_SET_53BIT_PRECISION_END \
    do { \
        if (new_fpcr != old_fpcr) { \
            __asm__ volatile ("fmove.l %0,%%fpcr" : : "dm" (old_fpcr)); \
        } \
    } while (0)
#endif
// Default definitions are empty
// (platforms where the FPU either cannot or need not be adjusted).
#ifndef _Py_SET_53BIT_PRECISION_HEADER
# define _Py_SET_53BIT_PRECISION_HEADER
# define _Py_SET_53BIT_PRECISION_START
# define _Py_SET_53BIT_PRECISION_END
#endif
//--- _PY_SHORT_FLOAT_REPR macro -------------------------------------------
// If we can't guarantee 53-bit precision, don't use the code
// in Python/dtoa.c, but fall back to standard code.  This
// means that repr of a float will be long (17 significant digits).
//
// Realistically, there are two things that could go wrong:
//
// (1) doubles aren't IEEE 754 doubles, or
// (2) we're on x86 with the rounding precision set to 64-bits
//     (extended precision), and we don't know how to change
//     the rounding precision.
#if !defined(DOUBLE_IS_LITTLE_ENDIAN_IEEE754) && \
    !defined(DOUBLE_IS_BIG_ENDIAN_IEEE754) && \
    !defined(DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754)
# define _PY_SHORT_FLOAT_REPR 0
#endif
// Double rounding is symptomatic of use of extended precision on x86.
// If we're seeing double rounding, and we don't have any mechanism available
// for changing the FPU rounding precision, then don't use Python/dtoa.c.
#if defined(X87_DOUBLE_ROUNDING) && !defined(HAVE_PY_SET_53BIT_PRECISION)
# define _PY_SHORT_FLOAT_REPR 0
#endif
// Neither problem detected above: the short-repr code path is safe.
#ifndef _PY_SHORT_FLOAT_REPR
# define _PY_SHORT_FLOAT_REPR 1
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYMATH_H */ | c | github | https://github.com/python/cpython | Include/internal/pycore_pymath.h |
import sys
import numpy as np
import os
from os import path
class DependencyWriter:
    """
    Writes predicted dependency heads back out as CoNLL files.
    """

    def __init__(self):
        pass

    def save(self, language, heads_pred):
        """Save predicted dependency trees.

        language -- one of "danish", "dutch", "portuguese", "english";
            anything else prints an error and returns without writing.
        heads_pred -- per-sentence sequences of predicted head indices;
            heads_pred[sent][tok + 1] is written into column 7 (HEAD) for
            token ``tok`` -- index 0 is presumably a root slot, TODO
            confirm against the parser that produced these predictions.

        Reads <language>_test.conll from the package data directory and
        writes <language>_test.conll.pred next to it.
        """
        base_deppars_dir = path.join(path.dirname(__file__), "..", "..", "data", "deppars")
        languages = ["danish", "dutch", "portuguese", "english"]
        if language not in languages:
            # print() call form: the original used a Python-2 print statement,
            # which is a SyntaxError on Python 3.
            print("Language does not exist: \"%s\": Available are: %s" % (language, languages))
            return
        n_toks = 0
        n_sents = 0
        in_path = path.join(base_deppars_dir, language + "_test.conll")
        out_path = path.join(base_deppars_dir, language + "_test.conll.pred")
        # Context managers guarantee both handles are closed even on error
        # (the original leaked them if an exception occurred mid-loop).
        with open(in_path) as conll_file, open(out_path, 'w') as conll_file_out:
            for line in conll_file:
                line = line.rstrip()
                if len(line) == 0:
                    # Blank line marks a sentence boundary.
                    n_toks = 0
                    n_sents += 1
                    conll_file_out.write("\n")
                    continue
                fields = line.split("\t")
                # Column 7 (index 6) is the HEAD field in CoNLL format.
                fields[6] = "{0}".format(heads_pred[n_sents][n_toks + 1])
                conll_file_out.write("\t".join(fields))
                conll_file_out.write("\n")
                n_toks += 1
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.math;
import com.google.common.annotations.GwtCompatible;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.math.BigInteger;
import java.math.RoundingMode;
/**
* A collection of preconditions for math functions.
*
* @author Louis Wasserman
*/
@GwtCompatible
final class MathPreconditions {
@CanIgnoreReturnValue
static int checkPositive(String role, int x) {
if (x <= 0) {
throw new IllegalArgumentException(role + " (" + x + ") must be > 0");
}
return x;
}
@CanIgnoreReturnValue
static long checkPositive(String role, long x) {
if (x <= 0) {
throw new IllegalArgumentException(role + " (" + x + ") must be > 0");
}
return x;
}
@CanIgnoreReturnValue
static BigInteger checkPositive(String role, BigInteger x) {
if (x.signum() <= 0) {
throw new IllegalArgumentException(role + " (" + x + ") must be > 0");
}
return x;
}
@CanIgnoreReturnValue
static int checkNonNegative(String role, int x) {
if (x < 0) {
throw new IllegalArgumentException(role + " (" + x + ") must be >= 0");
}
return x;
}
@CanIgnoreReturnValue
static long checkNonNegative(String role, long x) {
if (x < 0) {
throw new IllegalArgumentException(role + " (" + x + ") must be >= 0");
}
return x;
}
@CanIgnoreReturnValue
static BigInteger checkNonNegative(String role, BigInteger x) {
if (x.signum() < 0) {
throw new IllegalArgumentException(role + " (" + x + ") must be >= 0");
}
return x;
}
@CanIgnoreReturnValue
static double checkNonNegative(String role, double x) {
if (!(x >= 0)) { // not x < 0, to work with NaN.
throw new IllegalArgumentException(role + " (" + x + ") must be >= 0");
}
return x;
}
static void checkRoundingUnnecessary(boolean condition) {
if (!condition) {
throw new ArithmeticException("mode was UNNECESSARY, but rounding was necessary");
}
}
static void checkInRangeForRoundingInputs(boolean condition, double input, RoundingMode mode) {
if (!condition) {
throw new ArithmeticException(
"rounded value is out of range for input " + input + " and rounding mode " + mode);
}
}
static void checkNoOverflow(boolean condition, String methodName, int a, int b) {
if (!condition) {
throw new ArithmeticException("overflow: " + methodName + "(" + a + ", " + b + ")");
}
}
static void checkNoOverflow(boolean condition, String methodName, long a, long b) {
if (!condition) {
throw new ArithmeticException("overflow: " + methodName + "(" + a + ", " + b + ")");
}
}
private MathPreconditions() {}
} | java | github | https://github.com/google/guava | android/guava/src/com/google/common/math/MathPreconditions.java |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import random
import string
def _wait_for_key_state(ec2, name, want_present, wait_timeout):
    """Poll EC2 until the key pair *name* exists (want_present=True) or is
    gone (want_present=False).  Returns True on success, False when
    wait_timeout seconds elapse first.  (`time` is provided by the star
    import from ansible.module_utils.basic below.)"""
    start = time.time()
    while (time.time() - start) < wait_timeout:
        if (ec2.get_key_pair(name) is not None) == want_present:
            return True
        time.sleep(1)
    return False


def main():
    """Entry point: ensure an EC2 key pair is present or absent.

    Reads the module parameters documented in DOCUMENTATION, connects to
    EC2 via boto and exits through module.exit_json/fail_json.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        key_material=dict(required=False),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(default=300),
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    name = module.params['name']
    state = module.params.get('state')
    key_material = module.params.get('key_material')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    changed = False

    ec2 = ec2_connect(module)

    # find the key if present
    key = ec2.get_key_pair(name)

    # Ensure requested key is absent
    if state == 'absent':
        if key:
            if not module.check_mode:
                try:
                    key.delete()
                    if wait and not _wait_for_key_state(ec2, name, False, wait_timeout):
                        module.fail_json(msg="timed out while waiting for the key to be removed")
                # "except Exception, e" (original) is a SyntaxError on
                # Python 3; the "as" form works on Python 2.6+ as well.
                except Exception as e:
                    module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
            key = None
            changed = True

    # Ensure requested key is present
    elif state == 'present':
        if key:
            # existing key found
            if key_material:
                # EC2's fingerprints are non-trivial to generate, so push this key
                # to a temporary name and make ec2 calculate the fingerprint for us.
                #
                # http://blog.jbrowne.com/?p=23
                # https://forums.aws.amazon.com/thread.jspa?messageID=352828

                # find an unused name
                test = 'empty'
                while test:
                    randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0, 10)]
                    tmpkeyname = "ansible-" + ''.join(randomchars)
                    test = ec2.get_key_pair(tmpkeyname)

                # create tmp key, read its fingerprint, then delete it
                tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
                tmpfingerprint = tmpkey.fingerprint
                tmpkey.delete()

                if key.fingerprint != tmpfingerprint:
                    if not module.check_mode:
                        key.delete()
                        key = ec2.import_key_pair(name, key_material)
                        if wait and not _wait_for_key_state(ec2, name, True, wait_timeout):
                            module.fail_json(msg="timed out while waiting for the key to be re-created")
                    changed = True

        # if the key doesn't exist, create it now
        else:
            '''no match found, create it'''
            if not module.check_mode:
                if key_material:
                    '''We are providing the key, need to import'''
                    key = ec2.import_key_pair(name, key_material)
                else:
                    '''
                    No material provided, let AWS handle the key creation and
                    retrieve the private key
                    '''
                    key = ec2.create_key_pair(name)
                if wait and not _wait_for_key_state(ec2, name, True, wait_timeout):
                    module.fail_json(msg="timed out while waiting for the key to be created")
            changed = True

    if key:
        data = {
            'name': key.name,
            'fingerprint': key.fingerprint
        }
        # key.material is only set when AWS generated the key for us.
        if key.material:
            data.update({'private_key': key.material})
        module.exit_json(changed=changed, key=data)
    else:
        module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main() | unknown | codeparrot/codeparrot-clean | ||
framework:
secret: s3cr3t
default_locale: fr
enabled_locales: ['fr', 'en']
csrf_protection: true
form:
csrf_protection:
field_name: _csrf
trust_x_sendfile_type_header: true
esi:
enabled: true
ssi:
enabled: true
profiler:
only_exceptions: true
enabled: false
router:
resource: '%kernel.project_dir%/config/routing.xml'
type: xml
utf8: true
session:
storage_factory_id: session.storage.factory.native
handler_id: session.handler.native_file
name: _SYMFONY
cookie_lifetime: 86400
cookie_path: /
cookie_domain: example.com
cookie_secure: true
cookie_samesite: lax
cookie_httponly: false
use_cookies: true
gc_probability: 1
gc_divisor: 108
gc_maxlifetime: 90000
save_path: /path/to/sessions
assets:
version: v1
translator:
enabled: true
fallback: fr
default_path: '%kernel.project_dir%/translations'
cache_dir: '%kernel.cache_dir%/translations'
paths: ['%kernel.project_dir%/Fixtures/translations']
validation:
enabled: true
email_validation_mode: html5
serializer:
enabled: true
enable_attributes: true
name_converter: serializer.name_converter.camel_case_to_snake_case
circular_reference_handler: my.circular.reference.handler
max_depth_handler: my.max.depth.handler
default_context:
enable_max_depth: true
named_serializers:
api:
include_built_in_normalizers: true
include_built_in_encoders: true
default_context:
enable_max_depth: false
type_info: ~
property_info: ~
ide: file%%link%%format
request:
formats:
csv: ['text/csv', 'text/plain']
pdf: 'application/pdf'
json_streamer: ~ | unknown | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/DependencyInjection/Fixtures/yml/full.yml |
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v0alpha1.timeline-modes.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"query": {
"kind": "datasource",
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Status grid",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "random_walk",
"seriesCount": 4,
"spread": 14.9,
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "status-history",
"spec": {
"pluginVersion": "7.5.0-pre",
"options": {
"colWidth": 0.9,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"rowHeight": 0.9,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"thresholds": {
"mode": "percentage",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 60,
"color": "#EAB839"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 1
}
},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "State timeline",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [
[
0,
1616551651000
],
[
1,
1616556554000
],
[
2,
1616559873000
],
[
0,
1616561077000
],
[
3,
1616563090000
]
],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "manual_entry",
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [
[
4,
1616555060000
],
[
5,
1616560081000
],
[
4,
1616562217000
],
[
5,
1616565458000
]
],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "manual_entry",
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"points": [
[
4,
1616557148000
],
[
1616558756000
],
[
4,
1616561658000
],
[
1616562446000
],
[
4,
1616564104000
],
[
1616564548000
],
[
4,
1616564871000
]
],
"scenarioId": "manual_entry"
}
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "state-timeline",
"spec": {
"pluginVersion": "7.5.0-pre",
"options": {
"alignValue": "left",
"colWidth": 0.9,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 1,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "State timeline (strings \u0026 booleans)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"scenarioId": "csv_metric_values",
"stringInput": "a,a,b,b,b,b,c,a,a,d,d,d,d,d"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"scenarioId": "csv_metric_values",
"stringInput": "null,null,e,e,e,null,null,e,null,null,e,null,e,e,e,e"
}
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "true,null,false,null,true,false"
}
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "state-timeline",
"spec": {
"pluginVersion": "7.5.0-pre",
"options": {
"alignValue": "left",
"colWidth": 0.9,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "always",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 1,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 24,
"height": 10,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 10,
"width": 24,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 21,
"width": 24,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"timeSettings": {
"timezone": "utc",
"from": "2021-03-24T03:00:00.000Z",
"to": "2021-03-24T07:00:00.000Z",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Timeline Modes",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-timeline/v0alpha1.timeline-modes.v42.v2alpha1.json |
'''
Optimal result for pebble solitaire state
Status: Accepted
'''
PRECOMPUTED = {}
# Seed the memo table: any 23-cell board holding exactly one pebble is
# already optimal -- no move is possible and the score is 1.
for _i in range(23):
    PRECOMPUTED['-' * _i + 'o' + '-' * (22 - _i)] = 1
###############################################################################
def dfs(state):
    """Return the minimum pebble count reachable from *state*.

    A move jumps a pebble over an adjacent one, removing the jumped pebble:
    "oo-" -> "--o" or "-oo" -> "o--".  Results are memoized in the
    module-level PRECOMPUTED dict.  Works for any board length (the
    original hard-coded the 23-cell window count).
    """
    if state in PRECOMPUTED:
        return PRECOMPUTED[state]
    # With no further moves the score is simply the number of pebbles.
    best = state.count('o')
    for i in range(len(state) - 2):
        window = state[i:i + 3]
        if window == 'oo-':
            best = min(best, dfs(state[:i] + '--o' + state[i + 3:]))
        elif window == '-oo':
            best = min(best, dfs(state[:i] + 'o--' + state[i + 3:]))
    PRECOMPUTED[state] = best
    return best
###############################################################################
def main():
    """Read the number of test cases, then solve and print each board."""
    case_count = int(input())
    for _case in range(case_count):
        print(dfs(input()))
###############################################################################
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DeleteShareGroupStateRequestData;
import org.apache.kafka.common.message.DeleteShareGroupStateResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.Readable;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class DeleteShareGroupStateRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<DeleteShareGroupStateRequest> {
private final DeleteShareGroupStateRequestData data;
public Builder(DeleteShareGroupStateRequestData data) {
super(ApiKeys.DELETE_SHARE_GROUP_STATE);
this.data = data;
}
@Override
public DeleteShareGroupStateRequest build(short version) {
return new DeleteShareGroupStateRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final DeleteShareGroupStateRequestData data;
public DeleteShareGroupStateRequest(DeleteShareGroupStateRequestData data, short version) {
super(ApiKeys.DELETE_SHARE_GROUP_STATE, version);
this.data = data;
}
@Override
public DeleteShareGroupStateResponse getErrorResponse(int throttleTimeMs, Throwable e) {
List<DeleteShareGroupStateResponseData.DeleteStateResult> results = new ArrayList<>();
data.topics().forEach(
topicResult -> results.add(new DeleteShareGroupStateResponseData.DeleteStateResult()
.setTopicId(topicResult.topicId())
.setPartitions(topicResult.partitions().stream()
.map(partitionData -> new DeleteShareGroupStateResponseData.PartitionResult()
.setPartition(partitionData.partition())
.setErrorCode(Errors.forException(e).code()))
.collect(Collectors.toList()))));
return new DeleteShareGroupStateResponse(new DeleteShareGroupStateResponseData()
.setResults(results));
}
@Override
public DeleteShareGroupStateRequestData data() {
return data;
}
public static DeleteShareGroupStateRequest parse(Readable readable, short version) {
return new DeleteShareGroupStateRequest(
new DeleteShareGroupStateRequestData(readable, version),
version
);
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java |
# $Id: __init__.py 6433 2010-09-28 08:21:25Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# Internationalization details are documented in
# <http://docutils.sf.net/docs/howto/i18n.html>.
"""
This package contains modules for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils.utils import normalize_language_tag
_languages = {}
def get_language(language_code, reporter):
    """Return module with language localizations.

    `language_code` is a "BCP 47" language tag.
    If there is no matching module, warn and fall back to English.
    """
    # TODO: use a dummy module returning empty strings?, configurable?
    tags = normalize_language_tag(language_code)
    for tag in tags:
        if tag in _languages:
            return _languages[tag]
        try:
            module = __import__(tag, globals(), locals())
        except ImportError:
            continue
        _languages[tag] = module
        return module
    reporter.warning(
        'language "%s" not supported: ' % language_code +
        'Docutils-generated text will be in English.')
    module = __import__('en', globals(), locals())
    # Cache the fallback under every tried tag so the warning fires only
    # once per language code.  (The original indexed the loop variable
    # after the loop, which raised NameError when `tags` was empty and
    # cached only the last tried tag otherwise.)
    for tag in tags:
        _languages[tag] = module
    return module
/*
* Some parts of this file are derived from Mantine UI (https://github.com/mantinedev/mantine)
* which is distributed under the MIT license:
*
* MIT License
*
* Copyright (c) 2021 Vitaly Rtishchev
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Modifications to this file are licensed under the Apache License, Version 2.0.
*/
import { rem } from "@mantine/core";
/** Props for {@link AccordionChevron}; all native `<svg>` props pass through. */
export interface AccordionChevronProps
  extends React.ComponentPropsWithoutRef<"svg"> {
  /** Controls `width` and `height` of the icon, `16` by default */
  size?: number | string;
}
export function AccordionChevron({
  style,
  size = 16,
  ...others
}: AccordionChevronProps) {
  // Width and height share one rem-converted dimension; caller-supplied
  // style entries are applied first so ours take precedence.
  const dimension = rem(size);
  const mergedStyle = {
    ...style,
    width: dimension,
    height: dimension,
    display: "block",
  };

  return (
    <svg
      viewBox="0 0 15 15"
      fill="none"
      xmlns="http://www.w3.org/2000/svg"
      style={mergedStyle}
      {...others}
    >
      <path
        d="M3.13523 6.15803C3.3241 5.95657 3.64052 5.94637 3.84197 6.13523L7.5 9.56464L11.158 6.13523C11.3595 5.94637 11.6759 5.95657 11.8648 6.15803C12.0536 6.35949 12.0434 6.67591 11.842 6.86477L7.84197 10.6148C7.64964 10.7951 7.35036 10.7951 7.15803 10.6148L3.15803 6.86477C2.95657 6.67591 2.94637 6.35949 3.13523 6.15803Z"
        fill="currentColor"
        fillRule="evenodd"
        clipRule="evenodd"
      />
    </svg>
  );
}

AccordionChevron.displayName = "@mantine/core/AccordionChevron";
# -*- coding: utf-8 -*-
#from django.shortcuts import render
#from django.views.generic import ListView
from django.utils import timezone
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
#from django.utils.translation import ugettext_lazy as _
from feincms.content.application.models import app_reverse
#from gauth.models import GUser
from gtag.models import Tag # , TaggedItem
from ask.models import Post
from ask.forms import QuestionForm, AnswerForm, CommentForm
#from django.contrib.contenttypes.models import ContentType
def tag_list():
    """Return every Tag, with related rows fetched in the same query."""
    return Tag.objects.all().select_related()
def tag_post(request, tag):
    """Render every post carrying the tag named *tag*."""
    tagged_posts = Post.objects.filter(tags__name__in=[tag]).select_related()
    return 'ask/ask_tag_post.html', {
        'qs': tagged_posts,
        'tags': tag_list(),
    }
def ask_index(request):
    """Landing page: list the top-level posts (questions, level 0)."""
    questions = Post.objects.filter(level=0).select_related()
    return 'ask/ask_index.html', {
        'qs': questions,
        'tags': tag_list(),
    }
def show_post(request, id):
    """Question display and quick reply.

    Renders the question, its full answer tree and an empty AnswerForm.
    NOTE: no permission check is performed here.
    """
    question = Post.objects.select_related().get(id=id)
    return 'ask/ask_detail.html', {
        'q': question,
        'nodes': question.get_descendants().select_related(),
        'tags': tag_list(),
        'form': AnswerForm(),
    }
@login_required()
def reply_question(request, id):
    """Create an answer to question *id* from the quick-reply form.

    On valid POST the answer is stored as a child node of the question and
    receives the tags typed into the form; the user is redirected back to
    the question.  On invalid POST the detail template is re-rendered with
    the errors; on GET an empty form is shown.
    """
    form_class = AnswerForm
    post = Post.objects.select_related().get(id=id)
    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid():
            answer = Post.objects.create(
                author=request.user,
                content=form.cleaned_data['content'],
                title=post.title + '\'s answer by ' + request.user.username,
                parent=post,  # reuse the question fetched above (original re-queried)
                creation_date=timezone.now(),
                lastedit_date=timezone.now(),
                lastedit_user=request.user,
                type=2
            )
            # Normalize full-width (CJK) commas -- the original replaced ','
            # with itself, a no-op -- strip spaces, and skip empty tags.
            raw_tags = form.cleaned_data['tags'].replace(u'\uff0c', ',').replace(' ', '')
            for tag in raw_tags.split(','):
                if tag:
                    answer.tags.add(tag)
            answer.save()
            # BUG FIX: args must be a sequence; ``args=id`` iterated the id
            # string character by character and broke for multi-digit ids.
            return HttpResponseRedirect(app_reverse("ask-detail", 'ask.urls', args=[id]))
        else:
            return 'ask/ask_new_post.html', {
                'form_error': form.errors,
                'q': post,
                'nodes': post.get_descendants().select_related(),
                'tags': tag_list(),
                'form': form_class(),
            }
    else:
        form = form_class()
        return 'ask/ask_new_post.html', {'form': form}
@login_required()
def comment_post(request, id):
    """Attach a comment to post *id* and redirect to its detail page.

    The comment inherits every tag of the post it comments on.
    """
    form_class = CommentForm
    post = Post.objects.select_related().get(id=id)
    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid():
            comment = Post.objects.create(
                author=request.user,
                content=form.cleaned_data['content'],
                title=post.title + '\'s comment by ' + request.user.username,
                parent=post,
                creation_date=timezone.now(),
                lastedit_date=timezone.now(),
                lastedit_user=request.user,
                type=3
            )
            for tag in post.tags.all():
                comment.tags.add(tag)
            comment.save()
            # BUG FIX: the detail URL needs the post id (siblings pass it);
            # the original call omitted ``args`` entirely.
            return HttpResponseRedirect(app_reverse("ask-detail", 'ask.urls', args=[id]))
        else:
            return 'ask/ask_detail.html', {'form_errors': form.errors}
    # BUG FIX: non-POST requests previously fell through and returned None;
    # send them to the post's detail page instead.
    return HttpResponseRedirect(app_reverse("ask-detail", 'ask.urls', args=[id]))
@login_required()
def ask_question(request):
    """Create a new question from the QuestionForm.

    On valid POST the question is stored with its tags and the user is
    redirected to the index; otherwise the (possibly bound) form is
    re-rendered.
    """
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            question = Post.objects.create(
                author=request.user,
                content=form.cleaned_data['content'],
                title=form.cleaned_data['title'],
                creation_date=timezone.now(),
                lastedit_date=timezone.now(),
                lastedit_user=request.user,
                type=1
            )
            # Normalize full-width (CJK) commas -- the original replaced ','
            # with itself, a no-op -- strip spaces, and skip empty tags
            # (the original added '' as a tag when the field was blank).
            raw_tags = form.cleaned_data['tags'].replace(u'\uff0c', ',').replace(' ', '')
            for tag in raw_tags.split(','):
                if tag:
                    question.tags.add(tag)
            question.save()
            return HttpResponseRedirect(app_reverse("ask-index", 'ask.urls'))
    else:
        form = QuestionForm()
    return 'ask/ask_new_post.html', {'form': form}
# NOTE(review): dead code — a former ``index`` landing-page view kept
# inside a module-level triple-quoted string, so it is never executed.
# It references names (``tab``, ``sort_type``, ``sess``, ``html``,
# ``const``, ...) that are not defined here; the string is left
# byte-identical in case the view is ever restored.
'''
def index(request):
    """ QA landing page views. """
    user = request.user
    auth = user.is_authenticated()
    # parse the date request
    since = request.GET.get('since', DATE_FILTER[0]).lower()
    # set the last active tab
    sess.set_tab(tab)
    # get the numerical value for these posts
    post_type = POST_TYPE_MAP.get(tab, tab)
    # override the sort order if the content so requires
    sort_type = 'creation' if tab == 'recent' else sort_type
    # this here needs to be reworked TODO
    if tab == "best":
        sort_type = "votes"
        since = request.GET.get('since', 'this week')
        messages.info(request, "Most <b>upvoted</b> active posts of <b>%s!</b>" % since)
    elif tab == "bookmarked":
        sort_type = "bookmark"
        since = request.GET.get('since', 'this month')
        messages.info(request, "Most <b>bookmarked</b> active posts of <b>%s!</b>" % since)
    # the params object will carry
    layout = const.USER_PILL_BAR if auth else const.ANON_PILL_BAR
    # wether to show the type of the post
    show_type = post_type in ('all', 'recent')
    show_search = True
    if tab in VALID_PILLS:
        tab, pill = "posts", tab
    else:
        tab, pill = tab, ""
    params = html.Params(
        tab=tab,
        pill=pill,
        sort=sort_type,
        sort_choices=SORT_CHOICES,
        date_filter=DATE_FILTER,
        since=since,
        layout=layout,
        show_type=show_type,
        title="Bioinformatics Answers",
        show_search=show_search)
    # this will fill in the query (q) and the match (m)parameters
    params.parse(request)
    # returns the object manager that contains all or only visible posts
    posts = get_post_manager(request)
    # filter posts by type
    posts = filter_by_type(request=request, posts=posts, post_type=post_type)
    # apply date filtering
    posts = filter_by_date(request=request, posts=posts, since=since)
    # reduce SQL query count by preselecting data that will be displayed
    posts = posts.select_related('author', 'author__profile', 'lastedit_user', 'lastedit_user__profile')
    # sticky is not active on recent and all pages
    sticky = (tab != 'recent') and (pill not in ('all', "best", "bookmarked"))
    # order may change if it is invalid search
    posts = apply_sort(request=request, posts=posts, order=sort_type, sticky=sticky)
    # get the counts for the session
    counts = sess.get_counts(post_type)
    page = get_page(request, posts, per_page=const.POSTS_PER_PAGE)
    # save the session
    sess.save()
    # try to set a more informative title
    title_map = dict(
        questions="Bioinformatics Questions",
        unanswered="Unanswered Questions",
        tutorials="Bioinformatics Tutorials",
        jobs="Bioinformatics Jobs",
        videos="Bioinformatics Videos",
        news='Bioinformatics News',
        tools="Bioinformatics Tools",
        recent="Recent bioinformatics posts",
        planet="Bioinformatics Planet",
        galaxy="Galaxy on Biostar",
        bookmarked="Most bookmarked",
    )
    params.title = title_map.get(pill) or title_map.get(tab, params.title)
    return html.template(request, name='gqanda/index.html', page=page, params=params, counts=counts)
'''
{
"createdBy": "4FFFg0MNRJT0z0nW4uUizDHfHJV2",
"createdDate": 1613031378505,
"data": {
"author": {
"@type": "@builder.io/core:Reference",
"id": "7b0b333bd44b4e91a6f4bf93158cb62b",
"model": "author"
},
"customFonts": [
{
"family": "Allura",
"isUserFont": true
}
],
"image": "https://cdn.builder.io/api/v1/image/assets%2F8f6bae86bfa3487eb1a18f263118c832%2Ff2b8319ddd4642209af3a9a09f408dfd",
"inputs": [],
"intro": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore",
"slug": "second-one",
"title": "aber",
"blocks": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-003e28d053a145cf808c21773addd456",
"component": {
"name": "Core:Section",
"options": {
"maxWidth": 1200
}
},
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-33d0b6c8b55b497db53fe28330606698",
"component": {
"name": "Columns",
"options": {
"columns": [
{
"blocks": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-9aead75dcd994cacaae186940a24b096",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F8f6bae86bfa3487eb1a18f263118c832%2F83d80cd184c84a93bf9ed141114671b1",
"backgroundSize": "cover",
"backgroundPosition": "top",
"lazy": false,
"aspectRatio": 1.1104,
"height": 1300,
"width": 867,
"sizes": "(max-width: 638px) 76vw, 34vw"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden",
"paddingBottom": "0px"
}
}
}
]
},
{
"blocks": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-9e7671596631470a9ba962fb6d53fe91",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"flexShrink": "0",
"position": "relative",
"marginTop": "30px",
"textAlign": "left",
"lineHeight": "25px",
"height": "auto"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-e4df8c2cf25842b6abee292e4f9fd1e8",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"flexShrink": "0",
"position": "relative",
"marginTop": "30px",
"textAlign": "left",
"lineHeight": "25px",
"height": "auto"
},
"small": {
"fontSize": "14px",
"marginTop": "10px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-c28284ce96b047f79801b04c4b7fd60b",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet,</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"flexShrink": "0",
"position": "relative",
"marginTop": "20px",
"textAlign": "left",
"lineHeight": "25px",
"height": "auto"
},
"small": {
"fontSize": "14px",
"marginTop": "10px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-0ea495fde38a41e883e89d9287e6a604",
"component": {
"name": "Text",
"options": {
"text": "<p>Jane Smith</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"flexShrink": "0",
"position": "relative",
"marginTop": "20px",
"textAlign": "left",
"lineHeight": "25px",
"height": "auto",
"fontFamily": "Allura, sans-serif",
"fontWeight": "600",
"fontSize": "28px"
},
"medium": {
"paddingBottom": "0px"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-fd5fdad1673a4ccfb6b75e013b66502f",
"component": {
"name": "Text",
"options": {
"text": "<p>Co-Founder of store</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"flexShrink": "0",
"position": "relative",
"marginTop": "-1px",
"textAlign": "left",
"lineHeight": "25px",
"height": "auto"
},
"medium": {
"paddingBottom": "20px"
},
"small": {
"fontSize": "14px"
}
}
}
]
}
],
"space": 34,
"stackColumnsAt": "tablet"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "80%",
"marginLeft": "auto",
"marginRight": "auto"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-58f738032526435288296afb482b930b",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-16978c305ac34c5e84353eca5778904f",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-2509355e0a8a4b68861590c796b31701",
"component": {
"name": "Text",
"options": {
"text": "<p>Our approach</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontWeight": "600",
"fontSize": "28px",
"color": "rgba(2, 75, 194, 1)"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"height": "auto",
"paddingBottom": "30px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "30px",
"height": "auto",
"width": "100vw",
"marginLeft": "calc(50% - 50vw)",
"backgroundColor": "rgba(242, 246, 252, 1)",
"paddingBottom": "30px",
"paddingTop": "25px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-514ba8f61b5948e58c0608bf1c8b2b62",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-4e53d65a8c3d4afc8b9d9ec077a00feb",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-9ddf5078aecc4d03bf5cacc6581bf9ef",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=598",
"backgroundSize": "cover",
"backgroundPosition": "center",
"lazy": true,
"aspectRatio": 0.6213,
"sizes": "(max-width: 998px) 31vw, 28vw",
"height": 1300,
"width": 868,
"srcset": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=100 100w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=200 200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=400 400w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=800 800w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=1200 1200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=1600 1600w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=2000 2000w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=598 598w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=307 307w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F042a6dc74b764273be4758ddb3cf3f31?width=384 384w"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "100%",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-50f46cd9d5d146e58577f499feee20a0",
"component": {
"name": "Text",
"options": {
"text": "<p>Collection name</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontSize": "18px",
"fontWeight": "600"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-3df2858a55bd44dcb795bb412045747b",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-6d6f3f3d45b04df6a71a7d65c7b95d61",
"component": {
"name": "Text",
"options": {
"text": "<p>Shop this collection →</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"color": "rgba(2, 75, 194, 1)"
},
"medium": {
"color": "rgba(2, 75, 194, 1)"
},
"small": {
"fontSize": "14px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"height": "auto",
"width": "32%",
"paddingBottom": "30px",
"marginLeft": "10px"
},
"medium": {
"width": "45%"
},
"small": {
"width": "100%",
"marginLeft": "0px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-21e217f6344a4846bdcb2e9115ef7071",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-093537281eeb474b9840d11ae59128e9",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=598",
"backgroundSize": "cover",
"backgroundPosition": "center",
"lazy": true,
"aspectRatio": 0.6213,
"sizes": "(max-width: 998px) 31vw, 28vw",
"height": 1300,
"width": 868,
"srcset": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=100 100w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=200 200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=400 400w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=800 800w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=1200 1200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=1600 1600w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=2000 2000w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=598 598w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=307 307w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F75cc3d48325a4b48aeaf1e3650bc12b3?width=384 384w"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "100%",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-a6ef7f24494443e5bbb3c9781112149d",
"component": {
"name": "Text",
"options": {
"text": "<p>Collection name</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontSize": "18px",
"fontWeight": "600"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-43772cda114940d9bdd5ed075c2668ad",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-eed1a02358bc4acfb494dbcca9855dfc",
"component": {
"name": "Text",
"options": {
"text": "<p>Shop this collection →</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"color": "rgba(2, 75, 194, 1)"
},
"medium": {
"color": "rgba(2, 75, 194, 1)"
},
"small": {
"fontSize": "14px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"height": "auto",
"width": "32%",
"paddingBottom": "30px",
"marginLeft": "10px"
},
"medium": {
"width": "45%"
},
"small": {
"width": "100%",
"marginLeft": "0px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-5f72a8af06ce48699f73db3b7264b2c1",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-a090ffb2f1de4c698430f0f9064afe1a",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=598",
"backgroundSize": "cover",
"backgroundPosition": "center",
"lazy": true,
"aspectRatio": 0.6213,
"sizes": "(max-width: 998px) 31vw, 28vw",
"height": 1300,
"width": 868,
"srcset": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=100 100w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=200 200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=400 400w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=800 800w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=1200 1200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=1600 1600w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=2000 2000w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=598 598w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=307 307w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fea7f818a036f4d8c83a8b24b2cae9f15?width=384 384w"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "100%",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-a9be61d17d7141539031b387eaa442e8",
"component": {
"name": "Text",
"options": {
"text": "<p>Collection name</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontSize": "18px",
"fontWeight": "600"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-28f967a79a434006a31c81bcd1797c68",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-d86dfa71dc8540b2909a91ea8ac6b4b4",
"component": {
"name": "Text",
"options": {
"text": "<p>Shop this collection →</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"color": "rgba(2, 75, 194, 1)"
},
"medium": {
"color": "rgba(2, 75, 194, 1)"
},
"small": {
"fontSize": "14px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"height": "auto",
"width": "32%",
"paddingBottom": "30px",
"marginLeft": "10px"
},
"medium": {
"width": "45%"
},
"small": {
"width": "100%",
"marginLeft": "0px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-eed3f9e5371d43408fb9738a206f73ba",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-476cb53479224af1898ecceadf3a587a",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=598",
"backgroundSize": "cover",
"backgroundPosition": "center",
"lazy": true,
"aspectRatio": 0.6213,
"sizes": "(max-width: 998px) 31vw, 28vw",
"height": 1300,
"width": 868,
"srcset": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=100 100w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=200 200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=400 400w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=800 800w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=1200 1200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=1600 1600w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=2000 2000w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=598 598w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=307 307w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fa6b91fce9c7943399d6ba504b560e1eb?width=384 384w"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "100%",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-c9563979146c4c229cf5f8788e63e10d",
"component": {
"name": "Text",
"options": {
"text": "<p>Collection name</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontSize": "18px",
"fontWeight": "600"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-afa27b9b60fd4c0880064f476712fa2d",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-d7435c0c8eeb44bfa070cdc4999b2b13",
"component": {
"name": "Text",
"options": {
"text": "<p>Shop this collection →</p>\n"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"color": "rgba(2, 75, 194, 1)"
},
"small": {
"fontSize": "14px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"height": "auto",
"width": "32%",
"paddingBottom": "30px",
"marginLeft": "10px"
},
"medium": {
"width": "45%"
},
"small": {
"width": "100%",
"marginLeft": "0px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-9e246b64553c40ed8bb49a619056a4d8",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-2c6e3c65976f4e95bbd777542fd5caca",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=598",
"backgroundSize": "cover",
"backgroundPosition": "center",
"lazy": true,
"aspectRatio": 0.6213,
"sizes": "(max-width: 998px) 31vw, 28vw",
"height": 1300,
"width": 868,
"srcset": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=100 100w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=200 200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=400 400w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=800 800w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=1200 1200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=1600 1600w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=2000 2000w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=598 598w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=307 307w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2F3b73348d1e934e51b29340e41acae8a1?width=384 384w"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "100%",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-7a6999eea6404db28bdbae7e4b433bc3",
"component": {
"name": "Text",
"options": {
"text": "<p>Collection name</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontSize": "18px",
"fontWeight": "600"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-53c35499a0694e939e8b794e103039d2",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-da1cafe52692406aa46592cc0d98ba4d",
"component": {
"name": "Text",
"options": {
"text": "<p>Shop this collection →</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"color": "rgba(2, 75, 194, 1)"
},
"small": {
"fontSize": "14px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"height": "auto",
"width": "32%",
"paddingBottom": "30px",
"marginLeft": "10px"
},
"medium": {
"width": "45%"
},
"small": {
"width": "100%",
"marginLeft": "0px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-d529c10768684a539019c0d4faf79ab6",
"children": [
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-a13520343f3944aab3f070a76ce648d7",
"component": {
"name": "Image",
"options": {
"image": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=598",
"backgroundSize": "cover",
"backgroundPosition": "center",
"lazy": true,
"aspectRatio": 0.6213,
"sizes": "(max-width: 998px) 31vw, 28vw",
"height": 1300,
"width": 868,
"srcset": "https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=100 100w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=200 200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=400 400w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=800 800w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=1200 1200w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=1600 1600w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=2000 2000w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=598 598w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=307 307w, https://cdn.builder.io/api/v1/image/assets%2F89d6bbb44070475d9580fd22f21ef8f1%2Fbf5de7cba2434d4980bba0b4a293420e?width=384 384w"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"width": "100%",
"minHeight": "20px",
"minWidth": "20px",
"overflow": "hidden"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-e7fac6c2cd8946e4821bd4567a20ac04",
"component": {
"name": "Text",
"options": {
"text": "<p>Collection name</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"fontSize": "18px",
"fontWeight": "600"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-0c7430abba114fdc923e17c8812c2d7f",
"component": {
"name": "Text",
"options": {
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center"
},
"small": {
"fontSize": "14px"
}
}
},
{
"@type": "@builder.io/sdk:Element",
"@version": 2,
"id": "builder-2d2f4bed888240d2a79200b02ff223b2",
"component": {
"name": "Text",
"options": {
"text": "<p>Shop this collection →</p>"
}
},
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "5px",
"lineHeight": "normal",
"height": "auto",
"textAlign": "center",
"color": "rgba(2, 75, 194, 1)"
},
"small": {
"fontSize": "14px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"height": "auto",
"width": "32%",
"paddingBottom": "30px",
"marginLeft": "10px"
},
"medium": {
"width": "45%"
},
"small": {
"width": "100%",
"marginLeft": "0px"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "row",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "20px",
"width": "100%",
"paddingBottom": "30px",
"flexWrap": "wrap",
"justifyContent": "center"
}
}
}
],
"responsiveStyles": {
"large": {
"display": "flex",
"flexDirection": "column",
"alignItems": "stretch",
"position": "relative",
"flexShrink": "0",
"boxSizing": "border-box",
"marginTop": "0px",
"paddingLeft": "20px",
"paddingRight": "20px",
"paddingTop": "50px",
"paddingBottom": "50px",
"width": "100vw",
"marginLeft": "calc(50% - 50vw)"
}
}
},
{
"id": "builder-pixel-o7ofge7v1he",
"@type": "@builder.io/sdk:Element",
"tagName": "img",
"properties": {
"src": "https://cdn.builder.io/api/v1/pixel?apiKey=8f6bae86bfa3487eb1a18f263118c832",
"role": "presentation",
"width": "0",
"height": "0"
},
"responsiveStyles": {
"large": {
"height": "0",
"width": "0",
"display": "inline-block",
"opacity": "0",
"overflow": "hidden",
"pointerEvents": "none"
}
}
}
],
"state": {
"deviceSize": "large",
"location": {
"path": "",
"query": {}
}
}
},
"id": "aa5cde0446204c228a11ea6ff10fff92",
"lastUpdatedBy": "4FFFg0MNRJT0z0nW4uUizDHfHJV2",
"meta": {
"hasLinks": false,
"kind": "component",
"needsHydration": false
},
"modelId": "3f6eda812cf2484088b1451a2150d38f",
"name": "second ",
"published": "published",
"query": [],
"testRatio": 1,
"variations": {},
"lastUpdated": 1613033616315,
"screenshot": "https://cdn.builder.io/api/v1/image/assets%2F8f6bae86bfa3487eb1a18f263118c832%2Fae6b7a78f9994ba2bded4527c22b647f",
"rev": "3mj2dvbqtry",
"@originOrg": "8f6bae86bfa3487eb1a18f263118c832",
"@originContentId": "3ff20a4db1994618bb45ac7c8610300f",
"@originModelId": "7a732bbf5d964e7bbeff4acef2735c8a"
} | json | github | https://github.com/vercel/next.js | examples/cms-builder-io/builder/post/second.json |
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
    """Tests for the ``datastructures.native_itermethods`` class decorator.

    The decorator is applied to a class exposing ``keys``/``values``/``items``
    and, as exercised below, makes the ``iterkeys``/``itervalues``/``iteritems``
    compat helpers usable with its instances.
    """
    def test_basic(self):
        """Decorated iteration methods work via the compat helpers and
        forward extra positional/keyword arguments."""
        @datastructures.native_itermethods(['keys', 'values', 'items'])
        class StupidDict(object):
            # Minimal stand-in: each method takes an extra ``multi``
            # argument so argument pass-through can be verified.
            def keys(self, multi=1):
                return iter(['a', 'b', 'c'] * multi)
            def values(self, multi=1):
                return iter([1, 2, 3] * multi)
            def items(self, multi=1):
                # The compat helpers accept an explicit instance and
                # forward keyword arguments to the underlying method.
                return iter(zip(iterkeys(self, multi=multi),
                                itervalues(self, multi=multi)))
        d = StupidDict()
        expected_keys = ['a', 'b', 'c']
        expected_values = [1, 2, 3]
        expected_items = list(zip(expected_keys, expected_values))
        self.assert_equal(list(iterkeys(d)), expected_keys)
        self.assert_equal(list(itervalues(d)), expected_values)
        self.assert_equal(list(iteritems(d)), expected_items)
        # Extra positional arguments must be forwarded as well.
        self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
        self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
        self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for the mutable multi-dict implementations.

    Concrete subclasses set ``storage_class`` to the class under test
    (e.g. ``MultiDict`` or ``OrderedMultiDict``).
    """
    storage_class = None

    def test_pickle(self):
        """Instances survive a pickle round trip at every protocol."""
        cls = self.storage_class
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            d = cls()
            d.setlist(b'foo', [1, 2, 3, 4])
            d.setlist(b'bar', b'foo bar baz'.split())
            s = pickle.dumps(d, protocol)
            ud = pickle.loads(s)
            self.assert_equal(type(ud), type(d))
            self.assert_equal(ud, d)
            # Pickles referencing the old 'werkzeug' module path must
            # still load to the same value.
            self.assert_equal(pickle.loads(
                s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
            ud[b'newkey'] = b'bla'
            self.assert_not_equal(ud, d)

    def test_basic_interface(self):
        """Exercise the full mutable multi-dict API end to end."""
        md = self.storage_class()
        assert isinstance(md, dict)
        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)
        # simple getitem gives the first value
        self.assert_equal(md['a'], 1)
        self.assert_equal(md['c'], 3)
        with self.assert_raises(KeyError):
            md['e']
        self.assert_equal(md.get('a'), 1)
        # list getitem
        self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
        self.assert_equal(md.getlist('d'), [3, 4])
        # do not raise if key not found
        self.assert_equal(md.getlist('x'), [])
        # simple setitem overwrites all values
        md['a'] = 42
        self.assert_equal(md.getlist('a'), [42])
        # list setitem
        md.setlist('a', [1, 2, 3])
        self.assert_equal(md['a'], 1)
        self.assert_equal(md.getlist('a'), [1, 2, 3])
        # verify that it does not change original lists
        l1 = [1, 2, 3]
        md.setlist('a', l1)
        del l1[:]
        self.assert_equal(md['a'], 1)
        # setdefault, setlistdefault
        self.assert_equal(md.setdefault('u', 23), 23)
        self.assert_equal(md.getlist('u'), [23])
        del md['u']
        md.setlist('u', [-1, -2])
        # delitem
        del md['u']
        with self.assert_raises(KeyError):
            md['u']
        del md['d']
        self.assert_equal(md.getlist('d'), [])
        # keys, values, items, lists
        self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
        # BUGFIX: this line previously duplicated the itervalues()
        # assertion below; it now checks values(), mirroring the
        # keys()/iterkeys() pair above.
        self.assert_equal(list(sorted(md.values())), [1, 2, 3])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(md.items())),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.items(multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md))),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md, multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.lists())),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
        self.assert_equal(list(sorted(iterlists(md))),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
        # copy method
        c = md.copy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])
        # copy method 2
        c = copy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])
        # update with a multidict
        od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
        self.assert_equal(md.getlist('y'), [0])
        # update with a regular dict
        md = c
        od = {'a': 4, 'y': 0}
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
        self.assert_equal(md.getlist('y'), [0])
        # pop, poplist, popitem, popitemlist
        self.assert_equal(md.pop('y'), 0)
        assert 'y' not in md
        self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
        assert 'a' not in md
        self.assert_equal(md.poplist('missing'), [])
        # remaining: b=2, c=3
        popped = md.popitem()
        assert popped in [('b', 2), ('c', 3)]
        popped = md.popitemlist()
        assert popped in [('b', [2]), ('c', [3])]
        # type conversion
        md = self.storage_class({'a': '4', 'b': ['2', '3']})
        self.assert_equal(md.get('a', type=int), 4)
        self.assert_equal(md.getlist('b', type=int), [2, 3])
        # repr
        md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
        assert "('a', 1)" in repr(md)
        assert "('a', 2)" in repr(md)
        assert "('b', 3)" in repr(md)
        # add and getlist
        md.add('c', '42')
        md.add('c', '23')
        self.assert_equal(md.getlist('c'), ['42', '23'])
        md.add('c', 'blah')
        # values that fail the type conversion are silently dropped
        self.assert_equal(md.getlist('c', type=int), [42, 23])
        # setdefault
        md = self.storage_class()
        md.setdefault('x', []).append(42)
        md.setdefault('x', []).append(23)
        self.assert_equal(md['x'], [42, 23])
        # to dict
        md = self.storage_class()
        md['foo'] = 42
        md.add('bar', 1)
        md.add('bar', 2)
        self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
        self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})
        # popitem from empty dict
        with self.assert_raises(KeyError):
            self.storage_class().popitem()
        with self.assert_raises(KeyError):
            self.storage_class().popitemlist()
        # key errors are of a special type
        with self.assert_raises(BadRequestKeyError):
            self.storage_class()[42]
        # setlist works
        md = self.storage_class()
        md['foo'] = 42
        md.setlist('foo', [1, 2])
        self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
    """Shared assertions for the immutable dict variants.

    Concrete subclasses point ``storage_class`` at the class under test.
    """
    storage_class = None

    def test_follows_dict_interface(self):
        """Read-only dict operations behave exactly like on a plain dict."""
        source = {'foo': 1, 'bar': 2, 'baz': 3}
        frozen = self.storage_class(source)
        for key in source:
            self.assert_equal(frozen[key], source[key])
        self.assert_equal(sorted(frozen.keys()), ['bar', 'baz', 'foo'])
        self.assert_true('foo' in frozen)
        self.assert_true('foox' not in frozen)
        self.assert_equal(len(frozen), 3)

    def test_copies_are_mutable(self):
        """Mutation raises TypeError, while ``copy()`` yields a mutable dict."""
        frozen = self.storage_class({'a': 1})
        with self.assert_raises(TypeError):
            frozen.pop('a')
        thawed = frozen.copy()
        thawed.pop('a')
        self.assert_true('a' in frozen)
        self.assert_true(thawed is not frozen)
        # copy.copy() of an immutable object returns the object itself.
        self.assert_true(copy(frozen) is frozen)

    def test_dict_is_hashable(self):
        """Instances are hashable and behave correctly as set members."""
        first = self.storage_class({'a': 1, 'b': 2})
        second = self.storage_class({'a': 2, 'b': 2})
        bucket = set([first])
        self.assert_true(first in bucket)
        self.assert_true(second not in bucket)
        bucket.discard(first)
        self.assert_true(first not in bucket)
        self.assert_true(second not in bucket)
        bucket.add(second)
        self.assert_true(first not in bucket)
        self.assert_true(second in bucket)
        bucket.add(first)
        self.assert_true(first in bucket)
        self.assert_true(second in bucket)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
    """Run the shared immutable-dict tests against ImmutableTypeConversionDict."""
    storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
    """Shared immutable-dict tests plus multidict-specific hashing behaviour."""
    storage_class = datastructures.ImmutableMultiDict

    def test_multidict_is_hashable(self):
        """Dicts that differ only in a key's value list hash as distinct."""
        first = self.storage_class({'a': [1, 2], 'b': 2})
        second = self.storage_class({'a': [1], 'b': 2})
        bucket = set([first])
        self.assert_true(first in bucket)
        self.assert_true(second not in bucket)
        bucket.discard(first)
        self.assert_true(first not in bucket)
        self.assert_true(second not in bucket)
        bucket.add(second)
        self.assert_true(first not in bucket)
        self.assert_true(second in bucket)
        bucket.add(first)
        self.assert_true(first in bucket)
        self.assert_true(second in bucket)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
    """Run the shared immutable-dict tests against ImmutableDict."""
    storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
    """Shared immutable-dict tests plus ordering-sensitive hashing."""
    storage_class = datastructures.ImmutableOrderedMultiDict
    def test_ordered_multidict_is_hashable(self):
        """Same pairs in a different insertion order produce a different hash.

        NOTE(review): hash inequality here is a property of this
        implementation's hashing, not a general guarantee.
        """
        a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
        b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
        self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
    """MultiDict-specific tests on top of the shared mutable-multidict suite."""
    storage_class = datastructures.MultiDict

    def test_multidict_pop(self):
        """pop() returns the first value for a key and removes all of them."""
        # PEP 8 (E731): use a local def instead of a name-bound lambda.
        def make_d():
            # Fresh fixture per scenario, since pop() mutates the dict.
            return self.storage_class({'foo': [1, 2, 3, 4]})
        d = make_d()
        self.assert_equal(d.pop('foo'), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foo', 32), 1)
        assert not d
        d = make_d()
        # Default is only used when the key is absent.
        self.assert_equal(d.pop('foos', 32), 32)
        assert d
        with self.assert_raises(KeyError):
            d.pop('foos')

    def test_setlistdefault(self):
        """setlistdefault() stores and returns the default list for a new key."""
        md = self.storage_class()
        self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
        self.assert_equal(md.getlist('u'), [-1, -2])
        self.assert_equal(md['u'], -1)

    def test_iter_interfaces(self):
        """The iterator variants agree with their list-returning counterparts."""
        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)
        self.assert_equal(list(zip(md.keys(), md.listvalues())),
                          list(md.lists()))
        self.assert_equal(list(zip(md, iterlistvalues(md))),
                          list(iterlists(md)))
        self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
                          list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
    """Tests for OrderedMultiDict: like MultiDict, but insertion-ordered."""
    storage_class = datastructures.OrderedMultiDict

    def test_ordered_interface(self):
        """End-to-end check of ordering guarantees across the mutating API."""
        cls = self.storage_class
        d = cls()
        assert not d
        d.add('foo', 'bar')
        self.assert_equal(len(d), 1)
        d.add('foo', 'baz')
        # len() counts distinct keys, not stored values.
        self.assert_equal(len(d), 1)
        self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
        self.assert_equal(list(d), ['foo'])
        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 'bar'), ('foo', 'baz')])
        # Deleting a key drops every value stored under it.
        del d['foo']
        assert not d
        self.assert_equal(len(d), 0)
        self.assert_equal(list(d), [])
        d.update([('foo', 1), ('foo', 2), ('bar', 42)])
        d.add('foo', 3)
        self.assert_equal(d.getlist('foo'), [1, 2, 3])
        self.assert_equal(d.getlist('bar'), [42])
        self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])
        expected = ['foo', 'bar']
        self.assert_sequence_equal(list(d.keys()), expected)
        self.assert_sequence_equal(list(d), expected)
        self.assert_sequence_equal(list(iterkeys(d)), expected)
        # multi=True yields every (key, value) pair in insertion order.
        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
        self.assert_equal(len(d), 2)
        self.assert_equal(d.pop('foo'), 1)
        assert d.pop('blafasel', None) is None
        self.assert_equal(d.pop('blafasel', 42), 42)
        self.assert_equal(len(d), 1)
        self.assert_equal(d.poplist('bar'), [42])
        assert not d
        # BUGFIX: this expression previously had no assert and tested nothing.
        assert d.get('missingkey') is None
        d.add('foo', 42)
        d.add('foo', 23)
        d.add('bar', 2)
        d.add('foo', 42)
        self.assert_equal(d, datastructures.MultiDict(d))
        # Renamed from `id`, which shadowed the builtin.
        copied = self.storage_class(d)
        self.assert_equal(d, copied)
        d.add('foo', 2)
        assert d != copied
        d.update({'blah': [1, 2, 3]})
        self.assert_equal(d['blah'], 1)
        self.assert_equal(d.getlist('blah'), [1, 2, 3])
        # setlist works
        d = self.storage_class()
        d['foo'] = 42
        d.setlist('foo', [1, 2])
        self.assert_equal(d.getlist('foo'), [1, 2])
        with self.assert_raises(BadRequestKeyError):
            d.pop('missing')
        with self.assert_raises(BadRequestKeyError):
            d['missing']
        # popping
        d = self.storage_class()
        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitem(), ('foo', 23))
        with self.assert_raises(BadRequestKeyError):
            d.popitem()
        assert not d
        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))
        with self.assert_raises(BadRequestKeyError):
            d.popitemlist()

    def test_iterables(self):
        """Iteration helpers on a CombinedMultiDict built from two MultiDicts.

        NOTE(review): this exercises CombinedMultiDict rather than
        OrderedMultiDict; it probably belongs in CombinedMultiDictTestCase.
        """
        a = datastructures.MultiDict((("key_a", "value_a"),))
        b = datastructures.MultiDict((("key_b", "value_b"),))
        ab = datastructures.CombinedMultiDict((a, b))
        self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
        self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])
        self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
        self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
    """Tests for CombinedMultiDict, a read-only view over several MultiDicts."""
    storage_class = datastructures.CombinedMultiDict
    def test_basic_interface(self):
        """Lookup, type conversion, immutability and list merging."""
        d1 = datastructures.MultiDict([('foo', '1')])
        d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
        d = self.storage_class([d1, d2])
        # lookup
        self.assert_equal(d['foo'], '1')
        self.assert_equal(d['bar'], '2')
        self.assert_equal(d.getlist('bar'), ['2', '3'])
        self.assert_equal(sorted(d.items()),
                          [('bar', '2'), ('foo', '1')])
        self.assert_equal(sorted(d.items(multi=True)),
                          [('bar', '2'), ('bar', '3'), ('foo', '1')])
        assert 'missingkey' not in d
        assert 'foo' in d
        # type lookup
        self.assert_equal(d.get('foo', type=int), 1)
        self.assert_equal(d.getlist('bar', type=int), [2, 3])
        # get key errors for missing stuff
        with self.assert_raises(KeyError):
            d['missing']
        # make sure that they are immutable
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'
        # copies are immutable
        d = d.copy()
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'
        # make sure lists merge: getlist concatenates the values stored
        # for a key across all wrapped dicts.
        md1 = datastructures.MultiDict((("foo", "bar"),))
        md2 = datastructures.MultiDict((("foo", "blafasel"),))
        x = self.storage_class((md1, md2))
        self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
    """Tests for the Headers datastructure (ordered, case-insensitive keys)."""
    storage_class = datastructures.Headers
    def test_basic_interface(self):
        """add/set/getitem, WSGI conversion, string rendering, extended add."""
        headers = self.storage_class()
        headers.add('Content-Type', 'text/plain')
        headers.add('X-Foo', 'bar')
        # membership is case-insensitive
        assert 'x-Foo' in headers
        assert 'Content-type' in headers
        # __setitem__ replaces all existing values for the key
        headers['Content-Type'] = 'foo/bar'
        self.assert_equal(headers['Content-Type'], 'foo/bar')
        self.assert_equal(len(headers.getlist('Content-Type')), 1)
        # list conversion
        self.assert_equal(headers.to_wsgi_list(), [
            ('Content-Type', 'foo/bar'),
            ('X-Foo', 'bar')
        ])
        # str() renders wire format, terminated by a blank line
        self.assert_equal(str(headers), (
            "Content-Type: foo/bar\r\n"
            "X-Foo: bar\r\n"
            "\r\n"))
        self.assert_equal(str(self.storage_class()), "\r\n")
        # extended add: keyword arguments become '; key=value' options
        headers.add('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(headers['Content-Disposition'],
                          'attachment; filename=foo')
        # option values are quoted/escaped as needed
        headers.add('x', 'y', z='"')
        self.assert_equal(headers['x'], r'y; z="\""')
    def test_defaults_and_conversion(self):
        """get/getlist/setdefault semantics, type= conversion, list slicing."""
        # defaults
        headers = self.storage_class([
            ('Content-Type', 'text/plain'),
            ('X-Foo',        'bar'),
            ('X-Bar',        '1'),
            ('X-Bar',        '2')
        ])
        self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
        self.assert_equal(headers.get('x-Bar'), '1')
        self.assert_equal(headers.get('Content-Type'), 'text/plain')
        # setdefault only stores when the key is absent
        self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
        self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
        self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
        self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
        headers.pop('X-Baz')
        # type conversion
        self.assert_equal(headers.get('x-bar', type=int), 1)
        self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])
        # list like operations (indexing, slicing, slice deletion)
        self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
        self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
        del headers[:2]
        del headers[-1]
        self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))
    def test_copying(self):
        """copy() yields an independent Headers object."""
        a = self.storage_class([('foo', 'bar')])
        b = a.copy()
        a.add('foo', 'baz')
        self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
        self.assert_equal(b.getlist('foo'), ['bar'])
    def test_popping(self):
        """pop() with and without default; missing key raises KeyError."""
        headers = self.storage_class([('a', 1)])
        self.assert_equal(headers.pop('a'), 1)
        self.assert_equal(headers.pop('b', 2), 2)
        with self.assert_raises(KeyError):
            headers.pop('c')
    def test_set_arguments(self):
        """set() accepts the same keyword options as add()."""
        a = self.storage_class()
        a.set('Content-Disposition', 'useless')
        a.set('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')
    def test_reject_newlines(self):
        """Values containing CR/LF are rejected (header-injection guard)."""
        h = self.storage_class()
        for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
            with self.assert_raises(ValueError):
                h['foo'] = variation
            with self.assert_raises(ValueError):
                h.add('foo', variation)
            with self.assert_raises(ValueError):
                h.add('foo', 'test', option=variation)
            with self.assert_raises(ValueError):
                h.set('foo', variation)
            with self.assert_raises(ValueError):
                h.set('foo', 'test', option=variation)
    def test_slicing(self):
        """Assigning to h[:] replaces the full header list in place."""
        # there's nothing wrong with these being native strings
        # Headers doesn't care about the data types
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('Content-Type', 'application/whocares')
        h.set('X-Forwarded-For', '192.168.0.123')
        h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
        self.assert_equal(list(h), [
            ('X-Foo-Poo', 'bleh'),
            ('X-Forwarded-For', '192.168.0.123')
        ])
    def test_bytes_operations(self):
        """as_bytes=True returns byte values for both str and bytes input."""
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('X-Whoops', b'\xff')
        self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
        self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')
class EnvironHeadersTestCase(WerkzeugTestCase):
    """Tests for EnvironHeaders, a read-only Headers view over a WSGI environ."""
    storage_class = datastructures.EnvironHeaders
    def test_basic_interface(self):
        """Duplicated HTTP_*/CONTENT_* keys collapse to single headers."""
        # this happens in multiple WSGI servers because they
        # use a very naive way to convert the headers;
        broken_env = {
            'HTTP_CONTENT_TYPE': 'text/html',
            'CONTENT_TYPE': 'text/html',
            'HTTP_CONTENT_LENGTH': '0',
            'CONTENT_LENGTH': '0',
            'HTTP_ACCEPT': '*',
            'wsgi.version': (1, 0)
        }
        headers = self.storage_class(broken_env)
        assert headers
        # non-header environ keys (wsgi.version) are ignored
        self.assert_equal(len(headers), 3)
        self.assert_equal(sorted(headers), [
            ('Accept', '*'),
            ('Content-Length', '0'),
            ('Content-Type', 'text/html')
        ])
        assert not self.storage_class({'wsgi.version': (1, 0)})
        self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)
    def test_return_type_is_unicode(self):
        """Native environ strings come back as text (unicode) values."""
        # environ contains native strings; we return unicode
        headers = self.storage_class({
            'HTTP_FOO': '\xe2\x9c\x93',
            'CONTENT_TYPE': 'text/plain',
        })
        self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
        assert isinstance(headers['Foo'], text_type)
        assert isinstance(headers['Content-Type'], text_type)
        # iteration must yield the same text values as item access
        iter_output = dict(iter(headers))
        self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
        assert isinstance(iter_output['Foo'], text_type)
        assert isinstance(iter_output['Content-Type'], text_type)
    def test_bytes_operations(self):
        """as_bytes=True exposes the raw byte value of a header."""
        foo_val = '\xff'
        h = self.storage_class({
            'HTTP_X_FOO': foo_val
        })
        self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
        self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
    """Tests for HeaderSet (ordered set of header values, case-insensitive)."""
    storage_class = datastructures.HeaderSet

    def test_basic_interface(self):
        """add/find/discard/index/clear round trip."""
        hset = self.storage_class()
        hset.add('foo')
        hset.add('bar')
        # Membership and find() ignore case.
        assert 'Bar' in hset
        self.assert_equal(hset.find('foo'), 0)
        self.assert_equal(hset.find('BAR'), 1)
        assert hset.find('baz') < 0
        # discard() of an absent element is a silent no-op.
        hset.discard('missing')
        hset.discard('foo')
        assert hset.find('foo') < 0
        self.assert_equal(hset.find('bar'), 0)
        # index() raises where find() returns a negative value.
        with self.assert_raises(IndexError):
            hset.index('missing')
        self.assert_equal(hset.index('bar'), 0)
        assert hset
        hset.clear()
        assert not hset
class ImmutableListTestCase(WerkzeugTestCase):
    """Tests for ImmutableList."""
    storage_class = datastructures.ImmutableList

    def test_list_hashable(self):
        """An ImmutableList hashes like the tuple of its items."""
        t = (1, 2, 3, 4)
        # Renamed from `l` (PEP 8 E741: ambiguous single-letter name).
        frozen = self.storage_class(t)
        self.assert_equal(hash(t), hash(frozen))
        # Equal hashes do not imply equality: a list never equals a tuple.
        self.assert_not_equal(t, frozen)
def make_call_asserter(assert_equal_func, func=None):
    """Return ``(assert_calls, wrapped)`` for asserting call counts.

    ``wrapped`` counts how often it is invoked, delegating to *func* (and
    returning its result) when one is given.  The ``assert_calls(count,
    msg=None)`` context manager resets the counter on entry and, on exit,
    passes the observed count, the expected *count* and *msg* to
    *assert_equal_func*.

    Example::

        assert_calls, func = make_call_asserter(self.assert_equal)
        with assert_calls(2):
            func()
            func()
    """
    # One-element list so both closures can mutate the counter without
    # ``nonlocal`` (this module still supports Python 2).
    calls = [0]
    @contextmanager
    def asserter(count, msg=None):
        calls[0] = 0
        yield
        assert_equal_func(calls[0], count, msg)
    def wrapped(*args, **kwargs):
        calls[0] += 1
        if func is not None:
            return func(*args, **kwargs)
    return asserter, wrapped
class CallbackDictTestCase(WerkzeugTestCase):
    """Tests for CallbackDict: the on_update callback must fire exactly
    once per mutating operation and never for pure reads."""
    storage_class = datastructures.CallbackDict
    def test_callback_dict_reads(self):
        """Read-only access never triggers the on_update callback."""
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(0, 'callback triggered by read-only method'):
            # read-only methods
            dct['a']
            dct.get('a')
            self.assert_raises(KeyError, lambda: dct['x'])
            'a' in dct
            list(iter(dct))
            dct.copy()
        with assert_calls(0, 'callback triggered without modification'):
            # methods that may write but don't
            dct.pop('z', None)
            dct.setdefault('a')
    def test_callback_dict_writes(self):
        """Each mutating call triggers the callback once; failed mutations
        (KeyError) must not trigger it at all."""
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        # 8 statements below, one callback expected per statement.
        with assert_calls(8, 'callback not triggered by write method'):
            # always-write methods
            dct['z'] = 123
            dct['z'] = 123  # must trigger again
            del dct['z']
            dct.pop('b', None)
            dct.setdefault('x')
            dct.popitem()
            dct.update([])
            dct.clear()
        with assert_calls(0, 'callback triggered by failed del'):
            self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
        with assert_calls(0, 'callback triggered by failed pop'):
            self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
    """Assemble the datastructures test suite from all test case classes."""
    # Order matches the historical registration order.
    cases = (
        MultiDictTestCase,
        OrderedMultiDictTestCase,
        CombinedMultiDictTestCase,
        ImmutableTypeConversionDictTestCase,
        ImmutableMultiDictTestCase,
        ImmutableDictTestCase,
        ImmutableOrderedMultiDictTestCase,
        HeadersTestCase,
        EnvironHeadersTestCase,
        HeaderSetTestCase,
        NativeItermethodsTestCase,
        CallbackDictTestCase,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
    def __init__(self, path=None, include_egg=False):
        """
        Create an instance from a path, optionally including legacy (distutils/
        setuptools/distribute) distributions.
        :param path: The path to use, as a list of directories. If not specified,
                     sys.path is used.
        :param include_egg: If True, this instance will look for and return legacy
                            distributions as well as those based on PEP 376.
        """
        if path is None:
            path = sys.path
        self.path = path
        # .dist-info distributions are always included; eggs only on request.
        self._include_dist = True
        self._include_egg = include_egg
        # Separate caches for PEP 376 distributions and legacy eggs.
        self._cache = _Cache()
        self._cache_egg = _Cache()
        self._cache_enabled = True
        # Version scheme used to build matchers in provides_distribution().
        self._scheme = get_scheme('default')
    def _get_cache_enabled(self):
        # Getter for the cache_enabled property.
        return self._cache_enabled
    def _set_cache_enabled(self, value):
        # Setter for the cache_enabled property.
        self._cache_enabled = value
    # Whether lookups may use (and populate) the internal caches.
    cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
    def _yield_distributions(self):
        """
        Yield .dist-info and/or .egg(-info) distributions found along
        ``self.path``.
        """
        # We need to check if we've seen some resources already, because on
        # some Linux systems (e.g. some Debian/Ubuntu variants) there are
        # symlinks which alias other files in the environment.
        seen = set()
        for path in self.path:
            finder = resources.finder_for_path(path)
            if finder is None:
                # Not a path we know how to enumerate; skip it.
                continue
            r = finder.find('')
            if not r or not r.is_container:
                continue
            # Sort for deterministic discovery order.
            rset = sorted(r.resources)
            for entry in rset:
                r = finder.find(entry)
                if not r or r.path in seen:
                    continue
                if self._include_dist and entry.endswith(DISTINFO_EXT):
                    # Try the current metadata filename first, then the
                    # legacy wheel metadata filename.
                    possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
                    for metadata_filename in possible_filenames:
                        metadata_path = posixpath.join(entry, metadata_filename)
                        pydist = finder.find(metadata_path)
                        if pydist:
                            break
                    else:
                        # for/else: no metadata file found, not a usable dist.
                        continue
                    with contextlib.closing(pydist.as_stream()) as stream:
                        metadata = Metadata(fileobj=stream, scheme='legacy')
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    # new_dist_class is defined elsewhere in this module.
                    yield new_dist_class(r.path, metadata=metadata,
                                         env=self)
                elif self._include_egg and entry.endswith(('.egg-info',
                                                           '.egg')):
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    # old_dist_class is defined elsewhere in this module.
                    yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if not version is None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
class Distribution(object):
    """
    Base class for distributions, whether installed or obtained from an
    index.  All that is needed for construction is a metadata object
    describing the distribution.
    """

    # True if this is known to be only a build-time dependency (i.e. not
    # needed after installation).
    build_time_dependency = False

    # Whether the ``REQUESTED`` metadata file is present — in other words,
    # whether the package was installed by user request rather than pulled
    # in as a dependency.
    requested = False

    def __init__(self, metadata):
        """
        Initialise an instance.
        :param metadata: The instance of :class:`Metadata` describing this
        distribution.
        """
        self.metadata = metadata
        self.name = metadata.name
        # Lower-cased name, used for case-insensitive comparisons.
        self.key = self.name.lower()
        self.version = metadata.version
        self.locator = None
        self.digest = None
        self.extras = None      # additional features requested
        self.context = None     # environment marker overrides
        self.download_urls = set()
        self.digests = {}

    @property
    def source_url(self):
        """The source archive download URL for this distribution."""
        return self.metadata.source_url

    download_url = source_url   # Backward compatibility

    @property
    def name_and_version(self):
        """The name and version formatted as ``"name (version)"``."""
        return '%s (%s)' % (self.name, self.version)

    @property
    def provides(self):
        """
        The distribution names and versions provided by this distribution,
        as a list of ``"name (version)"`` strings.  The distribution's own
        entry is appended to the metadata list when not already present
        (note: this mutates ``self.metadata.provides``).
        """
        own_entry = '%s (%s)' % (self.name, self.version)
        entries = self.metadata.provides
        if own_entry not in entries:
            entries.append(own_entry)
        return entries

    def _get_requirements(self, req_attr):
        # Resolve one of the metadata's requirement attributes, honouring
        # any requested extras and environment-marker context.
        md = self.metadata
        logger.debug('Getting requirements from metadata %r', md.todict())
        reqts = getattr(md, req_attr)
        return set(md.get_requirements(reqts, extras=self.extras,
                                       env=self.context))

    @property
    def run_requires(self):
        return self._get_requirements('run_requires')

    @property
    def meta_requires(self):
        return self._get_requirements('meta_requires')

    @property
    def build_requires(self):
        return self._get_requirements('build_requires')

    @property
    def test_requires(self):
        return self._get_requirements('test_requires')

    @property
    def dev_requires(self):
        return self._get_requirements('dev_requires')

    def matches_requirement(self, req):
        """
        Say if this instance matches (fulfills) a requirement.
        :param req: The requirement to match.
        :rtype req: str
        :return: True if it matches, else False.
        """
        # Requirement may contain extras - parse to lose those
        # from what's passed to the matcher
        parsed = parse_requirement(req)
        scheme = get_scheme(self.metadata.scheme)
        try:
            matcher = scheme.matcher(parsed.requirement)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            logger.warning('could not read version %r - using name only',
                           req)
            matcher = scheme.matcher(req.split()[0])

        target = matcher.key    # case-insensitive
        for entry in self.provides:
            p_name, p_ver = parse_name_and_version(entry)
            if p_name != target:
                continue
            try:
                # First provide with a readable version decides the result.
                return matcher.match(p_ver)
            except UnsupportedVersionError:
                pass
        return False

    def __repr__(self):
        """Return a textual representation of this instance."""
        suffix = ' [%s]' % self.source_url if self.source_url else ''
        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)

    def __eq__(self, other):
        """
        See if this distribution is the same as another: equal when type,
        name, version and source_url all coincide.
        """
        return (type(other) is type(self) and
                self.name == other.name and
                self.version == other.version and
                self.source_url == other.source_url)

    def __hash__(self):
        """Compute hash in a way which matches the equality test."""
        return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
    """
    This is the base class for installed distributions (whether PEP 376 or
    legacy).
    """

    # Default hashlib algorithm name used when hashing file data, or None
    # to fall back to MD5.  Subclasses override this.
    hasher = None

    def __init__(self, metadata, path, env=None):
        """
        Initialise an instance.
        :param metadata: An instance of :class:`Metadata` which describes the
                         distribution. This will normally have been initialised
                         from a metadata file in the ``path``.
        :param path:     The path of the ``.dist-info`` or ``.egg-info``
                         directory for the distribution.
        :param env:      This is normally the :class:`DistributionPath`
                         instance where this distribution was found.
        """
        super(BaseInstalledDistribution, self).__init__(metadata)
        self.path = path
        self.dist_path = env

    def get_hash(self, data, hasher=None):
        """
        Get the hash of some data, using a particular hash algorithm, if
        specified.

        :param data: The data to be hashed.
        :type data: bytes
        :param hasher: The name of a hash implementation, supported by hashlib,
                       or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
                       ``'sha512'``. If no hasher is specified, the ``hasher``
                       attribute of the :class:`InstalledDistribution` instance
                       is used. If the hasher is determined to be ``None``, MD5
                       is used as the hashing algorithm.
        :returns: The hash of the data. If a hasher was explicitly specified,
                  the returned hash will be prefixed with the specified hasher
                  followed by '='.
        :rtype: str
        """
        if hasher is None:
            hasher = self.hasher
        if hasher is None:
            hasher = hashlib.md5
            prefix = ''
        else:
            # Build the prefix from the algorithm actually being used, not
            # from self.hasher: an explicitly passed algorithm may differ
            # from the instance default, and the prefix must identify the
            # digest that follows (as the docstring promises).  Capture the
            # name before replacing it with the hashlib constructor.
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
        digest = hasher(data).digest()
        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
        return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
    """
    Created with the *path* of the ``.dist-info`` directory provided to the
    constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed in Metadata instance (useful for when
    dry-run mode is being used).
    """

    # RECORD hashes use SHA-256 for dist-info style distributions.
    hasher = 'sha256'

    def __init__(self, path, metadata=None, env=None):
        """
        Initialise an instance from the ``.dist-info`` directory at *path*.

        :param path: the ``.dist-info`` directory path.
        :param metadata: an optional pre-parsed :class:`Metadata` instance;
                         when omitted, metadata is located via the resource
                         finder for *path*.
        :param env: the :class:`DistributionPath` this distribution belongs
                    to (used for caching), or ``None``.
        :raises ValueError: if no finder or no metadata file is available.
        """
        self.finder = finder = resources.finder_for_path(path)
        if finder is None:
            # Fail loudly instead of dropping into the debugger; the
            # previous code had a leftover ``import pdb; pdb.set_trace()``
            # here, which hangs non-interactive processes.
            raise ValueError('finder unavailable for %s' % path)
        if env and env._cache_enabled and path in env._cache.path:
            metadata = env._cache.path[path].metadata
        elif metadata is None:
            r = finder.find(METADATA_FILENAME)
            # Temporary - for Wheel 0.23 support
            if r is None:
                r = finder.find(WHEEL_METADATA_FILENAME)
            # Temporary - for legacy support
            if r is None:
                r = finder.find('METADATA')
            if r is None:
                raise ValueError('no %s found in %s' % (METADATA_FILENAME,
                                                        path))
            with contextlib.closing(r.as_stream()) as stream:
                metadata = Metadata(fileobj=stream, scheme='legacy')
        super(InstalledDistribution, self).__init__(metadata, path, env)
        if env and env._cache_enabled:
            env._cache.add(self)
        try:
            r = finder.find('REQUESTED')
        except AttributeError:
            # A finder without find() support is treated the same as an
            # absent REQUESTED marker.  The previous code dropped into pdb
            # here and then crashed on the undefined name ``r`` below.
            r = None
        self.requested = r is not None

    def __repr__(self):
        return '<InstalledDistribution %r %s at %r>' % (
            self.name, self.version, self.path)

    def __str__(self):
        return "%s %s" % (self.name, self.version)

    def _get_records(self):
        """
        Get the list of installed files for the distribution
        :return: A list of tuples of path, hash and size. Note that hash and
                 size might be ``None`` for some entries. The path is exactly
                 as stored in the file (which is as in PEP 376).
        """
        results = []
        r = self.get_distinfo_resource('RECORD')
        with contextlib.closing(r.as_stream()) as stream:
            with CSVReader(stream=stream) as record_reader:
                # Base location is parent dir of .dist-info dir
                #base_location = os.path.dirname(self.path)
                #base_location = os.path.abspath(base_location)
                for row in record_reader:
                    # Pad short rows so the unpacking below always works.
                    missing = [None for i in range(len(row), 3)]
                    path, checksum, size = row + missing
                    #if not os.path.isabs(path):
                    #    path = path.replace('/', os.sep)
                    #    path = os.path.join(base_location, path)
                    results.append((path, checksum, size))
        return results

    @cached_property
    def exports(self):
        """
        Return the information exported by this distribution.
        :return: A dictionary of exports, mapping an export category to a dict
                 of :class:`ExportEntry` instances describing the individual
                 export entries, and keyed by name.
        """
        result = {}
        r = self.get_distinfo_resource(EXPORTS_FILENAME)
        if r:
            result = self.read_exports()
        return result

    def read_exports(self):
        """
        Read exports data from a file in .ini format.
        :return: A dictionary of exports, mapping an export category to a list
                 of :class:`ExportEntry` instances describing the individual
                 export entries.
        """
        result = {}
        r = self.get_distinfo_resource(EXPORTS_FILENAME)
        if r:
            with contextlib.closing(r.as_stream()) as stream:
                result = read_exports(stream)
        return result

    def write_exports(self, exports):
        """
        Write a dictionary of exports to a file in .ini format.
        :param exports: A dictionary of exports, mapping an export category to
                        a list of :class:`ExportEntry` instances describing the
                        individual export entries.
        """
        rf = self.get_distinfo_file(EXPORTS_FILENAME)
        with open(rf, 'w') as f:
            write_exports(exports, f)

    def get_resource_path(self, relative_path):
        """
        NOTE: This API may change in the future.

        Return the absolute path to a resource file with the given relative
        path.

        :param relative_path: The path, relative to .dist-info, of the resource
                              of interest.
        :return: The absolute path where the resource is to be found.
        """
        r = self.get_distinfo_resource('RESOURCES')
        with contextlib.closing(r.as_stream()) as stream:
            with CSVReader(stream=stream) as resources_reader:
                for relative, destination in resources_reader:
                    if relative == relative_path:
                        return destination
        raise KeyError('no resource file with relative path %r '
                       'is installed' % relative_path)

    def list_installed_files(self):
        """
        Iterates over the ``RECORD`` entries and returns a tuple
        ``(path, hash, size)`` for each line.
        :returns: iterator of (path, hash, size)
        """
        for result in self._get_records():
            yield result

    def write_installed_files(self, paths, prefix, dry_run=False):
        """
        Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
        existing ``RECORD`` file is silently overwritten.

        prefix is used to determine when to write absolute paths.
        """
        prefix = os.path.join(prefix, '')
        base = os.path.dirname(self.path)
        base_under_prefix = base.startswith(prefix)
        base = os.path.join(base, '')
        record_path = self.get_distinfo_file('RECORD')
        logger.info('creating %s', record_path)
        if dry_run:
            return None
        with CSVWriter(record_path) as writer:
            for path in paths:
                if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
                    # do not put size and hash, as in PEP-376
                    hash_value = size = ''
                else:
                    size = '%d' % os.path.getsize(path)
                    with open(path, 'rb') as fp:
                        hash_value = self.get_hash(fp.read())
                if path.startswith(base) or (base_under_prefix and
                                             path.startswith(prefix)):
                    path = os.path.relpath(path, base)
                writer.writerow((path, hash_value, size))

            # add the RECORD file itself
            if record_path.startswith(base):
                record_path = os.path.relpath(record_path, base)
            writer.writerow((record_path, '', ''))
        return record_path

    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        base = os.path.dirname(self.path)
        record_path = self.get_distinfo_file('RECORD')
        for path, hash_value, size in self.list_installed_files():
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path == record_path:
                continue
            if not os.path.exists(path):
                mismatches.append((path, 'exists', True, False))
            elif os.path.isfile(path):
                actual_size = str(os.path.getsize(path))
                if size and actual_size != size:
                    mismatches.append((path, 'size', size, actual_size))
                elif hash_value:
                    if '=' in hash_value:
                        hasher = hash_value.split('=', 1)[0]
                    else:
                        hasher = None
                    with open(path, 'rb') as f:
                        actual_hash = self.get_hash(f.read(), hasher)
                        if actual_hash != hash_value:
                            mismatches.append((path, 'hash', hash_value, actual_hash))
        return mismatches

    @cached_property
    def shared_locations(self):
        """
        A dictionary of shared locations whose keys are in the set 'prefix',
        'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
        The corresponding value is the absolute path of that category for
        this distribution, and takes into account any paths selected by the
        user at installation time (e.g. via command-line arguments). In the
        case of the 'namespace' key, this would be a list of absolute paths
        for the roots of namespace packages in this distribution.

        The first time this property is accessed, the relevant information is
        read from the SHARED file in the .dist-info directory.
        """
        result = {}
        shared_path = os.path.join(self.path, 'SHARED')
        if os.path.isfile(shared_path):
            with codecs.open(shared_path, 'r', encoding='utf-8') as f:
                lines = f.read().splitlines()
            for line in lines:
                key, value = line.split('=', 1)
                if key == 'namespace':
                    result.setdefault(key, []).append(value)
                else:
                    result[key] = value
        return result

    def write_shared_locations(self, paths, dry_run=False):
        """
        Write shared location information to the SHARED file in .dist-info.
        :param paths: A dictionary as described in the documentation for
                      :meth:`shared_locations`.
        :param dry_run: If True, the action is logged but no file is actually
                        written.
        :return: The path of the file written to.
        """
        shared_path = os.path.join(self.path, 'SHARED')
        logger.info('creating %s', shared_path)
        if dry_run:
            return None
        lines = []
        for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
            path = paths[key]
            if os.path.isdir(paths[key]):
                lines.append('%s=%s' % (key, path))
        for ns in paths.get('namespace', ()):
            lines.append('namespace=%s' % ns)

        with codecs.open(shared_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(lines))
        return shared_path

    def get_distinfo_resource(self, path):
        # Only the well-known dist-info file names are legal here.
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))
        finder = resources.finder_for_path(self.path)
        if finder is None:
            raise DistlibException('Unable to get a finder for %s' % self.path)
        return finder.find(path)

    def get_distinfo_file(self, path):
        """
        Returns a path located under the ``.dist-info`` directory. Returns a
        string representing the path.

        :parameter path: a ``'/'``-separated path relative to the
                         ``.dist-info`` directory or an absolute path;
                         If *path* is an absolute path and doesn't start
                         with the ``.dist-info`` directory path,
                         a :class:`DistlibException` is raised
        :type path: str
        :rtype: str
        """
        # Check if it is an absolute path  # XXX use relpath, add tests
        if path.find(os.sep) >= 0:
            # it's an absolute path?
            distinfo_dirname, path = path.split(os.sep)[-2:]
            if distinfo_dirname != self.path.split(os.sep)[-1]:
                raise DistlibException(
                    'dist-info file %r does not belong to the %r %s '
                    'distribution' % (path, self.name, self.version))

        # The file must be relative
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))
        return os.path.join(self.path, path)

    def list_distinfo_files(self):
        """
        Iterates over the ``RECORD`` entries and returns paths for each line if
        the path is pointing to a file located in the ``.dist-info`` directory
        or one of its subdirectories.

        :returns: iterator of paths
        """
        base = os.path.dirname(self.path)
        for path, checksum, size in self._get_records():
            # XXX add separator or use real relpath algo
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path.startswith(self.path):
                yield path

    def __eq__(self, other):
        # Identity is the .dist-info path; see __hash__ note below.
        return (isinstance(other, InstalledDistribution) and
                self.path == other.path)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    __hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
    """Created with the *path* of the ``.egg-info`` directory or file provided
    to the constructor. It reads the metadata contained in the file itself, or
    if the given path happens to be a directory, the metadata is read from the
    file ``PKG-INFO`` under that directory."""
    requested = True  # as we have no way of knowing, assume it was
    shared_locations = {}  # egg metadata records no shared-location info
    def __init__(self, path, env=None):
        # Helper: stamp the identity fields used for lookups/comparisons.
        def set_name_and_version(s, n, v):
            s.name = n
            s.key = n.lower()  # for case-insensitive comparisons
            s.version = v
        self.path = path
        self.dist_path = env  # the DistributionPath we were found on, if any
        if env and env._cache_enabled and path in env._cache_egg.path:
            # Cache hit: reuse the metadata parsed on a previous scan.
            metadata = env._cache_egg.path[path].metadata
            set_name_and_version(self, metadata.name, metadata.version)
        else:
            metadata = self._get_metadata(path)
            # Need to be set before caching
            set_name_and_version(self, metadata.name, metadata.version)
            if env and env._cache_enabled:
                env._cache_egg.add(self)
        super(EggInfoDistribution, self).__init__(metadata, path, env)
    def _get_metadata(self, path):
        # Parse legacy metadata from either an .egg (directory or zip file)
        # or an .egg-info (directory or bare PKG-INFO file), collecting
        # requirements from requires.txt when present.
        requires = None
        def parse_requires_data(data):
            """Create a list of dependencies from a requires.txt file.

            *data*: the contents of a setuptools-produced requires.txt file.
            """
            reqs = []
            lines = data.splitlines()
            for line in lines:
                line = line.strip()
                if line.startswith('['):
                    # An '[extra]' section begins; extras are not supported,
                    # so stop scanning at this point.
                    logger.warning('Unexpected line: quitting requirement scan: %r',
                                   line)
                    break
                r = parse_requirement(line)
                if not r:
                    logger.warning('Not recognised as a requirement: %r', line)
                    continue
                if r.extras:
                    logger.warning('extra requirements in requires.txt are '
                                   'not supported')
                if not r.constraints:
                    reqs.append(r.name)
                else:
                    # Render constraints back into "name (op ver, ...)" form.
                    cons = ', '.join('%s%s' % c for c in r.constraints)
                    reqs.append('%s (%s)' % (r.name, cons))
            return reqs
        def parse_requires_path(req_path):
            """Create a list of dependencies from a requires.txt file.

            *req_path*: the path to a setuptools-produced requires.txt file.
            """
            reqs = []
            try:
                with codecs.open(req_path, 'r', 'utf-8') as fp:
                    reqs = parse_requires_data(fp.read())
            except IOError:
                # A missing requires.txt simply means no dependencies.
                pass
            return reqs
        if path.endswith('.egg'):
            if os.path.isdir(path):
                # Unpacked egg: metadata lives under EGG-INFO/.
                meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
                metadata = Metadata(path=meta_path, scheme='legacy')
                req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
                requires = parse_requires_path(req_path)
            else:
                # Zipped egg: read both files out of the zip archive.
                # FIXME handle the case where zipfile is not available
                zipf = zipimport.zipimporter(path)
                fileobj = StringIO(
                    zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
                metadata = Metadata(fileobj=fileobj, scheme='legacy')
                try:
                    data = zipf.get_data('EGG-INFO/requires.txt')
                    requires = parse_requires_data(data.decode('utf-8'))
                except IOError:
                    requires = None
        elif path.endswith('.egg-info'):
            if os.path.isdir(path):
                # Directory form: PKG-INFO and requires.txt are siblings
                # inside it; rebind ``path`` to the PKG-INFO file.
                req_path = os.path.join(path, 'requires.txt')
                requires = parse_requires_path(req_path)
                path = os.path.join(path, 'PKG-INFO')
            metadata = Metadata(path=path, scheme='legacy')
        else:
            raise DistlibException('path must end with .egg-info or .egg, '
                                   'got %r' % path)
        if requires:
            metadata.add_requirements(requires)
        return metadata
    def __repr__(self):
        return '<EggInfoDistribution %r %s at %r>' % (
            self.name, self.version, self.path)
    def __str__(self):
        return "%s %s" % (self.name, self.version)
    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.

        NOTE(review): for eggs only existence is actually verified here;
        sizes and hashes are never compared.
        """
        mismatches = []
        record_path = os.path.join(self.path, 'installed-files.txt')
        if os.path.exists(record_path):
            for path, _, _ in self.list_installed_files():
                if path == record_path:
                    continue
                if not os.path.exists(path):
                    mismatches.append((path, 'exists', True, False))
        return mismatches
    def list_installed_files(self):
        """
        Iterates over the ``installed-files.txt`` entries and returns a tuple
        ``(path, hash, size)`` for each line.

        :returns: a list of (path, hash, size)
        """
        def _md5(path):
            # installed-files.txt stores no hashes, so compute MD5 on the fly.
            f = open(path, 'rb')
            try:
                content = f.read()
            finally:
                f.close()
            return hashlib.md5(content).hexdigest()
        def _size(path):
            return os.stat(path).st_size
        record_path = os.path.join(self.path, 'installed-files.txt')
        result = []
        if os.path.exists(record_path):
            with codecs.open(record_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    p = os.path.normpath(os.path.join(self.path, line))
                    # "./" is present as a marker between installed files
                    # and installation metadata files
                    if not os.path.exists(p):
                        logger.warning('Non-existent file: %s', p)
                        if p.endswith(('.pyc', '.pyo')):
                            # Stale bytecode is expected to be missing.
                            continue
                        #otherwise fall through and fail
                    if not os.path.isdir(p):
                        result.append((p, _md5(p), _size(p)))
            result.append((record_path, None, None))
        return result
    def list_distinfo_files(self, absolute=False):
        """
        Iterates over the ``installed-files.txt`` entries and returns paths for
        each line if the path is pointing to a file located in the
        ``.egg-info`` directory or one of its subdirectories.

        :parameter absolute: If *absolute* is ``True``, each returned path is
                          transformed into a local absolute path. Otherwise the
                          raw value from ``installed-files.txt`` is returned.
        :type absolute: boolean
        :returns: iterator of paths
        """
        record_path = os.path.join(self.path, 'installed-files.txt')
        skip = True
        with codecs.open(record_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line == './':
                    # Marker: entries after this point are the metadata
                    # files inside .egg-info, which are the ones we want.
                    skip = False
                    continue
                if not skip:
                    p = os.path.normpath(os.path.join(self.path, line))
                    if p.startswith(self.path):
                        if absolute:
                            yield p
                        else:
                            yield line
    def __eq__(self, other):
        # Identity is the .egg-info path; see __hash__ note below.
        return (isinstance(other, EggInfoDistribution) and
                self.path == other.path)
    # See http://docs.python.org/reference/datamodel#object.__hash__
    __hash__ = object.__hash__
# Aliases for the two supported metadata formats: PEP 376-style .dist-info
# ("new") and setuptools-style .egg / .egg-info ("old").
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
    """
    Represents a dependency graph between distributions.

    The dependency relationships are stored in an ``adjacency_list`` that maps
    distributions to a list of ``(other, label)`` tuples where  ``other``
    is a distribution and the edge is labeled with ``label`` (i.e. the version
    specifier, if such was provided). Also, for more efficient traversal, for
    every distribution ``x``, a list of predecessors is kept in
    ``reverse_list[x]``. An edge from distribution ``a`` to
    distribution ``b`` means that ``a`` depends on ``b``. If any missing
    dependencies are found, they are stored in ``missing``, which is a
    dictionary that maps distributions to a list of requirements that were not
    provided by any other distributions.
    """

    def __init__(self):
        self.adjacency_list = {}
        self.reverse_list = {}
        self.missing = {}

    def add_distribution(self, distribution):
        """Add the *distribution* to the graph.

        :type distribution: :class:`distutils2.database.InstalledDistribution`
                            or :class:`distutils2.database.EggInfoDistribution`
        """
        self.adjacency_list[distribution] = []
        self.reverse_list[distribution] = []
        #self.missing[distribution] = []

    def add_edge(self, x, y, label=None):
        """Add an edge from distribution *x* to distribution *y* with the given
        *label*.

        :type x: :class:`distutils2.database.InstalledDistribution` or
                 :class:`distutils2.database.EggInfoDistribution`
        :type y: :class:`distutils2.database.InstalledDistribution` or
                 :class:`distutils2.database.EggInfoDistribution`
        :type label: ``str`` or ``None``
        """
        self.adjacency_list[x].append((y, label))
        # multiple edges are allowed, so be careful
        if x not in self.reverse_list[y]:
            self.reverse_list[y].append(x)

    def add_missing(self, distribution, requirement):
        """
        Add a missing *requirement* for the given *distribution*.

        :type distribution: :class:`distutils2.database.InstalledDistribution`
                            or :class:`distutils2.database.EggInfoDistribution`
        :type requirement: ``str``
        """
        logger.debug('%s missing %r', distribution, requirement)
        self.missing.setdefault(distribution, []).append(requirement)

    def _repr_dist(self, dist):
        return '%s %s' % (dist.name, dist.version)

    def repr_node(self, dist, level=1):
        """Return a textual representation of the subgraph rooted at *dist*."""
        lines = [self._repr_dist(dist)]
        for child, label in self.adjacency_list[dist]:
            text = self._repr_dist(child)
            if label is not None:
                text = '%s [%s]' % (text, label)
            lines.append('    ' * level + text)
            # Recurse for the child's own dependencies, dropping its first
            # line since it was just emitted above.
            lines.extend(self.repr_node(child, level + 1).split('\n')[1:])
        return '\n'.join(lines)

    def to_dot(self, f, skip_disconnected=True):
        """Writes a DOT output for the graph to the provided file *f*.

        If *skip_disconnected* is set to ``True``, then all distributions
        that are not dependent on any other distribution are skipped.

        :type f: has to support ``file``-like operations
        :type skip_disconnected: ``bool``
        """
        no_deps = []
        f.write("digraph dependencies {\n")
        for dist, edges in self.adjacency_list.items():
            if not edges and not skip_disconnected:
                no_deps.append(dist)
            for other, label in edges:
                if label is None:
                    f.write('"%s" -> "%s"\n' % (dist.name, other.name))
                else:
                    f.write('"%s" -> "%s" [label="%s"]\n' %
                            (dist.name, other.name, label))
        if not skip_disconnected and len(no_deps) > 0:
            f.write('subgraph disconnected {\n')
            f.write('label = "Disconnected"\n')
            f.write('bgcolor = red\n')
            for dist in no_deps:
                f.write('"%s"' % dist.name)
                f.write('\n')
            f.write('}\n')
        f.write('}\n')

    def topological_sort(self):
        """
        Perform a topological sort of the graph.

        :return: A tuple, the first element of which is a topologically sorted
                 list of distributions, and the second element of which is a
                 list of distributions that cannot be sorted because they have
                 circular dependencies and so form a cycle.
        """
        ordered = []
        # Work on a shallow copy so the graph itself is left untouched.
        pending = {}
        for node, edges in self.adjacency_list.items():
            pending[node] = edges[:]
        while True:
            # Nodes with no outstanding dependencies can be emitted now.
            ready = []
            for node, edges in list(pending.items()):
                if not edges:
                    ready.append(node)
                    del pending[node]
            if not ready:
                # Whatever remains participates in a cycle.
                break
            # Strip emitted nodes from the remaining adjacency lists.
            for node, edges in pending.items():
                pending[node] = [(d, r) for d, r in edges if d not in ready]
            logger.debug('Moving to result: %s',
                         ['%s (%s)' % (d.name, d.version) for d in ready])
            ordered.extend(ready)
        return ordered, list(pending.keys())

    def __repr__(self):
        """Representation of the graph"""
        return '\n'.join(self.repr_node(dist)
                         for dist in self.adjacency_list)
def make_graph(dists, scheme='default'):
    """Makes a dependency graph from the given distributions.

    :parameter dists: a list of distributions
    :type dists: list of :class:`distutils2.database.InstalledDistribution` and
                 :class:`distutils2.database.EggInfoDistribution` instances
    :rtype: a :class:`DependencyGraph` instance
    """
    scheme = get_scheme(scheme)
    graph = DependencyGraph()
    provided = {}  # maps names to lists of (version, dist) tuples

    # Pass 1: register every distribution and record what each provides.
    for dist in dists:
        graph.add_distribution(dist)
        for entry in dist.provides:
            name, version = parse_name_and_version(entry)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            provided.setdefault(name, []).append((version, dist))

    # Pass 2: resolve each requirement against the providers and add edges;
    # anything unresolvable is recorded as missing.
    for dist in dists:
        requires = (dist.run_requires | dist.meta_requires |
                    dist.build_requires | dist.dev_requires)
        for req in requires:
            try:
                matcher = scheme.matcher(req)
            except UnsupportedVersionError:
                # XXX compat-mode if cannot read the version
                logger.warning('could not read version %r - using name only',
                               req)
                matcher = scheme.matcher(req.split()[0])
            name = matcher.key  # case-insensitive

            matched = False
            for version, provider in provided.get(name, ()):
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                if match:
                    graph.add_edge(dist, provider, req)
                    matched = True
                    break
            if not matched:
                graph.add_missing(dist, req)
    return graph
def get_dependent_dists(dists, dist):
    """Recursively generate a list of distributions from *dists* that are
    dependent on *dist*.

    :param dists: a list of distributions
    :param dist: a distribution, member of *dists* for which we are interested
                 in finding the dependents; it is not included in the result.
    :return: a list of distributions, each appearing at most once.
    :raises DistlibException: if *dist* is not a member of *dists*.
    """
    if dist not in dists:
        raise DistlibException('given distribution %r is not a member '
                               'of the list' % dist.name)
    graph = make_graph(dists)

    dep = [dist]  # dependent distributions; seeding with dist stops cycles
    # Copy the worklist: popping from graph.reverse_list[dist] directly
    # would mutate the graph's own predecessor list.
    todo = list(graph.reverse_list[dist])  # list of nodes we should inspect
    while todo:
        d = todo.pop()
        if d in dep:
            # Already processed via another path; skip to avoid duplicates
            # (a node can be queued twice before it is first processed).
            continue
        dep.append(d)
        for succ in graph.reverse_list[d]:
            if succ not in dep:
                todo.append(succ)

    dep.pop(0)  # remove dist from dep, was there to prevent infinite loops
    return dep
def get_required_dists(dists, dist):
    """Recursively generate a list of distributions from *dists* that are
    required by *dist*.

    :param dists: a list of distributions
    :param dist: a distribution, member of *dists* for which we are interested
                 in finding the requirements.
    :return: a list of distributions, each appearing at most once.
    :raises DistlibException: if *dist* is not a member of *dists*.
    """
    if dist not in dists:
        raise DistlibException('given distribution %r is not a member '
                               'of the list' % dist.name)
    graph = make_graph(dists)

    req = []  # required distributions
    # Copy the worklist: popping from graph.adjacency_list[dist] directly
    # would mutate the graph.  Entries are (distribution, label) edges.
    todo = list(graph.adjacency_list[dist])  # list of nodes we should inspect
    while todo:
        d = todo.pop()[0]
        if d in req:
            # Already visited via another path.
            continue
        req.append(d)
        for pred in graph.adjacency_list[d]:
            # pred is an (other, label) edge tuple, so compare the
            # distribution itself.  The previous code compared the tuple
            # against req (which holds distributions) -- that never matched,
            # allowing duplicates and infinite loops on dependency cycles.
            if pred[0] not in req:
                todo.append(pred)

    return req
def make_dist(name, version, **kwargs):
    """
    Convenience helper: build a :class:`Distribution` from just a name and a
    version (plus any extra :class:`Metadata` keyword arguments).
    """
    placeholder = 'Placeholder for summary'
    summary = kwargs.pop('summary', placeholder)
    metadata = Metadata(**kwargs)
    metadata.name = name
    metadata.version = version
    # An empty or None summary falls back to the placeholder text.
    metadata.summary = summary if summary else placeholder
    return Distribution(metadata)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = '''
---
module: runit
author:
- James Sumners (@jsumners)
version_added: "2.3"
short_description: Manage runit services
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
description:
- Name of the service to manage.
required: yes
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always bounce the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
choices: [ killed, once, reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
type: bool
service_dir:
description:
- directory runsv watches for services
default: /var/service
service_src:
description:
- directory where services are defined, the source of symlinks to service_dir.
default: /etc/sv
'''
EXAMPLES = '''
- name: Start sv dnscache, if not running
runit:
name: dnscache
state: started
- name: Stop sv dnscache, if running
runit:
name: dnscache
state: stopped
- name: Kill sv dnscache, in all cases
runit:
name: dnscache
state: killed
- name: Restart sv dnscache, in all cases
runit:
name: dnscache
state: restarted
- name: Reload sv dnscache, in all cases
runit:
name: dnscache
state: reloaded
- name: Use alternative sv directory location
runit:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc
    """

    # def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        """Locate the ``sv`` binary and take an initial status snapshot.

        :param module: the AnsibleModule instance (provides params/helpers)
        """
        self.extra_paths = []  # extra directories to search for the sv binary
        # Attributes included in the structure returned by report().
        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
        self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
        self.svc_full = '/'.join([self.service_dir, self.name])  # symlink under service_dir
        self.src_full = '/'.join([self.service_src, self.name])  # service definition dir

        # The service counts as enabled when the symlink in service_dir
        # exists; lexists means a dangling symlink still counts.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.get_status()
        else:
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking its source dir into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Force-stop the service and remove its symlink from service_dir."""
        # NOTE(review): force-stop targets src_full (the source dir), not
        # svc_full (the supervised symlink) -- confirm this is intentional.
        self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))

    def get_status(self):
        """Parse ``sv status`` output into state, pid and duration fields."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])

        if err is not None and err:
            # sv itself reported a problem; surface its stderr as the state.
            self.full_state = self.state = err
        else:
            self.full_state = out
            # full_state *may* contain information about the logger:
            # "down: /etc/service/service-without-logger: 1s, normally up\n"
            # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
            full_state_no_logger = self.full_state.split("; ")[0]

            m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
            if m:
                self.pid = m.group(1)

            m = re.search(r' (\d+)s', full_state_no_logger)
            if m:
                self.duration = m.group(1)

            if re.search(r'^run:', full_state_no_logger):
                self.state = 'started'
            elif re.search(r'^down:', full_state_no_logger):
                self.state = 'stopped'
            else:
                self.state = 'unknown'
                return

    def started(self):
        # State-name alias so main() can dispatch via getattr(sv, state)().
        return self.start()

    def start(self):
        """Run ``sv start`` on the service."""
        return self.execute_command([self.svc_cmd, 'start', self.svc_full])

    def stopped(self):
        # State-name alias so main() can dispatch via getattr(sv, state)().
        return self.stop()

    def stop(self):
        """Run ``sv stop`` on the service."""
        return self.execute_command([self.svc_cmd, 'stop', self.svc_full])

    def once(self):
        """Run ``sv once`` (run a normally downed service once)."""
        return self.execute_command([self.svc_cmd, 'once', self.svc_full])

    def reloaded(self):
        # State-name alias so main() can dispatch via getattr(sv, state)().
        return self.reload()

    def reload(self):
        """Run ``sv reload`` (sends HUP, per the module documentation)."""
        return self.execute_command([self.svc_cmd, 'reload', self.svc_full])

    def restarted(self):
        # State-name alias so main() can dispatch via getattr(sv, state)().
        return self.restart()

    def restart(self):
        """Run ``sv restart`` on the service."""
        return self.execute_command([self.svc_cmd, 'restart', self.svc_full])

    def killed(self):
        # State-name alias so main() can dispatch via getattr(sv, state)().
        return self.kill()

    def kill(self):
        """Run ``sv force-stop`` on the service."""
        return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])

    def execute_command(self, cmd):
        """Run *cmd*, failing the module on unexpected exceptions.

        :param cmd: command and arguments as a list of strings
        :return: ``(rc, stdout, stderr)`` from AnsibleModule.run_command
        """
        try:
            # NOTE(review): joining into one string makes run_command re-split
            # it; arguments containing spaces would break. Passing the list
            # directly would be safer -- confirm before changing.
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the attributes in report_vars."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
def main():
    """Entry point: reconcile the requested enabled flag and run state via sv."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            dist=dict(type='str', default='runit'),
            service_dir=dict(type='str', default='/var/service'),
            service_src=dict(type='str', default='/etc/sv'),
        ),
        supports_check_mode=True,
    )
    # Force a C locale so parsing of sv output is not locale-dependent.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']

    sv = Sv(module)
    changed = False
    orig_state = sv.report()  # NOTE(review): captured but never reported back

    # Reconcile the enabled flag first (creates/removes the symlink).
    if enabled is not None and enabled != sv.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    sv.enable()
                else:
                    sv.disable()
            except (OSError, IOError) as e:
                module.fail_json(msg="Could not change service link: %s" % to_native(e), exception=traceback.format_exc())

    # Then reconcile the run state; Sv defines a method per state name
    # (started(), stopped(), ...), so dispatch by name.
    if state is not None and state != sv.state:
        changed = True
        if not module.check_mode:
            getattr(sv, state)()

    module.exit_json(changed=changed, sv=sv.report())


if __name__ == '__main__':
    main()
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.scheduling.concurrent;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.InitializingBean;
/**
* A Spring {@link FactoryBean} that builds and exposes a preconfigured {@link ForkJoinPool}.
*
* @author Juergen Hoeller
* @since 3.1
*/
public class ForkJoinPoolFactoryBean implements FactoryBean<ForkJoinPool>, InitializingBean, DisposableBean {

	// Whether to expose the JVM-wide common pool instead of a local instance.
	private boolean commonPool = false;

	// Parallelism for a locally created pool; defaults to the CPU count.
	private int parallelism = Runtime.getRuntime().availableProcessors();

	// Worker thread factory for a locally created pool.
	private ForkJoinPool.ForkJoinWorkerThreadFactory threadFactory = ForkJoinPool.defaultForkJoinWorkerThreadFactory;

	// Optional handler for worker threads that die from unrecoverable errors.
	private Thread.@Nullable UncaughtExceptionHandler uncaughtExceptionHandler;

	// FIFO scheduling for never-joined forked tasks (see setAsyncMode).
	private boolean asyncMode = false;

	// Max seconds destroy() blocks waiting for task termination (0 = no wait).
	private int awaitTerminationSeconds = 0;

	// The exposed pool; created in afterPropertiesSet(), null before that.
	private @Nullable ForkJoinPool forkJoinPool;


	/**
	 * Set whether to expose Java's 'common' {@link ForkJoinPool}.
	 * <p>Default is {@code false} , creating a local {@link ForkJoinPool} instance
	 * based on the {@link #setParallelism parallelism},
	 * {@link #setThreadFactory threadFactory},
	 * {@link #setUncaughtExceptionHandler uncaughtExceptionHandler}, and
	 * {@link #setAsyncMode asyncMode} properties on this FactoryBean.
	 * <p><b>NOTE:</b> Setting this flag to {@code true} effectively ignores all other
	 * properties on this FactoryBean, reusing the shared common JDK {@link ForkJoinPool}
	 * instead. This is a fine choice but does remove the application's ability
	 * to customize ForkJoinPool behavior, in particular the use of custom threads.
	 * @since 3.2
	 * @see java.util.concurrent.ForkJoinPool#commonPool()
	 */
	public void setCommonPool(boolean commonPool) {
		this.commonPool = commonPool;
	}

	/**
	 * Specify the parallelism level. Default is {@link Runtime#availableProcessors()}.
	 */
	public void setParallelism(int parallelism) {
		this.parallelism = parallelism;
	}

	/**
	 * Set the factory for creating new ForkJoinWorkerThreads.
	 * Default is {@link ForkJoinPool#defaultForkJoinWorkerThreadFactory}.
	 */
	public void setThreadFactory(ForkJoinPool.ForkJoinWorkerThreadFactory threadFactory) {
		this.threadFactory = threadFactory;
	}

	/**
	 * Set the handler for internal worker threads that terminate due to unrecoverable errors
	 * encountered while executing tasks. Default is none.
	 */
	public void setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
		this.uncaughtExceptionHandler = uncaughtExceptionHandler;
	}

	/**
	 * Specify whether to establish a local first-in-first-out scheduling mode for forked tasks
	 * that are never joined. This mode (asyncMode = {@code true}) may be more appropriate
	 * than the default locally stack-based mode in applications in which worker threads only
	 * process event-style asynchronous tasks. Default is {@code false}.
	 */
	public void setAsyncMode(boolean asyncMode) {
		this.asyncMode = asyncMode;
	}

	/**
	 * Set the maximum number of seconds that this ForkJoinPool is supposed to block
	 * on shutdown in order to wait for remaining tasks to complete their execution
	 * before the rest of the container continues to shut down. This is particularly
	 * useful if your remaining tasks are likely to need access to other resources
	 * that are also managed by the container.
	 * <p>By default, this ForkJoinPool won't wait for the termination of tasks at all.
	 * It will continue to fully execute all ongoing tasks as well as all remaining
	 * tasks in the queue, in parallel to the rest of the container shutting down.
	 * In contrast, if you specify an await-termination period using this property,
	 * this executor will wait for the given time (max) for the termination of tasks.
	 * <p>Note that this feature works for the {@link #setCommonPool "commonPool"}
	 * mode as well. The underlying ForkJoinPool won't actually terminate in that
	 * case but will wait for all tasks to terminate.
	 * @see java.util.concurrent.ForkJoinPool#shutdown()
	 * @see java.util.concurrent.ForkJoinPool#awaitTermination
	 */
	public void setAwaitTerminationSeconds(int awaitTerminationSeconds) {
		this.awaitTerminationSeconds = awaitTerminationSeconds;
	}

	@Override
	public void afterPropertiesSet() {
		// Either reuse the shared common pool or build a local instance from
		// the configured properties.
		this.forkJoinPool = (this.commonPool ? ForkJoinPool.commonPool() :
				new ForkJoinPool(this.parallelism, this.threadFactory, this.uncaughtExceptionHandler, this.asyncMode));
	}


	@Override
	public @Nullable ForkJoinPool getObject() {
		return this.forkJoinPool;
	}

	@Override
	public Class<?> getObjectType() {
		return ForkJoinPool.class;
	}

	@Override
	public boolean isSingleton() {
		return true;
	}


	@Override
	public void destroy() {
		if (this.forkJoinPool != null) {
			// Ignored for the common pool.
			this.forkJoinPool.shutdown();

			// Wait for all tasks to terminate - works for the common pool as well.
			if (this.awaitTerminationSeconds > 0) {
				try {
					this.forkJoinPool.awaitTermination(this.awaitTerminationSeconds, TimeUnit.SECONDS);
				}
				catch (InterruptedException ex) {
					Thread.currentThread().interrupt();
				}
			}
		}
	}

}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
)
// init wires RegisterConversions into the package-local scheme builder so
// the conversions below are installed when the scheme is built.
func init() {
	localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
	// Generated, field-for-field conversions registered in both directions.
	if err := s.AddGeneratedConversionFunc((*configv1alpha1.CSRSigningConfiguration)(nil), (*config.CSRSigningConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(a.(*configv1alpha1.CSRSigningConfiguration), b.(*config.CSRSigningConfiguration), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*config.CSRSigningConfiguration)(nil), (*configv1alpha1.CSRSigningConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(a.(*config.CSRSigningConfiguration), b.(*configv1alpha1.CSRSigningConfiguration), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
	}); err != nil {
		return err
	}
	// Controller-configuration conversions are registered via
	// AddConversionFunc, i.e. treated as manually provided conversions.
	if err := s.AddConversionFunc((*config.CSRSigningControllerConfiguration)(nil), (*configv1alpha1.CSRSigningControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(a.(*config.CSRSigningControllerConfiguration), b.(*configv1alpha1.CSRSigningControllerConfiguration), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*configv1alpha1.CSRSigningControllerConfiguration)(nil), (*config.CSRSigningControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(a.(*configv1alpha1.CSRSigningControllerConfiguration), b.(*config.CSRSigningControllerConfiguration), scope)
	}); err != nil {
		return err
	}
	return nil
}
// autoConvert copies the signing certificate/key file paths field-for-field.
func autoConvert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(in *configv1alpha1.CSRSigningConfiguration, out *config.CSRSigningConfiguration, s conversion.Scope) error {
	out.CertFile = in.CertFile
	out.KeyFile = in.KeyFile
	return nil
}

// Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(in *configv1alpha1.CSRSigningConfiguration, out *config.CSRSigningConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(in, out, s)
}
// autoConvert copies the signing certificate/key file paths field-for-field.
func autoConvert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(in *config.CSRSigningConfiguration, out *configv1alpha1.CSRSigningConfiguration, s conversion.Scope) error {
	out.CertFile = in.CertFile
	out.KeyFile = in.KeyFile
	return nil
}

// Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration is an autogenerated conversion function.
func Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(in *config.CSRSigningConfiguration, out *configv1alpha1.CSRSigningConfiguration, s conversion.Scope) error {
	return autoConvert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(in, out, s)
}
// autoConvert copies the cluster signing file paths, the four per-signer
// configurations and the signing duration from the versioned type to the
// internal type.
func autoConvert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(in *configv1alpha1.CSRSigningControllerConfiguration, out *config.CSRSigningControllerConfiguration, s conversion.Scope) error {
	out.ClusterSigningCertFile = in.ClusterSigningCertFile
	out.ClusterSigningKeyFile = in.ClusterSigningKeyFile
	if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.KubeletServingSignerConfiguration, &out.KubeletServingSignerConfiguration, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.KubeletClientSignerConfiguration, &out.KubeletClientSignerConfiguration, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.KubeAPIServerClientSignerConfiguration, &out.KubeAPIServerClientSignerConfiguration, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.LegacyUnknownSignerConfiguration, &out.LegacyUnknownSignerConfiguration, s); err != nil {
		return err
	}
	out.ClusterSigningDuration = in.ClusterSigningDuration
	return nil
}
// autoConvert copies the cluster signing file paths, the four per-signer
// configurations and the signing duration from the internal type to the
// versioned type.
func autoConvert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(in *config.CSRSigningControllerConfiguration, out *configv1alpha1.CSRSigningControllerConfiguration, s conversion.Scope) error {
	out.ClusterSigningCertFile = in.ClusterSigningCertFile
	out.ClusterSigningKeyFile = in.ClusterSigningKeyFile
	if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.KubeletServingSignerConfiguration, &out.KubeletServingSignerConfiguration, s); err != nil {
		return err
	}
	if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.KubeletClientSignerConfiguration, &out.KubeletClientSignerConfiguration, s); err != nil {
		return err
	}
	if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.KubeAPIServerClientSignerConfiguration, &out.KubeAPIServerClientSignerConfiguration, s); err != nil {
		return err
	}
	if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.LegacyUnknownSignerConfiguration, &out.LegacyUnknownSignerConfiguration, s); err != nil {
		return err
	}
	out.ClusterSigningDuration = in.ClusterSigningDuration
	return nil
}
// autoConvert copies the group/resource pair field-for-field.
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
	out.Group = in.Group
	out.Resource = in.Resource
	return nil
}

// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
	return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
// autoConvert copies the group/resource pair field-for-field.
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
	out.Group = in.Group
	out.Resource = in.Resource
	return nil
}

// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
	return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from django.contrib.sitemaps.views import sitemap
from . import views
from .sitemaps import * # pylint: disable=wildcard-import, unused-wildcard-import
# Sitemap sections served by both the XML sitemap and the HTML /sitemap/ view.
sitemaps = {
    'play': PlaySitemap('play'),
    'study': StudySitemap('study'),
    'tools': ToolSitemap('tools'),
    'read': ReadSitemap('read'),
    'static': StaticViewSitemap('other'),
}

urlpatterns = [
    path('api/', include('study_buffalo.api.urls', namespace='api')),
    path('play/', include('play.urls')),
    path('study/', include('study.urls')),
    path('read/', include('read.urls')),
    path('tools/', views.tools_index, name='tools_index'),
    path('rdrhc-calendar/', include('rdrhc_calendar.urls', namespace='rdrhc_calendar')),
    path('tools/alberta-adaptations/', views.alberta_adaptations_index, name='alberta_adaptations_index'),
    path('tools/dictionary/', include('dictionary.urls')),
    path('tools/dpd/', include('hc_dpd.urls')),
    path('tools/drug-price-calculator/', include('drug_price_calculator.urls')),
    path('tools/substitutions/', include('substitutions.urls')),
    path('tools/vancomycin-calculator/', include('vancomycin_calculator.urls')),
    path('users/', include('users.urls', namespace='users')),
    path('design/', views.design_index, name='design_index'),
    path('privacy-policy/', views.privacy_policy, name='privacy_policy'),
    path('robot-policy/', views.robot_policy, name='robot_policy'),
    path('contact/', views.contact, name='contact'),
    # Human-readable sitemap page, built from the same sections as sitemap.xml.
    path(
        'sitemap/',
        views.custom_sitemap,
        {'sitemaps': sitemaps, 'template_name': 'sitemap_template.html', 'content_type': None},
        name='sitemap'
    ),
    path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
    path('unsubscribe/', views.unsubscribe, name='unsubscribe'),
    path('unsubscribe/complete/', views.unsubscribe_complete, name='unsubscribe_complete'),
    path('accounts/profile/', views.account_profile, name='account_profile'),
    path('accounts/', include('allauth.urls')),
    path('robots.txt', TemplateView.as_view(template_name='robots.txt'), name='robots.txt'),
    path('', views.Index.as_view(), name='index'),
    # Django Admin, use {% url 'admin:index' %}
    path(settings.ADMIN_URL, admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # media file serving

# TEMPORARY URLS & REDIRECTS
urlpatterns += [
    path('cabs2018/', TemplateView.as_view(template_name='pages/cabs2018.html'), name='cabs2018'),
]

if settings.DEBUG:
    # This allows the error pages to be debugged during development: just
    # visit these URLs in a browser to see what each error page looks like.
    urlpatterns += [
        path(
            '400/',
            default_views.bad_request,
            kwargs={'exception': Exception('Bad Request!')},
        ),
        path(
            '403/',
            default_views.permission_denied,
            kwargs={'exception': Exception('Permission Denied')},
        ),
        path(
            '404/',
            default_views.page_not_found,
            kwargs={'exception': Exception('Page not Found')},
        ),
        path('500/', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        # Prepend the toolbar URLs so they take precedence over other routes.
        urlpatterns = [path('__debug__/', include(debug_toolbar.urls))] + urlpatterns
use {
regex_automata::meta::Regex,
regex_syntax::hir::{
self, Hir,
literal::{Literal, Seq},
},
};
use crate::{config::ConfiguredHIR, error::Error};
/// A type that encapsulates "inner" literal extraction from a regex.
///
/// It uses a huge pile of heuristics to try to pluck out literals from a regex
/// that are in turn used to build a simpler regex that is more amenable to
/// optimization.
///
/// The main idea underlying the validity of this technique is the fact
/// that ripgrep searches individuals lines and not across lines. (Unless
/// -U/--multiline is enabled.) Namely, we can pluck literals out of the regex,
/// search for them, find the bounds of the line in which that literal occurs
/// and then run the original regex on only that line. This overall works
/// really really well in throughput oriented searches because it potentially
/// allows ripgrep to spend a lot more time in a fast vectorized routine for
/// finding literals as opposed to the (much) slower regex engine.
///
/// This optimization was far more important in the old days, but since then,
/// Rust's regex engine has actually grown its own (albeit limited) support for
/// inner literal optimizations. So this technique doesn't apply as much as it
/// used to.
///
/// A good example of a regex where this particular extractor helps is
/// `\s+(Sherlock|[A-Z]atso[a-z]|Moriarty)\s+`. The `[A-Z]` before the `atso`
/// in particular is what inhibits the regex engine's own inner literal
/// optimizations from kicking in. This particular regex also did not have any
/// inner literals extracted in the old implementation (ripgrep <=13). So this
/// particular implementation represents a strict improvement from both the old
/// implementation and from the regex engine's own optimizations. (Which could
/// in theory be improved still.)
#[derive(Clone, Debug)]
pub(crate) struct InnerLiterals {
    /// The extracted literal sequence. `Seq::infinite()` (see `none`) marks
    /// "no usable literals", in which case no prefilter regex is produced.
    seq: Seq,
}
impl InnerLiterals {
    /// Create a set of inner literals from the given HIR expression.
    ///
    /// If no line terminator was configured, then this always declines to
    /// extract literals because the inner literal optimization may not be
    /// valid.
    ///
    /// Note that this requires the actual regex that will be used for a search
    /// because it will query some state about the compiled regex. That state
    /// may influence inner literal extraction.
    pub(crate) fn new(chir: &ConfiguredHIR, re: &Regex) -> InnerLiterals {
        // If there's no line terminator, then the inner literal optimization
        // at this level is not valid.
        if chir.config().line_terminator.is_none() {
            log::trace!(
                "skipping inner literal extraction, \
                 no line terminator is set"
            );
            return InnerLiterals::none();
        }
        // If we believe the regex is already accelerated, then just let
        // the regex engine do its thing. We'll skip the inner literal
        // optimization.
        //
        // ... but only if the regex doesn't have any Unicode word boundaries.
        // If it does, there's enough of a chance of the regex engine falling
        // back to a slower engine that it's worth trying our own inner literal
        // optimization.
        if re.is_accelerated() {
            if !chir.hir().properties().look_set().contains_word_unicode() {
                log::trace!(
                    "skipping inner literal extraction, \
                     existing regex is believed to already be accelerated",
                );
                return InnerLiterals::none();
            }
        }
        // In this case, we pretty much know that the regex engine will handle
        // it as best as possible, even if it isn't reported as accelerated.
        if chir.hir().properties().is_alternation_literal() {
            log::trace!(
                "skipping inner literal extraction, \
                 found alternation of literals, deferring to regex engine",
            );
            return InnerLiterals::none();
        }
        let seq = Extractor::new().extract_untagged(chir.hir());
        InnerLiterals { seq }
    }

    /// Returns an infinite set of inner literals, such that it can never
    /// produce a matcher.
    pub(crate) fn none() -> InnerLiterals {
        InnerLiterals { seq: Seq::infinite() }
    }

    /// If it is deemed advantageous to do so (via various suspicious
    /// heuristics), this will return a single regular expression pattern that
    /// matches a subset of the language matched by the regular expression that
    /// generated these literal sets. The idea here is that the pattern
    /// returned by this method is much cheaper to search for. i.e., It is
    /// usually a single literal or an alternation of literals.
    pub(crate) fn one_regex(&self) -> Result<Option<Regex>, Error> {
        // An infinite sequence has no concrete literals to search for.
        let Some(lits) = self.seq.literals() else { return Ok(None) };
        if lits.is_empty() {
            return Ok(None);
        }
        // Build a single alternation over the extracted literal bytes.
        let mut alts = vec![];
        for lit in lits.iter() {
            alts.push(Hir::literal(lit.as_bytes()));
        }
        let hir = Hir::alternation(alts);
        log::debug!("extracted fast line regex: {:?}", hir.to_string());
        // The literals are arbitrary bytes, so don't require UTF-8
        // empty-match semantics for the prefilter regex.
        let re = Regex::builder()
            .configure(Regex::config().utf8_empty(false))
            .build_from_hir(&hir)
            .map_err(Error::regex)?;
        Ok(Some(re))
    }
}
/// An inner literal extractor.
///
/// This is a somewhat stripped down version of the extractor from
/// regex-syntax. The main difference is that we try to identify a "best" set
/// of required literals while traversing the HIR.
#[derive(Debug)]
struct Extractor {
    // Limits bounding how much work/space extraction may use before giving
    // up on a subexpression; defaults are set in `new`.
    limit_class: usize,
    limit_repeat: usize,
    /// Maximum length of any single extracted literal (see `extract`'s
    /// literal case, which calls `enforce_literal_len`).
    limit_literal_len: usize,
    limit_total: usize,
}
impl Extractor {
    /// Create a new inner literal extractor with a default configuration.
    fn new() -> Extractor {
        Extractor {
            // Heuristic defaults bounding the extraction work.
            limit_class: 10,
            limit_repeat: 10,
            limit_literal_len: 100,
            limit_total: 64,
        }
    }
    /// Execute the extractor at the top-level and return an untagged sequence
    /// of literals.
    fn extract_untagged(&self, hir: &Hir) -> Seq {
        let mut seq = self.extract(hir);
        log::trace!("extracted inner literals: {:?}", seq.seq);
        // Rearrange the sequence so it works well as a prefix-style
        // prefilter before judging its quality.
        seq.seq.optimize_for_prefix_by_preference();
        log::trace!(
            "extracted inner literals after optimization: {:?}",
            seq.seq
        );
        // A sequence judged "not good" could make the prefilter slower than
        // the regex engine itself, so discard it by making it infinite.
        if !seq.is_good() {
            log::trace!(
                "throwing away inner literals because they might be slow"
            );
            seq.make_infinite();
        }
        seq.seq
    }
    /// Execute the extractor and return a sequence of literals.
    fn extract(&self, hir: &Hir) -> TSeq {
        use regex_syntax::hir::HirKind::*;

        match *hir.kind() {
            // Zero-width constructs contribute an exact empty literal.
            Empty | Look(_) => TSeq::singleton(self::Literal::exact(vec![])),
            Literal(hir::Literal(ref bytes)) => {
                let mut seq =
                    TSeq::singleton(self::Literal::exact(bytes.to_vec()));
                // Keep individual literal sizes bounded (limit_literal_len).
                self.enforce_literal_len(&mut seq);
                seq
            }
            Class(hir::Class::Unicode(ref cls)) => {
                self.extract_class_unicode(cls)
            }
            Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls),
            Repetition(ref rep) => self.extract_repetition(rep),
            // Capture groups are transparent for literal extraction.
            Capture(hir::Capture { ref sub, .. }) => self.extract(sub),
            Concat(ref hirs) => self.extract_concat(hirs.iter()),
            Alternation(ref hirs) => self.extract_alternation(hirs.iter()),
        }
    }
    /// Extract a sequence from the given concatenation. Sequences from each of
    /// the child HIR expressions are combined via cross product.
    ///
    /// This short circuits once the cross product turns into a sequence
    /// containing only inexact literals.
    fn extract_concat<'a, I: Iterator<Item = &'a Hir>>(&self, it: I) -> TSeq {
        let mut seq = TSeq::singleton(self::Literal::exact(vec![]));
        let mut prev: Option<TSeq> = None;
        for hir in it {
            // If every element in the sequence is inexact, then a cross
            // product will always be a no-op. Thus, there is nothing else we
            // can add to it and can quit early. Note that this also includes
            // infinite sequences.
            if seq.is_inexact() {
                // If a concatenation has an empty sequence anywhere, then
                // it's impossible for the concatenation to ever match. So we
                // can just quit now.
                if seq.is_empty() {
                    return seq;
                }
                if seq.is_really_good() {
                    return seq;
                }
                // Stash the best candidate seen so far and restart extraction
                // for the remainder of the concatenation (as a non-prefix
                // sequence).
                prev = Some(match prev {
                    None => seq,
                    Some(prev) => prev.choose(seq),
                });
                seq = TSeq::singleton(self::Literal::exact(vec![]));
                seq.make_not_prefix();
            }
            // Note that 'cross' also dispatches based on whether we're
            // extracting prefixes or suffixes.
            seq = self.cross(seq, self.extract(hir));
        }
        if let Some(prev) = prev { prev.choose(seq) } else { seq }
    }
/// Extract a sequence from the given alternation.
///
/// This short circuits once the union turns into an infinite sequence.
fn extract_alternation<'a, I: Iterator<Item = &'a Hir>>(
&self,
it: I,
) -> TSeq {
let mut seq = TSeq::empty();
for hir in it {
// Once our 'seq' is infinite, every subsequent union
// operation on it will itself always result in an
// infinite sequence. Thus, it can never change and we can
// short-circuit.
if !seq.is_finite() {
break;
}
seq = self.union(seq, &mut self.extract(hir));
}
seq
}
    /// Extract a sequence of literals from the given repetition. We do our
    /// best. Some examples:
    ///
    ///   'a*'    => [inexact(a), exact("")]
    ///   'a*?'   => [exact(""), inexact(a)]
    ///   'a+'    => [inexact(a)]
    ///   'a{3}'  => [exact(aaa)]
    ///   'a{3,5} => [inexact(aaa)]
    ///
    /// The key here really is making sure we get the 'inexact' vs 'exact'
    /// attributes correct on each of the literals we add. For example, the
    /// fact that 'a*' gives us an inexact 'a' and an exact empty string means
    /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)]
    /// literals being extracted, which might actually be a better prefilter
    /// than just 'a'.
    fn extract_repetition(&self, rep: &hir::Repetition) -> TSeq {
        let mut subseq = self.extract(&rep.sub);
        match *rep {
            // 'e{0,max}' — the repetition may match the empty string.
            hir::Repetition { min: 0, max, greedy, .. } => {
                // When 'max=1', we can retain exactness, since 'a?' is
                // equivalent to 'a|'. Similarly below, 'a??' is equivalent to
                // '|a'.
                if max != Some(1) {
                    subseq.make_inexact();
                }
                let mut empty = TSeq::singleton(Literal::exact(vec![]));
                // Greediness decides whether the non-empty branch is
                // preferred (greedy) or the empty branch is (lazy); the
                // swap makes the union order reflect that preference.
                if !greedy {
                    std::mem::swap(&mut subseq, &mut empty);
                }
                self.union(subseq, &mut empty)
            }
            // 'e{n}' with n > 0 — exactly n copies, so cross the
            // sub-sequence with itself n times (up to the repeat limit).
            hir::Repetition { min, max: Some(max), .. } if min == max => {
                assert!(min > 0); // handled above
                let limit =
                    u32::try_from(self.limit_repeat).unwrap_or(u32::MAX);
                let mut seq = TSeq::singleton(Literal::exact(vec![]));
                for _ in 0..std::cmp::min(min, limit) {
                    if seq.is_inexact() {
                        break;
                    }
                    seq = self.cross(seq, subseq.clone());
                }
                // If we truncated the repeat count, the literals no longer
                // cover the full match and must be marked inexact.
                if usize::try_from(min).is_err() || min > limit {
                    seq.make_inexact();
                }
                seq
            }
            // 'e{n,m}' with n < m — cross the minimum number of copies and
            // mark the result inexact since more copies may follow.
            hir::Repetition { min, max: Some(max), .. } if min < max => {
                assert!(min > 0); // handled above
                let limit =
                    u32::try_from(self.limit_repeat).unwrap_or(u32::MAX);
                let mut seq = TSeq::singleton(Literal::exact(vec![]));
                for _ in 0..std::cmp::min(min, limit) {
                    if seq.is_inexact() {
                        break;
                    }
                    seq = self.cross(seq, subseq.clone());
                }
                seq.make_inexact();
                seq
            }
            // Unbounded ('e{n,}' with n > 0) — the literals from one copy
            // are a prefix of any match, but never the whole match.
            hir::Repetition { .. } => {
                subseq.make_inexact();
                subseq
            }
        }
    }
/// Convert the given Unicode class into a sequence of literals if the
/// class is small enough. If the class is too big, return an infinite
/// sequence.
fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> TSeq {
if self.class_over_limit_unicode(cls) {
return TSeq::infinite();
}
let mut seq = TSeq::empty();
for r in cls.iter() {
for ch in r.start()..=r.end() {
seq.push(Literal::from(ch));
}
}
self.enforce_literal_len(&mut seq);
seq
}
/// Convert the given byte class into a sequence of literals if the class
/// is small enough. If the class is too big, return an infinite sequence.
fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> TSeq {
if self.class_over_limit_bytes(cls) {
return TSeq::infinite();
}
let mut seq = TSeq::empty();
for r in cls.iter() {
for b in r.start()..=r.end() {
seq.push(Literal::from(b));
}
}
self.enforce_literal_len(&mut seq);
seq
}
/// Returns true if the given Unicode class exceeds the configured limits
/// on this extractor.
fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool {
let mut count = 0;
for r in cls.iter() {
if count > self.limit_class {
return true;
}
count += r.len();
}
count > self.limit_class
}
/// Returns true if the given byte class exceeds the configured limits on
/// this extractor.
fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool {
let mut count = 0;
for r in cls.iter() {
if count > self.limit_class {
return true;
}
count += r.len();
}
count > self.limit_class
}
    /// Compute the cross product of the two sequences if the result would be
    /// within configured limits. Otherwise, make `seq2` infinite and cross the
    /// infinite sequence with `seq1`.
    fn cross(&self, mut seq1: TSeq, mut seq2: TSeq) -> TSeq {
        // A sequence marked as "not a prefix" (see TSeq::make_not_prefix)
        // must not be appended to another sequence at all, so instead of
        // crossing, keep whichever of the two looks better.
        if !seq2.prefix {
            return seq1.choose(seq2);
        }
        if seq1
            .max_cross_len(&seq2)
            .map_or(false, |len| len > self.limit_total)
        {
            seq2.make_infinite();
        }
        seq1.cross_forward(&mut seq2);
        // The infinite fallback above guarantees we never exceed the cap.
        assert!(seq1.len().map_or(true, |x| x <= self.limit_total));
        self.enforce_literal_len(&mut seq1);
        seq1
    }
    /// Union the two sequences if the result would be within configured
    /// limits. Otherwise, make `seq2` infinite and union the infinite sequence
    /// with `seq1`.
    fn union(&self, mut seq1: TSeq, seq2: &mut TSeq) -> TSeq {
        if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total)
        {
            // We try to trim our literal sequences to see if we can make
            // room for more literals. The idea is that we'd rather trim down
            // literals already in our sequence if it means we can add a few
            // more and retain a finite sequence. Otherwise, we'll union with
            // an infinite sequence and that infects everything and effectively
            // stops literal extraction in its tracks.
            //
            // Why do we keep 4 bytes here? Well, it's a bit of an abstraction
            // leakage. Downstream, the literals may wind up getting fed to
            // the Teddy algorithm, which supports searching literals up to
            // length 4. So that's why we pick that number here. Arguably this
            // should be a tuneable parameter, but it seems a little tricky to
            // describe. And I'm still unsure if this is the right way to go
            // about culling literal sequences.
            seq1.keep_first_bytes(4);
            seq2.keep_first_bytes(4);
            seq1.dedup();
            seq2.dedup();
            if seq1
                .max_union_len(seq2)
                .map_or(false, |len| len > self.limit_total)
            {
                seq2.make_infinite();
            }
        }
        seq1.union(seq2);
        // The infinite fallback above guarantees we never exceed the cap.
        assert!(seq1.len().map_or(true, |x| x <= self.limit_total));
        // The union is a prefix sequence only if both operands were.
        seq1.prefix = seq1.prefix && seq2.prefix;
        seq1
    }
    /// Applies the literal length limit to the given sequence. If none of the
    /// literals in the sequence exceed the limit, then this is a no-op.
    fn enforce_literal_len(&self, seq: &mut TSeq) {
        // NOTE(review): truncation presumably marks shortened literals as
        // inexact — confirm in Seq::keep_first_bytes.
        seq.keep_first_bytes(self.limit_literal_len);
    }
}
/// A literal sequence tagged with whether it may still be extended on the
/// right via a cross product.
#[derive(Clone, Debug)]
struct TSeq {
    // The underlying literal sequence.
    seq: Seq,
    // When false, this sequence must not be crossed onto the end of another
    // sequence (see make_not_prefix and Extractor::cross).
    prefix: bool,
}
#[allow(dead_code)]
impl TSeq {
    // Constructors. All freshly built sequences start as prefix sequences.
    fn empty() -> TSeq {
        TSeq { seq: Seq::empty(), prefix: true }
    }
    fn infinite() -> TSeq {
        TSeq { seq: Seq::infinite(), prefix: true }
    }
    fn singleton(lit: Literal) -> TSeq {
        TSeq { seq: Seq::singleton(lit), prefix: true }
    }
    fn new<I, B>(it: I) -> TSeq
    where
        I: IntoIterator<Item = B>,
        B: AsRef<[u8]>,
    {
        TSeq { seq: Seq::new(it), prefix: true }
    }
    // Thin forwarders to the underlying Seq.
    fn literals(&self) -> Option<&[Literal]> {
        self.seq.literals()
    }
    fn push(&mut self, lit: Literal) {
        self.seq.push(lit);
    }
    fn make_inexact(&mut self) {
        self.seq.make_inexact();
    }
    fn make_infinite(&mut self) {
        self.seq.make_infinite();
    }
    fn cross_forward(&mut self, other: &mut TSeq) {
        // Only prefix sequences may be appended to another sequence.
        assert!(other.prefix);
        self.seq.cross_forward(&mut other.seq);
    }
    fn union(&mut self, other: &mut TSeq) {
        self.seq.union(&mut other.seq);
    }
    fn dedup(&mut self) {
        self.seq.dedup();
    }
    fn sort(&mut self) {
        self.seq.sort();
    }
    fn keep_first_bytes(&mut self, len: usize) {
        self.seq.keep_first_bytes(len);
    }
    fn is_finite(&self) -> bool {
        self.seq.is_finite()
    }
    fn is_empty(&self) -> bool {
        self.seq.is_empty()
    }
    fn len(&self) -> Option<usize> {
        self.seq.len()
    }
    fn is_exact(&self) -> bool {
        self.seq.is_exact()
    }
    fn is_inexact(&self) -> bool {
        self.seq.is_inexact()
    }
    fn max_union_len(&self, other: &TSeq) -> Option<usize> {
        self.seq.max_union_len(&other.seq)
    }
    fn max_cross_len(&self, other: &TSeq) -> Option<usize> {
        // Only prefix sequences may be appended to another sequence.
        assert!(other.prefix);
        self.seq.max_cross_len(&other.seq)
    }
    fn min_literal_len(&self) -> Option<usize> {
        self.seq.min_literal_len()
    }
    fn max_literal_len(&self) -> Option<usize> {
        self.seq.max_literal_len()
    }
    // Below are methods specific to a TSeq that aren't just forwarding calls
    // to a Seq method.
    /// Tags this sequence as "not a prefix." When this happens, this sequence
    /// can't be crossed as a suffix of another sequence.
    fn make_not_prefix(&mut self) {
        self.prefix = false;
    }
    /// Returns true if it's believed that the sequence given is "good" for
    /// acceleration. This is useful for determining whether a sequence of
    /// literals has any shot of being fast.
    fn is_good(&self) -> bool {
        if self.has_poisonous_literal() {
            return false;
        }
        let Some(min) = self.min_literal_len() else { return false };
        let Some(len) = self.len() else { return false };
        // If we have some very short literals, then let's require that our
        // sequence is itself very small.
        if min <= 1 {
            return len <= 3;
        }
        min >= 2 && len <= 64
    }
    /// Returns true if it's believed that the sequence given is "really
    /// good" for acceleration. This is useful for short circuiting literal
    /// extraction.
    fn is_really_good(&self) -> bool {
        if self.has_poisonous_literal() {
            return false;
        }
        let Some(min) = self.min_literal_len() else { return false };
        let Some(len) = self.len() else { return false };
        // A tighter bar than is_good: longer literals, fewer of them.
        min >= 3 && len <= 8
    }
    /// Returns true if the given sequence contains a poisonous literal.
    fn has_poisonous_literal(&self) -> bool {
        let Some(lits) = self.literals() else { return false };
        lits.iter().any(is_poisonous)
    }
    /// Compare the two sequences and return the one that is believed to be
    /// best according to a hodge podge of heuristics.
    fn choose(self, other: TSeq) -> TSeq {
        let (mut seq1, mut seq2) = (self, other);
        // Whichever one we pick, by virtue of picking one, we choose
        // to not take the other. So we must consider the result inexact.
        seq1.make_inexact();
        seq2.make_inexact();
        // Prefer a finite sequence over an infinite one.
        if !seq1.is_finite() {
            return seq2;
        } else if !seq2.is_finite() {
            return seq1;
        }
        // Prefer a sequence without poisonous (very common) literals.
        if seq1.has_poisonous_literal() {
            return seq2;
        } else if seq2.has_poisonous_literal() {
            return seq1;
        }
        let Some(min1) = seq1.min_literal_len() else { return seq2 };
        let Some(min2) = seq2.min_literal_len() else { return seq1 };
        // Prefer the sequence whose shortest literal is longer.
        if min1 < min2 {
            return seq2;
        } else if min2 < min1 {
            return seq1;
        }
        // OK because we know both sequences are finite, otherwise they wouldn't
        // have a minimum literal length.
        let len1 = seq1.len().unwrap();
        let len2 = seq2.len().unwrap();
        // On a minimum-length tie, prefer the sequence with more literals.
        if len1 < len2 {
            return seq2;
        } else if len2 < len1 {
            return seq1;
        }
        // We could do extra stuff like looking at a background frequency
        // distribution of bytes and picking the one that looks more rare, but for
        // now we just pick one.
        seq1
    }
}
impl FromIterator<Literal> for TSeq {
    /// Collect literals into a sequence, tagged as a prefix sequence.
    fn from_iter<T: IntoIterator<Item = Literal>>(it: T) -> TSeq {
        let seq = Seq::from_iter(it);
        TSeq { seq, prefix: true }
    }
}
/// Returns true if it is believed that this literal is likely to match very
/// frequently, and is thus not a good candidate for a prefilter.
fn is_poisonous(lit: &Literal) -> bool {
    use regex_syntax::hir::literal::rank;
    match lit.as_bytes() {
        // The empty literal matches everywhere.
        [] => true,
        // A single very common byte (rank >= 250) matches too often.
        [byte] => rank(*byte) >= 250,
        _ => false,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Parse `pattern` with UTF-8 mode disabled and run the extractor,
    /// returning the optimized, untagged literal sequence.
    fn e(pattern: impl AsRef<str>) -> Seq {
        let pattern = pattern.as_ref();
        let hir = regex_syntax::ParserBuilder::new()
            .utf8(false)
            .build()
            .parse(pattern)
            .unwrap();
        Extractor::new().extract_untagged(&hir)
    }
    /// Shorthand for an exact literal.
    #[allow(non_snake_case)]
    fn E(x: &str) -> Literal {
        Literal::exact(x.as_bytes())
    }
    /// Shorthand for an inexact literal.
    #[allow(non_snake_case)]
    fn I(x: &str) -> Literal {
        Literal::inexact(x.as_bytes())
    }
    fn seq<I: IntoIterator<Item = Literal>>(it: I) -> Seq {
        Seq::from_iter(it)
    }
    // Same implementation as `seq`; the distinct name documents intent at
    // the call site (a sequence containing inexact literals).
    fn inexact<I>(it: I) -> Seq
    where
        I: IntoIterator<Item = Literal>,
    {
        Seq::from_iter(it)
    }
    /// Build a sequence of exact literals from raw byte strings.
    fn exact<B: AsRef<[u8]>, I: IntoIterator<Item = B>>(it: I) -> Seq {
        Seq::new(it)
    }
    #[test]
    fn various() {
        assert_eq!(e(r"foo"), seq([E("foo")]));
        assert_eq!(e(r"[a-z]foo[a-z]"), seq([I("foo")]));
        assert_eq!(e(r"[a-z](foo)(bar)[a-z]"), seq([I("foobar")]));
        assert_eq!(e(r"[a-z]([a-z]foo)(bar[a-z])[a-z]"), seq([I("foo")]));
        assert_eq!(e(r"[a-z]([a-z]foo)([a-z]foo)[a-z]"), seq([I("foo")]));
        assert_eq!(e(r"(\d{1,3}\.){3}\d{1,3}"), seq([I(".")]));
        assert_eq!(e(r"[a-z]([a-z]foo){3}[a-z]"), seq([I("foo")]));
        assert_eq!(e(r"[a-z](foo[a-z]){3}[a-z]"), seq([I("foo")]));
        assert_eq!(e(r"[a-z]([a-z]foo[a-z]){3}[a-z]"), seq([I("foo")]));
        assert_eq!(
            e(r"[a-z]([a-z]foo){3}(bar[a-z]){3}[a-z]"),
            seq([I("foo")])
        );
    }
    // These test that some of our suspicious heuristics try to "pick better
    // literals."
    #[test]
    fn heuristics() {
        // Here, the first literals we stumble across are {ab, cd, ef}. But we
        // keep going and our heuristics decide that {hiya} is better. (And it
        // should be, since it's just one literal and it's longer.)
        assert_eq!(e(r"[a-z]+(ab|cd|ef)[a-z]+hiya[a-z]+"), seq([I("hiya")]));
        // But here, the first alternation becomes "good enough" that literal
        // extraction short circuits early. {hiya} is probably still a better
        // choice here, but {abc, def, ghi} is not bad.
        assert_eq!(
            e(r"[a-z]+(abc|def|ghi)[a-z]+hiya[a-z]+"),
            seq([I("abc"), I("def"), I("ghi")])
        );
    }
    #[test]
    fn literal() {
        assert_eq!(exact(["a"]), e("a"));
        assert_eq!(exact(["aaaaa"]), e("aaaaa"));
        assert_eq!(exact(["A", "a"]), e("(?i-u)a"));
        assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab"));
        assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c"));
        assert_eq!(Seq::infinite(), e(r"(?-u:\xFF)"));
        assert_eq!(exact([b"Z"]), e(r"Z"));
        assert_eq!(exact(["☃"]), e("☃"));
        assert_eq!(exact(["☃"]), e("(?i)☃"));
        assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃"));
        assert_eq!(exact(["Δ"]), e("Δ"));
        assert_eq!(exact(["δ"]), e("δ"));
        assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ"));
        assert_eq!(exact(["Δ", "δ"]), e("(?i)δ"));
        assert_eq!(exact(["S", "s", "ſ"]), e("(?i)S"));
        assert_eq!(exact(["S", "s", "ſ"]), e("(?i)s"));
        assert_eq!(exact(["S", "s", "ſ"]), e("(?i)ſ"));
        let letters = "ͱͳͷΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ";
        assert_eq!(exact([letters]), e(letters));
    }
    #[test]
    fn class() {
        assert_eq!(exact(["a", "b", "c"]), e("[abc]"));
        assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b"));
        assert_eq!(exact(["δ", "ε"]), e("[εδ]"));
        assert_eq!(exact(["Δ", "Ε", "δ", "ε", "ϵ"]), e(r"(?i)[εδ]"));
    }
    #[test]
    fn look() {
        assert_eq!(exact(["ab"]), e(r"a\Ab"));
        assert_eq!(exact(["ab"]), e(r"a\zb"));
        assert_eq!(exact(["ab"]), e(r"a(?m:^)b"));
        assert_eq!(exact(["ab"]), e(r"a(?m:$)b"));
        assert_eq!(exact(["ab"]), e(r"a\bb"));
        assert_eq!(exact(["ab"]), e(r"a\Bb"));
        assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b"));
        assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b"));
        assert_eq!(exact(["ab"]), e(r"^ab"));
        assert_eq!(exact(["ab"]), e(r"$ab"));
        assert_eq!(exact(["ab"]), e(r"(?m:^)ab"));
        assert_eq!(exact(["ab"]), e(r"(?m:$)ab"));
        assert_eq!(exact(["ab"]), e(r"\bab"));
        assert_eq!(exact(["ab"]), e(r"\Bab"));
        assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab"));
        assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab"));
        assert_eq!(exact(["ab"]), e(r"ab^"));
        assert_eq!(exact(["ab"]), e(r"ab$"));
        assert_eq!(exact(["ab"]), e(r"ab(?m:^)"));
        assert_eq!(exact(["ab"]), e(r"ab(?m:$)"));
        assert_eq!(exact(["ab"]), e(r"ab\b"));
        assert_eq!(exact(["ab"]), e(r"ab\B"));
        assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)"));
        assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)"));
        assert_eq!(seq([I("aZ"), E("ab")]), e(r"^aZ*b"));
    }
    #[test]
    fn repetition() {
        assert_eq!(Seq::infinite(), e(r"a?"));
        assert_eq!(Seq::infinite(), e(r"a??"));
        assert_eq!(Seq::infinite(), e(r"a*"));
        assert_eq!(Seq::infinite(), e(r"a*?"));
        assert_eq!(inexact([I("a")]), e(r"a+"));
        assert_eq!(inexact([I("a")]), e(r"(a+)+"));
        assert_eq!(exact(["ab"]), e(r"aZ{0}b"));
        assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b"));
        assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b"));
        assert_eq!(inexact([I("aZ"), E("ab")]), e(r"aZ*b"));
        assert_eq!(inexact([E("ab"), I("aZ")]), e(r"aZ*?b"));
        assert_eq!(inexact([I("aZ")]), e(r"aZ+b"));
        assert_eq!(inexact([I("aZ")]), e(r"aZ+?b"));
        assert_eq!(exact(["aZZb"]), e(r"aZ{2}b"));
        assert_eq!(inexact([I("aZZ")]), e(r"aZ{2,3}b"));
        assert_eq!(Seq::infinite(), e(r"(abc)?"));
        assert_eq!(Seq::infinite(), e(r"(abc)??"));
        assert_eq!(inexact([I("a"), E("b")]), e(r"a*b"));
        assert_eq!(inexact([E("b"), I("a")]), e(r"a*?b"));
        assert_eq!(inexact([I("ab")]), e(r"ab+"));
        assert_eq!(inexact([I("a"), I("b")]), e(r"a*b+"));
        assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"a*b*c"));
        assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"(a+)?(b+)?c"));
        assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"(a+|)(b+|)c"));
        // A few more similarish but not identical regexes. These may have a
        // similar problem as above.
        assert_eq!(Seq::infinite(), e(r"a*b*c*"));
        assert_eq!(inexact([I("a"), I("b"), I("c")]), e(r"a*b*c+"));
        assert_eq!(inexact([I("a"), I("b")]), e(r"a*b+c"));
        assert_eq!(inexact([I("a"), I("b")]), e(r"a*b+c*"));
        assert_eq!(inexact([I("ab"), E("a")]), e(r"ab*"));
        assert_eq!(inexact([I("ab"), E("ac")]), e(r"ab*c"));
        assert_eq!(inexact([I("ab")]), e(r"ab+"));
        assert_eq!(inexact([I("ab")]), e(r"ab+c"));
        assert_eq!(inexact([I("z"), E("azb")]), e(r"z*azb"));
        let expected =
            exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]);
        assert_eq!(expected, e(r"[ab]{3}"));
        let expected = inexact([
            I("aaa"),
            I("aab"),
            I("aba"),
            I("abb"),
            I("baa"),
            I("bab"),
            I("bba"),
            I("bbb"),
        ]);
        assert_eq!(expected, e(r"[ab]{3,4}"));
    }
    #[test]
    fn concat() {
        assert_eq!(exact(["abcxyz"]), e(r"abc()xyz"));
        assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)"));
        assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz"));
        assert_eq!(Seq::infinite(), e(r"abc[a&&b]xyz"));
        assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz"));
    }
    #[test]
    fn alternation() {
        assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz"));
        assert_eq!(
            inexact([E("abc"), I("mZ"), E("mo"), E("xyz")]),
            e(r"abc|mZ*o|xyz")
        );
        assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz"));
        assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz"));
        assert_eq!(exact(["aaa"]), e(r"(?:|aa)aaa"));
        assert_eq!(Seq::infinite(), e(r"(?:|aa)(?:aaa)*"));
        assert_eq!(Seq::infinite(), e(r"(?:|aa)(?:aaa)*?"));
        assert_eq!(Seq::infinite(), e(r"a|b*"));
        assert_eq!(inexact([E("a"), I("b")]), e(r"a|b+"));
        assert_eq!(inexact([I("a"), E("b"), E("c")]), e(r"a*b|c"));
        assert_eq!(Seq::infinite(), e(r"a|(?:b|c*)"));
        assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"(a|b)*c|(a|ab)*c"));
        assert_eq!(
            exact(["abef", "abgh", "cdef", "cdgh"]),
            e(r"(ab|cd)(ef|gh)")
        );
        assert_eq!(
            exact([
                "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl",
                "cdghij", "cdghkl",
            ]),
            e(r"(ab|cd)(ef|gh)(ij|kl)")
        );
    }
    #[test]
    fn impossible() {
        // N.B. The extractor in this module "optimizes" the sequence and makes
        // it infinite if it isn't "good." An empty sequence (generated by a
        // concatenantion containing an expression that can never match) is
        // considered "not good." Since infinite sequences are not actionably
        // and disable optimizations, this winds up being okay.
        //
        // The literal extractor in regex-syntax doesn't combine these two
        // steps and makes the caller choose to optimize. That is, it returns
        // the sequences as they are. Which in this case, for some of the tests
        // below, would be an empty Seq and not an infinite Seq.
        assert_eq!(Seq::infinite(), e(r"[a&&b]"));
        assert_eq!(Seq::infinite(), e(r"a[a&&b]"));
        assert_eq!(Seq::infinite(), e(r"[a&&b]b"));
        assert_eq!(Seq::infinite(), e(r"a[a&&b]b"));
        assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]|b"));
        assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]|b"));
        assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]d|b"));
        assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]d|b"));
        assert_eq!(Seq::infinite(), e(r"[a&&b]*"));
        assert_eq!(exact(["MN"]), e(r"M[a&&b]*N"));
    }
    // This tests patterns that contain something that defeats literal
    // detection, usually because it would blow some limit on the total number
    // of literals that can be returned.
    //
    // The main idea is that when literal extraction sees something that
    // it knows will blow a limit, it replaces it with a marker that says
    // "any literal will match here." While not necessarily true, the
    // over-estimation is just fine for the purposes of literal extraction,
    // because the imprecision doesn't matter: too big is too big.
    //
    // This is one of the trickier parts of literal extraction, since we need
    // to make sure all of our literal extraction operations correctly compose
    // with the markers.
    //
    // Note that unlike in regex-syntax, some of these have "inner" literals
    // extracted where a prefix or suffix would otherwise not be found.
    #[test]
    fn anything() {
        assert_eq!(Seq::infinite(), e(r"."));
        assert_eq!(Seq::infinite(), e(r"(?s)."));
        assert_eq!(Seq::infinite(), e(r"[A-Za-z]"));
        assert_eq!(Seq::infinite(), e(r"[A-Z]"));
        assert_eq!(Seq::infinite(), e(r"[A-Z]{0}"));
        assert_eq!(Seq::infinite(), e(r"[A-Z]?"));
        assert_eq!(Seq::infinite(), e(r"[A-Z]*"));
        assert_eq!(Seq::infinite(), e(r"[A-Z]+"));
        assert_eq!(seq([I("1")]), e(r"1[A-Z]"));
        assert_eq!(seq([I("1")]), e(r"1[A-Z]2"));
        assert_eq!(seq([I("123")]), e(r"[A-Z]+123"));
        assert_eq!(seq([I("123")]), e(r"[A-Z]+123[A-Z]+"));
        assert_eq!(Seq::infinite(), e(r"1|[A-Z]|3"));
        assert_eq!(seq([E("1"), I("2"), E("3")]), e(r"1|2[A-Z]|3"),);
        assert_eq!(seq([E("1"), I("2"), E("3")]), e(r"1|[A-Z]2[A-Z]|3"),);
        assert_eq!(seq([E("1"), I("2"), E("3")]), e(r"1|[A-Z]2|3"),);
        assert_eq!(seq([E("1"), I("2"), E("4")]), e(r"1|2[A-Z]3|4"),);
        assert_eq!(seq([I("2")]), e(r"(?:|1)[A-Z]2"));
        assert_eq!(inexact([I("a")]), e(r"a.z"));
    }
    #[test]
    fn empty() {
        assert_eq!(Seq::infinite(), e(r""));
        assert_eq!(Seq::infinite(), e(r"^"));
        assert_eq!(Seq::infinite(), e(r"$"));
        assert_eq!(Seq::infinite(), e(r"(?m:^)"));
        assert_eq!(Seq::infinite(), e(r"(?m:$)"));
        assert_eq!(Seq::infinite(), e(r"\b"));
        assert_eq!(Seq::infinite(), e(r"\B"));
        assert_eq!(Seq::infinite(), e(r"(?-u:\b)"));
        assert_eq!(Seq::infinite(), e(r"(?-u:\B)"));
    }
    #[test]
    fn crazy_repeats() {
        assert_eq!(Seq::infinite(), e(r"(?:){4294967295}"));
        assert_eq!(Seq::infinite(), e(r"(?:){64}{64}{64}{64}{64}{64}"));
        assert_eq!(Seq::infinite(), e(r"x{0}{4294967295}"));
        assert_eq!(Seq::infinite(), e(r"(?:|){4294967295}"));
        assert_eq!(
            Seq::infinite(),
            e(r"(?:){8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}")
        );
        let repa = "a".repeat(100);
        assert_eq!(
            inexact([I(&repa)]),
            e(r"a{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}")
        );
    }
    #[test]
    fn optimize() {
        // This gets a common prefix that isn't too short.
        let s = e(r"foobarfoobar|foobar|foobarzfoobar|foobarfoobar");
        assert_eq!(seq([I("foobar")]), s);
        // This also finds a common prefix, but since it's only one byte, it
        // prefers the multiple literals.
        let s = e(r"abba|akka|abccba");
        assert_eq!(exact(["abba", "akka", "abccba"]), s);
        let s = e(r"sam|samwise");
        assert_eq!(seq([E("sam")]), s);
        // The empty string is poisonous, so our seq becomes infinite, even
        // though all literals are exact.
        let s = e(r"foobarfoo|foo||foozfoo|foofoo");
        assert_eq!(Seq::infinite(), s);
        // A space is also poisonous, so our seq becomes infinite. But this
        // only gets triggered when we don't have a completely exact sequence.
        // When the sequence is exact, spaces are okay, since we presume that
        // any prefilter will match a space more quickly than the regex engine.
        // (When the sequence is exact, there's a chance of the prefilter being
        // used without needing the regex engine at all.)
        let s = e(r"foobarfoo|foo| |foofoo");
        assert_eq!(Seq::infinite(), s);
    }
    // Regression test for: https://github.com/BurntSushi/ripgrep/issues/2884
    #[test]
    fn case_insensitive_alternation() {
        let s = e(r"(?i:e.x|ex)");
        assert_eq!(s, seq([I("X"), I("x")]));
    }
}
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import signal
import time
from six import moves
from tempest import auth
from tempest import clients
from tempest.common import ssh
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
from tempest.stress import cleanup
# Global tempest configuration handle.
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Registry of spawned worker processes. Each entry is a dict with the keys
# 'process', 'p_number', 'action' and 'statistic' (see stress_openstack()).
processes = []
def do_ssh(command, host, ssh_user, ssh_key=None):
    """Run *command* on *host* over SSH.

    :param command: shell command to execute remotely
    :param host: target host name or address
    :param ssh_user: user to authenticate as
    :param ssh_key: optional path to a private key file
    :returns: the command output, or None if the remote command failed
    """
    ssh_client = ssh.Client(host, ssh_user, key_filename=ssh_key)
    try:
        return ssh_client.exec_command(command)
    except exceptions.SSHExecCommandFailed:
        # Pass arguments lazily instead of pre-formatting with '%' so the
        # message is only rendered when the error level is actually emitted.
        LOG.error('do_ssh raise exception. command:%s, host:%s.',
                  command, host)
        return None
def _get_compute_nodes(controller, ssh_user, ssh_key=None):
    """Return a list of active compute nodes.

    The list is generated by running nova-manage on the controller.
    An empty list is returned when the remote command fails.
    """
    nodes = []
    cmd = "nova-manage service list | grep ^nova-compute"
    output = do_ssh(cmd, controller, ssh_user, ssh_key)
    if not output:
        return nodes
    # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
    # This is fragile but there is, at present, no other way to get this info.
    for line in output.split('\n'):
        words = line.split()
        # Require at least five columns before indexing words[4]; the
        # previous `len(words) > 0` check raised IndexError on short lines
        # (e.g. the empty trailing line produced by split('\n')).
        if len(words) > 4 and words[4] == ":-)":
            nodes.append(words[1])
    return nodes
def _has_error_in_logs(logfiles, nodes, ssh_user, ssh_key=None,
                       stop_on_error=False):
    """Detect errors in the nova log files on the controller and compute nodes.

    :param logfiles: shell glob of log files to grep on each node
    :param nodes: hosts to inspect
    :param stop_on_error: stop scanning after the first node with errors
    :returns: True if any node's logs contain ERROR or TRACE lines
    """
    grep = 'egrep "ERROR|TRACE" %s' % logfiles
    ret = False
    for node in nodes:
        errors = do_ssh(grep, node, ssh_user, ssh_key)
        # do_ssh returns None when the remote command fails (including when
        # egrep finds nothing); the previous `len(errors) > 0` raised
        # TypeError in that case. A truthiness test covers both None and ''.
        if errors:
            LOG.error('%s: %s' % (node, errors))
            ret = True
            if stop_on_error:
                break
    return ret
def sigchld_handler(signalnum, frame):
    """Signal handler (only active if stop_on_error is True)."""
    for entry in processes:
        proc = entry['process']
        # Ignore workers that are still running or exited cleanly.
        if proc.is_alive() or proc.exitcode == 0:
            continue
        # A worker died with a non-zero exit code: restore the default
        # handler and tear everything down.
        signal.signal(signalnum, signal.SIG_DFL)
        terminate_all_processes()
        break
def terminate_all_processes(check_interval=20):
    """Goes through the process list and terminates all child processes."""
    LOG.info("Stopping all processes.")
    # First pass: ask every live worker to terminate.
    for entry in processes:
        proc = entry['process']
        if proc.is_alive():
            try:
                proc.terminate()
            except Exception:
                pass
    # Give the workers some time to exit on their own.
    time.sleep(check_interval)
    # Second pass: force-kill stragglers, then reap everything.
    for entry in processes:
        proc = entry['process']
        if proc.is_alive():
            try:
                pid = proc.pid
                LOG.warn("Process %d hangs. Send SIGKILL." % pid)
                os.kill(pid, signal.SIGKILL)
            except Exception:
                pass
        proc.join()
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """Workload driver. Executes an action function against a nova-cluster.

    :param tests: list of test descriptors (dicts with 'action', optional
        'threads', 'kwargs', 'use_admin', 'use_isolated_tenants')
    :param duration: wall-clock run time in seconds (used when max_runs
        is None)
    :param max_runs: optional per-worker run limit; when set, the driver
        waits for all workers to finish instead of watching the clock
    :param stop_on_error: abort all workers as soon as one of them fails
    :returns: 1 if any worker failed or errors were found in logs, else 0
    """
    admin_manager = clients.AdminManager()
    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        # Start from clean logs so only errors from this run are detected.
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                # Give each worker its own throwaway tenant/user.
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "pass"
                identity_client = admin_manager.identity_client
                tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                creds = auth.get_credentials(username=username,
                                             password=password,
                                             tenant_name=tenant_name)
                manager = clients.Manager(credentials=creds)
            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)
            kwargs = test.get('kwargs', {})
            # NOTE: dict.iteritems() does not exist on Python 3 (this module
            # otherwise uses six for py2/py3 compat); unpacking the dict
            # directly is equivalent and portable.
            test_run.setUp(**kwargs)
            # Lazy log-argument formatting: only rendered if debug is on.
            LOG.debug("calling Target Object %s",
                      test_run.__class__.__name__)
            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0
            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))
            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}
            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                # Time-bounded run: stop once the duration has elapsed.
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                # Run-bounded: stop once every worker process has exited.
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break
            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                # Generator form avoids building an intermediate list and
                # lets any() short-circuit on the first failure.
                if any(proc['statistic']['fails'] > 0
                       for proc in processes):
                    break
            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")
    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()
    sum_fails = 0
    sum_runs = 0
    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
                 (process['p_number'],
                  process['action'],
                  process['statistic']['runs'],
                  process['statistic']['fails']))
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" %
             (sum_runs, sum_fails))
    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'BgpAfiSafiEnum' : _MetaInfoEnum('BgpAfiSafiEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'ipv4-mdt':'ipv4_mdt',
'ipv4-multicast':'ipv4_multicast',
'ipv4-unicast':'ipv4_unicast',
'ipv4-mvpn':'ipv4_mvpn',
'ipv4-flowspec':'ipv4_flowspec',
'ipv6-multicast':'ipv6_multicast',
'ipv6-unicast':'ipv6_unicast',
'ipv6-mvpn':'ipv6_mvpn',
'ipv6-flowspec':'ipv6_flowspec',
'l2vpn-vpls':'l2vpn_vpls',
'l2vpn-e-vpn':'l2vpn_e_vpn',
'nsap-unicast':'nsap_unicast',
'rtfilter-unicast':'rtfilter_unicast',
'vpnv4-multicast':'vpnv4_multicast',
'vpnv4-unicast':'vpnv4_unicast',
'vpnv6-unicast':'vpnv6_unicast',
'vpnv6-multicast':'vpnv6_multicast',
'vpnv4-flowspec':'vpnv4_flowspec',
'vpnv6-flowspec':'vpnv6_flowspec',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpOriginCodeEnum' : _MetaInfoEnum('BgpOriginCodeEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'origin-igp':'origin_igp',
'origin-egp':'origin_egp',
'origin-incomplete':'origin_incomplete',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpRpkiStatusEnum' : _MetaInfoEnum('BgpRpkiStatusEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'rpki-valid':'rpki_valid',
'rpki-invalid':'rpki_invalid',
'rpki-not-found':'rpki_not_found',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpModeEnum' : _MetaInfoEnum('BgpModeEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'active':'active',
'passive':'passive',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpRouteOptionEnum' : _MetaInfoEnum('BgpRouteOptionEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'bgp-all-routes':'bgp_all_routes',
'bgp-cidr-only-routes':'bgp_cidr_only_routes',
'bgp-dampened-routes':'bgp_dampened_routes',
'bgp-rib-fail-routes':'bgp_rib_fail_routes',
'bgp-injected-routes':'bgp_injected_routes',
'bgp-pending-routes':'bgp_pending_routes',
'bgp-inconsistent-routes':'bgp_inconsistent_routes',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpFsmStateEnum' : _MetaInfoEnum('BgpFsmStateEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'idle':'idle',
'connect':'connect',
'active':'active',
'opensent':'opensent',
'openconfirm':'openconfirm',
'established':'established',
'nonnegotiated':'nonnegotiated',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpLinkEnum' : _MetaInfoEnum('BgpLinkEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'internal':'internal',
'external':'external',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'TcpFsmStateEnum' : _MetaInfoEnum('TcpFsmStateEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'closed':'closed',
'listen':'listen',
'synsent':'synsent',
'synrcvd':'synrcvd',
'established':'established',
'finwait1':'finwait1',
'finwait2':'finwait2',
'closewait':'closewait',
'lastack':'lastack',
'closing':'closing',
'timewait':'timewait',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers',
False,
[
_MetaInfoClassMember('hold-time', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Hold time
''',
'hold_time',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('keepalive-interval', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' keepalive interval
''',
'keepalive_interval',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'negotiated-keepalive-timers',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent',
False,
[
_MetaInfoClassMember('keepalives', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KEEPALIVE messages
''',
'keepalives',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('notifications', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' NOTIFICATION messages
''',
'notifications',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('opens', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' OPEN messages
''',
'opens',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-refreshes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route refresh messages
''',
'route_refreshes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('updates', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' UPDATE messages
''',
'updates',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'sent',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received',
False,
[
_MetaInfoClassMember('keepalives', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KEEPALIVE messages
''',
'keepalives',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('notifications', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' NOTIFICATION messages
''',
'notifications',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('opens', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' OPEN messages
''',
'opens',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-refreshes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route refresh messages
''',
'route_refreshes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('updates', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' UPDATE messages
''',
'updates',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'received',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters',
False,
[
_MetaInfoClassMember('inq-depth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input Q depth
''',
'inq_depth',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('outq-depth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output Q depth
''',
'outq_depth',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('received', REFERENCE_CLASS, 'Received' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received',
[], [],
''' ''',
'received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('sent', REFERENCE_CLASS, 'Sent' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent',
[], [],
''' ''',
'sent',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-counters',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.Connection' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.Connection',
False,
[
_MetaInfoClassMember('last-reset', ATTRIBUTE, 'str' , None, None,
[], [],
''' since the peering session was last reset
''',
'last_reset',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'BgpModeEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpModeEnum',
[], [],
''' ''',
'mode',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('reset-reason', ATTRIBUTE, 'str' , None, None,
[], [],
''' The reason for the last reset
''',
'reset_reason',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'TcpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'TcpFsmStateEnum',
[], [],
''' TCP FSM state
''',
'state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-dropped', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of times that a valid session has failed
or been taken down
''',
'total_dropped',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-established', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of times a TCP and BGP connection has been
successfully established
''',
'total_established',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'connection',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.Transport' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.Transport',
False,
[
_MetaInfoClassMember('foreign-host', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('foreign-host', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('foreign-host', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('foreign-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Remote port used by the peer for the TCP session
''',
'foreign_port',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('local-host', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('local-host', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('local-host', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('local-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Local TCP port used for TCP session
''',
'local_port',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('mss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Maximum Data segment size
''',
'mss',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('path-mtu-discovery', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ''',
'path_mtu_discovery',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'transport',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity.Sent' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity.Sent',
False,
[
_MetaInfoClassMember('bestpaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as best paths
''',
'bestpaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('current-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Current number of prefixes accepted
''',
'current_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('explicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of times that a prefix has been withdrawn
because it is no longer feasible
''',
'explicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('implicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' number of times that a prefix has been withdrawn
and readvertised
''',
'implicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('multipaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as multipaths
''',
'multipaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total number of prefixes accepted
''',
'total_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'sent',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity.Received' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity.Received',
False,
[
_MetaInfoClassMember('bestpaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as best paths
''',
'bestpaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('current-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Current number of prefixes accepted
''',
'current_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('explicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of times that a prefix has been withdrawn
because it is no longer feasible
''',
'explicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('implicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' number of times that a prefix has been withdrawn
and readvertised
''',
'implicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('multipaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as multipaths
''',
'multipaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total number of prefixes accepted
''',
'total_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'received',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity',
False,
[
_MetaInfoClassMember('received', REFERENCE_CLASS, 'Received' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity.Received',
[], [],
''' ''',
'received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('sent', REFERENCE_CLASS, 'Sent' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity.Sent',
[], [],
''' ''',
'sent',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'prefix-activity',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor',
False,
[
_MetaInfoClassMember('afi-safi', REFERENCE_ENUM_CLASS, 'BgpAfiSafiEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpAfiSafiEnum',
[], [],
''' ''',
'afi_safi',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'vrf_name',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('neighbor-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'neighbor_id',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('bgp-neighbor-counters', REFERENCE_CLASS, 'BgpNeighborCounters' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters',
[], [],
''' ''',
'bgp_neighbor_counters',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-version', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' BGP version being used to communicate with the
remote router
''',
'bgp_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('connection', REFERENCE_CLASS, 'Connection' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.Connection',
[], [],
''' ''',
'connection',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'description',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('installed-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of installed prefixes
''',
'installed_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('last-read', ATTRIBUTE, 'str' , None, None,
[], [],
''' since BGP last received a message to this neighbor
''',
'last_read',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('last-write', ATTRIBUTE, 'str' , None, None,
[], [],
''' since BGP last sent a message from this neighbor
''',
'last_write',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('link', REFERENCE_ENUM_CLASS, 'BgpLinkEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpLinkEnum',
[], [],
''' ''',
'link',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('negotiated-cap', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Information for bgp neighbor session negotiated
capabilities
''',
'negotiated_cap',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('negotiated-keepalive-timers', REFERENCE_CLASS, 'NegotiatedKeepaliveTimers' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers',
[], [],
''' ''',
'negotiated_keepalive_timers',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefix-activity', REFERENCE_CLASS, 'PrefixActivity' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity',
[], [],
''' ''',
'prefix_activity',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('session-state', REFERENCE_ENUM_CLASS, 'BgpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpFsmStateEnum',
[], [],
''' ''',
'session_state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('transport', REFERENCE_CLASS, 'Transport' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.Transport',
[], [],
''' ''',
'transport',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('up-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' How long the bgp session has been up since
the sessioin was established
''',
'up_time',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors',
False,
[
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor',
[], [],
''' ''',
'neighbor',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'neighbors',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Prefixes' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Prefixes',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'prefixes',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Path' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Path',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'path',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.AsPath' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.AsPath',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'as-path',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.RouteMap' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.RouteMap',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'route-map',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.FilterList' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.FilterList',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'filter-list',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Activities' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Activities',
False,
[
_MetaInfoClassMember('paths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'paths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('scan-interval', ATTRIBUTE, 'str' , None, None,
[], [],
''' scan interval in second
''',
'scan_interval',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'activities',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary',
False,
[
_MetaInfoClassMember('id', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'id',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('bgp-version', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ''',
'bgp_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('input-queue', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'input_queue',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('messages-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'messages_received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('messages-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'messages_sent',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('output-queue', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'output_queue',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'prefixes_received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'BgpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpFsmStateEnum',
[], [],
''' ''',
'state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('up-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'up_time',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-summary',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries',
False,
[
_MetaInfoClassMember('bgp-neighbor-summary', REFERENCE_LIST, 'BgpNeighborSummary' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary',
[], [],
''' ''',
'bgp_neighbor_summary',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-summaries',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily',
False,
[
_MetaInfoClassMember('afi-safi', REFERENCE_ENUM_CLASS, 'BgpAfiSafiEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpAfiSafiEnum',
[], [],
''' ''',
'afi_safi',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'vrf_name',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('activities', REFERENCE_CLASS, 'Activities' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Activities',
[], [],
''' BGP activity information
''',
'activities',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('as-path', REFERENCE_CLASS, 'AsPath' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.AsPath',
[], [],
''' ''',
'as_path',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-neighbor-summaries', REFERENCE_CLASS, 'BgpNeighborSummaries' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries',
[], [],
''' Summary of neighbor
''',
'bgp_neighbor_summaries',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' BGP table version number
''',
'bgp_table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('filter-list', REFERENCE_CLASS, 'FilterList' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.FilterList',
[], [],
''' ''',
'filter_list',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('path', REFERENCE_CLASS, 'Path' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Path',
[], [],
''' ''',
'path',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes', REFERENCE_CLASS, 'Prefixes' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Prefixes',
[], [],
''' ''',
'prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-map', REFERENCE_CLASS, 'RouteMap' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.RouteMap',
[], [],
''' ''',
'route_map',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('router-id', REFERENCE_UNION, 'str' , None, None,
[], [],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('router-id', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('router-id', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('routing-table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Routing table version number
''',
'routing_table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-memory', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'total_memory',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'address-family',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_LIST, 'AddressFamily' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily',
[], [],
''' ''',
'address_family',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'address-families',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState' : {
'meta_info' : _MetaInfoClass('BgpState',
False,
[
_MetaInfoClassMember('address-families', REFERENCE_CLASS, 'AddressFamilies' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies',
[], [],
''' ''',
'address_families',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('neighbors', REFERENCE_CLASS, 'Neighbors' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors',
[], [],
''' ''',
'neighbors',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-state',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
}
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity.Sent']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity.Received']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.Connection']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.Transport']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor']['meta_info'].parent =_meta_table['BgpState.Neighbors']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Prefixes']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Path']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.AsPath']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.RouteMap']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.FilterList']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Activities']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info'].parent =_meta_table['BgpState.AddressFamilies']['meta_info']
_meta_table['BgpState.Neighbors']['meta_info'].parent =_meta_table['BgpState']['meta_info']
_meta_table['BgpState.AddressFamilies']['meta_info'].parent =_meta_table['BgpState']['meta_info'] | unknown | codeparrot/codeparrot-clean | ||
#include <ATen/native/vulkan/impl/Arithmetic.h>
#include <ATen/native/vulkan/impl/Common.h>
namespace at {
namespace native {
namespace vulkan {
namespace arithmetic {
// Return the precompiled Vulkan compute shader implementing the given
// elementwise arithmetic operation.
api::ShaderInfo get_shader(const OpType type) {
  switch (type) {
    case OpType::ADD:
      return VK_KERNEL(add);
    case OpType::SUB:
      return VK_KERNEL(sub);
    case OpType::MUL:
      return VK_KERNEL(mul);
    case OpType::DIV:
      return VK_KERNEL(div);
    case OpType::FLOOR_DIV:
      return VK_KERNEL(floor_divide);
    case OpType::POW:
      return VK_KERNEL(pow);
  }
  // Unreachable for valid enum values; guards against an out-of-range cast.
  VK_THROW("Invalid OpType");
}
// Uniform parameter block handed to the arithmetic shaders via a
// UniformParamsBuffer. Field order and types must match the parameter
// block declared in the corresponding GLSL source.
struct Params final {
  api::utils::ivec4 outputSizes; // output sizes packed as (W, H, C, N)
  api::utils::ivec4 input1Sizes; // first input sizes packed as (W, H, C, N)
  api::utils::ivec4 input2Sizes; // second input sizes packed as (W, H, C, N)
  float alpha; // extra scalar forwarded to the shader (semantics are per-op)
};
// Record one elementwise arithmetic dispatch into the context's command
// stream: v_dst = compute_shader(v_in1, v_in2, alpha).
//
// The destination is bound with WRITE access; the inputs are bound for the
// compute stage. Tensor sizes and alpha reach the shader through the
// Params uniform buffer.
void record_op(
    api::Context* const context,
    const api::ShaderInfo& compute_shader,
    vTensor& v_in1,
    vTensor& v_in2,
    vTensor& v_dst,
    const float alpha) {
  // Work group sizing is derived from the destination extents.
  api::utils::uvec3 global_size = v_dst.extents();
  api::utils::uvec3 local_size = adaptive_work_group_size(global_size);

  // Sizes are packed (Width, Height, Channel, Batch) to match Params.
  Params block{
      api::utils::make_ivec4(
          {dim_at<Dim4D::Width>(v_dst),
           dim_at<Dim4D::Height>(v_dst),
           dim_at<Dim4D::Channel>(v_dst),
           dim_at<Dim4D::Batch>(v_dst)}),
      api::utils::make_ivec4(
          {dim_at<Dim4D::Width>(v_in1),
           dim_at<Dim4D::Height>(v_in1),
           dim_at<Dim4D::Channel>(v_in1),
           dim_at<Dim4D::Batch>(v_in1)}),
      api::utils::make_ivec4(
          {dim_at<Dim4D::Width>(v_in2),
           dim_at<Dim4D::Height>(v_in2),
           dim_at<Dim4D::Channel>(v_in2),
           dim_at<Dim4D::Batch>(v_in2)}),
      alpha,
  };

  api::UniformParamsBuffer params(context, block);
  api::PipelineBarrier pipeline_barrier{};

  context->submit_compute_job(
      // shader descriptor
      compute_shader,
      // pipeline barrier
      pipeline_barrier,
      // global work group size
      global_size,
      // local work group size
      local_size,
      // fence handle (none: no host-side wait is requested here)
      VK_NULL_HANDLE,
      // shader arguments
      v_dst.image(
          pipeline_barrier,
          api::PipelineStage::COMPUTE,
          api::MemoryAccessType::WRITE),
      v_in1.image(pipeline_barrier, api::PipelineStage::COMPUTE),
      v_in2.image(pipeline_barrier, api::PipelineStage::COMPUTE),
      // params buffer
      params.buffer());
}
} // namespace arithmetic
} // namespace vulkan
} // namespace native
} // namespace at | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/vulkan/impl/Arithmetic.cpp |
"""Some generic task implementations."""
from axopy.task import Task
from axopy import util
from axopy.gui.graph import SignalWidget
class Oscilloscope(Task):
    """A visualizer for data acquisition devices.

    Connects to the experiment's input DAQ and plots every channel on its
    own axis. An optional :class:`Pipeline` preprocesses each update before
    it is drawn.

    Parameters
    ----------
    pipeline : Pipeline, optional
        Pipeline the input data is run through before plotting (e.g.
        filtering, or a :class:`Windower` to show a larger, scrolling chunk
        of data than a single DAQ update provides — handy for experiment
        setup such as electrode placement and sanity-checking the device).
    """

    def __init__(self, pipeline=None):
        super(Oscilloscope, self).__init__()
        self.pipeline = pipeline

    def prepare_graphics(self, container):
        # A single SignalWidget draws one plot per channel.
        self.scope = SignalWidget()
        container.set_widget(self.scope)

    def prepare_daq(self, daqstream):
        self.daqstream = daqstream
        self.connect(daqstream.updated, self.update)

    def run(self):
        self.daqstream.start()

    def update(self, data):
        # Run the optional preprocessing pipeline, then draw.
        processed = data if self.pipeline is None else self.pipeline.process(data)
        self.scope.plot(processed)

    def key_press(self, key):
        # Return/Enter shuts the stream down and ends the task.
        if key != util.key_return:
            return
        self.daqstream.stop()
        self.finish()
#!/usr/bin/python3
import os
from redmine import Redmine
import datetime as dt
import requests
import json
# Set variables
os.chdir(os.path.expanduser("~") + "/.incmgmt/")
prefs = []
for line in open('mw-prefs.txt'):
prefs.append(line)
redmine_project = prefs[0].rstrip()
redmine_server = prefs[1].rstrip()
redmine_key = prefs[2].rstrip()
sn_server = prefs[3].rstrip()
sn_user = prefs[4].rstrip()
sn_pass = prefs[5].rstrip()
wikipage = "https://io.arubanetworks.com/projects/incident_management/wiki/Open_SSID_access_point" # description of how to handle this issue
requests.packages.urllib3.disable_warnings() # turn off SSL warnings
# Connect to redmine
redmine = Redmine(redmine_server, requests={'verify': False}, \
key=redmine_key, version='2.5.1')
project = redmine.project.get(redmine_project)
## Begin functions
# Create an issue in Service Now
# Create an issue in Service Now
def sn_issue(subject, redmine_url, impact, urgency, wikipage):
    """Open a Service Now incident for a rogue access point.

    Parameters:
        subject: short description for the incident
        redmine_url: link back to the originating redmine issue
        impact, urgency: Service Now priority inputs (stringified)
        wikipage: link to the handling instructions

    Returns (sn_ticket, sys_id) on success; exits the process on failure.
    """
    # Define the headers
    headers = {"Content-Type": "application/json",
               "Accept": "application/json"}
    # Serialize with json.dumps instead of hand-concatenating the JSON
    # text: the old string-built payload produced invalid (and injectable)
    # JSON whenever the subject or URLs contained quotes or backslashes.
    incident_data = json.dumps({
        "short_description": subject,
        "description": "A rogue access point has been discovered on the "
                       "network. For full information, see: " + redmine_url +
                       " and for instructions, see: " + wikipage,
        "u_category": "Intranet",
        "u_subcategory": "Access Issues",
        "impact": str(impact),
        "urgency": str(urgency),
        "contact_type": "Alert",
    })
    # Create the incident on the Service Now system
    response = requests.post(sn_server, auth=(sn_user, sn_pass),
                             headers=headers, data=incident_data)
    # Capture the ticket number and unique identifier
    if response.status_code != 201:
        print('Status:', response.status_code, 'Headers:',
              response.headers, 'Error Response:', response.json())
        exit()
    sn_ticket = response.json()['result']['number']
    sys_id = response.json()['result']['sys_id']
    print("service now ticket created")
    return sn_ticket, sys_id
# Log the created tickets to a file
# Log the created tickets to a file
def log(redmine_issue_id, sn_ticket, sys_id, redmine_url):
    """Append the redmine<->Service Now cross-reference to the CSV logs.

    ticketlog.csv keeps the full mapping (including the redmine URL);
    opentix.csv tracks currently-open tickets. Both are append-only.
    """
    # 'with' guarantees the handles are flushed and closed even on error
    # (the previous version left both files open).
    with open('ticketlog.csv', 'a') as ticket_log, \
            open('opentix.csv', 'a') as opentix_log:
        ticket_log.write(redmine_issue_id + ',' + sn_ticket + ',' +
                         sys_id + ',' + redmine_url + ',' + '\n')
        opentix_log.write(redmine_issue_id + ',' + sn_ticket + ',' +
                          sys_id + '\n')
# Calculate interval for checking tickets
# Calculate interval for checking tickets
def timeRange(interval):
    """Return the local datetime `interval` minutes before now."""
    return dt.datetime.today() - dt.timedelta(minutes=interval)
# Determine if the issue is in the relevant interval and create a
# service now ticket if it is
# Determine if the issue is in the relevant interval and create a
# service now ticket if it is
def CheckInterval(created_filter, issue):
    """Create a Service Now ticket for `issue` if it is newer than
    `created_filter`.

    The issue's creation time is shifted by -7 hours (UTC adjustment,
    per the original author's note) before the comparison.
    """
    issue_time = issue.created_on - dt.timedelta(hours=7)  # adjust for UTC
    # The original tested `diff == abs(diff)`, which for timedeltas is
    # just a roundabout "issue_time >= created_filter".
    if issue_time >= created_filter:
        print("issue " + str(issue.id) + " is in the interval")
        redmine_url = redmine_server + "/issues/" + str(issue.id)
        subject = issue.subject
        sn_ticket, sys_id = sn_issue(subject, redmine_url, 2, 2, wikipage)
        log(str(issue.id), sn_ticket, sys_id, redmine_url)
    else:
        return None
## Begin script
# set a time filter for finding newly active tickets.
# Interval in minutes
created_filter = timeRange(30)
# Retrieve all newly-created tickets that relate to a Rogue SSID
for i in project.issues:
    try:
        if str(i.category).rstrip() == "Rogue SSID":
            print("found matching ticket: " + str(i.id))
            CheckInterval(created_filter, i)  # check if new
    except Exception:
        # Best effort: issues without a category (or other per-issue
        # failures) are skipped rather than aborting the whole sweep.
        # Narrowed from a bare `except:` so Ctrl-C still works.
        pass
"""Miscellaneous goodies for psycopg2
This module is a generic place used to hold little helper functions
and classes until a better place in the distribution is found.
"""
# psycopg/extras.py - miscellaneous extra goodies for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os as _os
import sys as _sys
import time as _time
import re as _re
try:
import logging as _logging
except:
_logging = None
import psycopg2
from psycopg2 import extensions as _ext
from psycopg2.extensions import cursor as _cursor
from psycopg2.extensions import connection as _connection
from psycopg2.extensions import adapt as _A, quote_ident
from psycopg2._psycopg import ( # noqa
REPLICATION_PHYSICAL, REPLICATION_LOGICAL,
ReplicationConnection as _replicationConnection,
ReplicationCursor as _replicationCursor,
ReplicationMessage)
# expose the json adaptation stuff into the module
from psycopg2._json import ( # noqa
json, Json, register_json, register_default_json, register_default_jsonb)
# Expose range-related objects
from psycopg2._range import ( # noqa
Range, NumericRange, DateRange, DateTimeRange, DateTimeTZRange,
register_range, RangeAdapter, RangeCaster)
# Expose ipaddress-related objects
from psycopg2._ipaddress import register_ipaddress # noqa
class DictCursorBase(_cursor):
    """Base class for all dict-like cursors.

    Subclasses must pass a `row_factory` and set `_prefetch`: when true,
    rows are fetched *before* `_build_index()` runs; when false, the index
    is built first and rows are fetched afterwards. `_query_executed` is
    raised by the subclass's execute()/callproc() and cleared by
    `_build_index()`, so the index is rebuilt once per query.
    """
    def __init__(self, *args, **kwargs):
        if 'row_factory' in kwargs:
            row_factory = kwargs['row_factory']
            del kwargs['row_factory']
        else:
            raise NotImplementedError(
                "DictCursorBase can't be instantiated without a row factory.")
        super(DictCursorBase, self).__init__(*args, **kwargs)
        self._query_executed = 0  # set to 1 by subclass execute()/callproc()
        self._prefetch = 0  # fetch-before-index flag; overridden by subclasses
        self.row_factory = row_factory

    def fetchone(self):
        # Fetch/index order depends on _prefetch; only one of the two
        # super() calls runs.
        if self._prefetch:
            res = super(DictCursorBase, self).fetchone()
        if self._query_executed:
            self._build_index()
        if not self._prefetch:
            res = super(DictCursorBase, self).fetchone()
        return res

    def fetchmany(self, size=None):
        if self._prefetch:
            res = super(DictCursorBase, self).fetchmany(size)
        if self._query_executed:
            self._build_index()
        if not self._prefetch:
            res = super(DictCursorBase, self).fetchmany(size)
        return res

    def fetchall(self):
        if self._prefetch:
            res = super(DictCursorBase, self).fetchall()
        if self._query_executed:
            self._build_index()
        if not self._prefetch:
            res = super(DictCursorBase, self).fetchall()
        return res

    def __iter__(self):
        try:
            # Same dance as the fetch methods, but the first row must be
            # pulled eagerly so the index exists before rows are yielded.
            if self._prefetch:
                res = super(DictCursorBase, self).__iter__()
                first = next(res)
            if self._query_executed:
                self._build_index()
            if not self._prefetch:
                res = super(DictCursorBase, self).__iter__()
                first = next(res)

            yield first
            while 1:
                yield next(res)
        except StopIteration:
            return
class DictConnection(_connection):
    """A connection whose cursors are `DictCursor` by default."""

    def cursor(self, *args, **kwargs):
        # Honour an explicit cursor_factory; otherwise use DictCursor.
        if 'cursor_factory' not in kwargs:
            kwargs['cursor_factory'] = DictCursor
        return super(DictConnection, self).cursor(*args, **kwargs)
class DictCursor(DictCursorBase):
    """A cursor that keeps a list of column name -> index mappings."""

    def __init__(self, *args, **kwargs):
        kwargs['row_factory'] = DictRow
        super(DictCursor, self).__init__(*args, **kwargs)
        # Rows must be fetched before the index is built.
        self._prefetch = 1

    def execute(self, query, vars=None):
        # Fresh name->position map for the new result set.
        self.index = {}
        self._query_executed = 1
        return super(DictCursor, self).execute(query, vars)

    def callproc(self, procname, vars=None):
        self.index = {}
        self._query_executed = 1
        return super(DictCursor, self).callproc(procname, vars)

    def _build_index(self):
        # Map each column name to its position, once per executed query.
        if self._query_executed == 1 and self.description:
            for pos, col_desc in enumerate(self.description):
                self.index[col_desc[0]] = pos
            self._query_executed = 0
class DictRow(list):
    """A row object that allow by-column-name access to data.

    Values are stored positionally in the underlying list; `_index`
    (shared with the originating cursor) maps column names to positions.
    """
    __slots__ = ('_index',)

    def __init__(self, cursor):
        self._index = cursor.index
        self[:] = [None] * len(cursor.description)

    def __getitem__(self, x):
        if not isinstance(x, (int, slice)):
            x = self._index[x]
        return list.__getitem__(self, x)

    def __setitem__(self, x, v):
        if not isinstance(x, (int, slice)):
            x = self._index[x]
        list.__setitem__(self, x, v)

    def items(self):
        # BUG FIX: this used to call self.items() recursively, recursing
        # forever on Python 2 (on Python 3 the method is replaced by
        # `iteritems` below, hiding the bug). Delegate to the iterator,
        # as upstream psycopg2 does.
        return list(self.iteritems())

    def keys(self):
        return list(self._index.keys())

    def values(self):
        return tuple(self[:])

    def has_key(self, x):
        return x in self._index

    def get(self, x, default=None):
        # Narrowed from a bare `except:`: only the lookup failures the
        # item access can raise are treated as "missing".
        try:
            return self[x]
        except (KeyError, IndexError):
            return default

    def iteritems(self):
        for n, v in self._index.items():
            yield n, list.__getitem__(self, v)

    def iterkeys(self):
        return iter(self._index.keys())

    def itervalues(self):
        return list.__iter__(self)

    def copy(self):
        return dict(iter(self.items()))

    def __contains__(self, x):
        return x in self._index

    def __getstate__(self):
        # Pickle support: __slots__ classes have no instance __dict__.
        return self[:], self._index.copy()

    def __setstate__(self, data):
        self[:] = data[0]
        self._index = data[1]

    # On Python 3 the iterator variants *are* the dict-style accessors:
    # drop the crusty Py2 methods.
    if _sys.version_info[0] > 2:
        items = iteritems  # noqa
        keys = iterkeys  # noqa
        values = itervalues  # noqa
        del iteritems, iterkeys, itervalues, has_key
class RealDictConnection(_connection):
    """A connection whose cursors are `RealDictCursor` by default."""

    def cursor(self, *args, **kwargs):
        # Honour an explicit cursor_factory; otherwise use RealDictCursor.
        if 'cursor_factory' not in kwargs:
            kwargs['cursor_factory'] = RealDictCursor
        return super(RealDictConnection, self).cursor(*args, **kwargs)
class RealDictCursor(DictCursorBase):
    """A cursor that uses a real dict as the base type for rows.

    Note that this cursor is extremely specialized and does not allow
    the normal access (using integer indices) to fetched data. If you need
    to access database rows both as a dictionary and a list, then use
    the generic `DictCursor` instead of `!RealDictCursor`.
    """

    def __init__(self, *args, **kwargs):
        kwargs['row_factory'] = RealDictRow
        super(RealDictCursor, self).__init__(*args, **kwargs)
        # The column mapping must exist before rows are materialized.
        self._prefetch = 0

    def execute(self, query, vars=None):
        # Fresh position->name mapping for the new result set.
        self.column_mapping = []
        self._query_executed = 1
        return super(RealDictCursor, self).execute(query, vars)

    def callproc(self, procname, vars=None):
        self.column_mapping = []
        self._query_executed = 1
        return super(RealDictCursor, self).callproc(procname, vars)

    def _build_index(self):
        # Record column names in result order, once per executed query.
        if self._query_executed == 1 and self.description:
            for col_desc in self.description:
                self.column_mapping.append(col_desc[0])
            self._query_executed = 0
class RealDictRow(dict):
    """A `!dict` subclass representing a data record."""
    __slots__ = ('_column_mapping',)

    def __init__(self, cursor):
        dict.__init__(self)
        # Named cursors may not have built the mapping yet; force it.
        if cursor.description and not cursor.column_mapping:
            cursor._build_index()
        self._column_mapping = cursor.column_mapping

    def __setitem__(self, name, value):
        # Integer positions are translated to the matching column name.
        if type(name) == int:
            name = self._column_mapping[name]
        return dict.__setitem__(self, name, value)

    def __getstate__(self):
        # Pickle support: __slots__ classes have no instance __dict__.
        return (dict(self), list(self._column_mapping))

    def __setstate__(self, data):
        content, mapping = data
        self.update(content)
        self._column_mapping = mapping
class NamedTupleConnection(_connection):
    """A connection whose cursors are `NamedTupleCursor` by default."""

    def cursor(self, *args, **kwargs):
        # Honour an explicit cursor_factory; otherwise use NamedTupleCursor.
        if 'cursor_factory' not in kwargs:
            kwargs['cursor_factory'] = NamedTupleCursor
        return super(NamedTupleConnection, self).cursor(*args, **kwargs)
class NamedTupleCursor(_cursor):
    """A cursor that generates results as `~collections.namedtuple`.

    `!fetch*()` methods will return named tuples instead of regular tuples, so
    their elements can be accessed both as regular numeric items as well as
    attributes.

        >>> nt_cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
        >>> rec = nt_cur.fetchone()
        >>> rec
        Record(id=1, num=100, data="abc'def")
        >>> rec[1]
        100
        >>> rec.data
        "abc'def"
    """
    # Cached namedtuple class for the current result set; reset to None by
    # execute()/executemany()/callproc() and rebuilt lazily on first fetch.
    Record = None

    def execute(self, query, vars=None):
        self.Record = None
        return super(NamedTupleCursor, self).execute(query, vars)

    def executemany(self, query, vars):
        self.Record = None
        return super(NamedTupleCursor, self).executemany(query, vars)

    def callproc(self, procname, vars=None):
        self.Record = None
        return super(NamedTupleCursor, self).callproc(procname, vars)

    def fetchone(self):
        t = super(NamedTupleCursor, self).fetchone()
        if t is not None:
            nt = self.Record
            if nt is None:
                nt = self.Record = self._make_nt()
            return nt._make(t)

    def fetchmany(self, size=None):
        ts = super(NamedTupleCursor, self).fetchmany(size)
        nt = self.Record
        if nt is None:
            nt = self.Record = self._make_nt()
        return list(map(nt._make, ts))

    def fetchall(self):
        ts = super(NamedTupleCursor, self).fetchall()
        nt = self.Record
        if nt is None:
            nt = self.Record = self._make_nt()
        return list(map(nt._make, ts))

    def __iter__(self):
        try:
            it = super(NamedTupleCursor, self).__iter__()
            # Pull the first row eagerly so the Record class can be built
            # from the result description before anything is yielded.
            t = next(it)

            nt = self.Record
            if nt is None:
                nt = self.Record = self._make_nt()

            yield nt._make(t)

            while 1:
                yield nt._make(next(it))
        except StopIteration:
            return

    try:
        from collections import namedtuple
    except ImportError as _exc:
        # namedtuple unavailable: defer the ImportError until a fetch
        # actually needs to build a Record class.
        def _make_nt(self):
            raise self._exc
    else:
        def _make_nt(self, namedtuple=namedtuple):
            # Build a namedtuple type from the column names of the current
            # result set (an empty Record when there is no description).
            return namedtuple("Record", [d[0] for d in self.description or ()])
class LoggingConnection(_connection):
    """A connection that logs all queries to a file or logger__ object.

    .. __: http://docs.python.org/library/logging.html
    """

    def initialize(self, logobj):
        """Initialize the connection to log to `!logobj`.

        The `!logobj` parameter can be an open file object or a Logger
        instance from the standard logging module.
        """
        self._logobj = logobj
        # Pick the logging backend once, here, instead of on every query.
        if _logging and isinstance(logobj, _logging.Logger):
            self.log = self._logtologger
        else:
            self.log = self._logtofile

    def filter(self, msg, curs):
        """Filter the query before logging it.

        This is the method to overwrite to filter unwanted queries out of the
        log or to add some extra data to the output. The default implementation
        just does nothing.
        """
        return msg

    def _logtofile(self, msg, curs):
        # A false return value from filter() suppresses the entry.
        msg = self.filter(msg, curs)
        if msg:
            if _sys.version_info[0] >= 3 and isinstance(msg, bytes):
                # the query may be bytes; decode with the connection's
                # client encoding before writing to a text stream
                msg = msg.decode(_ext.encodings[self.encoding], 'replace')
            self._logobj.write(msg + _os.linesep)

    def _logtologger(self, msg, curs):
        # Same as _logtofile, but emitted through the logging module.
        msg = self.filter(msg, curs)
        if msg:
            self._logobj.debug(msg)

    def _check(self):
        # Fail fast when initialize() was never called.
        if not hasattr(self, '_logobj'):
            raise self.ProgrammingError(
                "LoggingConnection object has not been initialize()d")

    def cursor(self, *args, **kwargs):
        self._check()
        kwargs.setdefault('cursor_factory', LoggingCursor)
        return super(LoggingConnection, self).cursor(*args, **kwargs)
class LoggingCursor(_cursor):
    """A cursor that logs queries using its connection logging facilities."""

    def execute(self, query, vars=None):
        # Log self.query (the statement actually sent) even when the
        # execution raises; the exception still propagates.
        try:
            result = super(LoggingCursor, self).execute(query, vars)
        finally:
            self.connection.log(self.query, self)
        return result

    def callproc(self, procname, vars=None):
        try:
            result = super(LoggingCursor, self).callproc(procname, vars)
        finally:
            self.connection.log(self.query, self)
        return result
class MinTimeLoggingConnection(LoggingConnection):
    """A connection that logs queries based on execution time.

    This is just an example of how to sub-class `LoggingConnection` to
    provide some extra filtering for the logged queries. Both the
    `initialize()` and `filter()` methods are overwritten to make sure
    that only queries executing for more than ``mintime`` ms are logged.

    Note that this connection uses the specialized cursor
    `MinTimeLoggingCursor`.
    """
    def initialize(self, logobj, mintime=0):
        # mintime: logging threshold in milliseconds.
        LoggingConnection.initialize(self, logobj)
        self._mintime = mintime

    def filter(self, msg, curs):
        # curs.timestamp is stamped by MinTimeLoggingCursor.execute().
        # Returning None (implicitly, for fast queries) suppresses the
        # log entry — see LoggingConnection._logtofile/_logtologger.
        t = (_time.time() - curs.timestamp) * 1000
        if t > self._mintime:
            return msg + _os.linesep + " (execution time: %d ms)" % t

    def cursor(self, *args, **kwargs):
        kwargs.setdefault('cursor_factory', MinTimeLoggingCursor)
        return LoggingConnection.cursor(self, *args, **kwargs)
class MinTimeLoggingCursor(LoggingCursor):
    """The cursor sub-class companion to `MinTimeLoggingConnection`."""

    def execute(self, query, vars=None):
        # Stamp the start time so the connection's filter() can compute
        # the elapsed execution time.
        self.timestamp = _time.time()
        return super(MinTimeLoggingCursor, self).execute(query, vars)

    def callproc(self, procname, vars=None):
        self.timestamp = _time.time()
        return super(MinTimeLoggingCursor, self).callproc(procname, vars)
class LogicalReplicationConnection(_replicationConnection):
    """A replication connection preconfigured for LOGICAL replication."""

    def __init__(self, *args, **kwargs):
        # Force the replication type; everything else passes through.
        kwargs['replication_type'] = REPLICATION_LOGICAL
        super(LogicalReplicationConnection, self).__init__(*args, **kwargs)
class PhysicalReplicationConnection(_replicationConnection):
    """A replication connection preconfigured for PHYSICAL replication."""

    def __init__(self, *args, **kwargs):
        # Force the replication type; everything else passes through.
        kwargs['replication_type'] = REPLICATION_PHYSICAL
        super(PhysicalReplicationConnection, self).__init__(*args, **kwargs)
class StopReplication(Exception):
    """
    Exception used to break out of the endless loop in
    `~ReplicationCursor.consume_stream()`.

    Subclass of `~exceptions.Exception`. Intentionally *not* inherited from
    `~psycopg2.Error` as occurrence of this exception does not indicate an
    error.
    """
    pass
class ReplicationCursor(_replicationCursor):
    """A cursor used for communication on replication connections."""

    def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None):
        """Create streaming replication slot.

        `slot_type` defaults to the type the connection was opened with;
        `output_plugin` is required for logical slots and forbidden for
        physical ones. The command string must match the server's
        walsender grammar exactly.
        """
        command = "CREATE_REPLICATION_SLOT %s " % quote_ident(slot_name, self)

        if slot_type is None:
            slot_type = self.connection.replication_type

        if slot_type == REPLICATION_LOGICAL:
            if output_plugin is None:
                raise psycopg2.ProgrammingError(
                    "output plugin name is required to create "
                    "logical replication slot")

            command += "LOGICAL %s" % quote_ident(output_plugin, self)

        elif slot_type == REPLICATION_PHYSICAL:
            if output_plugin is not None:
                raise psycopg2.ProgrammingError(
                    "cannot specify output plugin name when creating "
                    "physical replication slot")

            command += "PHYSICAL"

        else:
            raise psycopg2.ProgrammingError(
                "unrecognized replication type: %s" % repr(slot_type))

        self.execute(command)

    def drop_replication_slot(self, slot_name):
        """Drop streaming replication slot."""
        command = "DROP_REPLICATION_SLOT %s" % quote_ident(slot_name, self)
        self.execute(command)

    def start_replication(self, slot_name=None, slot_type=None, start_lsn=0,
                          timeline=0, options=None, decode=False):
        """Start replication stream.

        Builds a START_REPLICATION command and hands it to
        `start_replication_expert()`. `start_lsn` may be an "X/X" hex
        string or an integer; `timeline` and `options` are only valid
        for physical and logical replication respectively.
        """
        command = "START_REPLICATION "

        if slot_type is None:
            slot_type = self.connection.replication_type

        if slot_type == REPLICATION_LOGICAL:
            if slot_name:
                command += "SLOT %s " % quote_ident(slot_name, self)
            else:
                raise psycopg2.ProgrammingError(
                    "slot name is required for logical replication")

            command += "LOGICAL "

        elif slot_type == REPLICATION_PHYSICAL:
            if slot_name:
                command += "SLOT %s " % quote_ident(slot_name, self)
            # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX

        else:
            raise psycopg2.ProgrammingError(
                "unrecognized replication type: %s" % repr(slot_type))

        # Normalize the LSN to the X/XXXXXXXX form the server expects,
        # from either an "X/X" hex string or a 64-bit integer.
        if type(start_lsn) is str:
            lsn = start_lsn.split('/')
            lsn = "%X/%08X" % (int(lsn[0], 16), int(lsn[1], 16))
        else:
            lsn = "%X/%08X" % ((start_lsn >> 32) & 0xFFFFFFFF,
                               start_lsn & 0xFFFFFFFF)

        command += lsn

        if timeline != 0:
            if slot_type == REPLICATION_LOGICAL:
                raise psycopg2.ProgrammingError(
                    "cannot specify timeline for logical replication")

            command += " TIMELINE %d" % timeline

        if options:
            if slot_type == REPLICATION_PHYSICAL:
                raise psycopg2.ProgrammingError(
                    "cannot specify output plugin options for physical replication")

            # Render plugin options as a "(key value, ...)" list.
            command += " ("
            for k, v in options.items():
                if not command.endswith('('):
                    command += ", "
                command += "%s %s" % (quote_ident(k, self), _A(str(v)))
            command += ")"

        self.start_replication_expert(command, decode=decode)

    # allows replication cursors to be used in select.select() directly
    def fileno(self):
        return self.connection.fileno()
# a dbtype and adapter for Python UUID type
class UUID_adapter(object):
    """Adapt Python's uuid.UUID__ type to PostgreSQL's uuid__.

    .. __: http://docs.python.org/library/uuid.html
    .. __: http://www.postgresql.org/docs/current/static/datatype-uuid.html
    """

    def __init__(self, uuid):
        self._uuid = uuid

    def __conform__(self, proto):
        # Only the ISQLQuote protocol is supported.
        if proto is _ext.ISQLQuote:
            return self

    def __str__(self):
        return "'{0}'::uuid".format(self._uuid)

    def getquoted(self):
        # The SQL literal is plain ASCII, so utf8 encoding is lossless.
        return str(self).encode('utf8')
def register_uuid(oids=None, conn_or_curs=None):
    """Create the UUID type and an uuid.UUID adapter.

    :param oids: oid for the PostgreSQL :sql:`uuid` type, or 2-items sequence
        with oids of the type and the array. If not specified, use PostgreSQL
        standard oids.
    :param conn_or_curs: where to register the typecaster. If not specified,
        register it globally.
    """
    import uuid

    # Resolve the scalar and array oids (2950/2951 are the builtin ones).
    if not oids:
        oid1, oid2 = 2950, 2951
    elif isinstance(oids, (list, tuple)):
        oid1, oid2 = oids
    else:
        oid1, oid2 = oids, 2951

    _ext.UUID = _ext.new_type(
        (oid1,), "UUID",
        lambda data, cursor: uuid.UUID(data) if data else None)
    _ext.UUIDARRAY = _ext.new_array_type((oid2,), "UUID[]", _ext.UUID)

    _ext.register_type(_ext.UUID, conn_or_curs)
    _ext.register_type(_ext.UUIDARRAY, conn_or_curs)
    _ext.register_adapter(uuid.UUID, UUID_adapter)

    return _ext.UUID
# a type, dbtype and adapter for PostgreSQL inet type
class Inet(object):
    """Wrap a string to allow for correct SQL-quoting of inet values.

    Note that this adapter does NOT check the passed value to make
    sure it really is an inet-compatible address but DOES call adapt()
    on it to make sure it is impossible to execute an SQL-injection
    by passing an evil value to the initializer.
    """

    def __init__(self, addr):
        self.addr = addr

    def __repr__(self):
        return "{0}({1!r})".format(self.__class__.__name__, self.addr)

    def __str__(self):
        return str(self.addr)

    def prepare(self, conn):
        self._conn = conn

    def getquoted(self):
        # Delegate the quoting of the wrapped address (this prevents
        # injection), then append the ::inet cast.
        quoted = _A(self.addr)
        if hasattr(quoted, 'prepare'):
            quoted.prepare(self._conn)
        return quoted.getquoted() + b"::inet"

    def __conform__(self, proto):
        return self if proto is _ext.ISQLQuote else None
def register_inet(oid=None, conn_or_curs=None):
    """Create the INET type and an Inet adapter.

    :param oid: oid for the PostgreSQL :sql:`inet` type, or 2-items sequence
        with oids of the type and the array. If not specified, use PostgreSQL
        standard oids.
    :param conn_or_curs: where to register the typecaster. If not specified,
        register it globally.
    """
    import warnings
    warnings.warn(
        "the inet adapter is deprecated, it's not very useful",
        DeprecationWarning)

    # Resolve the scalar and array oids (869/1041 are the builtin ones).
    if not oid:
        oid1, oid2 = 869, 1041
    elif isinstance(oid, (list, tuple)):
        oid1, oid2 = oid
    else:
        oid1, oid2 = oid, 1041

    _ext.INET = _ext.new_type(
        (oid1,), "INET",
        lambda data, cursor: Inet(data) if data else None)
    _ext.INETARRAY = _ext.new_array_type((oid2,), "INETARRAY", _ext.INET)

    _ext.register_type(_ext.INET, conn_or_curs)
    _ext.register_type(_ext.INETARRAY, conn_or_curs)

    return _ext.INET
def register_tstz_w_secs(oids=None, conn_or_curs=None):
    """The function used to register an alternate type caster for
    :sql:`TIMESTAMP WITH TIME ZONE` to deal with historical time zones with
    seconds in the UTC offset.

    These are now correctly handled by the default type caster, so currently
    the function doesn't do anything.
    """
    # Kept only for backward compatibility; warn and return.
    import warnings
    warnings.warn("deprecated", DeprecationWarning)
def wait_select(conn):
    """Wait until a connection or cursor has data available.

    The function is an example of a wait callback to be registered with
    `~psycopg2.extensions.set_wait_callback()`. This function uses
    :py:func:`~select.select()` to wait for data available.
    """
    import select
    from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE

    # Poll until the connection reports it is ready, blocking in select()
    # whenever the socket needs to become readable/writable first.
    while 1:
        try:
            state = conn.poll()
            if state == POLL_OK:
                break
            elif state == POLL_READ:
                select.select([conn.fileno()], [], [])
            elif state == POLL_WRITE:
                select.select([], [conn.fileno()], [])
            else:
                raise conn.OperationalError("bad state from poll: %s" % state)
        except KeyboardInterrupt:
            # Ctrl-C while waiting: ask the server to cancel the query
            # and keep polling.
            conn.cancel()
            # the loop will be broken by a server error
            continue
def _solve_conn_curs(conn_or_curs):
    """Return the connection and a DBAPI cursor from a connection or cursor."""
    if conn_or_curs is None:
        raise psycopg2.ProgrammingError("no connection or cursor provided")

    # A cursor exposes execute(); a connection does not.
    if hasattr(conn_or_curs, 'execute'):
        conn = conn_or_curs.connection
    else:
        conn = conn_or_curs
    return conn, conn.cursor(cursor_factory=_cursor)
class HstoreAdapter(object):
    """Adapt a Python dict to the hstore syntax."""
    def __init__(self, wrapped):
        self.wrapped = wrapped

    def prepare(self, conn):
        self.conn = conn

        # use an old-style getquoted implementation if required
        if conn.server_version < 90000:
            self.getquoted = self._getquoted_8

    def _getquoted_8(self):
        """Use the operators available in PG pre-9.0."""
        if not self.wrapped:
            return b"''::hstore"

        adapt = _ext.adapt
        rv = []
        for k, v in self.wrapped.items():
            # Quote key and value through the regular adaptation machinery;
            # None becomes a SQL NULL.
            k = adapt(k)
            k.prepare(self.conn)
            k = k.getquoted()

            if v is not None:
                v = adapt(v)
                v.prepare(self.conn)
                v = v.getquoted()
            else:
                v = b'NULL'

            # XXX this b'ing is painfully inefficient!
            rv.append(b"(" + k + b" => " + v + b")")

        return b"(" + b'||'.join(rv) + b")"

    def _getquoted_9(self):
        """Use the hstore(text[], text[]) function."""
        if not self.wrapped:
            return b"''::hstore"

        # Pass keys and values as two parallel text arrays.
        k = _ext.adapt(list(self.wrapped.keys()))
        k.prepare(self.conn)
        v = _ext.adapt(list(self.wrapped.values()))
        v.prepare(self.conn)
        return b"hstore(" + k.getquoted() + b", " + v.getquoted() + b")"

    # Default to the 9.0+ form; prepare() downgrades for older servers.
    getquoted = _getquoted_9

    # Matches one "key"=>"value" (or "key"=>NULL) pair, anchored by the
    # caller via match positions rather than by the pattern itself.
    _re_hstore = _re.compile(r"""
        # hstore key:
        # a string of normal or escaped chars
        "((?: [^"\\] | \\. )*)"
        \s*=>\s* # hstore value
        (?:
            NULL # the value can be null - not catched
            # or a quoted string like the key
            | "((?: [^"\\] | \\. )*)"
        )
        (?:\s*,\s*|$) # pairs separated by comma or end of string.
    """, _re.VERBOSE)

    @classmethod
    def parse(self, s, cur, _bsdec=_re.compile(r"\\(.)")):
        """Parse an hstore representation in a Python string.

        The hstore is represented as something like::

            "a"=>"1", "b"=>"2"

        with backslash-escaped strings.
        """
        if s is None:
            return None

        rv = {}
        start = 0
        for m in self._re_hstore.finditer(s):
            # Every match must start exactly where the previous one ended,
            # otherwise there is garbage between pairs.
            if m is None or m.start() != start:
                raise psycopg2.InterfaceError(
                    "error parsing hstore pair at char %d" % start)
            # Undo backslash escapes in key and (non-NULL) value.
            k = _bsdec.sub(r'\1', m.group(1))
            v = m.group(2)
            if v is not None:
                v = _bsdec.sub(r'\1', v)

            rv[k] = v
            start = m.end()

        if start < len(s):
            raise psycopg2.InterfaceError(
                "error parsing hstore: unparsed data after char %d" % start)

        return rv

    @classmethod
    def parse_unicode(self, s, cur):
        """Parse an hstore returning unicode keys and values."""
        if s is None:
            return None

        # Decode with the connection's client encoding, then reuse parse().
        s = s.decode(_ext.encodings[cur.connection.encoding])
        return self.parse(s, cur)

    @classmethod
    def get_oids(self, conn_or_curs):
        """Return the lists of OID of the hstore and hstore[] types.
        """
        conn, curs = _solve_conn_curs(conn_or_curs)

        # Store the transaction status of the connection to revert it after use
        conn_status = conn.status

        # column typarray not available before PG 8.3
        typarray = conn.server_version >= 80300 and "typarray" or "NULL"

        rv0, rv1 = [], []

        # get the oid for the hstore
        curs.execute("""\
SELECT t.oid, %s
FROM pg_type t JOIN pg_namespace ns
    ON typnamespace = ns.oid
WHERE typname = 'hstore';
""" % typarray)
        for oids in curs:
            rv0.append(oids[0])
            rv1.append(oids[1])

        # revert the status of the connection as before the command
        if (conn_status != _ext.STATUS_IN_TRANSACTION
        and not conn.autocommit):
            conn.rollback()

        return tuple(rv0), tuple(rv1)
def register_hstore(conn_or_curs, globally=False, str=False,
        oid=None, array_oid=None):
    r"""Register adapter and typecaster for `!dict`\-\ |hstore| conversions.

    :param conn_or_curs: a connection or cursor: the typecaster will be
        registered only on this object unless *globally* is set to `!True`
    :param globally: register the adapter globally, not only on *conn_or_curs*
    :param unicode: if `!True`, keys and values returned from the database
        will be `!unicode` instead of `!str`. The option is not available on
        Python 3
    :param oid: the OID of the |hstore| type if known. If not, it will be
        queried on *conn_or_curs*.
    :param array_oid: the OID of the |hstore| array type if known. If not, it
        will be queried on *conn_or_curs*.

    The connection or cursor passed to the function will be used to query the
    database and look for the OID of the |hstore| type (which may be different
    across databases). If querying is not desirable (e.g. with
    :ref:`asynchronous connections <async-support>`) you may specify it in the
    *oid* parameter, which can be found using a query such as :sql:`SELECT
    'hstore'::regtype::oid`. Analogously you can obtain a value for *array_oid*
    using a query such as :sql:`SELECT 'hstore[]'::regtype::oid`.

    Note that, when passing a dictionary from Python to the database, both
    strings and unicode keys and values are supported. Dictionaries returned
    from the database have keys/values according to the *unicode* parameter.

    The |hstore| contrib module must be already installed in the database
    (executing the ``hstore.sql`` script in your ``contrib`` directory).
    Raise `~psycopg2.ProgrammingError` if the type is not found.
    """
    # NOTE(review): the third parameter is named `str`, shadowing the
    # builtin inside this function; it appears to correspond to the
    # docstring's "unicode" flag (presumably a 2to3 rename). It is kept
    # as-is because renaming would break keyword callers — confirm.
    if oid is None:
        # Ask the database for the (scalar, array) oids.
        oid = HstoreAdapter.get_oids(conn_or_curs)
        if oid is None or not oid[0]:
            raise psycopg2.ProgrammingError(
                "hstore type not found in the database. "
                "please install it from your 'contrib/hstore.sql' file")
        else:
            array_oid = oid[1]
            oid = oid[0]

    if isinstance(oid, int):
        oid = (oid,)

    if array_oid is not None:
        if isinstance(array_oid, int):
            array_oid = (array_oid,)
        else:
            # drop falsy entries (e.g. missing typarray on old servers)
            array_oid = tuple([x for x in array_oid if x])

    # create and register the typecaster
    # (`str` here is the boolean parameter above, not the builtin)
    if _sys.version_info[0] < 3 and str:
        cast = HstoreAdapter.parse_unicode
    else:
        cast = HstoreAdapter.parse

    HSTORE = _ext.new_type(oid, "HSTORE", cast)
    _ext.register_type(HSTORE, not globally and conn_or_curs or None)
    _ext.register_adapter(dict, HstoreAdapter)

    if array_oid:
        HSTOREARRAY = _ext.new_array_type(array_oid, "HSTOREARRAY", HSTORE)
        _ext.register_type(HSTOREARRAY, not globally and conn_or_curs or None)
class CompositeCaster(object):
    """Helps conversion of a PostgreSQL composite type into a Python object.
    The class is usually created by the `register_composite()` function.
    You may want to create and register manually instances of the class if
    querying the database at registration time is not desirable (such as when
    using an :ref:`asynchronous connections <async-support>`).
    """
    def __init__(self, name, oid, attrs, array_oid=None, schema=None):
        """Store type metadata and build the typecaster(s).
        *attrs* is a sequence of (attribute_name, attribute_type_oid) pairs.
        """
        self.name = name
        self.schema = schema
        self.oid = oid
        self.array_oid = array_oid
        self.attnames = [a[0] for a in attrs]
        self.atttypes = [a[1] for a in attrs]
        self._create_type(name, self.attnames)
        self.typecaster = _ext.new_type((oid,), name, self.parse)
        if array_oid:
            self.array_typecaster = _ext.new_array_type(
                (array_oid,), "%sARRAY" % name, self.typecaster)
        else:
            self.array_typecaster = None
    def parse(self, s, curs):
        """Cast the composite literal *s* into a Python object.
        Each token is cast with the cursor using the attribute's type oid,
        then assembled via make().  Returns None for SQL NULL.
        """
        if s is None:
            return None
        tokens = self.tokenize(s)
        if len(tokens) != len(self.atttypes):
            raise psycopg2.DataError(
                "expecting %d components for the type %s, %d found instead" %
                (len(self.atttypes), self.name, len(tokens)))
        values = [curs.cast(oid, token)
                  for oid, token in zip(self.atttypes, tokens)]
        return self.make(values)
    def make(self, values):
        """Return a new Python object representing the data being casted.
        *values* is the list of attributes, already casted into their Python
        representation.
        You can subclass this method to :ref:`customize the composite cast
        <custom-composite>`.
        """
        return self._ctor(values)
    # Matches one component of a composite literal such as (a,"b,c",).
    _re_tokenize = _re.compile(r"""
  \(? ([,)])                        # an empty token, representing NULL
| \(? " ((?: [^"] | "")*) " [,)]    # or a quoted string
| \(? ([^",)]+) [,)]                # or an unquoted string
    """, _re.VERBOSE)
    # Collapses doubled quote/backslash escapes back to single characters.
    _re_undouble = _re.compile(r'(["\\])\1')
    @classmethod
    def tokenize(self, s):
        """Split the composite literal *s* into a list of raw components.
        NULL components become None; quoted components are un-escaped.
        (NOTE: classmethod's first argument is conventionally named *cls*;
        kept as *self* to leave the code unchanged.)
        """
        rv = []
        for m in self._re_tokenize.finditer(s):
            if m is None:
                raise psycopg2.InterfaceError("can't parse type: %r" % s)
            if m.group(1) is not None:
                rv.append(None)
            elif m.group(2) is not None:
                rv.append(self._re_undouble.sub(r"\1", m.group(2)))
            else:
                rv.append(m.group(3))
        return rv
    def _create_type(self, name, attnames):
        """Choose the Python container type for parsed records.
        Prefer a namedtuple; fall back to a plain tuple where namedtuple
        is unavailable.
        """
        try:
            from collections import namedtuple
        except ImportError:
            self.type = tuple
            self._ctor = self.type
        else:
            self.type = namedtuple(name, attnames)
            self._ctor = self.type._make
    @classmethod
    def _from_db(self, name, conn_or_curs):
        """Return a `CompositeCaster` instance for the type *name*.
        Raise `ProgrammingError` if the type is not found.
        """
        conn, curs = _solve_conn_curs(conn_or_curs)
        # Store the transaction status of the connection to revert it after use
        conn_status = conn.status
        # Use the correct schema
        if '.' in name:
            schema, tname = name.split('.', 1)
        else:
            tname = name
            schema = 'public'
        # column typarray not available before PG 8.3
        typarray = conn.server_version >= 80300 and "typarray" or "NULL"
        # get the type oid and attributes
        curs.execute("""\
SELECT t.oid, %s, attname, atttypid
FROM pg_type t
JOIN pg_namespace ns ON typnamespace = ns.oid
JOIN pg_attribute a ON attrelid = typrelid
WHERE typname = %%s AND nspname = %%s
    AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;
""" % typarray, (tname, schema))
        recs = curs.fetchall()
        # revert the status of the connection as before the command
        if (conn_status != _ext.STATUS_IN_TRANSACTION
                and not conn.autocommit):
            conn.rollback()
        if not recs:
            raise psycopg2.ProgrammingError(
                "PostgreSQL type '%s' not found" % name)
        type_oid = recs[0][0]
        array_oid = recs[0][1]
        type_attrs = [(r[2], r[3]) for r in recs]
        return self(tname, type_oid, type_attrs,
                    array_oid=array_oid, schema=schema)
def register_composite(name, conn_or_curs, globally=False, factory=None):
    """Register a typecaster to convert a composite type into a tuple.
    :param name: the name of a PostgreSQL composite type, e.g. created using
        the |CREATE TYPE|_ command
    :param conn_or_curs: a connection or cursor used to find the type oid and
        components; the typecaster is registered in a scope limited to this
        object, unless *globally* is set to `!True`
    :param globally: if `!False` (default) register the typecaster only on
        *conn_or_curs*, otherwise register it globally
    :param factory: if specified it should be a `CompositeCaster` subclass: use
        it to :ref:`customize how to cast composite types <custom-composite>`
    :return: the registered `CompositeCaster` or *factory* instance
        responsible for the conversion
    """
    if factory is None:
        factory = CompositeCaster
    # Query the database for the type's oid and attribute layout.
    caster = factory._from_db(name, conn_or_curs)
    # Register on the given object only, unless a global registration
    # was requested.
    scope = not globally and conn_or_curs or None
    _ext.register_type(caster.typecaster, scope)
    if caster.array_typecaster is not None:
        _ext.register_type(caster.array_typecaster, scope)
    return caster
def _paginate(seq, page_size):
"""Consume an iterable and return it in chunks.
Every chunk is at most `page_size`. Never return an empty chunk.
"""
page = []
it = iter(seq)
while 1:
try:
for i in range(page_size):
page.append(next(it))
yield page
page = []
except StopIteration:
if page:
yield page
return
def execute_batch(cur, sql, argslist, page_size=100):
    r"""Execute groups of statements in fewer server roundtrips.
    Execute *sql* once for every parameters set (sequence or mapping) found
    in *argslist*, semantically like ``cur.executemany(sql, argslist)``, but
    joining up to *page_size* statements into multi-statement commands so
    that fewer roundtrips to the server are needed.
    After the execution the `cursor.rowcount` property will **not** contain
    a total result.
    """
    for page in _paginate(argslist, page_size=page_size):
        # Render every statement client-side, then ship the whole page
        # as one semicolon-separated command.
        joined = b";".join(cur.mogrify(sql, args) for args in page)
        cur.execute(joined)
def execute_values(cur, sql, argslist, template=None, page_size=100):
    '''Execute a statement using :sql:`VALUES` with a sequence of parameters.
    :param cur: the cursor to use to execute the query.
    :param sql: the query to execute. It must contain a single ``%s``
        placeholder, which will be replaced by a `VALUES list`__.
        Example: ``"INSERT INTO mytable (id, f1, f2) VALUES %s"``.
    :param argslist: sequence of sequences or dictionaries with the arguments
        to send to the query. The type and content must be consistent with
        *template*.
    :param template: the snippet to merge to every item in *argslist* to
        compose the query. If *argslist* items are sequences it should contain
        positional placeholders (e.g. ``"(%s, %s, %s)"``, or ``"(%s, %s, 42)"``
        if there are constant values...); if *argslist* items are mappings
        it should contain named placeholders (e.g. ``"(%(id)s, %(f1)s, 42)"``).
        If not specified, assume the arguments are sequence and use a simple
        positional template (i.e. ``(%s, %s, ...)``), with the number of
        placeholders sniffed by the first element in *argslist*.
    :param page_size: maximum number of *argslist* items to include in every
        statement. If there are more items the function will execute more than
        one statement.
    .. __: https://www.postgresql.org/docs/current/static/queries-values.html
    After the execution of the function the `cursor.rowcount` property will
    **not** contain a total result.
    While :sql:`INSERT` is an obvious candidate for this function it is
    possible to use it with other statements, for example::
        >>> cur.execute(
        ... "create table test (id int primary key, v1 int, v2 int)")
        >>> execute_values(cur,
        ... "INSERT INTO test (id, v1, v2) VALUES %s",
        ... [(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        >>> execute_values(cur,
        ... """UPDATE test SET v1 = data.v1 FROM (VALUES %s) AS data (id, v1)
        ... WHERE test.id = data.id""",
        ... [(1, 20), (4, 50)])
        >>> cur.execute("select * from test order by id")
        >>> cur.fetchall()
        [(1, 20, 3), (4, 50, 6), (7, 8, 9)]
    '''
    # we can't just use sql % vals because vals is bytes: if sql is bytes
    # there will be some decoding error because of stupid codec used, and Py3
    # doesn't implement % on bytes.
    if not isinstance(sql, bytes):
        sql = sql.encode(_ext.encodings[cur.connection.encoding])
    pre, post = _split_sql(sql)
    for page in _paginate(argslist, page_size=page_size):
        # The template is sniffed from the first item of the first page
        # only; the same template is reused for all subsequent pages.
        if template is None:
            template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'
        parts = pre[:]
        for args in page:
            parts.append(cur.mogrify(template, args))
            parts.append(b',')
        # Replace the trailing comma with the query text that followed
        # the %s placeholder.
        parts[-1:] = post
        cur.execute(b''.join(parts))
def _split_sql(sql):
"""Split *sql* on a single ``%s`` placeholder.
Split on the %s, perform %% replacement and return pre, post lists of
snippets.
"""
curr = pre = []
post = []
tokens = _re.split(br'(%.)', sql)
for token in tokens:
if len(token) != 2 or token[:1] != b'%':
curr.append(token)
continue
if token[1:] == b's':
if curr is pre:
curr = post
else:
raise ValueError(
"the query contains more than one '%s' placeholder")
elif token[1:] == b'%':
curr.append(b'%')
else:
raise ValueError("unsupported format character: '%s'"
% token[1:].decode('ascii', 'replace'))
if curr is pre:
raise ValueError("the query doesn't contain any '%s' placeholder")
return pre, post | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
import CParser
import json
import os
import shutil
from collections import OrderedDict
# C headers that CParser scans for the struct/macro definitions used below.
files = ["../src/gui/widgets/widgets.h",
         "../src/gui/widgets/layouts/layouts.h",
         "../src/common.h",
         "../src/fc/fc.h",
         "../src/fc/conf.h"]
# C++ source containing the widget layout tables parsed by parse_layouts().
layout_file = "../src/gui/widgets/layouts/layouts.cpp"
p = CParser.CParser(files)
# p.printAll()
# Byte sizes of the primitive C types appearing in the configuration struct.
sizes = {
    "uint8_t": 1,
    "int8_t": 1,
    "uint16_t": 2,
    "int16_t": 2,
    "uint32_t": 4,
    "int32_t": 4,
    "float": 4,
    "char": 1,
}
# print sizes
def to_int(s):
    """Convert a numeric literal string to an int.
    Accepts plain decimal as well as "0x"-prefixed hexadecimal and
    "0b"-prefixed binary notation.
    """
    prefix = s[:2]
    if prefix == "0x":
        return int(s[2:], 16)
    elif prefix == "0b":
        return int(s[2:], 2)
    return int(s)
def parse_layouts(filename, p):
    """Build an OrderedDict mapping LAYOUT_* macro names to layout data.
    Reads *filename* (the layouts .cpp) to find the table of layout
    descriptors and the included ``.inc`` files, then reads each .inc file
    to collect per-layout widget rectangles and widget counts.
    *p* is the CParser instance whose defs["macros"] holds the LAYOUT_*
    id macros.
    """
    f = open(filename, "r")
    data = f.readlines()
    f.close()
    layouts_files = []
    layouts_map = OrderedDict()
    layouts_macro_name = {}
    layouts_name_map = {}
    # Invert the LAYOUT_* macros: numeric id -> macro name.
    for name in p.defs["macros"]:
        if name[:7] == "LAYOUT_":
            layouts_macro_name[to_int(p.defs["macros"][name])] = name
    for line in data:
        line = line.split()
        if len(line) == 0:
            continue
        # Collect the #include "...inc" files holding the layout bodies.
        if line[0] == "#include":
            name = line[1].split('"')[1]
            if name[-4:] == ".inc":
                layouts_files.append(name)
        # The initializer list "{a, b, ...}" names the layout variables in
        # id order; map each variable name to its LAYOUT_* macro.
        if line[0][0] == "{":
            line = "".join(line).split("{")[1].split("}")[0].split(",")
            pos = 0
            for var in line:
                layouts_map[layouts_macro_name[pos]] = {"widgets": [], "number_of_widgets": False, "id":pos}
                # var[1:] strips the leading '&' of the address-of expression
                layouts_name_map[var[1:]] = layouts_macro_name[pos]
                pos += 1
    base = os.path.dirname(filename)
    for filename in layouts_files:
        layout_name = False
        f = open(os.path.join(base, filename), "r")
        data = f.readlines()
        f.close()
        for line in data:
            line = line.split()
            if len(line) == 0:
                continue
            w = []
            # A "{x, y, w, h}" initializer is one widget rectangle.
            a = "".join(line).split("{")
            if len(a) == 2:
                a = a[1].split("}")
                if len(a) == 2:
                    a = a[0].split(",")
                    # NOTE: relies on Python 2 map() returning a list
                    # (len(w) is taken below); breaks under Python 3.
                    w = map(to_int, a)
            if len(w) == 4:
                layouts_map[layout_name]["widgets"].append(w)
            # A bare "<count>," line gives the widget count of the layout.
            n = "".join(line).split(",")
            if len(n) == 2:
                n = to_int(n[0])
                layouts_map[layout_name]["number_of_widgets"] = n
            # "... layout_desc <name> ..." starts a new layout definition.
            if len(line) == 5:
                if line[1] == "layout_desc":
                    layout_name = layouts_name_map[line[2]]
    return layouts_map
def map_struct(parser, path, map_path):
    """Recursively flatten a parsed C struct into the global EEPROM map.
    *path* is a CParser "members" list: items of the form
    (name, (type, [length])).  For every scalar member an entry
    ``map_path + "_" + name -> [offset, size, type]`` is written into the
    global ``ee_map``, advancing the global ``mem_index`` offset cursor.
    Nested struct types recurse with an extended name prefix.
    """
    global ee_map
    global mem_index
    for struct in path:
        # print struct
        struct_name = struct[0]
        struct_type = struct[1][0]
        # A second element in struct[1] carries the array length.
        if len(struct[1]) == 2:
            struct_len = struct[1][1][0]
        else:
            struct_len = 1
        arr_type = False
        # Types suffixed "_arr" are stored as one contiguous array entry.
        if struct_type[-4:] == "_arr":
            struct_type = struct_type[:-4]
            arr_type = True
        if struct_type in sizes:
            var_size = sizes[struct_type]
            if struct_type == "char" or arr_type:
                # Whole array mapped as a single entry.
                map_name = map_path + "_" + struct_name
                ee_map[map_name] = [mem_index, struct_len, struct_type]
                # NOTE(review): advances by element count, not by
                # struct_len * var_size -- correct for 1-byte types (char);
                # confirm intent for multi-byte "_arr" types.
                mem_index += struct_len
            else:
                # Scalars and non-"_arr" arrays: one entry per element.
                for i in range(struct_len):
                    if struct_len > 1:
                        map_name = map_path + "_" + struct_name + "_" + str(i)
                    else:
                        map_name = map_path + "_" + struct_name
                    ee_map[map_name] = [mem_index, var_size, struct_type]
                    mem_index += var_size
        else:
            # Nested struct (possibly an array of structs): recurse.
            for i in range(struct_len):
                next_struct = parser.defs["structs"][struct_type]["members"]
                if struct_len > 1:
                    map_name = map_path + "_" + struct_name + "_" + str(i)
                else:
                    map_name = map_path + "_" + struct_name
                map_struct(parser, next_struct, map_name)
# Python 2 script body (print statements).  Global EEPROM offset cursor and
# output map filled in by map_struct().
mem_index = 0
ee_map = OrderedDict()
# The current firmware build number names the output directory.
f = open("../utils/build/build_number.txt", "r")
number = int(f.readline())
f.close()
print "Configuration mapper (Build %u)" % number
print "-----------------------------------------"
print "Parsing headers...",
p.processAll()
print "ok"
print "Parsing cfg...",
map_struct(p, p.defs["structs"]["cfg_t"]["members"], "cfg")
print "ok"
print "Parsing layouts...",
layouts = parse_layouts(layout_file, p)
print "ok"
path = "../../skydrop_configurator/app/fw/%08d/" % number
try:
    os.makedirs(path)
# NOTE(review): bare except silently ignores any makedirs failure, not just
# "directory already exists".
except:
    pass
print "Writing description file...",
f = open(os.path.join(path, "ee_map.json"), "w")
f.write(json.dumps({"map": ee_map, "macros": p.defs["macros"], "layouts": layouts}))
f.close()
print "ok"
print "Providing firmware, configuration and description files to configurator...",
#firmware image
shutil.copyfile("UPDATE.FW", os.path.join(path, "UPDATE.FW"))
#eeprom image
shutil.copyfile("UPDATE.EE", os.path.join(path, "UPDATE.EE"))
#disassembly
shutil.copyfile("skydrop.lss", os.path.join(path, "skydrop.lss"))
#copy last update file
shutil.copyfile("UPDATE.EE", os.path.join("../../skydrop_configurator/app/", "UPDATE.EE"))
print "ok"
print "-----------------------------------------"
"""contain MultipleQueryLauncher Class"""
import unittest
import os.path
from pyramid.paster import get_appsettings
from pyramid import testing
from askomics.libaskomics.rdfdb.MultipleQueryLauncher import MultipleQueryLauncher
from askomics.libaskomics.EndpointManager import EndpointManager
class MultipleQueryLauncher(unittest.TestCase):
    """Test for the MultipleQueryLauncher class"""
    # NOTE(review): this TestCase shadows the MultipleQueryLauncher class
    # imported above, making the real class unreachable in this module;
    # consider renaming it (e.g. TestMultipleQueryLauncher).
    def setUp(self):
        """Set up the settings and session"""
        self.settings = get_appsettings('configs/tests.ini', name='main')
        self.request = testing.DummyRequest()
        #self.settings['askomics.fdendpoint'] = 'http://localhost:8890/sparql'
    def test_process_query(self):
        """Register two test endpoints; the query call itself is disabled."""
        jm = EndpointManager(self.settings, self.request.session)
        jm.save_endpoint("testNameEndpoint",'http://localhost:8890/sparql','Digest',True)
        jm.save_endpoint("testNameEndpoint2",'http://localhost:8890/sparql','Digest',True)
        #mql = MultipleQueryLauncher(self.settings, self.request.session)
        # NOTE(review): with process_query commented out, this try block can
        # never raise ValueError, so the test always passes trivially.
        try:
            # mql.process_query("SELECT * WHERE { ?a ?b ?c. } LIMIT 1",jm.listEndpoints(),indexByEndpoint=True)
            assert True
        except ValueError:
            assert False
import rsc from "@vitejs/plugin-rsc";
import react from "@vitejs/plugin-react";
import { defineConfig } from "vite";
// Vite configuration wiring React together with the RSC plugin.
export default defineConfig({
  plugins: [
    react(),
    rsc({
      // Entry modules handed to @vitejs/plugin-rsc; by the file names,
      // presumably: browser/client entry, server-components entry, and
      // SSR entry -- confirm against the plugin's docs.
      entries: {
        client: "src/entry.browser.tsx",
        rsc: "src/entry.rsc.tsx",
        ssr: "src/entry.ssr.tsx",
      },
    }),
  ],
});
import pygame
from Networking import Client, Messages
import Util
class Sprite(object):
    """A drawable image with a position on screen.

    The image is loaded eagerly from *uri*; width/height (and their
    halves) are cached from the loaded surface.
    """

    def __init__(self, uri, x=0, y=0):
        """Load the image at *uri* and place the sprite at (*x*, *y*)."""
        self._uri = uri
        self._x = x
        self._y = y
        # Fix: the dx/dy properties below previously raised AttributeError
        # when read before being assigned; start with zero velocity.
        self._dx = 0
        self._dy = 0
        self._img = pygame.image.load(uri)
        self._width = self._img.get_width()
        self._height = self._img.get_height()
        # NOTE(review): '/' is true division on Python 3, so the halves may
        # be floats for odd sizes -- confirm whether integers are intended.
        self._half_width = self._width/2
        self._half_height = self._height/2

    ###
    # Properties
    ###
    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @property
    def half_width(self):
        return self._half_width

    @property
    def half_height(self):
        return self._half_height

    @property
    def dx(self):
        return self._dx

    @dx.setter
    def dx(self, value):
        self._dx = value

    @property
    def dy(self):
        return self._dy

    @dy.setter
    def dy(self, value):
        self._dy = value

    @property
    def img(self):
        return self._img

    ###
    # Methods
    ###
    def update(self, frame_time):
        """Per-frame hook; the base sprite is static, subclasses override."""
        pass
class Ninja(Sprite):
    """Player sprite whose movement and actions are broadcast to the
    server via the Networking Client.
    """

    # Default movement speed limit for a freshly created ninja.
    _MAX_SPEED = 1

    def __init__(self, uri):
        """Load the ninja image at *uri* and place it at the origin.

        Fix: reuses Sprite.__init__ for the shared image/position setup
        instead of duplicating it line for line.
        """
        Sprite.__init__(self, uri)
        self._dx = 0
        self._dy = 0
        self._max_speed = Ninja._MAX_SPEED
        self._dies = Util.Event()

    ###
    # Properties
    ###
    @property
    def dies(self):
        """Event fired when this ninja is killed."""
        return self._dies

    @property
    def dx(self):
        return self._dx

    @dx.setter
    def dx(self, value):
        # Only notify the server when the velocity actually changes.
        if value == self._dx:
            return
        self._dx = value
        Client.send(Messages.NinjaMove(Messages.Orientation.Horizontal, value))

    @property
    def dy(self):
        return self._dy

    @dy.setter
    def dy(self, value):
        # Only notify the server when the velocity actually changes.
        if value == self._dy:
            return
        self._dy = value
        Client.send(Messages.NinjaMove(Messages.Orientation.Vertical, value))

    @property
    def max_speed(self):
        return self._max_speed

    @max_speed.setter
    def max_speed(self, value):
        self._max_speed = value

    ###
    # Methods
    ###
    def swing_sword(self):
        """Tell the server that a sword swing happened at our position."""
        Client.send(Messages.SwordSwing(self._x, self._y))

    def set_position(self, x, y):
        """Move to (*x*, *y*) and report the new position to the server."""
        self._x = x
        self._y = y
        Client.send(Messages.NinjaPosition(x, y))

    def update(self, frame_time):
        """Apply the pending velocity for this frame, then reset it.

        *frame_time* is currently unused; movement is applied per update.
        """
        self._x += self._dx
        self._y += self._dy
        self._dx = 0
        self._dy = 0

    def recv(self, message):
        """Apply a message received from the server to this ninja."""
        print(message)  # debug trace of every incoming message
        if isinstance(message, Messages.NinjaMove):
            if message.orientation == Messages.Orientation.Horizontal:
                self._dx = message.magnitude
            else:
                self._dy = message.magnitude
        elif isinstance(message, Messages.NinjaPosition):
            self._x = message.x
            self._y = message.y
        elif isinstance(message, Messages.SwordSwing):
            # Die (and tell the server) only if the swing happened exactly
            # at our current coordinates.
            if self._x == message.x and self._y == message.y:
                self._dies.fire({"sender": self})
                Client.send(Messages.NinjaDeath())
        elif isinstance(message, Messages.NinjaDeath):
            self._dies.fire({"sender": self})
# -*- coding: utf-8 -*-
# Copyright (c) 2002 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a class used to display the Sources part of the project.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QDialog, QInputDialog, QMenu
from E5Gui import E5MessageBox
from UI.BrowserModel import BrowserFileItem, BrowserClassItem, \
BrowserMethodItem, BrowserClassAttributeItem
from .ProjectBrowserModel import ProjectBrowserFileItem, \
ProjectBrowserSimpleDirectoryItem, ProjectBrowserDirectoryItem, \
ProjectBrowserSourceType
from .ProjectBaseBrowser import ProjectBaseBrowser
import Utilities
import UI.PixmapCache
class ProjectSourcesBrowser(ProjectBaseBrowser):
    """
    A class used to display the Sources part of the project.
    @signal closeSourceWindow(str) emitted after a file has been
        removed/deleted from the project
    @signal showMenu(str, QMenu) emitted when a menu is about to be shown.
        The name of the menu and a reference to the menu are given.
    @signal sourceFile(str) emitted to open the given file.
    @signal sourceFile(str, int) emitted to open the given file at the given
        line.
    @signal sourceFile(str, int, str) emitted to open the given file as the
        given type at the given line.
    """
    # NOTE(review): only showMenu is declared here; the other signals listed
    # in the docstring are presumably provided by ProjectBaseBrowser --
    # confirm.
    showMenu = pyqtSignal(str, QMenu)
def __init__(self, project, parent=None):
"""
Constructor
@param project reference to the project object
@param parent parent widget of this browser (QWidget)
"""
ProjectBaseBrowser.__init__(self, project, ProjectBrowserSourceType,
parent)
self.selectedItemsFilter = \
[ProjectBrowserFileItem, ProjectBrowserSimpleDirectoryItem]
self.setWindowTitle(self.tr('Sources'))
self.setWhatsThis(self.tr(
"""<b>Project Sources Browser</b>"""
"""<p>This allows to easily see all sources contained in the"""
""" current project. Several actions can be executed via the"""
""" context menu.</p>"""
))
project.prepareRepopulateItem.connect(self._prepareRepopulateItem)
project.completeRepopulateItem.connect(self._completeRepopulateItem)
self.codemetrics = None
self.codecoverage = None
self.profiledata = None
self.classDiagram = None
self.importsDiagram = None
self.packageDiagram = None
self.applicationDiagram = None
self.loadedDiagram = None
def __closeAllWindows(self):
"""
Private method to close all project related windows.
"""
self.codemetrics and self.codemetrics.close()
self.codecoverage and self.codecoverage.close()
self.profiledata and self.profiledata.close()
self.classDiagram and self.classDiagram.close()
self.importsDiagram and self.importsDiagram.close()
self.packageDiagram and self.packageDiagram.close()
self.applicationDiagram and self.applicationDiagram.close()
self.loadedDiagram and self.loadedDiagram.close()
    def _projectClosed(self):
        """
        Protected slot to handle the projectClosed signal.
        """
        # Close the metrics/coverage/profile/diagram windows first, then
        # let the base class perform its own cleanup.
        self.__closeAllWindows()
        ProjectBaseBrowser._projectClosed(self)
def _createPopupMenus(self):
"""
Protected overloaded method to generate the popup menu.
"""
ProjectBaseBrowser._createPopupMenus(self)
self.sourceMenuActions = {}
if self.project.isPythonProject():
self.__createPythonPopupMenus()
elif self.project.isRubyProject():
self.__createRubyPopupMenus()
elif self.project.isJavaScriptProject():
self.__createJavaScriptPopupMenus()
    def __createPythonPopupMenus(self):
        """
        Private method to generate the popup menus for a Python project.
        """
        # submenu with the source code checkers; populated on demand
        self.checksMenu = QMenu(self.tr('Check'))
        self.checksMenu.aboutToShow.connect(self.__showContextMenuCheck)
        # submenu with the data display entries (metrics, coverage, profile)
        self.menuShow = QMenu(self.tr('Show'))
        self.menuShow.addAction(
            self.tr('Code metrics...'), self.__showCodeMetrics)
        self.coverageMenuAction = self.menuShow.addAction(
            self.tr('Code coverage...'), self.__showCodeCoverage)
        self.profileMenuAction = self.menuShow.addAction(
            self.tr('Profile data...'), self.__showProfileData)
        self.menuShow.aboutToShow.connect(self.__showContextMenuShow)
        # submenu with the diagram generators
        self.graphicsMenu = QMenu(self.tr('Diagrams'))
        self.classDiagramAction = self.graphicsMenu.addAction(
            self.tr("Class Diagram..."), self.__showClassDiagram)
        self.graphicsMenu.addAction(
            self.tr("Package Diagram..."), self.__showPackageDiagram)
        self.importsDiagramAction = self.graphicsMenu.addAction(
            self.tr("Imports Diagram..."), self.__showImportsDiagram)
        self.graphicsMenu.addAction(
            self.tr("Application Diagram..."),
            self.__showApplicationDiagram)
        self.graphicsMenu.addSeparator()
        self.graphicsMenu.addAction(
            UI.PixmapCache.getIcon("open.png"),
            self.tr("Load Diagram..."), self.__loadDiagram)
        self.graphicsMenu.aboutToShow.connect(self.__showContextMenuGraphics)
        # context menu for a single selected file
        self.unittestAction = self.sourceMenu.addAction(
            self.tr('Run unittest...'), self.handleUnittest)
        self.sourceMenu.addSeparator()
        act = self.sourceMenu.addAction(
            self.tr('Rename file'), self._renameFile)
        self.menuActions.append(act)
        act = self.sourceMenu.addAction(
            self.tr('Remove from project'), self._removeFile)
        self.menuActions.append(act)
        act = self.sourceMenu.addAction(
            self.tr('Delete'), self.__deleteFile)
        self.menuActions.append(act)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('New package...'), self.__addNewPackage)
        self.sourceMenu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.sourceMenu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.sourceMenu.addSeparator()
        act = self.sourceMenu.addMenu(self.graphicsMenu)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addMenu(self.checksMenu)
        self.sourceMenu.addSeparator()
        self.sourceMenuActions["Show"] = self.sourceMenu.addMenu(self.menuShow)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Copy Path to Clipboard'), self._copyToClipboard)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.sourceMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(self.tr('Configure...'), self._configure)
        # general background context menu (inherited self.menu)
        self.menu.addSeparator()
        self.menu.addAction(
            self.tr('New package...'), self.__addNewPackage)
        self.menu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.menu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.menu.addSeparator()
        self.menu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.menu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.menu.addSeparator()
        self.menu.addAction(self.tr('Configure...'), self._configure)
        # create the attribute menu
        self.gotoMenu = QMenu(self.tr("Goto"), self)
        self.gotoMenu.aboutToShow.connect(self._showGotoMenu)
        self.gotoMenu.triggered.connect(self._gotoAttribute)
        self.attributeMenu = QMenu(self)
        self.attributeMenu.addMenu(self.gotoMenu)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('New package...'), self.__addNewPackage)
        self.attributeMenu.addAction(
            self.tr('Add source files...'), self.project.addSourceFiles)
        self.attributeMenu.addAction(
            self.tr('Add source directory...'), self.project.addSourceDir)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.attributeMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Configure...'), self._configure)
        # context menu for the empty background area
        self.backMenu = QMenu(self)
        self.backMenu.addAction(
            self.tr('New package...'), self.__addNewPackage)
        self.backMenu.addAction(
            self.tr('Add source files...'), self.project.addSourceFiles)
        self.backMenu.addAction(
            self.tr('Add source directory...'), self.project.addSourceDir)
        self.backMenu.addSeparator()
        self.backMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.backMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.backMenu.addSeparator()
        self.backMenu.addAction(self.tr('Configure...'), self._configure)
        self.backMenu.setEnabled(False)
        # context menu for multiple selected files (inherited self.multiMenu)
        self.multiMenu.addSeparator()
        act = self.multiMenu.addAction(
            self.tr('Remove from project'), self._removeFile)
        self.multiMenuActions.append(act)
        act = self.multiMenu.addAction(
            self.tr('Delete'), self.__deleteFile)
        self.multiMenuActions.append(act)
        self.multiMenu.addSeparator()
        self.multiMenu.addMenu(self.checksMenu)
        self.multiMenu.addSeparator()
        self.multiMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.multiMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.multiMenu.addSeparator()
        self.multiMenu.addAction(self.tr('Configure...'), self._configure)
        # context menu for a single selected directory
        self.dirMenu = QMenu(self)
        act = self.dirMenu.addAction(
            self.tr('Remove from project'), self._removeDir)
        self.dirMenuActions.append(act)
        act = self.dirMenu.addAction(
            self.tr('Delete'), self._deleteDirectory)
        self.dirMenuActions.append(act)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('New package...'), self.__addNewPackage)
        self.dirMenu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.dirMenu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.dirMenu.addSeparator()
        act = self.dirMenu.addMenu(self.graphicsMenu)
        self.dirMenu.addSeparator()
        self.dirMenu.addMenu(self.checksMenu)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Copy Path to Clipboard'), self._copyToClipboard)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.dirMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(self.tr('Configure...'), self._configure)
        # context menu for multiple selected directories
        self.dirMultiMenu = QMenu(self)
        self.dirMultiMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.dirMultiMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.dirMultiMenu.addSeparator()
        self.dirMultiMenu.addAction(
            self.tr('Configure...'), self._configure)
        # hook up the menus and remember the main one
        self.sourceMenu.aboutToShow.connect(self.__showContextMenu)
        self.multiMenu.aboutToShow.connect(self.__showContextMenuMulti)
        self.dirMenu.aboutToShow.connect(self.__showContextMenuDir)
        self.dirMultiMenu.aboutToShow.connect(self.__showContextMenuDirMulti)
        self.backMenu.aboutToShow.connect(self.__showContextMenuBack)
        self.mainMenu = self.sourceMenu
    def __createRubyPopupMenus(self):
        """
        Private method to generate the popup menus for a Ruby project.
        """
        # submenu with the diagram generators (no imports diagram for Ruby)
        self.graphicsMenu = QMenu(self.tr('Diagrams'))
        self.classDiagramAction = self.graphicsMenu.addAction(
            self.tr("Class Diagram..."), self.__showClassDiagram)
        self.graphicsMenu.addAction(
            self.tr("Package Diagram..."), self.__showPackageDiagram)
        self.graphicsMenu.addAction(
            self.tr("Application Diagram..."),
            self.__showApplicationDiagram)
        self.graphicsMenu.addSeparator()
        self.graphicsMenu.addAction(
            UI.PixmapCache.getIcon("fileOpen.png"),
            self.tr("Load Diagram..."), self.__loadDiagram)
        # context menu for a single selected file
        self.sourceMenu.addSeparator()
        act = self.sourceMenu.addAction(
            self.tr('Rename file'), self._renameFile)
        self.menuActions.append(act)
        act = self.sourceMenu.addAction(
            self.tr('Remove from project'), self._removeFile)
        self.menuActions.append(act)
        act = self.sourceMenu.addAction(
            self.tr('Delete'), self.__deleteFile)
        self.menuActions.append(act)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.sourceMenu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.sourceMenu.addSeparator()
        act = self.sourceMenu.addMenu(self.graphicsMenu)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.sourceMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(self.tr('Configure...'), self._configure)
        # general background context menu (inherited self.menu)
        self.menu.addSeparator()
        self.menu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.menu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.menu.addSeparator()
        self.menu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.menu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.menu.addSeparator()
        self.menu.addAction(self.tr('Configure...'), self._configure)
        # create the attribute menu
        self.gotoMenu = QMenu(self.tr("Goto"), self)
        self.gotoMenu.aboutToShow.connect(self._showGotoMenu)
        self.gotoMenu.triggered.connect(self._gotoAttribute)
        self.attributeMenu = QMenu(self)
        self.attributeMenu.addMenu(self.gotoMenu)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Add source files...'), self.project.addSourceFiles)
        self.attributeMenu.addAction(
            self.tr('Add source directory...'), self.project.addSourceDir)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.attributeMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Configure...'), self._configure)
        # context menu for the empty background area
        self.backMenu = QMenu(self)
        self.backMenu.addAction(
            self.tr('Add source files...'), self.project.addSourceFiles)
        self.backMenu.addAction(
            self.tr('Add source directory...'), self.project.addSourceDir)
        self.backMenu.addSeparator()
        self.backMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.backMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.backMenu.setEnabled(False)
        self.backMenu.addSeparator()
        self.backMenu.addAction(self.tr('Configure...'), self._configure)
        # context menu for multiple selected files (inherited self.multiMenu)
        self.multiMenu.addSeparator()
        act = self.multiMenu.addAction(
            self.tr('Remove from project'), self._removeFile)
        self.multiMenuActions.append(act)
        act = self.multiMenu.addAction(
            self.tr('Delete'), self.__deleteFile)
        self.multiMenuActions.append(act)
        self.multiMenu.addSeparator()
        self.multiMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.multiMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.multiMenu.addSeparator()
        self.multiMenu.addAction(self.tr('Configure...'), self._configure)
        # context menu for a single selected directory
        self.dirMenu = QMenu(self)
        act = self.dirMenu.addAction(
            self.tr('Remove from project'), self._removeDir)
        self.dirMenuActions.append(act)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.dirMenu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.dirMenu.addSeparator()
        act = self.dirMenu.addMenu(self.graphicsMenu)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.dirMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(self.tr('Configure...'), self._configure)
        # context menu for multiple selected directories
        self.dirMultiMenu = QMenu(self)
        self.dirMultiMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.dirMultiMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.dirMultiMenu.addSeparator()
        self.dirMultiMenu.addAction(
            self.tr('Configure...'), self._configure)
        # hook up the menus and remember the main one
        self.sourceMenu.aboutToShow.connect(self.__showContextMenu)
        self.multiMenu.aboutToShow.connect(self.__showContextMenuMulti)
        self.dirMenu.aboutToShow.connect(self.__showContextMenuDir)
        self.dirMultiMenu.aboutToShow.connect(self.__showContextMenuDirMulti)
        self.backMenu.aboutToShow.connect(self.__showContextMenuBack)
        self.mainMenu = self.sourceMenu
    def __createJavaScriptPopupMenus(self):
        """
        Private method to generate the popup menus for a JavaScript project.
        """
        # Checks submenu; it is shared by the source, multi selection and
        # directory context menus assembled below.
        self.checksMenu = QMenu(self.tr('Check'))
        self.checksMenu.aboutToShow.connect(self.__showContextMenuCheck)
        # Context menu for a single selected source file.  Actions that must
        # be disabled depending on the selection are remembered in
        # self.menuActions.
        self.sourceMenu.addSeparator()
        act = self.sourceMenu.addAction(
            self.tr('Rename file'), self._renameFile)
        self.menuActions.append(act)
        act = self.sourceMenu.addAction(
            self.tr('Remove from project'), self._removeFile)
        self.menuActions.append(act)
        act = self.sourceMenu.addAction(
            self.tr('Delete'), self.__deleteFile)
        self.menuActions.append(act)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.sourceMenu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addMenu(self.checksMenu)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Copy Path to Clipboard'), self._copyToClipboard)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.sourceMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.sourceMenu.addSeparator()
        self.sourceMenu.addAction(self.tr('Configure...'), self._configure)
        # Context menu for a selected class or method item.
        self.menu.addSeparator()
        self.menu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.menu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.menu.addSeparator()
        self.menu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.menu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.menu.addSeparator()
        self.menu.addAction(self.tr('Configure...'), self._configure)
        # create the attribute menu
        self.gotoMenu = QMenu(self.tr("Goto"), self)
        self.gotoMenu.aboutToShow.connect(self._showGotoMenu)
        self.gotoMenu.triggered.connect(self._gotoAttribute)
        self.attributeMenu = QMenu(self)
        self.attributeMenu.addMenu(self.gotoMenu)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Add source files...'), self.project.addSourceFiles)
        self.attributeMenu.addAction(
            self.tr('Add source directory...'), self.project.addSourceDir)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.attributeMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.attributeMenu.addSeparator()
        self.attributeMenu.addAction(
            self.tr('Configure...'), self._configure)
        # Background context menu (no item under the mouse pointer); it is
        # disabled until it is actually needed.
        self.backMenu = QMenu(self)
        self.backMenu.addAction(
            self.tr('Add source files...'), self.project.addSourceFiles)
        self.backMenu.addAction(
            self.tr('Add source directory...'), self.project.addSourceDir)
        self.backMenu.addSeparator()
        self.backMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.backMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.backMenu.addSeparator()
        self.backMenu.addAction(self.tr('Configure...'), self._configure)
        self.backMenu.setEnabled(False)
        # Context menu for multiple selected files.
        self.multiMenu.addSeparator()
        act = self.multiMenu.addAction(
            self.tr('Remove from project'), self._removeFile)
        self.multiMenuActions.append(act)
        act = self.multiMenu.addAction(
            self.tr('Delete'), self.__deleteFile)
        self.multiMenuActions.append(act)
        self.multiMenu.addSeparator()
        self.multiMenu.addMenu(self.checksMenu)
        self.multiMenu.addSeparator()
        self.multiMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.multiMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.multiMenu.addSeparator()
        self.multiMenu.addAction(self.tr('Configure...'), self._configure)
        # Context menu for a single selected directory.
        self.dirMenu = QMenu(self)
        act = self.dirMenu.addAction(
            self.tr('Remove from project'), self._removeDir)
        self.dirMenuActions.append(act)
        act = self.dirMenu.addAction(
            self.tr('Delete'), self._deleteDirectory)
        self.dirMenuActions.append(act)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Add source files...'), self.__addSourceFiles)
        self.dirMenu.addAction(
            self.tr('Add source directory...'), self.__addSourceDirectory)
        self.dirMenu.addSeparator()
        self.dirMenu.addMenu(self.checksMenu)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Copy Path to Clipboard'), self._copyToClipboard)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.dirMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.dirMenu.addSeparator()
        self.dirMenu.addAction(self.tr('Configure...'), self._configure)
        # Context menu for multiple selected directories.
        self.dirMultiMenu = QMenu(self)
        self.dirMultiMenu.addAction(
            self.tr('Expand all directories'), self._expandAllDirs)
        self.dirMultiMenu.addAction(
            self.tr('Collapse all directories'), self._collapseAllDirs)
        self.dirMultiMenu.addSeparator()
        self.dirMultiMenu.addAction(
            self.tr('Configure...'), self._configure)
        # Let the menus be adjusted (e.g. by plug-ins) just before they are
        # shown.
        self.sourceMenu.aboutToShow.connect(self.__showContextMenu)
        self.multiMenu.aboutToShow.connect(self.__showContextMenuMulti)
        self.dirMenu.aboutToShow.connect(self.__showContextMenuDir)
        self.dirMultiMenu.aboutToShow.connect(self.__showContextMenuDirMulti)
        self.backMenu.aboutToShow.connect(self.__showContextMenuBack)
        self.mainMenu = self.sourceMenu
    def _contextMenuRequested(self, coord):
        """
        Protected slot to show the context menu.

        @param coord the position of the mouse pointer (QPoint)
        """
        if not self.project.isOpen():
            return
        try:
            # Count the selected items per item category; "sum" is the total
            # selection count.
            categories = self.getSelectedItemsCountCategorized(
                [ProjectBrowserFileItem, BrowserClassItem,
                 BrowserMethodItem, ProjectBrowserSimpleDirectoryItem,
                 BrowserClassAttributeItem])
            cnt = categories["sum"]
            if cnt <= 1:
                # Nothing (or a single item) selected; select the item under
                # the mouse pointer and recount.
                index = self.indexAt(coord)
                if index.isValid():
                    self._selectSingleItem(index)
                    categories = self.getSelectedItemsCountCategorized(
                        [ProjectBrowserFileItem, BrowserClassItem,
                         BrowserMethodItem, ProjectBrowserSimpleDirectoryItem,
                         BrowserClassAttributeItem])
                    cnt = categories["sum"]
            bfcnt = categories[str(ProjectBrowserFileItem)]
            # class + method + attribute items
            cmcnt = categories[str(BrowserClassItem)] + \
                categories[str(BrowserMethodItem)] + \
                categories[str(BrowserClassAttributeItem)]
            sdcnt = categories[str(ProjectBrowserSimpleDirectoryItem)]
            if cnt > 1 and cnt == bfcnt:
                # multiple files selected
                self.multiMenu.popup(self.mapToGlobal(coord))
            elif cnt > 1 and cnt == sdcnt:
                # multiple directories selected
                self.dirMultiMenu.popup(self.mapToGlobal(coord))
            else:
                index = self.indexAt(coord)
                if cnt == 1 and index.isValid():
                    if bfcnt == 1 or cmcnt == 1:
                        itm = self.model().item(index)
                        if isinstance(itm, ProjectBrowserFileItem):
                            fn = itm.fileName()
                            if self.project.isPythonProject():
                                # Enable/disable the source menu actions
                                # depending on the file type selected in a
                                # (possibly mixed mode) Python project.
                                if fn.endswith('.ptl'):
                                    for act in self.sourceMenuActions.values():
                                        act.setEnabled(False)
                                    self.classDiagramAction.setEnabled(True)
                                    self.importsDiagramAction.setEnabled(True)
                                    self.unittestAction.setEnabled(False)
                                    self.checksMenu.menuAction().setEnabled(
                                        False)
                                elif fn.endswith('.rb'):
                                    # entry for mixed mode programs
                                    for act in self.sourceMenuActions.values():
                                        act.setEnabled(False)
                                    self.classDiagramAction.setEnabled(True)
                                    self.importsDiagramAction.setEnabled(False)
                                    self.unittestAction.setEnabled(False)
                                    self.checksMenu.menuAction().setEnabled(
                                        False)
                                elif fn.endswith('.js'):
                                    # entry for mixed mode programs
                                    for act in self.sourceMenuActions.values():
                                        act.setEnabled(False)
                                    self.unittestAction.setEnabled(False)
                                    self.checksMenu.menuAction().setEnabled(
                                        False)
                                    self.graphicsMenu.menuAction().setEnabled(
                                        False)
                                else:
                                    # assume the source file is a Python file
                                    for act in self.sourceMenuActions.values():
                                        act.setEnabled(True)
                                    self.classDiagramAction.setEnabled(True)
                                    self.importsDiagramAction.setEnabled(True)
                                    self.unittestAction.setEnabled(True)
                                    self.checksMenu.menuAction().setEnabled(
                                        True)
                            self.sourceMenu.popup(self.mapToGlobal(coord))
                        elif isinstance(itm, BrowserClassItem) or \
                                isinstance(itm, BrowserMethodItem):
                            self.menu.popup(self.mapToGlobal(coord))
                        elif isinstance(itm, BrowserClassAttributeItem):
                            self.attributeMenu.popup(self.mapToGlobal(coord))
                        else:
                            self.backMenu.popup(self.mapToGlobal(coord))
                    elif sdcnt == 1:
                        self.classDiagramAction.setEnabled(False)
                        self.dirMenu.popup(self.mapToGlobal(coord))
                    else:
                        self.backMenu.popup(self.mapToGlobal(coord))
                else:
                    # click into the empty area of the view
                    self.backMenu.popup(self.mapToGlobal(coord))
        except:
            # NOTE(review): the bare except silently swallows every error
            # raised while building/popping up the menu; presumably this is a
            # deliberate "never crash on a context menu click" guard, but it
            # also hides programming errors - consider narrowing or logging.
            pass
def __showContextMenu(self):
"""
Private slot called by the sourceMenu aboutToShow signal.
"""
ProjectBaseBrowser._showContextMenu(self, self.sourceMenu)
self.showMenu.emit("Main", self.sourceMenu)
def __showContextMenuMulti(self):
"""
Private slot called by the multiMenu aboutToShow signal.
"""
ProjectBaseBrowser._showContextMenuMulti(self, self.multiMenu)
self.showMenu.emit("MainMulti", self.multiMenu)
def __showContextMenuDir(self):
"""
Private slot called by the dirMenu aboutToShow signal.
"""
ProjectBaseBrowser._showContextMenuDir(self, self.dirMenu)
self.showMenu.emit("MainDir", self.dirMenu)
def __showContextMenuDirMulti(self):
"""
Private slot called by the dirMultiMenu aboutToShow signal.
"""
ProjectBaseBrowser._showContextMenuDirMulti(self, self.dirMultiMenu)
self.showMenu.emit("MainDirMulti", self.dirMultiMenu)
def __showContextMenuBack(self):
"""
Private slot called by the backMenu aboutToShow signal.
"""
ProjectBaseBrowser._showContextMenuBack(self, self.backMenu)
self.showMenu.emit("MainBack", self.backMenu)
    def __showContextMenuShow(self):
        """
        Private slot called before the show menu is shown.

        Enables the profile and coverage entries depending on the presence of
        matching ``.profile``/``.coverage`` data files for the project's main
        script or the currently selected item.
        """
        prEnable = False
        coEnable = False
        # first check if the file belongs to a project and there is
        # a project coverage file
        fn = self.project.getMainScript(True)
        if fn is not None:
            # Consider the data files of the script itself as well as those
            # of its associated test script.
            tfn = Utilities.getTestFileName(fn)
            basename = os.path.splitext(fn)[0]
            tbasename = os.path.splitext(tfn)[0]
            prEnable = prEnable or \
                os.path.isfile("{0}.profile".format(basename)) or \
                os.path.isfile("{0}.profile".format(tbasename))
            # Coverage display is only offered for Python projects.
            coEnable = (coEnable or
                        os.path.isfile("{0}.coverage".format(basename)) or
                        os.path.isfile("{0}.coverage".format(tbasename))) and \
                (self.project.isPy3Project() or self.project.isPy2Project())
        # now check the selected item
        # NOTE(review): assumes the current item provides fileName(); verify
        # this holds for all item types the show menu can be invoked on.
        itm = self.model().item(self.currentIndex())
        fn = itm.fileName()
        if fn is not None:
            basename = os.path.splitext(fn)[0]
            prEnable = prEnable or \
                os.path.isfile("{0}.profile".format(basename))
            coEnable = (coEnable or
                        os.path.isfile("{0}.coverage".format(basename))) and \
                (itm.isPython3File() or itm.isPython2File())
        self.profileMenuAction.setEnabled(prEnable)
        self.coverageMenuAction.setEnabled(coEnable)
        self.showMenu.emit("Show", self.menuShow)
def _openItem(self):
"""
Protected slot to handle the open popup menu entry.
"""
itmList = self.getSelectedItems(
[BrowserFileItem, BrowserClassItem, BrowserMethodItem,
BrowserClassAttributeItem])
for itm in itmList:
if isinstance(itm, BrowserFileItem):
if itm.isPython2File():
self.sourceFile[str].emit(itm.fileName())
elif itm.isPython3File():
self.sourceFile[str].emit(itm.fileName())
elif itm.isRubyFile():
self.sourceFile[str, int, str].emit(
itm.fileName(), -1, "Ruby")
elif itm.isDFile():
self.sourceFile[str, int, str].emit(
itm.fileName(), -1, "D")
else:
self.sourceFile[str].emit(itm.fileName())
elif isinstance(itm, BrowserClassItem):
self.sourceFile[str, int].emit(
itm.fileName(), itm.classObject().lineno)
elif isinstance(itm, BrowserMethodItem):
self.sourceFile[str, int].emit(
itm.fileName(), itm.functionObject().lineno)
elif isinstance(itm, BrowserClassAttributeItem):
self.sourceFile[str, int].emit(
itm.fileName(), itm.attributeObject().lineno)
def __addNewPackage(self):
"""
Private method to add a new package to the project.
"""
itm = self.model().item(self.currentIndex())
if isinstance(itm, ProjectBrowserFileItem) or \
isinstance(itm, BrowserClassItem) or \
isinstance(itm, BrowserMethodItem):
dn = os.path.dirname(itm.fileName())
elif isinstance(itm, ProjectBrowserSimpleDirectoryItem) or \
isinstance(itm, ProjectBrowserDirectoryItem):
dn = itm.dirName()
else:
dn = ""
dn = self.project.getRelativePath(dn)
if dn.startswith(os.sep):
dn = dn[1:]
from .NewPythonPackageDialog import NewPythonPackageDialog
dlg = NewPythonPackageDialog(dn, self)
if dlg.exec_() == QDialog.Accepted:
packageName = dlg.getData()
nameParts = packageName.split(".")
packagePath = self.project.ppath
packageFile = ""
for name in nameParts:
packagePath = os.path.join(packagePath, name)
if not os.path.exists(packagePath):
try:
os.mkdir(packagePath)
except OSError as err:
E5MessageBox.critical(
self,
self.tr("Add new Python package"),
self.tr(
"""<p>The package directory <b>{0}</b> could"""
""" not be created. Aborting...</p>"""
"""<p>Reason: {1}</p>""")
.format(packagePath, str(err)))
return
packageFile = os.path.join(packagePath, "__init__.py")
if not os.path.exists(packageFile):
try:
f = open(packageFile, "w", encoding="utf-8")
f.close()
except IOError as err:
E5MessageBox.critical(
self,
self.tr("Add new Python package"),
self.tr(
"""<p>The package file <b>{0}</b> could"""
""" not be created. Aborting...</p>"""
"""<p>Reason: {1}</p>""")
.format(packageFile, str(err)))
return
self.project.appendFile(packageFile)
if packageFile:
self.sourceFile[str].emit(packageFile)
def __addSourceFiles(self):
"""
Private method to add a source file to the project.
"""
itm = self.model().item(self.currentIndex())
if isinstance(itm, ProjectBrowserFileItem) or \
isinstance(itm, BrowserClassItem) or \
isinstance(itm, BrowserMethodItem):
dn = os.path.dirname(itm.fileName())
elif isinstance(itm, ProjectBrowserSimpleDirectoryItem) or \
isinstance(itm, ProjectBrowserDirectoryItem):
dn = itm.dirName()
else:
dn = None
self.project.addFiles('source', dn)
def __addSourceDirectory(self):
"""
Private method to add source files of a directory to the project.
"""
itm = self.model().item(self.currentIndex())
if isinstance(itm, ProjectBrowserFileItem) or \
isinstance(itm, BrowserClassItem) or \
isinstance(itm, BrowserMethodItem):
dn = os.path.dirname(itm.fileName())
elif isinstance(itm, ProjectBrowserSimpleDirectoryItem) or \
isinstance(itm, ProjectBrowserDirectoryItem):
dn = itm.dirName()
else:
dn = None
self.project.addDirectory('source', dn)
def __deleteFile(self):
"""
Private method to delete files from the project.
"""
itmList = self.getSelectedItems()
files = []
fullNames = []
for itm in itmList:
fn2 = itm.fileName()
fullNames.append(fn2)
fn = self.project.getRelativePath(fn2)
files.append(fn)
from UI.DeleteFilesConfirmationDialog import \
DeleteFilesConfirmationDialog
dlg = DeleteFilesConfirmationDialog(
self.parent(),
self.tr("Delete files"),
self.tr(
"Do you really want to delete these files from the project?"),
files)
if dlg.exec_() == QDialog.Accepted:
for fn2, fn in zip(fullNames, files):
self.closeSourceWindow.emit(fn2)
self.project.deleteFile(fn)
###########################################################################
## Methods for the Checks submenu
###########################################################################
    def __showContextMenuCheck(self):
        """
        Private slot called before the checks menu is shown.
        """
        # Let interested parties (e.g. plug-ins) adjust the menu entries.
        self.showMenu.emit("Checks", self.checksMenu)
###########################################################################
## Methods for the Show submenu
###########################################################################
def __showCodeMetrics(self):
"""
Private method to handle the code metrics context menu action.
"""
itm = self.model().item(self.currentIndex())
fn = itm.fileName()
from DataViews.CodeMetricsDialog import CodeMetricsDialog
self.codemetrics = CodeMetricsDialog()
self.codemetrics.show()
self.codemetrics.start(fn)
def __showCodeCoverage(self):
"""
Private method to handle the code coverage context menu action.
"""
itm = self.model().item(self.currentIndex())
fn = itm.fileName()
pfn = self.project.getMainScript(True)
files = []
if pfn is not None:
tpfn = Utilities.getTestFileName(pfn)
basename = os.path.splitext(pfn)[0]
tbasename = os.path.splitext(tpfn)[0]
f = "{0}.coverage".format(basename)
tf = "{0}.coverage".format(tbasename)
if os.path.isfile(f):
files.append(f)
if os.path.isfile(tf):
files.append(tf)
if fn is not None:
tfn = Utilities.getTestFileName(fn)
basename = os.path.splitext(fn)[0]
tbasename = os.path.splitext(tfn)[0]
f = "{0}.coverage".format(basename)
tf = "{0}.coverage".format(tbasename)
if os.path.isfile(f) and f not in files:
files.append(f)
if os.path.isfile(tf) and tf not in files:
files.append(tf)
if files:
if len(files) > 1:
pfn, ok = QInputDialog.getItem(
None,
self.tr("Code Coverage"),
self.tr("Please select a coverage file"),
files,
0, False)
if not ok:
return
else:
pfn = files[0]
else:
return
from DataViews.PyCoverageDialog import PyCoverageDialog
self.codecoverage = PyCoverageDialog()
self.codecoverage.show()
self.codecoverage.start(pfn, fn)
def __showProfileData(self):
"""
Private method to handle the show profile data context menu action.
"""
itm = self.model().item(self.currentIndex())
fn = itm.fileName()
pfn = self.project.getMainScript(True)
files = []
if pfn is not None:
tpfn = Utilities.getTestFileName(pfn)
basename = os.path.splitext(pfn)[0]
tbasename = os.path.splitext(tpfn)[0]
f = "{0}.profile".format(basename)
tf = "{0}.profile".format(tbasename)
if os.path.isfile(f):
files.append(f)
if os.path.isfile(tf):
files.append(tf)
if fn is not None:
tfn = Utilities.getTestFileName(fn)
basename = os.path.splitext(fn)[0]
tbasename = os.path.splitext(tfn)[0]
f = "{0}.profile".format(basename)
tf = "{0}.profile".format(tbasename)
if os.path.isfile(f) and f not in files:
files.append(f)
if os.path.isfile(tf) and tf not in files:
files.append(tf)
if files:
if len(files) > 1:
pfn, ok = QInputDialog.getItem(
None,
self.tr("Profile Data"),
self.tr("Please select a profile file"),
files,
0, False)
if not ok:
return
else:
pfn = files[0]
else:
return
from DataViews.PyProfileDialog import PyProfileDialog
self.profiledata = PyProfileDialog()
self.profiledata.show()
self.profiledata.start(pfn, fn)
###########################################################################
## Methods for the Graphics submenu
###########################################################################
    def __showContextMenuGraphics(self):
        """
        Private slot called before the graphics menu is shown.
        """
        # Let interested parties (e.g. plug-ins) adjust the menu entries.
        self.showMenu.emit("Graphics", self.graphicsMenu)
def __showClassDiagram(self):
"""
Private method to handle the class diagram context menu action.
"""
itm = self.model().item(self.currentIndex())
try:
fn = itm.fileName()
except AttributeError:
fn = itm.dirName()
res = E5MessageBox.yesNo(
self,
self.tr("Class Diagram"),
self.tr("""Include class attributes?"""),
yesDefault=True)
from Graphics.UMLDialog import UMLDialog
self.classDiagram = UMLDialog(UMLDialog.ClassDiagram, self.project, fn,
self, noAttrs=not res)
self.classDiagram.show()
def __showImportsDiagram(self):
"""
Private method to handle the imports diagram context menu action.
"""
itm = self.model().item(self.currentIndex())
try:
fn = itm.fileName()
except AttributeError:
fn = itm.dirName()
package = os.path.isdir(fn) and fn or os.path.dirname(fn)
res = E5MessageBox.yesNo(
self,
self.tr("Imports Diagram"),
self.tr("""Include imports from external modules?"""))
from Graphics.UMLDialog import UMLDialog
self.importsDiagram = UMLDialog(
UMLDialog.ImportsDiagram, self.project, package,
self, showExternalImports=res)
self.importsDiagram.show()
def __showPackageDiagram(self):
"""
Private method to handle the package diagram context menu action.
"""
itm = self.model().item(self.currentIndex())
try:
fn = itm.fileName()
except AttributeError:
fn = itm.dirName()
package = os.path.isdir(fn) and fn or os.path.dirname(fn)
res = E5MessageBox.yesNo(
self,
self.tr("Package Diagram"),
self.tr("""Include class attributes?"""),
yesDefault=True)
from Graphics.UMLDialog import UMLDialog
self.packageDiagram = UMLDialog(
UMLDialog.PackageDiagram, self.project, package,
self, noAttrs=not res)
self.packageDiagram.show()
def __showApplicationDiagram(self):
"""
Private method to handle the application diagram context menu action.
"""
res = E5MessageBox.yesNo(
self,
self.tr("Application Diagram"),
self.tr("""Include module names?"""),
yesDefault=True)
from Graphics.UMLDialog import UMLDialog
self.applicationDiagram = UMLDialog(
UMLDialog.ApplicationDiagram, self.project,
self, noModules=not res)
self.applicationDiagram.show()
def __loadDiagram(self):
"""
Private slot to load a diagram from file.
"""
from Graphics.UMLDialog import UMLDialog
self.loadedDiagram = None
loadedDiagram = UMLDialog(
UMLDialog.NoDiagram, self.project, parent=self)
if loadedDiagram.load():
self.loadedDiagram = loadedDiagram
self.loadedDiagram.show(fromFile=True) | unknown | codeparrot/codeparrot-clean | ||
# Layering Remix on top of React Router 6.4
Date: 2022-08-16
Status: accepted
## Context
Now that we're almost done [Remixing React Router][remixing-react-router] and will be shipping `react-router@6.4.0` shortly, it's time for us to start thinking about how we can layer Remix on top of the latest React Router. This will allow us to delete a _bunch_ of code from Remix for handling the Data APIs. This document aims to discuss the changes we foresee making and some potential iterative implementation approaches to avoid a big-bang merge.
From an iterative-release viewpoint, there's 4 separate "functional" aspects to consider here:
1. Server data loading
2. Server react component rendering
3. Client hydration
4. Client data loading
(1) can be implemented and deployed in isolation. (2) and (3) need to happen together since the contexts/components need to match. And (4) comes for free since the loaders/actions will be included on the routes we create in (3).
## Decision
The high level approach is as follows
1. SSR data loading
1. Update `handleResourceRequest` to use `createStaticHandler` behind a flag
1. Aim to get unit and integration tests asserting both flows if possible
2. Update `handleDataRequest` in the same manner
3. Update `handleDocumentRequest` in the same manner
1. Confirm unit and integration tests are all passing
4. Write new `RemixContext` data into `EntryContext` and remove old flow
2. Deploy `@remix-run/server-runtime` changes once comfortable
3. Handle `@remix-run/react` in a short-lived feature branch
1. server render without hydration (replace `EntryContext` with `RemixContext`)
2. client-side hydration
3. add backwards compatibility changes
4. Deploy `@remix-run/react` changes once comfortable
## Details
There are 2 main areas where we have to make changes:
1. Handling server-side requests in `@remix-run/server-runtime` (mainly in the `server.ts` file)
2. Handling client-side hydration + routing in `@remix-run/react` (mainly in the `components.ts`, `server.ts` and `browser.ts` files)
Since these are separated by the network chasm, we can actually implement these independent of one another for smaller merges, iterative development, and easier rollbacks should something go wrong.
### Do the server data-fetching migration first
There's two primary reasons it makes sense to handle the server-side data-fetching logic first:
1. It's a smaller surface area change since there's effectively only 1 new API to work with in `createStaticHandler`
2. It's easier to implement in a feature-flagged manner since we're on the server and bundle size is not a concern
We can do this on the server using the [strangler pattern][strangler-pattern] so that we can confirm the new approach is functionally equivalent to the old approach. Depending on how far we take it, we can assert this through unit tests, integration tests, as well as run-time feature flags if desired.
For example, pseudo code for this might look like the following, where we enable via a flag during local development and potentially unit/integration tests. We can throw exceptions anytime the new static handler results in different SSR data. Once we're confident, we delete the current code and remove the flag conditional.
```tsx
// Runtime-agnostic flag to enable behavior, will always be committed as
// `false` initially, and toggled to true during local dev
const ENABLE_REMIX_ROUTER = false;
async function handleDocumentRequest({ request }) {
const appState = {
trackBoundaries: true,
trackCatchBoundaries: true,
catchBoundaryRouteId: null,
renderBoundaryRouteId: null,
loaderBoundaryRouteId: null,
error: undefined,
catch: undefined,
};
// ... do all the current stuff
const serverHandoff = {
actionData,
appState: appState,
matches: entryMatches,
routeData,
};
const entryContext = {
...serverHandoff,
manifest: build.assets,
routeModules,
serverHandoffString: createServerHandoffString(serverHandoff),
};
// If the flag is enabled, process the request again with the new static
// handler and confirm we get the same data on the other side
if (ENABLE_REMIX_ROUTER) {
const staticHandler = unstable_createStaticHandler(routes);
const context = await staticHandler.query(request);
// Note: == only used for brevity ;)
assert(entryContext.matches === context.matches);
assert(entryContext.routeData === context.loaderData);
assert(entryContext.actionData === context.actionData);
if (catchBoundaryRouteId) {
assert(appState.catch === context.errors[catchBoundaryRouteId]);
}
if (loaderBoundaryRouteId) {
assert(appState.error === context.errors[loaderBoundaryRouteId]);
}
}
}
```
We can also split this into iterative approaches on the server too, and do `handleResourceRequest`, `handleDataRequest`, and `handleDocumentRequest` independently (either just implementation or implementation + release). Doing them in that order would also likely go from least to most complex.
#### Notes
- This can't use `process.env` since the code we're changing is runtime agnostic. We'll go with a local hardcoded variable in `server.ts` for now to avoid runtime-specific ENV variable concerns.
- Unit and integration tests may need to have their own copies of this variable as well to remain passing. For example, we have unit tests that assert that a loader is called once for a given route - but when this flag is enabled, that loader will be called twice so we can set up a conditional assertion based on the flag.
- The `remixContext` sent through `entry.server.ts` will be altered in shape. We consider this an opaque API so not a breaking change.
#### Implementation approach
1. Use `createHierarchicalRoutes` to build RR `DataRouteObject` instances
1. See `createStaticHandlerDataRoutes` in the `brophdawg11/rrr` branch
2. Create a static handler per-request using `unstable_createStaticHandler`
3. `handleResourceRequest`
1. This one should be _really_ simple since it should just send back the raw `Response` from `queryRoute`
4. `handleDataRequest`
1. This is only slightly more complicated than resource routes, as it needs to handle serializing errors and processing redirects into 204 Responses for the client
5. `handleDocumentRequest`
1. This is the big one. It simplifies down pretty far, but has the biggest surface area where some things don't quite match up
2. We need to map query "errors" to Remix's definition of error/catch and bubble them upwards accordingly.
1. For example, in a URL like `/a/b/c`, if C exports a `CatchBoundary` but not an `ErrorBoundary`, then it'll be represented in the `DataRouteObject` with `hasErrorBoundary=true` since the `@remix-run/router` doesn't distinguish
2. If C's loader throws an error, the router will "catch" that at C's `errorElement`, but we then need to re-bubble that upwards to the nearest `ErrorBoundary`
3. See `differentiateCatchVersusErrorBoundaries` in the `brophdawg11/rrr` branch
3. New `RemixContext`
1. `manifest`, `routeModules`, `staticHandlerContext`, `serverHandoffString`
2. Create this alongside `EntryContext` assert the values match
4. If we catch an error during render, we'll have tracked the boundaries on `staticHandlerContext` and can use `getStaticContextFromError` to get a new context for the second pass (note the need to re-call `differentiateCatchVersusErrorBoundaries`)
### Do the UI rendering layer second
The rendering layer in `@remix-run/react` is a bit more of a whole-sale replacement and comes with backwards-compatibility concerns, so it makes sense to do second. However, we can still do this iteratively, we just can't deploy iteratively since the SSR and client HTML need to stay synced (and associated hooks need to read from the same contexts). First, we can focus on getting the SSR document rendered properly without `<Scripts/>`. Then second we'll add in client-side hydration.
The main changes here include:
- Removal of `RemixEntry` and it's context in favor of a new `RemixContext.Provider` wrapping `DataStaticRouter`/`DataBrowserRouter`
- All this context needs is the remix-specific aspects (`manifest`, `routeModules`)
- Everything else from the old RemixEntryContext is now in the router contexts (and `staticHandlerContext` during SSR)
- Some aspects of `@remix-run/react`'s `components.tsx` file are now fully redundant and can be removed completely in favor of re-exporting from `react-router-dom`:
- `Form`, `useFormAction`, `useSubmit`, `useMatches`, `useFetchers`
- Other aspects are largely redundant but need some Remix-specific things, so these will require some adjustments:
- `Link`, `useLoaderData`, `useActionData`, `useTransition`, `useFetcher`
#### Backwards Compatibility Notes
- `useLoaderData`/`useActionData` need to retain their generics, and are not currently generic in `react-router`
- `useTransition` needs `submission` and `type` added
- `<Form method="get">` no longer goes into a "submitting" state in `react-router-dom`
- `useFetcher` needs `type` added
- `unstable_shouldReload` replaced by `shouldRevalidate`
- Can we use it if it's there but prefer `shouldRevalidate`?
- Distinction between error and catch boundaries
- `Request.signal` - continue to send separate `signal` param
[remixing-react-router]: https://remix.run/blog/remixing-react-router
[strangler-pattern]: https://martinfowler.com/bliki/StranglerFigApplication.html | unknown | github | https://github.com/remix-run/react-router | decisions/0007-remix-on-react-router-6-4-0.md |
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
import logging
from .base import BaseEndpoint
from .. import errors
log = logging.getLogger(__name__)
class ResourceEndpoint(BaseEndpoint):
    """An endpoint responsible for protecting resources.
    Typical use is to instantiate with a request validator and invoke the
    ``validate_protected_resource_request`` in a decorator around a view
    function. If the request is valid, invoke and return the response of the
    view. If invalid create and return an error response directly from the
    decorator.
    See :doc:`/oauth1/validator` for details on which validator methods to implement
    for this endpoint.
    An example decorator::
        from functools import wraps
        from your_validator import your_validator
        from oauthlib.oauth1 import ResourceEndpoint
        endpoint = ResourceEndpoint(your_validator)
        def require_oauth(realms=None):
            def decorator(f):
                @wraps(f)
                def wrapper(request, *args, **kwargs):
                    v, r = endpoint.validate_protected_resource_request(
                        request.url,
                        http_method=request.method,
                        body=request.data,
                        headers=request.headers,
                        realms=realms or [])
                    if v:
                        return f(*args, **kwargs)
                    else:
                        return abort(403)
    """
    def validate_protected_resource_request(self, uri, http_method='GET',
                                            body=None, headers=None, realms=None):
        """Validate an OAuth 1 request to a protected resource.
        :param uri: The full URI of the token request.
        :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
        :param body: The request body as a string.
        :param headers: The request headers as a dict.
        :param realms: A list of realms the resource is protected under.
                       This will be supplied to the ``validate_realms``
                       method of the request validator.
        :returns: A tuple of 2 elements.
                  1. True if valid, False otherwise.
                  2. An oauthlib.common.Request object.
        """
        try:
            request = self._create_request(uri, http_method, body, headers)
        except errors.OAuth1Error:
            # Malformed request: no Request object can be built for callers
            # to inspect.
            return False, None
        try:
            self._check_transport_security(request)
            self._check_mandatory_parameters(request)
        except errors.OAuth1Error:
            return False, request
        # An access token (resource owner key) is mandatory when accessing a
        # protected resource.
        if not request.resource_owner_key:
            return False, request
        if not self.request_validator.check_access_token(
                request.resource_owner_key):
            return False, request
        if not self.request_validator.validate_timestamp_and_nonce(
                request.client_key, request.timestamp, request.nonce, request,
                access_token=request.resource_owner_key):
            return False, request
        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid client credentials.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy client is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable client enumeration
        valid_client = self.request_validator.validate_client_key(
            request.client_key, request)
        if not valid_client:
            request.client_key = self.request_validator.dummy_client
        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid or expired token.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy token is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable resource owner enumeration
        valid_resource_owner = self.request_validator.validate_access_token(
            request.client_key, request.resource_owner_key, request)
        if not valid_resource_owner:
            request.resource_owner_key = self.request_validator.dummy_access_token
        # Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
        # However they could be seen as a scope or realm to which the
        # client has access and as such every client should be checked
        # to ensure it is authorized access to that scope or realm.
        # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
        #
        # Note that early exit would enable client realm access enumeration.
        #
        # The require_realm indicates this is the first step in the OAuth
        # workflow where a client requests access to a specific realm.
        # This first step (obtaining request token) need not require a realm
        # and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
        #
        # Clients obtaining an access token will not supply a realm and it will
        # not be checked. Instead the previously requested realm should be
        # transferred from the request token to the access token.
        #
        # Access to protected resources will always validate the realm but note
        # that the realm is now tied to the access token and not provided by
        # the client.
        valid_realm = self.request_validator.validate_realms(request.client_key,
            request.resource_owner_key, request, uri=request.uri,
            realms=realms)
        valid_signature = self._check_signature(request)
        # We delay checking validity until the very end, using dummy values for
        # calculations and fetching secrets/keys to ensure the flow of every
        # request remains almost identical regardless of whether valid values
        # have been supplied. This ensures near constant time execution and
        # prevents malicious users from guessing sensitive information
        v = all((valid_client, valid_resource_owner, valid_realm,
                 valid_signature))
        if not v:
            log.info("[Failure] request verification failed.")
            log.info("Valid client: %s", valid_client)
            log.info("Valid token: %s", valid_resource_owner)
            log.info("Valid realm: %s", valid_realm)
            log.info("Valid signature: %s", valid_signature)
        return v, request
/* Copyright (c) 2014, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef RPL_MSR_H
#define RPL_MSR_H
#include "my_config.h"
#include <stddef.h>
#include <sys/types.h>
#include <cstdint> // std::ptrdiff_t
#include <iterator> // std::forward_iterator
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "my_dbug.h"
#include "my_psi_config.h"
#include "sql/mysqld.h" // key_rwlock_channel_map_lock
#include "sql/rpl_channel_service_interface.h" // enum_channel_type
#include "sql/rpl_filter.h"
#include "sql/rpl_gtid.h"
#include "sql/rpl_io_monitor.h"
#include "sql/rpl_mi.h"
// Forward declaration; the full definition lives in sql/rpl_mi.h.
class Master_info;
/**
  Maps a channel name to its Master_info.
*/
typedef std::map<std::string, Master_info *> mi_map;
// Maps a channel type to a map of channels of that type.
typedef std::map<int, mi_map> replication_channel_map;
// Maps a channel name to its replication filter.
typedef std::map<std::string, Rpl_filter *> filter_map;
// Deduce the iterator type for a range/collection/container as the return
// type for begin(). This is usually either T::iterator or T::const_iterator,
// depending on the const-ness of T.
template <class T>
using Iterator_for = decltype(std::begin(std::declval<T>()));
/// Iterator that provides the elements of a nested map as a linear sequence.
///
/// This satisfies std::forward_iterator.
///
/// @tparam Outer_iterator_t Forward iterator over the outer map.
///
/// @tparam outer_is_map If true, the outer map iterator yields pairs, and the
/// second component of each pair contains the inner map. If false, the outer
/// map iterator yields inner maps directly.
///
/// @tparam inner_is_map If true, the inner map iterator yields pairs, and the
/// second component of each pair contains the value. If false, the inner
/// map iterator yields values directly.
///
/// @todo move this to a library
///
/// @todo support bidirectional/random_access/contiguous iterators when both
/// maps support it.
///
/// @todo Once we have ranges, remove the built-in map support and let users use
/// Denested_map_view<Map | std::ranges::value_view> |
/// std::ranges::value_view instead
template <std::forward_iterator Outer_iterator_t, bool outer_is_map,
          bool inner_is_map>
class Denested_map_iterator {
  using Self_t =
      Denested_map_iterator<Outer_iterator_t, outer_is_map, inner_is_map>;
  /// @return Reference to the container that the given outer iterator points
  /// to, taking the 'second' element of the pair in case the outer iterator is
  /// a map.
  static auto &mapped_value(const Outer_iterator_t &outer_iterator) {
    if constexpr (outer_is_map)
      return outer_iterator->second;
    else
      return *outer_iterator;
  }
  using Inner_map_t = decltype(mapped_value(Outer_iterator_t()));
  using Inner_iterator_t = Iterator_for<Inner_map_t>;
  /// @return Reference to the value that the given inner iterator points to,
  /// taking the 'second' element of the pair in case the inner iterator is a
  /// map.
  static auto &mapped_value(const Inner_iterator_t &inner_iterator) {
    if constexpr (inner_is_map)
      return inner_iterator->second;
    else
      return *inner_iterator;
  }
 public:
  using value_type = decltype(mapped_value(Inner_iterator_t()));
  using difference_type = std::ptrdiff_t;
  /// Default constructor.
  ///
  /// The result is an object that is useless in itself since all member
  /// functions are undefined. It can be assigned or moved to, and it is
  /// required for iterators to be default-constructible.
  Denested_map_iterator() = default;
  /// Constructor.
  ///
  /// @param outer_begin Iterator to the first element of the nested map.
  ///
  /// @param outer_end Iterator to the one-past-the-last element of the nested
  /// map.
  ///
  /// @param at_end If true, position at the end; if false, position at the
  /// beginning.
  explicit constexpr Denested_map_iterator(const Outer_iterator_t &outer_begin,
                                           const Outer_iterator_t &outer_end,
                                           bool at_end)
      : m_outer_begin(outer_begin),
        m_outer_end(outer_end),
        m_outer_it(at_end ? outer_end : outer_begin),
        // Leave m_inner_it value-initialized at the end position; it is never
        // dereferenced there (see operator== and skip_inner_end_positions).
        m_inner_it(m_outer_it == outer_end
                       ? Inner_iterator_t()
                       : std::begin(mapped_value(m_outer_it))) {
    skip_inner_end_positions();
  }
  /// Pre-increment
  constexpr Self_t &operator++() {
    ++m_inner_it;
    skip_inner_end_positions();
    return *this;
  }
  /// Post-increment
  constexpr Self_t operator++(int) {
    auto tmp = *this;
    ++*this;
    return tmp;
  }
  /// Dereference
  constexpr decltype(auto) operator*() const {
    return mapped_value(m_inner_it);
  }
  /// Comparison. (operator!= is synthesized from this in C++20.)
  constexpr bool operator==(const Self_t &other) const {
    // Different outer iterators -> different
    if (m_outer_it != other.m_outer_it) return false;
    // Both outer iterators positioned at end -> equal (don't compare inner
    // iterators)
    if (m_outer_it == m_outer_end) return true;
    // Outer iterators point to same inner array -> inner iterators determine
    // equality.
    return m_inner_it == other.m_inner_it;
  }
 private:
  /// Maintain the invariant that *either* m_outer_it points to the end, *or*
  /// m_inner_it *doesn't* point to the end.
  ///
  /// This may move the iterators forward until the condition is met.
  constexpr void skip_inner_end_positions() {
    if (m_outer_it != m_outer_end) {
      // Skip over empty inner containers as well as exhausted ones.
      while (m_inner_it == std::end(mapped_value(m_outer_it))) {
        ++m_outer_it;
        if (m_outer_it == m_outer_end) break;
        m_inner_it = std::begin(mapped_value(m_outer_it));
      }
    }
  }
  /// Beginning of outer map.
  /// NOTE(review): never read after construction — confirm before removing.
  Outer_iterator_t m_outer_begin{};
  /// End of outer map.
  Outer_iterator_t m_outer_end{};
  /// Iterator to the outer map.
  Outer_iterator_t m_outer_it{};
  /// Iterator to the inner map, or undefined if the outer map points to the
  /// end.
  Inner_iterator_t m_inner_it{};
};
/// View over a nested map structure, which provides iterators over the elements
/// of the second-level map.
///
/// For example, a view over std::map<int, std::map<std::string, T>> provides
/// iterators over the T objects.
///
/// @tparam Nested_map_t The nested map type.
///
/// @tparam outer_is_map If true, the outer map is assumed to be a map, i.e.,
/// its iterators yield pairs that hold inner maps in their second components.
/// Otherwise, it is assumed that iterators of the outer map provide inner maps
/// directly.
///
/// @tparam inner_is_map If true, the inner maps are assumed to be maps, i.e.,
/// their iterators yield pairs and the view's iterator provides the second
/// components. Otherwise, the view's iterator provides the values of the
/// iterators of the inner maps directly.
template <class Nested_map_t, bool outer_is_map, bool inner_is_map>
class Denested_map_view {
  using Iterator_t = Denested_map_iterator<Iterator_for<Nested_map_t>,
                                           outer_is_map, inner_is_map>;
 public:
  /// Construct a view over @p map. The view stores a pointer, so the map must
  /// outlive the view. NOTE(review): constructor is non-explicit, allowing
  /// implicit conversion from the map type — confirm that is intended.
  Denested_map_view(Nested_map_t &map) : m_map(&map) {}
  /// @return iterator to the first de-nested element.
  auto begin() { return Iterator_t(m_map->begin(), m_map->end(), false); }
  /// @return iterator one past the last de-nested element.
  auto end() { return Iterator_t(m_map->begin(), m_map->end(), true); }
  auto begin() const { return Iterator_t(m_map->begin(), m_map->end(), false); }
  auto end() const { return Iterator_t(m_map->begin(), m_map->end(), true); }
 private:
  /// The viewed map (not owned).
  Nested_map_t *m_map;
};
/**
  Class to store all the Master_info objects of a slave
  to access them in the replication code base or performance
  schema replication tables.
  In a Multisourced replication setup, a slave connects
  to several masters (also called as sources). This class
  stores the Master_infos where each Master_info belongs
  to a slave.
  The important objects for a slave are the following:
  i) Master_info and Relay_log_info (replica_parallel_workers == 0)
  ii) Master_info, Relay_log_info and Slave_worker(replica_parallel_workers >0 )
  Master_info is always associated with a Relay_log_info per channel.
  So, it is enough to store Master_infos and call the corresponding
  Relay_log_info by mi->rli;
  This class is not yet thread safe. Any part of replication code that
  calls this class member function should always lock the channel_map.
  Only a single global object for a server instance should be created.
  The two important data structures in this class are
  i) C++ std map to store the Master_info pointers with channel name as a key.
  These are the base channel maps.
  @todo Convert to boost after it's introduction.
  ii) C++ std map to store the channel maps with a channel type as its key.
  This map stores slave channel maps, group replication channels or others
  iii) An array of Master_info pointers to access from performance schema
  tables. This array is specifically implemented in a way to make
  a) pfs indices simple i.e a simple integer counter
  b) To avoid recalibration of data structure if master info is deleted.
  * Consider the following high level implementation of a pfs table
  to make a row.
  @code
  highlevel_pfs_function()
  {
  while(replication_table_xxxx.rnd_next())
  {
  do stuff;
  }
  }
  @endcode
  However, we lock channel_map lock for every rnd_next(); There is a gap
  where an addition/deletion of a channel would rearrange the map
  making the integer indices of the pfs table point to a wrong value.
  Either missing a row or duplicating a row.
  We solve this problem, by using an array exclusively to use in
  replication pfs tables, by marking a deleted master_info as 0
  (i.e NULL). A new master info is added to this array at the
  first NULL always.
*/
class Multisource_info {
 private:
  /* Maximum number of channels per slave */
  static const unsigned int MAX_CHANNELS = 256;
  /* A Map that maps, a channel name to a Master_info grouped by channel type */
  replication_channel_map rep_channel_map;
  /* Number of master_infos at the moment*/
  uint current_mi_count;
  /**
    Default_channel for this instance, currently is predefined
    and cannot be modified.
  */
  static const char *default_channel;
  Master_info *default_channel_mi;
  static const char *group_replication_channel_names[];
  /**
    This lock was designed to protect the channel_map from adding or removing
    master_info objects from the map (adding or removing replication channels).
    In fact it also acts like the LOCK_active_mi of MySQL 5.6, preventing two
    replication administrative commands to run in parallel.
  */
  Checkable_rwlock *m_channel_map_lock;
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
  /* Array for replication performance schema related tables */
  Master_info *rpl_pfs_mi[MAX_CHANNELS];
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
  /*
    An empty mi_map to allow Multisource_info::end() to return a
    valid constant value.
  */
  mi_map empty_mi_map;
 public:
  /* Constructor for this class.*/
  Multisource_info() {
    /*
      This class should be a singleton.
      The assert below is to prevent it to be instantiated more than once.
    */
#ifndef NDEBUG
    static int instance_count = 0;
    instance_count++;
    assert(instance_count == 1);
#endif
    current_mi_count = 0;
    default_channel_mi = nullptr;
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
    init_rpl_pfs_mi();
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
    m_channel_map_lock = new Checkable_rwlock(
#ifdef HAVE_PSI_INTERFACE
        key_rwlock_channel_map_lock
#endif
    );
  }
  /* Destructor for this class.*/
  ~Multisource_info() { delete m_channel_map_lock; }
  /**
    Adds the Master_info object to both replication_channel_map and rpl_pfs_mi
    @param[in]  channel_name      channel name
    @param[in]  mi                pointer to master info corresponding
                                  to this channel
    @retval      false            successfully added
    @retval      true             couldn't add channel
  */
  bool add_mi(const char *channel_name, Master_info *mi);
  /**
    Find the master_info object corresponding to a channel explicitly
    from replication channel_map;
    Return if it exists, otherwise return 0
    @param[in]  channel_name  channel name for the master info object.
    @returns                  pointer to the master info object if exists
                              in the map. Otherwise, NULL;
  */
  Master_info *get_mi(const char *channel_name);
  /**
    Return the master_info object corresponding to the default channel.
    @retval                   pointer to the master info object if exists.
                              Otherwise, NULL;
  */
  Master_info *get_default_channel_mi() {
    m_channel_map_lock->assert_some_lock();
    return default_channel_mi;
  }
  /**
    Remove the entry corresponding to the channel, from the
    replication_channel_map and sets index in the multisource_mi to 0;
    And also delete the {mi, rli} pair corresponding to this channel
    @note this requires the caller to hold the mi->channel_wrlock.
    If the method succeeds the master info object is deleted and the lock
    is released. If the an error occurs and the method return true, the {mi}
    object won't be deleted and the caller should release the channel_wrlock.
    @param[in]    channel_name     Name of the channel for a Master_info
                                   object which must exist.
    @return true if an error occurred, false otherwise
  */
  bool delete_mi(const char *channel_name);
  /**
    Get the default channel for this multisourced_slave;
  */
  inline const char *get_default_channel() { return default_channel; }
  /**
    Get the number of instances of Master_info in the map.
    @param all If it should count all channels.
               If false, only slave channels are counted.
    @return The number of channels or 0 if empty.
  */
  inline size_t get_num_instances(bool all = false) {
    DBUG_TRACE;
    m_channel_map_lock->assert_some_lock();
    replication_channel_map::iterator map_it;
    if (all) {
      size_t count = 0;
      for (map_it = rep_channel_map.begin(); map_it != rep_channel_map.end();
           map_it++) {
        count += map_it->second.size();
      }
      return count;
    } else  // Return only the slave channels
    {
      map_it = rep_channel_map.find(SLAVE_REPLICATION_CHANNEL);
      if (map_it == rep_channel_map.end())
        return 0;
      else
        return map_it->second.size();
    }
  }
  /**
    Get the number of configured asynchronous replication channels,
    ignoring the Group Replication channels.
    @return The number of channels.
  */
  size_t get_number_of_configured_channels() {
    DBUG_TRACE;
    m_channel_map_lock->assert_some_lock();
    size_t count = 0;
    replication_channel_map::iterator map_it =
        rep_channel_map.find(SLAVE_REPLICATION_CHANNEL);
    /*
      Guard against dereferencing rep_channel_map.end(): the slave channel
      group may not exist (get_num_instances() already handles this case).
    */
    if (map_it == rep_channel_map.end()) return count;
    for (mi_map::iterator it = map_it->second.begin();
         it != map_it->second.end(); it++) {
      Master_info *mi = it->second;
      if (Master_info::is_configured(mi)) {
        count++;
      }
    }
    return count;
  }
  /**
    Get the number of running channels which have asynchronous replication
    failover feature, i.e. CHANGE REPLICATION SOURCE TO option
    SOURCE_CONNECTION_AUTO_FAILOVER, enabled.
    @return The number of channels.
  */
  size_t get_number_of_connection_auto_failover_channels_running() {
    DBUG_TRACE;
    m_channel_map_lock->assert_some_lock();
    size_t count = 0;
    replication_channel_map::iterator map_it =
        rep_channel_map.find(SLAVE_REPLICATION_CHANNEL);
    /*
      Guard against dereferencing rep_channel_map.end(): the slave channel
      group may not exist (get_num_instances() already handles this case).
    */
    if (map_it != rep_channel_map.end()) {
      for (mi_map::iterator it = map_it->second.begin();
           it != map_it->second.end(); it++) {
        Master_info *mi = it->second;
        if (Master_info::is_configured(mi) &&
            mi->is_source_connection_auto_failover()) {
          mysql_mutex_lock(&mi->err_lock);
          if (mi->slave_running || mi->is_error()) {
            count++;
          }
          mysql_mutex_unlock(&mi->err_lock);
        }
      }
    }
#ifndef NDEBUG
    if (Source_IO_monitor::get_instance()->is_monitoring_process_running()) {
      assert(count > 0);
    }
#endif
    return count;
  }
  /**
    Get max channels allowed for this map.
  */
  inline uint get_max_channels() { return MAX_CHANNELS; }
  /**
    Returns true if the current number of channels in this slave
    is less than the MAX_CHANNELS
  */
  inline bool is_valid_channel_count() {
    m_channel_map_lock->assert_some_lock();
    bool is_valid = current_mi_count < MAX_CHANNELS;
    DBUG_EXECUTE_IF("max_replication_channels_exceeded", is_valid = false;);
    return (is_valid);
  }
  /// @brief Checks if a channel is the group replication applier channel
  /// @param[in] channel Name of the channel to check
  /// @returns true if it is the gr applier channel
  static bool is_group_replication_applier_channel_name(const char *channel);
  /// @brief Checks if a channel is the group replication recovery channel
  /// @param[in] channel Name of the channel to check
  /// @returns true if it is the gr recovery channel
  static bool is_group_replication_recovery_channel_name(const char *channel);
  /**
    Returns if a channel name is one of the reserved group replication names
    @param channel    the channel name to test
    @retval true      the name is a reserved name
    @retval false     non reserved name
  */
  static bool is_group_replication_channel_name(const char *channel);
  /// @brief Check if the channel has an hostname or is a GR channel
  /// @return true if the channel is configured or is a gr channel,
  ///         false otherwise
  static bool is_channel_configured(const Master_info *mi) {
    return mi && (mi->host[0] ||
                  is_group_replication_channel_name(mi->get_channel()));
  }
  /**
    Forward iterators to initiate traversing of a map.
    @todo: Not to expose iterators. But instead to return
           only Master_infos or create generators when
           c++11 is introduced.
  */
  mi_map::iterator begin(
      enum_channel_type channel_type = SLAVE_REPLICATION_CHANNEL) {
    replication_channel_map::iterator map_it;
    map_it = rep_channel_map.find(channel_type);
    if (map_it != rep_channel_map.end()) {
      return map_it->second.begin();
    }
    return end(channel_type);
  }
  mi_map::iterator end(
      enum_channel_type channel_type = SLAVE_REPLICATION_CHANNEL) {
    replication_channel_map::iterator map_it;
    map_it = rep_channel_map.find(channel_type);
    if (map_it != rep_channel_map.end()) {
      return map_it->second.end();
    }
    // No channel group of this type: return a stable, valid end iterator.
    return empty_mi_map.end();
  }
  /// @return a flat view over all Master_info objects of every channel type.
  auto all_channels_view() {
    return Denested_map_view<replication_channel_map, true, true>(
        rep_channel_map);
  }
  auto all_channels_view() const {
    return Denested_map_view<const replication_channel_map, true, true>(
        rep_channel_map);
  }
 private:
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
  /* Initialize the rpl_pfs_mi array to NULLs */
  inline void init_rpl_pfs_mi() {
    for (uint i = 0; i < MAX_CHANNELS; i++) rpl_pfs_mi[i] = nullptr;
  }
  /**
    Add a master info pointer to the rpl_pfs_mi array at the first
    NULL;
    @param[in]  mi   master info object to be added.
    @return  false if success.Else true.
  */
  bool add_mi_to_rpl_pfs_mi(Master_info *mi);
  /**
    Get the index of the master info corresponding to channel name
    from the rpl_pfs_mi array.
    @param[in]  channel_name  Channel name to get the index from
    @return index of mi for the channel_name. Else -1;
  */
  int get_index_from_rpl_pfs_mi(const char *channel_name);
 public:
  /**
    Used only by replication performance schema indices to get the master_info
    at the position 'pos' from the rpl_pfs_mi array.
    @param[in]  pos  the index in the rpl_pfs_mi array
    @retval         pointer to the master info object at pos 'pos';
  */
  Master_info *get_mi_at_pos(uint pos);
#endif /*WITH_PERFSCHEMA_STORAGE_ENGINE */
  /**
    Acquire the read lock.
  */
  inline void rdlock() { m_channel_map_lock->rdlock(); }
  /**
    Try to acquire a read lock, return 0 if the read lock is held,
    otherwise an error will be returned.
    @return 0 in case of success, or 1 otherwise.
  */
  inline int tryrdlock() { return m_channel_map_lock->tryrdlock(); }
  /**
    Acquire the write lock.
  */
  inline void wrlock() { m_channel_map_lock->wrlock(); }
  /**
    Try to acquire a write lock, return 0 if the write lock is held,
    otherwise an error will be returned.
    @return 0 in case of success, or 1 otherwise.
  */
  inline int trywrlock() { return m_channel_map_lock->trywrlock(); }
  /**
    Release the lock (whether it is a write or read lock).
  */
  inline void unlock() { m_channel_map_lock->unlock(); }
  /**
    Assert that some thread holds either the read or the write lock.
  */
  inline void assert_some_lock() const {
    m_channel_map_lock->assert_some_lock();
  }
  /**
    Assert that some thread holds the write lock.
  */
  inline void assert_some_wrlock() const {
    m_channel_map_lock->assert_some_wrlock();
  }
};
/**
  The class is a container for all the per-channel filters, both a map of
  Rpl_filter objects and a list of Rpl_pfs_filter objects.
  It maintains a filter map which maps a channel name to its replication
  filter. Which is needed, because replication channels are not created and
  channel_map is not filled in when these global and per-channel replication
  filters are evaluated with current code frame.
  In theory, after instantiating all channels from the repository and throwing
  all the warnings about the filters configured for non-existent channels, we
  can forget about its global object rpl_channel_filters and rely only on the
  global and per channel Rpl_filter objects. But to avoid holding the
  channel_map.rdlock() when querying P_S.replication_applier_filters table,
  we keep the rpl_channel_filters. So that we just need to hold the small
  rpl_channel_filters.rdlock() when querying P_S.replication_applier_filters
  table. Many operations (RESET REPLICA [FOR CHANNEL], START REPLICA, INIT
  SLAVE, END SLAVE, CHANGE REPLICATION SOURCE TO, FLUSH RELAY LOGS, START
  CHANNEL, PURGE CHANNEL, and so on) hold the channel_map.wrlock().
  There is one instance, rpl_channel_filters, created globally for Multisource
  channel filters. The rpl_channel_filters is created when the server is
  started, destroyed when the server is stopped.
*/
class Rpl_channel_filters {
 private:
  /* Store all replication filters with channel names. */
  filter_map channel_to_filter;
  /* Store all Rpl_pfs_filter objects in the channel_to_filter. */
  std::vector<Rpl_pfs_filter> rpl_pfs_filter_vec;
  /*
    This lock was designed to protect the channel_to_filter from reading,
    adding, or removing its objects from the map. It is used to prevent
    the following commands from running in parallel:
      RESET REPLICA ALL [FOR CHANNEL '<channel_name>']
      CHANGE REPLICATION SOURCE TO ... FOR CHANNEL
      SELECT FROM performance_schema.replication_applier_filters
    Please acquire a wrlock when modifying the map structure (RESET REPLICA ALL
    [FOR CHANNEL '<channel_name>'], CHANGE REPLICATION SOURCE TO ... FOR
    CHANNEL). Please acquire a rdlock when querying existing filter(s) (SELECT
    FROM performance_schema.replication_applier_filters).
    Note: To modify the object from the map, please see the protection of
    m_rpl_filter_lock in Rpl_filter.
  */
  Checkable_rwlock *m_channel_to_filter_lock;
 public:
  /**
    Create a new replication filter and add it into a filter map.
    @param channel_name A name of a channel.
    @retval Rpl_filter A pointer to a replication filter, or NULL
                       if we failed to add it into filter_map.
  */
  Rpl_filter *create_filter(const char *channel_name);
  /**
    Delete the replication filter from the filter map.
    @param rpl_filter A pointer to point to a replication filter.
  */
  void delete_filter(Rpl_filter *rpl_filter);
  /**
    Discard all replication filters if they are not attached to channels.
  */
  void discard_all_unattached_filters();
  /**
    Discard filters on group replication channels.
  */
  void discard_group_replication_filters();
  /**
    Get a replication filter of a channel.
    @param channel_name A name of a channel.
    @retval Rpl_filter A pointer to a replication filter, or NULL
                       if we failed to add a replication filter
                       into filter_map when creating it.
  */
  Rpl_filter *get_channel_filter(const char *channel_name);
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
  /**
    This member function is called every time a filter is created or deleted,
    or its filter rules are changed. Once that happens the PFS view is
    recreated.
  */
  void reset_pfs_view();
  /**
    Used only by replication performance schema indices to get the replication
    filter at the position 'pos' from the rpl_pfs_filter_vec vector.
    @param pos the index in the rpl_pfs_filter_vec vector.
    @retval Rpl_filter A pointer to a Rpl_pfs_filter, or NULL if it
                       arrived the end of the rpl_pfs_filter_vec.
  */
  Rpl_pfs_filter *get_filter_at_pos(uint pos);
  /**
    Used only by replication performance schema indices to get the count
    of replication filters from the rpl_pfs_filter_vec vector.
    @retval the count of the replication filters.
  */
  uint get_filter_count();
#endif /*WITH_PERFSCHEMA_STORAGE_ENGINE */
  /**
    Traverse the filter map, build do_table and ignore_table
    rules to hashes for every filter.
    @retval
      0    OK
    @retval
      -1   Error
  */
  bool build_do_and_ignore_table_hashes();
  /* Constructor for this class.*/
  Rpl_channel_filters() {
    m_channel_to_filter_lock = new Checkable_rwlock(
#ifdef HAVE_PSI_INTERFACE
        key_rwlock_channel_to_filter_lock
#endif
    );
  }
  /* Destructor for this class. */
  ~Rpl_channel_filters() { delete m_channel_to_filter_lock; }
  /**
    Traverse the filter map and free all filters. Delete all objects
    in the rpl_pfs_filter_vec vector and then clear the vector.
  */
  void clean_up() {
    /* Traverse the filter map and free all filters */
    for (filter_map::iterator it = channel_to_filter.begin();
         it != channel_to_filter.end(); it++) {
      if (it->second != nullptr) {
        delete it->second;
        it->second = nullptr;
      }
    }
    rpl_pfs_filter_vec.clear();
  }
  /**
    Acquire the write lock.
  */
  inline void wrlock() { m_channel_to_filter_lock->wrlock(); }
  /**
    Acquire the read lock.
  */
  inline void rdlock() { m_channel_to_filter_lock->rdlock(); }
  /**
    Release the lock (whether it is a write or read lock).
  */
  inline void unlock() { m_channel_to_filter_lock->unlock(); }
};
/* Global object for multisourced slave. */
extern Multisource_info channel_map;
/* Global object for storing per-channel replication filters */
extern Rpl_channel_filters rpl_channel_filters;
/**
  Check whether this server has a usable replica (slave) configuration.
  @return true if the default channel's Master_info exists, false otherwise.
*/
static bool inline is_slave_configured() {
  /* The default channel Master_info is absent when the server was started
     with server_id == 0
     OR
     loading the applier metadata repositories failed.
  */
  return (channel_map.get_default_channel_mi() != nullptr);
}
#endif /*RPL_MSR_H*/ | c | github | https://github.com/mysql/mysql-server | sql/rpl_msr.h |
# GNU Solfege - free ear training software
# Copyright (C) 2010, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from solfege import gu
from solfege import abstract
from solfege import lessonfile
from solfege import mpd
from solfege import soundcard
from solfege.mpd import Duration
from solfege.mpd import elems
from solfege.mpd import RhythmWidget, RhythmWidgetController
class Teacher(abstract.Teacher):
    """Teacher for the rhythm dictation exercise (variant 2 lesson files)."""
    # Return codes for new_question()
    ERR_PICKY = 1
    OK = 2

    def __init__(self, exname):
        abstract.Teacher.__init__(self, exname)
        self.lessonfileclass = lessonfile.RhythmDictation2Lessonfile

    def new_question(self):
        """Pick a new random question from the lesson file.

        A timelist of the question is created when the question is
        generated, and it will later be compared to the timelist of the
        staff the user edits. A PercussionTrack is used when the
        question is played.

        Return ERR_PICKY if the picky option blocks a new question,
        otherwise OK.
        """
        picky = self.get_bool('config/picky_on_new_question')
        unsolved = self.q_status in (self.QSTATUS_NEW, self.QSTATUS_WRONG)
        if picky and unsolved:
            return self.ERR_PICKY
        self.q_status = self.QSTATUS_NEW
        self.m_P.generate_random_question()
        self.m_score = self.m_P.m_answer_score
        return self.OK

    def play_question(self):
        """Play the current question, prefixed by a count-in if one is set."""
        question_score = self.m_P.m_question_score
        # A per-question 'countin' overrides the lesson-file global one.
        default_countin = self.m_P.m_globals.get('countin')
        countin = self.m_P.m_questions[self.m_P._idx].get('countin', default_countin)
        if countin:
            prefix = countin.get_score(self.m_P, as_name='countin')
            question_score = elems.Score.concat2(prefix, question_score)
        tracks = mpd.score_to_tracks(question_score)
        tracks[0].prepend_bpm(*self.m_P.get_tempo())
        soundcard.synth.play_track(*tracks)

    def guess_answer(self, staff):
        """Compare the entered rhythm with the question and update q_status.

        Return True if the answer is correct, False otherwise.
        """
        assert self.q_status not in (self.QSTATUS_NO, self.QSTATUS_GIVE_UP)
        correct = (self.m_P.m_question_score.get_timelist()
                   == self.m_P.m_answer_score.get_timelist())
        self.q_status = self.QSTATUS_SOLVED if correct else self.QSTATUS_WRONG
        return correct

    def give_up(self):
        """Mark the current question as given up."""
        self.q_status = self.QSTATUS_GIVE_UP
class Gui(abstract.LessonbasedGui):
    """GTK user interface for the rhythm dictation exercise."""

    def __init__(self, teacher):
        abstract.LessonbasedGui.__init__(self, teacher)
        # Rhythm entry widget plus its on-screen editing controls.
        self.g_w = RhythmWidget()
        self.g_w.connect('score-updated', self.on_score_updated)
        self.practise_box.pack_start(self.g_w, False)
        self.g_c = RhythmWidgetController(self.g_w)
        self.practise_box.pack_start(self.g_c, False)
        self.g_flashbar = gu.FlashBar()
        self.practise_box.pack_start(self.g_flashbar, False)
        self.g_flashbar.show()
        self.std_buttons_add(
            ('new', self.new_question),
            ('guess_answer', self.guess_answer),
            ('repeat', self.repeat_question),
            ('give_up', self.give_up))
        self.g_w.show()

    def on_score_updated(self, w):
        # Only allow guessing once the user has entered at least one note.
        self.g_guess_answer.set_sensitive(bool(self.g_w.m_score.get_timelist()))

    def new_question(self, *w):
        """Ask the teacher for a new question, play it and prepare the widgets."""
        def exception_cleanup():
            self.m_t.q_status = self.m_t.QSTATUS_NO
            self.std_buttons_exception_cleanup()
        try:
            g = self.m_t.new_question()
            if g == self.m_t.OK:
                self.m_t.play_question()
                self.std_buttons_new_question()
                self.g_w.grab_focus()
                self.g_w.set_score(self.m_t.m_score)
                self.g_c.set_editable(True)
        # Fix: use "except ... as e" (valid on Python 2.6+ and Python 3)
        # instead of the Python-2-only "except X, e" form. The explicit
        # unicode(e) call was redundant: %s in a unicode format string
        # already coerces the operand the same way.
        except Duration.BadStringException as e:
            gu.dialog_ok("Lesson file error", secondary_text=u"Bad rhythm string in the elements variable of the lessonfile. Only digits and dots expected: %s" % e)
            exception_cleanup()
        except Exception as e:
            if not self.standard_exception_handler(e, exception_cleanup):
                raise

    def guess_answer(self, *w):
        """Check the entered rhythm against the question and flash the result."""
        if self.m_t.q_status == Teacher.QSTATUS_SOLVED:
            # Already solved: give feedback but don't change button state.
            if self.m_t.guess_answer(self.g_w.m_score):
                self.g_flashbar.flash(_("Correct, but you have already solved this question"))
            else:
                self.g_flashbar.flash(_("Wrong, but you have already solved this question"))
        else:
            if self.m_t.guess_answer(self.g_w.m_score):
                self.g_flashbar.flash(_("Correct"))
                self.std_buttons_answer_correct()
            else:
                self.g_flashbar.flash(_("Wrong"))
                self.std_buttons_answer_wrong()
        self.g_w.grab_focus()

    def repeat_question(self, *w):
        """Replay the current question."""
        self.g_w.grab_focus()
        self.m_t.play_question()

    def give_up(self, *w):
        # Make a copy of the question asked. Then attach the staff
        # the user entered below, set some labels and display it.
        score_copy = self.m_t.m_P.m_question_score.copy()
        score_copy.m_staffs.append(self.g_w.m_score.m_staffs[0])
        score_copy.m_staffs[-1].set_parent(self.m_t.m_P.m_question_score)
        score_copy.m_staffs[0].m_label = _("The music played:")
        score_copy.m_staffs[-1].m_label = _("The rhythm you entered:")
        score_copy.create_shortcuts()
        self.g_w.set_score(score_copy, cursor=None)
        self.m_t.give_up()
        self.g_c.set_editable(False)
        self.std_buttons_give_up()

    def on_start_practise(self):
        """Reset the widgets when the user enters this exercise."""
        super(Gui, self).on_start_practise()
        self.std_buttons_start_practise()
        self.g_c.set_editable(False)
        self.g_w.set_score(elems.Score())
        self.g_flashbar.delayed_flash(self.short_delay,
                                      _("Click 'New' to begin."))

    def on_end_practise(self):
        """Clear the rhythm widget when the user leaves this exercise."""
        super(Gui, self).on_end_practise()
        self.g_w.set_score(elems.Score())
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
from json_parse import OrderedDict
import schema_util
class _TypeDependency(object):
"""Contains information about a dependency a namespace has on a type: the
type's model, and whether that dependency is "hard" meaning that it cannot be
forward declared.
"""
def __init__(self, type_, hard=False):
self.type_ = type_
self.hard = hard
def GetSortKey(self):
return '%s.%s' % (self.type_.namespace.name, self.type_.name)
class CppTypeGenerator(object):
  """Manages the types of properties and provides utilities for getting the
  C++ type out of a model.Property.
  """

  def __init__(self, model, schema_loader, default_namespace=None):
    """Creates a cpp_type_generator. The given root_namespace should be of the
    format extensions::api::sub. The generator will generate code suitable for
    use in the given model's namespace.
    """
    self._default_namespace = default_namespace
    if self._default_namespace is None:
      # NOTE: relies on Python 2's list-returning dict.values().
      self._default_namespace = model.namespaces.values()[0]
    self._schema_loader = schema_loader

  def GetEnumNoneValue(self, type_):
    """Gets the enum value in the given model.Property indicating no value has
    been set.
    """
    return '%s_NONE' % self.FollowRef(type_).unix_name.upper()

  def GetEnumLastValue(self, type_):
    """Gets the enum value in the given model.Property indicating the last
    value for the type.
    """
    return '%s_LAST' % self.FollowRef(type_).unix_name.upper()

  def GetEnumValue(self, type_, enum_value):
    """Gets the enum value of the given model.Property of the given type.
    e.g VAR_STRING
    """
    value = cpp_util.Classname(enum_value.name.upper())
    prefix = (type_.cpp_enum_prefix_override or
              self.FollowRef(type_).unix_name)
    value = '%s_%s' % (prefix.upper(), value)
    # To avoid collisions with built-in OS_* preprocessor definitions, we add
    # a trailing underscore to enum names that start with OS_.
    if value.startswith("OS_"):
      value += "_"
    return value

  def GetCppType(self, type_, is_ptr=False, is_in_container=False):
    """Translates a model.Property or model.Type into its C++ type.

    If REF types from different namespaces are referenced, will resolve
    using self._schema_loader.

    Use |is_ptr| if the type is optional. This will wrap the type in a
    scoped_ptr if possible (it is not possible to wrap an enum).

    Use |is_in_container| if the type is appearing in a collection, e.g. a
    std::vector or std::map. This will wrap it in the correct type with
    spacing.
    """
    cpp_type = None
    if type_.property_type == PropertyType.REF:
      ref_type = self._FindType(type_.ref_type)
      if ref_type is None:
        raise KeyError('Cannot find referenced type: %s' % type_.ref_type)
      cpp_type = self.GetCppType(ref_type)
    elif type_.property_type == PropertyType.BOOLEAN:
      cpp_type = 'bool'
    elif type_.property_type == PropertyType.INTEGER:
      cpp_type = 'int'
    elif type_.property_type == PropertyType.INT64:
      cpp_type = 'int64'
    elif type_.property_type == PropertyType.DOUBLE:
      cpp_type = 'double'
    elif type_.property_type == PropertyType.STRING:
      cpp_type = 'std::string'
    elif type_.property_type in (PropertyType.ENUM,
                                 PropertyType.OBJECT,
                                 PropertyType.CHOICES):
      # Types from other namespaces must be qualified with their namespace.
      if self._default_namespace is type_.namespace:
        cpp_type = cpp_util.Classname(type_.name)
      else:
        cpp_namespace = cpp_util.GetCppNamespace(
            type_.namespace.environment.namespace_pattern,
            type_.namespace.unix_name)
        cpp_type = '%s::%s' % (cpp_namespace,
                               cpp_util.Classname(type_.name))
    elif type_.property_type == PropertyType.ANY:
      cpp_type = 'base::Value'
    elif type_.property_type == PropertyType.FUNCTION:
      # Functions come into the json schema compiler as empty objects. We can
      # record these as empty DictionaryValues so that we know if the function
      # was passed in or not.
      cpp_type = 'base::DictionaryValue'
    elif type_.property_type == PropertyType.ARRAY:
      item_cpp_type = self.GetCppType(type_.item_type, is_in_container=True)
      cpp_type = 'std::vector<%s>' % cpp_util.PadForGenerics(item_cpp_type)
    elif type_.property_type == PropertyType.BINARY:
      cpp_type = 'std::vector<char>'
    else:
      raise NotImplementedError('Cannot get type of %s' % type_.property_type)

    # HACK: optional ENUM is represented elsewhere with a _NONE value, so it
    # never needs to be wrapped in pointer shenanigans.
    # TODO(kalman): change this - but it's an exceedingly far-reaching change.
    if not self.FollowRef(type_).property_type == PropertyType.ENUM:
      if is_in_container and (is_ptr or not self.IsCopyable(type_)):
        cpp_type = 'linked_ptr<%s>' % cpp_util.PadForGenerics(cpp_type)
      elif is_ptr:
        cpp_type = 'scoped_ptr<%s>' % cpp_util.PadForGenerics(cpp_type)

    return cpp_type

  def IsCopyable(self, type_):
    """Returns whether the resolved type can be copied by value in C++."""
    return not (self.FollowRef(type_).property_type in (PropertyType.ANY,
                                                        PropertyType.ARRAY,
                                                        PropertyType.OBJECT,
                                                        PropertyType.CHOICES))

  def GenerateForwardDeclarations(self):
    """Returns the forward declarations for self._default_namespace.
    """
    c = Code()
    for namespace, deps in self._NamespaceTypeDependencies().iteritems():
      filtered_deps = [
        dep for dep in deps
        # Add more ways to forward declare things as necessary.
        if (not dep.hard and
            dep.type_.property_type in (PropertyType.CHOICES,
                                        PropertyType.OBJECT))]
      if not filtered_deps:
        continue

      cpp_namespace = cpp_util.GetCppNamespace(
          namespace.environment.namespace_pattern,
          namespace.unix_name)
      c.Concat(cpp_util.OpenNamespace(cpp_namespace))
      for dep in filtered_deps:
        c.Append('struct %s;' % dep.type_.name)
      c.Concat(cpp_util.CloseNamespace(cpp_namespace))
    return c

  def GenerateIncludes(self, include_soft=False):
    """Returns the #include lines for self._default_namespace.

    Soft (forward-declarable) dependencies are only included when
    |include_soft| is True.
    """
    c = Code()
    for namespace, dependencies in self._NamespaceTypeDependencies().items():
      for dependency in dependencies:
        if dependency.hard or include_soft:
          c.Append('#include "%s/%s.h"' % (namespace.source_file_dir,
                                           namespace.unix_name))
    return c

  def _FindType(self, full_name):
    """Finds the model.Type with name |qualified_name|. If it's not from
    |self._default_namespace| then it needs to be qualified.
    """
    namespace = self._schema_loader.ResolveType(full_name,
                                                self._default_namespace)
    if namespace is None:
      raise KeyError('Cannot resolve type %s. Maybe it needs a prefix '
                     'if it comes from another namespace?' % full_name)
    return namespace.types[schema_util.StripNamespace(full_name)]

  def FollowRef(self, type_):
    """Follows $ref link of types to resolve the concrete type a ref refers to.

    If the property passed in is not of type PropertyType.REF, it will be
    returned unchanged.
    """
    if type_.property_type != PropertyType.REF:
      return type_
    return self.FollowRef(self._FindType(type_.ref_type))

  def _NamespaceTypeDependencies(self):
    """Returns a dict ordered by namespace name containing a mapping of
    model.Namespace to every _TypeDependency for |self._default_namespace|,
    sorted by the type's name.
    """
    dependencies = set()
    for function in self._default_namespace.functions.values():
      for param in function.params:
        dependencies |= self._TypeDependencies(param.type_,
                                               hard=not param.optional)
      if function.callback:
        for param in function.callback.params:
          dependencies |= self._TypeDependencies(param.type_,
                                                 hard=not param.optional)
    for type_ in self._default_namespace.types.values():
      for prop in type_.properties.values():
        dependencies |= self._TypeDependencies(prop.type_,
                                               hard=not prop.optional)
    for event in self._default_namespace.events.values():
      for param in event.params:
        dependencies |= self._TypeDependencies(param.type_,
                                               hard=not param.optional)

    # Make sure that the dependencies are returned in alphabetical order.
    dependency_namespaces = OrderedDict()
    for dependency in sorted(dependencies, key=_TypeDependency.GetSortKey):
      namespace = dependency.type_.namespace
      if namespace is self._default_namespace:
        continue
      if namespace not in dependency_namespaces:
        dependency_namespaces[namespace] = []
      dependency_namespaces[namespace].append(dependency)
    return dependency_namespaces

  def _TypeDependencies(self, type_, hard=False):
    """Gets all the type dependencies of a property.
    """
    deps = set()
    if type_.property_type == PropertyType.REF:
      deps.add(_TypeDependency(self._FindType(type_.ref_type), hard=hard))
    elif type_.property_type == PropertyType.ARRAY:
      # Non-copyable types are not hard because they are wrapped in linked_ptrs
      # when generated. Otherwise they're typedefs, so they're hard (though we
      # could generate those typedefs in every dependent namespace, but that
      # seems weird).
      deps = self._TypeDependencies(type_.item_type,
                                    hard=self.IsCopyable(type_.item_type))
    elif type_.property_type == PropertyType.CHOICES:
      for type_ in type_.choices:
        deps |= self._TypeDependencies(type_, hard=self.IsCopyable(type_))
    elif type_.property_type == PropertyType.OBJECT:
      for p in type_.properties.values():
        deps |= self._TypeDependencies(p.type_, hard=not p.optional)
    return deps

  def GeneratePropertyValues(self, prop, line, nodoc=False):
    """Generates the Code to display all value-containing properties.
    """
    c = Code()
    if not nodoc:
      c.Comment(prop.description)

    if prop.value is not None:
      cpp_type = self.GetCppType(prop.type_)
      cpp_value = prop.value
      if cpp_type == 'std::string':
        # BUG FIX: quote the property's *value*, not the type name. The old
        # code did '"%s"' % cpp_type, emitting the literal '"std::string"'
        # for every string-valued property.
        cpp_value = '"%s"' % cpp_value
      c.Append(line % {
        "type": cpp_type,
        "name": prop.name,
        "value": cpp_value
      })
    else:
      has_child_code = False
      c.Sblock('namespace %s {' % prop.name)
      for child_property in prop.type_.properties.values():
        child_code = self.GeneratePropertyValues(child_property,
                                                 line,
                                                 nodoc=nodoc)
        if child_code:
          has_child_code = True
          c.Concat(child_code)
      c.Eblock('}  // namespace %s' % prop.name)
      if not has_child_code:
        c = None
    return c
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from os.path import exists, dirname
from robot.output.loggerhelper import LEVELS
from robot.utils import (attribute_escape, get_link_path, html_escape,
html_format, is_string, is_unicode, timestamp_to_secs,
unic)
from .stringcache import StringCache
class JsBuildingContext(object):
    """Shared state used while serializing result/log data to JS.

    Tracks the active string cache, the millisecond time base, split-log
    parts and registered message link targets.
    """

    def __init__(self, log_path=None, split_log=False, prune_input=False):
        # log_path can be a custom object in unit tests
        if is_string(log_path):
            self._log_dir = dirname(log_path)
        else:
            self._log_dir = None
        self._split_log = split_log
        self._prune_input = prune_input
        self._strings = self._top_level_strings = StringCache()
        self.basemillis = None
        self.split_results = []
        self.min_level = 'NONE'
        self._msg_links = {}

    def string(self, string, escape=True, attr=False):
        """Add *string* to the active cache, optionally escaping it first."""
        if escape and string:
            if not is_unicode(string):
                string = unic(string)
            escaper = attribute_escape if attr else html_escape
            string = escaper(string)
        return self._strings.add(string)

    def html(self, string):
        """Add an HTML-formatted string without further escaping."""
        return self.string(html_format(string), escape=False)

    def relative_source(self, source):
        """Add *source* as a path relative to the log dir ('' if unresolvable)."""
        if self._log_dir and source and exists(source):
            rel_source = get_link_path(source, self._log_dir)
        else:
            rel_source = ''
        return self.string(rel_source)

    def timestamp(self, time):
        """Return *time* as milliseconds relative to the first timestamp seen."""
        if not time:
            return None
        current = int(timestamp_to_secs(time) * 1000)
        if self.basemillis is None:
            # First timestamp establishes the base for all later ones.
            self.basemillis = current
        return current - self.basemillis

    def message_level(self, level):
        """Lower the tracked minimum level if *level* is below it."""
        if LEVELS[level] < LEVELS[self.min_level]:
            self.min_level = level

    def create_link_target(self, msg):
        """Register *msg* so that later identical messages can link to it."""
        target = self._top_level_strings.add(msg.parent.id)
        self._msg_links[self._link_key(msg)] = target

    def link(self, msg):
        """Return the registered link target for *msg*, or None."""
        return self._msg_links.get(self._link_key(msg))

    def _link_key(self, msg):
        # Messages are considered equal if text, level and timestamp match.
        return (msg.message, msg.level, msg.timestamp)

    @property
    def strings(self):
        return self._strings.dump()

    def start_splitting_if_needed(self, split=False):
        """Switch to a fresh string cache when split logs are enabled."""
        if not (self._split_log and split):
            return False
        self._strings = StringCache()
        return True

    def end_splitting(self, model):
        """Store the finished split part and restore the top-level cache."""
        self.split_results.append((model, self.strings))
        self._strings = self._top_level_strings
        return len(self.split_results)

    @contextmanager
    def prune_input(self, *items):
        yield
        if self._prune_input:
            for item in items:
                item.clear()
#!/bin/sh

test_description='miscellaneous basic tests for cherry-pick and revert'

GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME

. ./test-lib.sh

# Build a small history: "oops" with 15 lines, an extra line appended,
# then two diverging branches that each rename the file differently.
test_expect_success setup '
	for l in a b c d e f g h i j k l m n o
	do
		echo $l$l$l$l$l$l$l$l$l || return 1
	done >oops &&

	test_tick &&
	git add oops &&
	git commit -m initial &&
	git tag initial &&

	test_tick &&
	echo "Add extra line at the end" >>oops &&
	git commit -a -m added &&
	git tag added &&

	test_tick &&
	git mv oops spoo &&
	git commit -m rename1 &&
	git tag rename1 &&

	test_tick &&
	git checkout -b side initial &&
	git mv oops opos &&
	git commit -m rename2 &&
	git tag rename2
'

# Unknown options must fail with a usage message and leave HEAD untouched.
test_expect_success 'cherry-pick --nonsense' '
	pos=$(git rev-parse HEAD) &&
	git diff --exit-code HEAD &&
	test_must_fail git cherry-pick --nonsense 2>msg &&
	git diff --exit-code HEAD "$pos" &&
	test_grep "[Uu]sage:" msg
'

test_expect_success 'revert --nonsense' '
	pos=$(git rev-parse HEAD) &&
	git diff --exit-code HEAD &&
	test_must_fail git revert --nonsense 2>msg &&
	git diff --exit-code HEAD "$pos" &&
	test_grep "[Uu]sage:" msg
'

# the following two test cherry-pick and revert with renames
#
# --
# + rename2: renames oops to opos
# + rename1: renames oops to spoo
# + added:   adds extra line to oops
# ++ initial: has lines in oops

test_expect_success 'cherry-pick after renaming branch' '
	git checkout rename2 &&
	git cherry-pick added &&
	test_cmp_rev rename2 HEAD^ &&
	grep "Add extra line at the end" opos &&
	git reflog -1 | grep cherry-pick
'

test_expect_success 'revert after renaming branch' '
	git checkout rename1 &&
	git revert added &&
	test_cmp_rev rename1 HEAD^ &&
	test_path_is_file spoo &&
	test_cmp_rev initial:oops HEAD:spoo &&
	git reflog -1 | grep revert
'

# A stat-only difference (mtime) must not block cherry-pick.
test_expect_success 'cherry-pick on stat-dirty working tree' '
	git clone . copy &&
	(
		cd copy &&
		git checkout initial &&
		test-tool chmtime +40 oops &&
		git cherry-pick added
	)
'

test_expect_success 'revert forbidden on dirty working tree' '
	echo content >extra_file &&
	git add extra_file &&
	test_must_fail git revert HEAD 2>errors &&
	test_grep "your local changes would be overwritten by " errors
'

test_expect_success 'cherry-pick on unborn branch' '
	git switch --orphan unborn &&
	git rm --cached -r . &&
	git cherry-pick initial &&
	git diff --exit-code initial &&
	test_cmp_rev ! initial HEAD
'

test_expect_success 'cherry-pick on unborn branch with --allow-empty' '
	git checkout --detach &&
	git branch -D unborn &&
	git switch --orphan unborn &&
	git cherry-pick initial --allow-empty &&
	git diff --exit-code initial &&
	test_cmp_rev ! initial HEAD
'

# "-" is shorthand for "@{-1}", the previously checked out branch.
test_expect_success 'cherry-pick "-" to pick from previous branch' '
	git checkout unborn &&
	test_commit to-pick actual content &&
	git checkout main &&
	git cherry-pick - &&
	echo content >expect &&
	test_cmp expect actual
'

test_expect_success 'cherry-pick "-" is meaningless without checkout' '
	test_create_repo afresh &&
	(
		cd afresh &&
		test_commit one &&
		test_commit two &&
		test_commit three &&
		test_must_fail git cherry-pick -
	)
'

test_expect_success 'cherry-pick "-" works with arguments' '
	git checkout -b side-branch &&
	test_commit change actual change &&
	git checkout main &&
	git cherry-pick -s - &&
	echo "Signed-off-by: C O Mitter <committer@example.com>" >expect &&
	git cat-file commit HEAD | grep ^Signed-off-by: >signoff &&
	test_cmp expect signoff &&
	echo change >expect &&
	test_cmp expect actual
'

test_expect_success 'cherry-pick works with dirty renamed file' '
	test_commit to-rename &&
	git checkout -b unrelated &&
	test_commit unrelated &&
	git checkout @{-1} &&
	git mv to-rename.t renamed &&
	test_tick &&
	git commit -m renamed &&
	echo modified >renamed &&
	git cherry-pick refs/heads/unrelated &&
	test $(git rev-parse :0:renamed) = $(git rev-parse HEAD~2:to-rename.t) &&
	grep -q "^modified$" renamed
'

test_expect_success 'advice from failed revert' '
	test_when_finished "git reset --hard" &&
	test_commit --no-tag "add dream" dream dream &&
	dream_oid=$(git rev-parse --short HEAD) &&
	cat <<-EOF >expected &&
	error: could not revert $dream_oid... add dream
	hint: After resolving the conflicts, mark them with
	hint: "git add/rm <pathspec>", then run
	hint: "git revert --continue".
	hint: You can instead skip this commit with "git revert --skip".
	hint: To abort and get back to the state before "git revert",
	hint: run "git revert --abort".
	hint: Disable this message with "git config set advice.mergeConflict false"
	EOF
	test_commit --append --no-tag "double-add dream" dream dream &&
	test_must_fail git revert HEAD^ 2>actual &&
	test_cmp expected actual
'

# Helper: assert the subject line of the commit at HEAD.
test_expect_subject () {
	echo "$1" >expect &&
	git log -1 --pretty=%s >actual &&
	test_cmp expect actual
}

test_expect_success 'titles of fresh reverts' '
	test_commit --no-tag A file1 &&
	test_commit --no-tag B file1 &&
	git revert --no-edit HEAD &&
	test_expect_subject "Revert \"B\"" &&
	git revert --no-edit HEAD &&
	test_expect_subject "Reapply \"B\"" &&
	git revert --no-edit HEAD &&
	test_expect_subject "Revert \"Reapply \"B\"\""
'

test_expect_success 'title of legacy double revert' '
	test_commit --no-tag "Revert \"Revert \"B\"\"" file1 &&
	git revert --no-edit HEAD &&
	test_expect_subject "Revert \"Revert \"Revert \"B\"\"\""
'

test_expect_success 'identification of reverted commit (default)' '
	test_commit to-ident &&
	test_when_finished "git reset --hard to-ident" &&
	git checkout --detach to-ident &&
	git revert --no-edit HEAD &&
	git cat-file commit HEAD >actual.raw &&
	grep "^This reverts " actual.raw >actual &&
	echo "This reverts commit $(git rev-parse HEAD^)." >expect &&
	test_cmp expect actual
'

test_expect_success 'identification of reverted commit (--reference)' '
	git checkout --detach to-ident &&
	git revert --reference --no-edit HEAD &&
	git cat-file commit HEAD >actual.raw &&
	grep "^This reverts " actual.raw >actual &&
	echo "This reverts commit $(git show -s --pretty=reference HEAD^)." >expect &&
	test_cmp expect actual
'

test_expect_success 'git revert --reference with core.commentChar' '
	test_when_finished "git reset --hard to-ident" &&
	git checkout --detach to-ident &&
	GIT_EDITOR="head -n4 >actual" git -c core.commentChar=% revert \
		--edit --reference HEAD &&
	cat <<-EOF >expect &&
	% *** SAY WHY WE ARE REVERTING ON THE TITLE LINE ***

	This reverts commit $(git show -s --pretty=reference HEAD^).

	EOF
	test_cmp expect actual
'

test_expect_success 'identification of reverted commit (revert.reference)' '
	git checkout --detach to-ident &&
	git -c revert.reference=true revert --no-edit HEAD &&
	git cat-file commit HEAD >actual.raw &&
	grep "^This reverts " actual.raw >actual &&
	echo "This reverts commit $(git show -s --pretty=reference HEAD^)." >expect &&
	test_cmp expect actual
'

test_expect_success 'cherry-pick is unaware of --reference (for now)' '
	test_when_finished "git reset --hard" &&
	test_must_fail git cherry-pick --reference HEAD 2>actual &&
	grep "^usage: git cherry-pick" actual
'

test_done
/* CC0 (Public domain) - see ccan/licenses/CC0 file for details */
#ifndef CCAN_CHECK_TYPE_H
#define CCAN_CHECK_TYPE_H
/**
* ccan_check_type - issue a warning or build failure if type is not correct.
* @expr: the expression whose type we should check (not evaluated).
* @type: the exact type we expect the expression to be.
*
* This macro is usually used within other macros to try to ensure that a macro
* argument is of the expected type. No type promotion of the expression is
* done: an unsigned int is not the same as an int!
*
* ccan_check_type() always evaluates to 0.
*
* If your compiler does not support typeof, then the best we can do is fail
* to compile if the sizes of the types are unequal (a less complete check).
*
* Example:
* // They should always pass a 64-bit value to _set_some_value!
* #define set_some_value(expr) \
* _set_some_value((ccan_check_type((expr), uint64_t), (expr)))
*/
/**
* ccan_check_types_match - issue a warning or build failure if types are not same.
* @expr1: the first expression (not evaluated).
* @expr2: the second expression (not evaluated).
*
* This macro is usually used within other macros to try to ensure that
* arguments are of identical types. No type promotion of the expressions is
* done: an unsigned int is not the same as an int!
*
* ccan_check_types_match() always evaluates to 0.
*
* If your compiler does not support typeof, then the best we can do is fail
* to compile if the sizes of the types are unequal (a less complete check).
*
* Example:
* // Do subtraction to get to enclosing type, but make sure that
* // pointer is of correct type for that member.
* #define ccan_container_of(mbr_ptr, encl_type, mbr) \
* (ccan_check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
* ((encl_type *) \
* ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr))))
*/
#if defined(HAVE_TYPEOF) && HAVE_TYPEOF
/* With typeof we compare pointer types: pointers to different types are
 * incompatible, so a mismatch produces a warning or error. Both macros
 * evaluate to 0 and never evaluate their arguments. */
#define ccan_check_type(expr, type)			\
	((typeof(expr) *)0 != (type *)0)
#define ccan_check_types_match(expr1, expr2)		\
	((typeof(expr1) *)0 != (typeof(expr2) *)0)
#else
#include "ccan/build_assert/build_assert.h"
/* Without typeof, we can only test the sizes. */
#define ccan_check_type(expr, type)			\
	CCAN_BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
#define ccan_check_types_match(expr1, expr2)		\
	CCAN_BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
#endif /* HAVE_TYPEOF */
#endif /* CCAN_CHECK_TYPE_H */ | c | github | https://github.com/ruby/ruby | ccan/check_type/check_type.h |
---
navigation_title: "Cardinality"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
---
# Cardinality aggregation [search-aggregations-metrics-cardinality-aggregation]
A `single-value` metrics aggregation that calculates an approximate count of distinct values.
Assume you are indexing store sales and would like to count the unique number of sold products that match a query:
```console
POST /sales/_search?size=0
{
"aggs": {
"type_count": {
"cardinality": {
"field": "type"
}
}
}
}
```
% TEST[setup:sales]
Response:
```console-result
{
...
"aggregations": {
"type_count": {
"value": 3
}
}
}
```
% TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
## Precision control [_precision_control]
This aggregation also supports the `precision_threshold` option:
```console
POST /sales/_search?size=0
{
"aggs": {
"type_count": {
"cardinality": {
"field": "type",
"precision_threshold": 100 <1>
}
}
}
}
```
% TEST[setup:sales]
1. The `precision_threshold` option allows you to trade memory for accuracy, and defines a unique count below which counts are expected to be close to accurate. Above this value, counts might become a bit more fuzzy. The maximum supported value is 40000; thresholds above this number will have the same effect as a threshold of 40000. The default value is `3000`.
## Counts are approximate [_counts_are_approximate]
Computing exact counts requires loading values into a hash set and returning its size. This doesn’t scale when working on high-cardinality sets and/or large values as the required memory usage and the need to communicate those per-shard sets between nodes would utilize too many resources of the cluster.
This `cardinality` aggregation is based on the [HyperLogLog++](https://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf) algorithm, which counts based on the hashes of the values with some interesting properties:
* configurable precision, which decides on how to trade memory for accuracy,
* excellent accuracy on low-cardinality sets,
* fixed memory usage: no matter if there are tens or billions of unique values, memory usage only depends on the configured precision.
For a precision threshold of `c`, the implementation that we are using requires about `c * 8` bytes.
The following chart shows how the error varies before and after the threshold:

For all 3 thresholds, counts have been accurate up to the configured threshold. Although not guaranteed,
this is likely to be the case. Accuracy in practice depends on the dataset in question. In general,
most datasets show consistently good accuracy. Also note that even with a threshold as low as 100,
the error remains very low (1-6% as seen in the above graph) even when counting millions of items.
Because the HyperLogLog++ algorithm depends on the leading zeros of hashed values, the exact
distribution of hashes in a dataset can affect the accuracy of the cardinality.
## Pre-computed hashes [_pre_computed_hashes]
On string fields that have a high cardinality, it might be faster to store the hash of your field values in your index and then run the cardinality aggregation on this field. This can either be done by providing hash values from client-side or by letting Elasticsearch compute hash values for you by using the [`mapper-murmur3`](/reference/elasticsearch-plugins/mapper-murmur3.md) plugin.
::::{note}
Pre-computing hashes is usually only useful on very large and/or high-cardinality fields as it saves CPU and memory. However, on numeric fields, hashing is very fast and storing the original values requires as much or less memory than storing the hashes. This is also true on low-cardinality string fields, especially given that those have an optimization in order to make sure that hashes are computed at most once per unique value per segment.
::::
## Script [_script_4]
If you need the cardinality of the combination of two fields, create a [runtime field](docs-content://manage-data/data-store/mapping/runtime-fields.md) combining them and aggregate it.
```console
POST /sales/_search?size=0
{
"runtime_mappings": {
"type_and_promoted": {
"type": "keyword",
"script": "emit(doc['type'].value + ' ' + doc['promoted'].value)"
}
},
"aggs": {
"type_promoted_count": {
"cardinality": {
"field": "type_and_promoted"
}
}
}
}
```
% TEST[setup:sales]
% TEST[s/size=0/size=0&filter_path=aggregations/]
## Missing value [_missing_value_8]
The `missing` parameter defines how documents that are missing a value should be treated. By default they will be ignored but it is also possible to treat them as if they had a value.
```console
POST /sales/_search?size=0
{
"aggs": {
"tag_cardinality": {
"cardinality": {
"field": "tag",
"missing": "N/A" <1>
}
}
}
}
```
% TEST[setup:sales]
1. Documents without a value in the `tag` field will fall into the same bucket as documents that have the value `N/A`.
## Execution hint [_execution_hint_4]
You can run cardinality aggregations using different mechanisms:
* by using field values directly (`direct`)
* by using global ordinals of the field and resolving those values after finishing a shard (`global_ordinals`)
* by using segment ordinal values and resolving those values after each segment (`segment_ordinals`)
Additionally, there are two "heuristic based" modes. These modes will cause {{es}} to use some data about the state of the index to choose an appropriate execution method. The two heuristics are:
* `save_time_heuristic` - this is the default in {{es}} 8.4 and later.
* `save_memory_heuristic` - this was the default in {{es}} 8.3 and earlier
When not specified, {{es}} will apply a heuristic to choose the appropriate mode. Also note that for some data (non-ordinal fields), `direct` is the only option, and the hint will be ignored in these cases. Generally speaking, it should not be necessary to set this value. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/aggregations/search-aggregations-metrics-cardinality-aggregation.md |
//===--- MapReduce.swift --------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import TestsUtils
import Foundation
// Tag sets shared by the registrations below: `t` for the numeric variants,
// `ts` additionally tags the string-processing variants with `.String`.
let t: [BenchmarkCategory] = [.validation, .algorithm]
let ts: [BenchmarkCategory] = [.validation, .algorithm, .String]
// Registrations for the map/reduce micro-benchmarks. Fixture-backed entries
// pair a setUpFunction (allocate the input outside the measured region) with
// the matching tearDownFunction (release that same fixture afterwards).
public let benchmarks = [
  BenchmarkInfo(name: "MapReduce", runFunction: run_MapReduce, tags: t),
  BenchmarkInfo(name: "MapReduceAnyCollection",
    runFunction: run_MapReduceAnyCollection, tags: t),
  BenchmarkInfo(name: "MapReduceAnyCollectionShort",
    runFunction: run_MapReduceAnyCollectionShort, tags: t, legacyFactor: 10),
  // Fixed: the two Class benchmarks set up `boxedNumbers`, so they must tear
  // down with `releaseboxedNumbers`. They previously passed `releaseDecimals`,
  // which leaked the boxes and nilled a fixture they never created (and left
  // `releaseboxedNumbers` entirely unreferenced).
  BenchmarkInfo(name: "MapReduceClass2",
    runFunction: run_MapReduceClass, tags: t,
    setUpFunction: { boxedNumbers(1000) }, tearDownFunction: releaseboxedNumbers),
  BenchmarkInfo(name: "MapReduceClassShort2",
    runFunction: run_MapReduceClassShort, tags: t,
    setUpFunction: { boxedNumbers(10) }, tearDownFunction: releaseboxedNumbers),
  BenchmarkInfo(name: "MapReduceNSDecimalNumber",
    runFunction: run_MapReduceNSDecimalNumber, tags: t,
    setUpFunction: { decimals(1000) }, tearDownFunction: releaseDecimals),
  BenchmarkInfo(name: "MapReduceNSDecimalNumberShort",
    runFunction: run_MapReduceNSDecimalNumberShort, tags: t,
    setUpFunction: { decimals(10) }, tearDownFunction: releaseDecimals),
  BenchmarkInfo(name: "MapReduceLazyCollection",
    runFunction: run_MapReduceLazyCollection, tags: t),
  BenchmarkInfo(name: "MapReduceLazyCollectionShort",
    runFunction: run_MapReduceLazyCollectionShort, tags: t),
  BenchmarkInfo(name: "MapReduceLazySequence",
    runFunction: run_MapReduceLazySequence, tags: t),
  BenchmarkInfo(name: "MapReduceSequence",
    runFunction: run_MapReduceSequence, tags: t),
  BenchmarkInfo(name: "MapReduceShort",
    runFunction: run_MapReduceShort, tags: t, legacyFactor: 10),
  BenchmarkInfo(name: "MapReduceShortString",
    runFunction: run_MapReduceShortString, tags: ts),
  BenchmarkInfo(name: "MapReduceString",
    runFunction: run_MapReduceString, tags: ts),
]
#if _runtime(_ObjC)
// Shared fixture for the NSDecimalNumber benchmarks; populated by the
// registration's setUpFunction so allocation cost is not measured.
var decimals : [NSDecimalNumber]!
func decimals(_ n: Int) {
  decimals = (0..<n).map { NSDecimalNumber(value: $0) }
}
// Tear-down hook: drop the fixture after the benchmark runs.
func releaseDecimals() { decimals = nil }
#else
// NSDecimalNumber requires the ObjC runtime; stub the fixture hooks so the
// benchmark registrations still compile on non-ObjC platforms.
func decimals(_ n: Int) {}
func releaseDecimals() {}
#endif
/// Minimal reference-type wrapper around an `Int`, used by the class-based
/// benchmarks to push map/reduce traffic through heap-allocated instances.
class Box {
  var v: Int
  init(_ value: Int) { v = value }
}
// Shared fixture of boxed integers for the class-based benchmarks; populated
// by the registration's setUpFunction, released by the tear-down hook below.
var boxedNumbers : [Box]!
func boxedNumbers(_ n: Int) { boxedNumbers = (0..<n).map { Box($0) } }
func releaseboxedNumbers() { boxedNumbers = nil }
// Eager map (+5) then reduce (wrapping sum) over a concrete 1000-element
// Array, rebuilt from the mapped result each iteration.
@inline(never)
public func run_MapReduce(_ n: Int) {
  var numbers = [Int](0..<1000)
  var c = 0
  for _ in 1...n*100 {
    // &+ keeps arithmetic overflow-unchecked so traps don't dominate timing.
    numbers = numbers.map { $0 &+ 5 }
    c = c &+ numbers.reduce(0, &+)
  }
  check(c != 0)
}
// Same workload as run_MapReduce, but through a type-erased AnyCollection to
// measure the cost of the existential/indirect dispatch.
@inline(never)
public func run_MapReduceAnyCollection(_ n: Int) {
  let numbers = AnyCollection([Int](0..<1000))
  var c = 0
  for _ in 1...n*100 {
    let mapped = numbers.map { $0 &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Short-input AnyCollection variant (10 elements, more iterations) to expose
// per-call overhead rather than per-element throughput.
@inline(never)
public func run_MapReduceAnyCollectionShort(_ n: Int) {
  let numbers = AnyCollection([Int](0..<10))
  var c = 0
  for _ in 1...n*1_000 {
    let mapped = numbers.map { $0 &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Short-input Array variant (10 elements, more iterations) of run_MapReduce.
@inline(never)
public func run_MapReduceShort(_ n: Int) {
  var numbers = [Int](0..<10)
  var c = 0
  for _ in 1...n*1_000 {
    numbers = numbers.map { $0 &+ 5 }
    c = c &+ numbers.reduce(0, &+)
  }
  check(c != 0)
}
// Eager map/reduce over a Sequence produced by sequence(first:next:), which
// unfolds 0 through 1000 inclusive; map materializes an Array each pass.
@inline(never)
public func run_MapReduceSequence(_ n: Int) {
  let numbers = sequence(first: 0) { $0 < 1000 ? $0 &+ 1 : nil }
  var c = 0
  for _ in 1...n*100 {
    let mapped = numbers.map { $0 &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Lazy counterpart of run_MapReduceSequence: .lazy.map fuses the transform
// into the reduce, avoiding the intermediate Array.
@inline(never)
public func run_MapReduceLazySequence(_ n: Int) {
  let numbers = sequence(first: 0) { $0 < 1000 ? $0 &+ 1 : nil }
  var c = 0
  for _ in 1...n*100 {
    let mapped = numbers.lazy.map { $0 &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Lazy map/reduce over a concrete Array: no intermediate storage, measures
// LazyMapCollection traversal.
@inline(never)
public func run_MapReduceLazyCollection(_ n: Int) {
  let numbers = [Int](0..<1000)
  var c = 0
  for _ in 1...n*100 {
    let mapped = numbers.lazy.map { $0 &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Short-input lazy-collection variant (10 elements, more iterations).
@inline(never)
public func run_MapReduceLazyCollectionShort(_ n: Int) {
  let numbers = [Int](0..<10)
  var c = 0
  for _ in 1...n*10000 {
    let mapped = numbers.lazy.map { $0 &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Map/reduce over a long ASCII string's UTF-8 view, accumulating into UInt64.
@inline(never)
public func run_MapReduceString(_ n: Int) {
  let s = "thequickbrownfoxjumpsoverthelazydogusingasmanycharacteraspossible123456789"
  var c: UInt64 = 0
  for _ in 1...n*100 {
    c = c &+ s.utf8.map { UInt64($0 &+ 5) }.reduce(0, &+)
  }
  check(c != 0)
}
// Short-string variant of run_MapReduceString (5-byte UTF-8 view), dominated
// by per-call overhead rather than per-byte work.
@inline(never)
public func run_MapReduceShortString(_ n: Int) {
  let s = "12345"
  var c: UInt64 = 0
  for _ in 1...n*100 {
    c = c &+ s.utf8.map { UInt64($0 &+ 5) }.reduce(0, &+)
  }
  check(c != 0)
}
// Map/reduce over the pre-built NSDecimalNumber fixture (1000 elements),
// measuring ObjC-bridged value extraction; a no-op without the ObjC runtime.
@inline(never)
public func run_MapReduceNSDecimalNumber(_ n: Int) {
#if _runtime(_ObjC)
  let numbers: [NSDecimalNumber] = decimals
  var c = 0
  for _ in 1...n*10 {
    let mapped = numbers.map { $0.intValue &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
#endif
}
// Short-fixture NSDecimalNumber variant (10 elements, more iterations);
// a no-op without the ObjC runtime.
@inline(never)
public func run_MapReduceNSDecimalNumberShort(_ n: Int) {
#if _runtime(_ObjC)
  let numbers: [NSDecimalNumber] = decimals
  var c = 0
  for _ in 1...n*1_000 {
    let mapped = numbers.map { $0.intValue &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
#endif
}
// Map/reduce over the pre-built Box fixture (1000 instances), measuring
// access through native class references.
@inline(never)
public func run_MapReduceClass(_ n: Int) {
  let numbers: [Box] = boxedNumbers
  var c = 0
  for _ in 1...n*10 {
    let mapped = numbers.map { $0.v &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
// Short-fixture class variant (10 Box instances, more iterations).
@inline(never)
public func run_MapReduceClassShort(_ n: Int) {
  let numbers: [Box] = boxedNumbers
  var c = 0
  for _ in 1...n*1_000 {
    let mapped = numbers.map { $0.v &+ 5 }
    c = c &+ mapped.reduce(0, &+)
  }
  check(c != 0)
}
{
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start"
},
"dependencies": {
"next": "latest",
"react": "^18.3.1",
"react-dom": "^18.3.1"
},
"devDependencies": {
"@types/node": "^20",
"@types/react": "^18",
"@types/react-dom": "^18",
"typescript": "^5"
}
} | json | github | https://github.com/vercel/next.js | examples/image-secure-compute/package.json |
"""
=====================================
Structured Arrays (aka Record Arrays)
=====================================
Introduction
============
Numpy provides powerful capabilities to create arrays of structs or records.
These arrays permit one to manipulate the data by the structs or by fields of
the struct. A simple example will show what is meant: ::
>>> x = np.zeros((2,),dtype=('i4,f4,a10'))
>>> x[:] = [(1,2.,'Hello'),(2,3.,"World")]
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a record that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second record: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. In this case the fields have received the
default names 'f0', 'f1' and 'f2'.
>>> y = x['f1']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the record. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the record, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument (as used in the above examples).
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
   d) Single character type specifiers (e.g. H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
Numpy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
By using strings to define the record structure, it precludes being
able to name the fields in the original definition. The names can
be changed as shown later, however.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
 >>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example:
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title.
 >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
 >>> x
 array([(0, 0.0), (0, 0.0), (0, 0.0)],
       dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the record structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings.
>>> x.dtype.fields['x'][2]
'title 1'
""" | unknown | codeparrot/codeparrot-clean | ||
"""
Tests for the geonode.documents app: document creation, upload views,
and resource permissions. These run under "manage.py test".
"""
import StringIO
import json
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from guardian.shortcuts import get_anonymous_user
from .forms import DocumentCreateForm
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.base.populate_test_data import create_models
class LayersTest(TestCase):
    """Integration tests for GeoNode documents: creation, upload, and
    permission handling, exercised through the ORM and the test client."""
    # NOTE(review): 'intial_data.json' (sic) must match the fixture file name
    # on disk -- do not correct the spelling here without renaming the file.
    fixtures = ['intial_data.json', 'bobby']
    # Permission spec reused by the permissions tests below: only "admin"
    # retains access; all other users and all groups are stripped.
    perm_spec = {
        "users": {
            "admin": [
                "change_resourcebase",
                "change_resourcebase_permissions",
                "view_resourcebase"]},
        "groups": {}}
    def setUp(self):
        """Create document/map test models and an in-memory GIF payload."""
        create_models('document')
        create_models('map')
        # Minimal 1x1 GIF87a image bytes, used as upload content throughout.
        self.imgfile = StringIO.StringIO(
            'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
            '\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
        self.anonymous_user = get_anonymous_user()
    def test_create_document_with_no_rel(self):
        """Tests the creation of a document with no relations"""
        f = SimpleUploadedFile(
            'test_img_file.gif',
            self.imgfile.read(),
            'image/gif')
        # pk=2 is presumably the admin superuser from the fixtures -- TODO confirm.
        superuser = get_user_model().objects.get(pk=2)
        c = Document.objects.create(
            doc_file=f,
            owner=superuser,
            title='theimg')
        c.set_default_permissions()
        self.assertEquals(Document.objects.get(pk=c.id).title, 'theimg')
    def test_create_document_with_rel(self):
        """Tests the creation of a document related to a map via the
        generic content_type/object_id fields."""
        f = SimpleUploadedFile(
            'test_img_file.gif',
            self.imgfile.read(),
            'image/gif')
        superuser = get_user_model().objects.get(pk=2)
        m = Map.objects.all()[0]
        ctype = ContentType.objects.get_for_model(m)
        c = Document.objects.create(
            doc_file=f,
            owner=superuser,
            title='theimg',
            content_type=ctype,
            object_id=m.id)
        self.assertEquals(Document.objects.get(pk=c.id).title, 'theimg')
    def test_create_document_url(self):
        """Tests creating an external document instead of a file."""
        superuser = get_user_model().objects.get(pk=2)
        c = Document.objects.create(doc_url="http://geonode.org/map.pdf",
                                    owner=superuser,
                                    title="GeoNode Map",
                                    )
        doc = Document.objects.get(pk=c.id)
        self.assertEquals(doc.title, "GeoNode Map")
        # The extension is expected to be derived from the URL's suffix.
        self.assertEquals(doc.extension, "pdf")
    def test_create_document_url_view(self):
        """
        Tests creating and updating external documents through the
        document_upload and document_replace views.
        """
        self.client.login(username='admin', password='admin')
        form_data = {
            'title': 'GeoNode Map',
            'permissions': '{"users":{"AnonymousUser": ["view_resourcebase"]},"groups":{}}',
            'doc_url': 'http://www.geonode.org/map.pdf'}
        # A successful upload redirects (302) to the document detail page.
        response = self.client.post(reverse('document_upload'), data=form_data)
        self.assertEqual(response.status_code, 302)
        d = Document.objects.get(title='GeoNode Map')
        self.assertEqual(d.doc_url, 'http://www.geonode.org/map.pdf')
        # Replacing the document should update the stored URL in place.
        form_data['doc_url'] = 'http://www.geonode.org/mapz.pdf'
        response = self.client.post(
            reverse(
                'document_replace',
                args=[
                    d.id]),
            data=form_data)
        self.assertEqual(response.status_code, 302)
        d = Document.objects.get(title='GeoNode Map')
        self.assertEqual(d.doc_url, 'http://www.geonode.org/mapz.pdf')
    def test_upload_document_form(self):
        """
        Tests DocumentCreateForm validation: required fields, permission
        JSON parsing, and the file-XOR-url constraint.
        """
        form_data = dict()
        form = DocumentCreateForm(data=form_data)
        self.assertFalse(form.is_valid())
        # title is required
        self.assertTrue('title' in form.errors)
        # permissions are required
        self.assertTrue('permissions' in form.errors)
        # since neither a doc_file nor a doc_url are included __all__ should be
        # in form.errors.
        self.assertTrue('__all__' in form.errors)
        form_data = {
            'title': 'GeoNode Map',
            'permissions': '{"anonymous":"document_readonly","authenticated":"resourcebase_readwrite","users":[]}',
            'doc_url': 'http://www.geonode.org/map.pdf'}
        form = DocumentCreateForm(data=form_data)
        self.assertTrue(form.is_valid())
        # The permissions JSON string should be parsed into a dict.
        self.assertTrue(isinstance(form.cleaned_data['permissions'], dict))
        # if permissions are not JSON serializable, the field should be in
        # form.errors.
        form_data['permissions'] = 'non-json string'
        self.assertTrue(
            'permissions' in DocumentCreateForm(
                data=form_data).errors)
        form_data = {
            'title': 'GeoNode Map',
            'permissions': '{"anonymous":"document_readonly","authenticated":"resourcebase_readwrite","users":[]}',
        }
        file_data = {
            'doc_file': SimpleUploadedFile(
                'test_img_file.gif',
                self.imgfile.read(),
                'image/gif')}
        form = DocumentCreateForm(form_data, file_data)
        self.assertTrue(form.is_valid())
        # The form should raise a validation error when a url and file is
        # present.
        form_data['doc_url'] = 'http://www.geonode.org/map.pdf'
        form = DocumentCreateForm(form_data, file_data)
        self.assertFalse(form.is_valid())
        self.assertTrue('__all__' in form.errors)
    def test_document_details(self):
        """/documents/1 -> Test accessing the detail view of a document"""
        d = Document.objects.get(pk=1)
        d.set_default_permissions()
        response = self.client.get(reverse('document_detail', args=(str(d.id),)))
        self.assertEquals(response.status_code, 200)
    def test_access_document_upload_form(self):
        """Test the form page is returned correctly via GET request /documents/upload"""
        log = self.client.login(username='bobby', password='bob')
        self.assertTrue(log)
        response = self.client.get(reverse('document_upload'))
        self.assertTrue('Upload Documents' in response.content)
    def test_document_isuploaded(self):
        """/documents/upload -> Test uploading a document"""
        f = SimpleUploadedFile(
            'test_img_file.gif',
            self.imgfile.read(),
            'image/gif')
        m = Map.objects.all()[0]
        self.client.login(username='admin', password='admin')
        response = self.client.post(
            reverse('document_upload'),
            data={
                'file': f,
                'title': 'uploaded_document',
                'q': m.id,
                'type': 'map',
                'permissions': '{"users":{"AnonymousUser": ["view_resourcebase"]}}'},
            follow=True)
        self.assertEquals(response.status_code, 200)
    # Permissions Tests
    def test_set_document_permissions(self):
        """Verify that the set_document_permissions view is behaving as expected
        """
        # Get a document to work with
        document = Document.objects.all()[0]
        # Set the Permissions
        document.set_permissions(self.perm_spec)
        # Test that the Permissions for anonympus user are set correctly
        self.assertFalse(
            self.anonymous_user.has_perm(
                'view_resourcebase',
                document.get_self_resource()))
        # Test that previous permissions for users other than ones specified in
        # the perm_spec (and the document owner) were removed
        current_perms = document.get_all_level_info()
        # Two entries expected: "admin" from perm_spec plus the document owner.
        self.assertEqual(len(current_perms['users'].keys()), 2)
        # Test that the User permissions specified in the perm_spec were
        # applied properly
        for username, perm in self.perm_spec['users'].items():
            user = get_user_model().objects.get(username=username)
            self.assertTrue(user.has_perm(perm, document.get_self_resource()))
    def test_ajax_document_permissions(self):
        """Verify that the ajax_document_permissions view is behaving as expected
        """
        # Setup some document names to work with
        f = SimpleUploadedFile(
            'test_img_file.gif',
            self.imgfile.read(),
            'image/gif')
        superuser = get_user_model().objects.get(pk=2)
        document = Document.objects.create(
            doc_file=f,
            owner=superuser,
            title='theimg')
        document.set_default_permissions()
        document_id = document.id
        # An id assumed not to exist among the fixture documents -- TODO confirm.
        invalid_document_id = 20
        # Test that an invalid document is handled for properly
        response = self.client.post(
            reverse(
                'resource_permissions', args=(
                    invalid_document_id,)), data=json.dumps(
                self.perm_spec), content_type="application/json")
        self.assertEquals(response.status_code, 404)
        # Test that GET returns permissions
        response = self.client.get(reverse('resource_permissions', args=(document_id,)))
        assert('permissions' in response.content)
        # Test that a user is required to have
        # documents.change_layer_permissions
        # First test un-authenticated
        response = self.client.post(
            reverse('resource_permissions', args=(document_id,)),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        self.assertEquals(response.status_code, 401)
        # Next Test with a user that does NOT have the proper perms
        logged_in = self.client.login(username='bobby', password='bob')
        self.assertEquals(logged_in, True)
        response = self.client.post(
            reverse('resource_permissions', args=(document_id,)),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        self.assertEquals(response.status_code, 401)
        # Login as a user with the proper permission and test the endpoint
        logged_in = self.client.login(username='admin', password='admin')
        self.assertEquals(logged_in, True)
        response = self.client.post(
            reverse('resource_permissions', args=(document_id,)),
            data=json.dumps(self.perm_spec),
            content_type="application/json")
        # Test that the method returns 200
        self.assertEquals(response.status_code, 200)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.