code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# functions for handling ABI checking of libraries
import Options, Utils, os, Logs, samba_utils, sys, Task, fnmatch, re, Build
from TaskGen import feature, before, after
# these type maps cope with platform specific names for common types
# please add new type mappings into the list below
abi_type_maps = {
    '_Bool' : 'bool',
    'struct __va_list_tag *' : 'va_list'
    }

def version_key(x):
    '''return a sortable key for a dotted version string like "1.2.3"

    A tuple of ints sorts identically on Python 2 and 3, unlike the old
    map() based key which is unorderable on Python 3.
    '''
    return tuple(int(i) for i in x.split("."))

def normalise_signature(sig):
    '''normalise a signature from gdb

    Strips the gdb value-history prefix ("$N = {...}"), canonicalises hex
    addresses (they differ between runs) and maps platform specific type
    names to a common spelling so that signatures can be compared across
    platforms.
    '''
    sig = sig.strip()
    # "$N = {TYPE}"                   ->  "TYPE"
    sig = re.sub(r'^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig)
    # "$N = {TYPE} 0xADDR <name>..."  ->  "TYPE"
    sig = re.sub(r'^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig)
    # "$N = 0xADDR <name>"            ->  "0xADDR"
    sig = re.sub(r'^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig)
    # addresses are not stable, replace them with a fixed token
    sig = re.sub(r'0x[0-9a-f]+', '0xXXXX', sig)
    # cope with gdb printing incomplete character sequences
    sig = re.sub(r'", <incomplete sequence (\\[a-z0-9]+)>', r'\1"', sig)

    for t, mapped in abi_type_maps.items():
        # we need to cope with non-word characters in mapped types
        m = t.replace('*', r'\*')
        # only anchor with \b where the pattern actually starts/ends on a
        # word character, otherwise \b would never match
        if m[-1].isalnum() or m[-1] == '_':
            m += r'\b'
        if m[0].isalnum() or m[0] == '_':
            m = r'\b' + m
        sig = re.sub(m, mapped, sig)
    return sig
def normalise_varargs(sig):
    '''strip any ", ..." varargs markers from a signature

    Older versions of gdb do not show the varargs marker, so remove it
    before comparing signatures from different gdb versions.
    '''
    # raw string: "\s" and "\." are regex escapes, not string escapes
    return re.sub(r',\s\.\.\.', '', sig)
def parse_sigs(sigs, abi_match):
    '''parse ABI signatures file

    :param sigs: the signatures file contents, one "symbol: signature" line
        per symbol
    :param abi_match: list of fnmatch patterns selecting which symbols to
        keep; a pattern prefixed with '!' excludes matching symbols
    :return: dict mapping symbol name -> normalised signature
    '''
    abi_match = samba_utils.TO_LIST(abi_match)
    ret = {}
    a = sigs.split('\n')
    for s in a:
        if s.find(':') == -1:
            # not a "symbol: signature" line, skip it
            continue
        sa = s.split(':')
        if abi_match:
            matched = False
            negative = False
            # first pattern (inclusion or exclusion) to match wins
            for p in abi_match:
                if p[0] == '!' and fnmatch.fnmatch(sa[0], p[1:]):
                    negative = True
                    break
                elif fnmatch.fnmatch(sa[0], p):
                    matched = True
                    break
            # skip the symbol only when it hit an exclusion pattern
            # without first hitting an inclusion pattern
            if (not matched) and negative:
                continue
        Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1])))
        ret[sa[0]] = normalise_signature(sa[1])
    return ret
def save_sigs(sig_file, parsed_sigs):
    '''write the parsed ABI signatures to sig_file, sorted by symbol name

    :return: the result of samba_utils.save_file (falsy on failure)
    '''
    lines = ['%s: %s\n' % (name, parsed_sigs[name])
             for name in sorted(parsed_sigs.keys())]
    return samba_utils.save_file(sig_file, ''.join(lines), create_dir=True)
def abi_check_task(self):
    '''check if the ABI has changed

    Runs the ABI_GEN script against the freshly linked library, parses the
    resulting signatures and compares them against the saved .sigs file.
    Raises a WafError if a symbol was removed, changed or added.
    '''
    abi_gen = self.ABI_GEN
    libpath = self.inputs[0].abspath(self.env)
    libname = os.path.basename(libpath)
    # extract the current signatures from the built library
    sigs = Utils.cmd_output([abi_gen, libpath])
    parsed_sigs = parse_sigs(sigs, self.ABI_MATCH)

    sig_file = self.ABI_FILE

    old_sigs = samba_utils.load_file(sig_file)
    if old_sigs is None or Options.options.ABI_UPDATE:
        # no reference file yet, or the user asked for --abi-update:
        # (re)generate the reference signatures instead of checking
        if not save_sigs(sig_file, parsed_sigs):
            raise Utils.WafError('Failed to save ABI file "%s"' % sig_file)
        Logs.warn('Generated ABI signatures %s' % sig_file)
        return

    parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH)

    # check all old sigs
    got_error = False
    for s in parsed_old_sigs:
        if not s in parsed_sigs:
            # a symbol disappeared from the library
            Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % (
                libname, s, parsed_old_sigs[s]))
            got_error = True
        elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]):
            # the symbol still exists but its signature changed
            Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % (
                libname, s, parsed_old_sigs[s], parsed_sigs[s]))
            got_error = True

    # newly added symbols require at least a minor version update
    for s in parsed_sigs:
        if not s in parsed_old_sigs:
            Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % (
                libname, s, parsed_sigs[s]))
            got_error = True

    if got_error:
        raise Utils.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname)
# register the abi_check task type; quiet so it does not clutter the
# normal build output
t = Task.task_type_from_func('abi_check', abi_check_task, color='BLUE', ext_in='.bin')
t.quiet = True
# allow "waf --abi-check" to force re-checking the ABI
if '--abi-check' in sys.argv:
    Task.always_run(t)
@after('apply_link')
@feature('abi_check')
def abi_check(self):
    '''check that ABI matches saved signatures

    Runs after the link step for task generators carrying the 'abi_check'
    feature and schedules an abi_check task for the linked library.
    '''
    env = self.bld.env
    if not env.ABI_CHECK or self.abi_directory is None:
        return

    # if the platform doesn't support -fvisibility=hidden then the ABI
    # checks become fairly meaningless
    if not env.HAVE_VISIBILITY_ATTR:
        return

    topsrc = self.bld.srcnode.abspath()
    abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh')

    # reference signature file, e.g. <abi_directory>/<name>-<vnum>.sigs
    abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.name, self.vnum)

    tsk = self.create_task('abi_check', self.link_task.outputs[0])
    tsk.ABI_FILE = abi_file
    tsk.ABI_MATCH = self.abi_match
    tsk.ABI_GEN = abi_gen
def abi_process_file(fname, version, symmap):
    '''process one ABI file, adding new symbols to the symmap

    :param fname: path of the .sigs file to read
    :param version: the library version this file describes
    :param symmap: dict mapping symbol name -> first version it appeared
        in; updated in place, existing entries are never overwritten
    '''
    for line in Utils.readf(fname).splitlines():
        # each line is "symbol: signature"; only the symbol is needed here
        symname = line.partition(":")[0]
        # record only the first (oldest) version each symbol appears in
        symmap.setdefault(symname, version)
def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match):
    """Write a vscript file for a library in --version-script format.

    :param f: File-like object to write to
    :param libname: Name of the library, uppercased
    :param current_version: Current version
    :param versions: Versions to consider
    :param symmap: Dictionary mapping symbols -> version
    :param abi_match: List of symbols considered to be public in the current
        version
    """
    # invert symmap: version -> list of symbols first seen in that version
    invmap = {}
    for s in symmap:
        invmap.setdefault(symmap[s], []).append(s)

    # sort versions numerically; the key is built inline so the comparison
    # works on both Python 2 and 3 (map() objects are unorderable on py3)
    versions = sorted(versions, key=lambda v: tuple(int(p) for p in v.split(".")))

    last_key = ""
    for k in versions:
        symver = "%s_%s" % (libname, k)
        if symver == current_version:
            break
        f.write("%s {\n" % symver)
        # only emit a global: section when this version introduced symbols
        if k in invmap:
            f.write("\tglobal:\n")
            for s in invmap.get(k, []):
                f.write("\t\t%s;\n" % s)
        f.write("}%s;\n\n" % last_key)
        last_key = " %s" % symver

    f.write("%s {\n" % current_version)
    # use list comprehensions, not filter(): filter() returns a lazy
    # iterator on Python 3, which would break the len() calls below
    local_abi = [x for x in abi_match if x[0] == '!']
    global_abi = [x for x in abi_match if x[0] != '!']
    f.write("\tglobal:\n")
    if len(global_abi) > 0:
        for x in global_abi:
            f.write("\t\t%s;\n" % x)
    else:
        f.write("\t\t*;\n")
    if abi_match != ["*"]:
        f.write("\tlocal:\n")
        for x in local_abi:
            f.write("\t\t%s;\n" % x[1:])
        if len(global_abi) > 0:
            f.write("\t\t*;\n")
    f.write("};\n")
def abi_build_vscript(task):
    '''build rule: generate a version script from the ABI .sigs inputs'''
    tgt = task.outputs[0].bldpath(task.env)

    symmap = {}
    versions = []
    # gather the version each input file describes, plus its symbols
    for node in task.inputs:
        fname = node.abspath(task.env)
        basename = os.path.basename(fname)
        # strip the "<LIBNAME>-" prefix and ".sigs" suffix -> version
        version = basename[len(task.env.LIBNAME) + 1:-len(".sigs")]
        versions.append(version)
        abi_process_file(fname, version, symmap)

    with open(tgt, mode='w') as out:
        abi_write_vscript(out, task.env.LIBNAME, task.env.VERSION, versions,
                          symmap, task.env.ABI_MATCH)
def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None):
    '''generate a vscript file for our public libraries

    :param bld: the build context
    :param libname: name of the library
    :param abi_directory: directory holding the historic .sigs files, or a
        false value if there are none
    :param version: the current library version string
    :param vscript: target filename for the generated version script
    :param abi_match: optional pattern list of public symbols; defaults to
        everything public ("*")
    '''
    if abi_directory:
        source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname))
        # sort the .sigs files numerically by the version embedded in the
        # filename, so symbol versions are emitted oldest-first
        def abi_file_key(path):
            return version_key(path[:-len(".sigs")].rsplit("-")[-1])
        source = sorted(source.split(), key=abi_file_key)
    else:
        source = ''
    libname = os.path.basename(libname)
    version = os.path.basename(version)
    # symbol version names cannot contain '-' or '+'
    libname = libname.replace("-", "_").replace("+","_").upper()
    version = version.replace("-", "_").replace("+","_").upper()
    t = bld.SAMBA_GENERATOR(vscript,
                            rule=abi_build_vscript,
                            source=source,
                            group='vscripts',
                            target=vscript)
    if abi_match is None:
        abi_match = ["*"]
    else:
        abi_match = samba_utils.TO_LIST(abi_match)
    t.env.ABI_MATCH = abi_match
    t.env.VERSION = version
    t.env.LIBNAME = libname
    # regenerate the vscript whenever any of these change
    t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH']
Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/arm,malidp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Arm Mali Display Processor (Mali-DP)
maintainers:
- Liviu Dudau <Liviu.Dudau@arm.com>
- Andre Przywara <andre.przywara@arm.com>
description:
The following bindings apply to a family of Display Processors sold as
licensable IP by ARM Ltd. The bindings describe the Mali DP500, DP550 and
DP650 processors that offer multiple composition layers, support for
rotation and scaling output.
properties:
compatible:
enum:
- arm,mali-dp500
- arm,mali-dp550
- arm,mali-dp650
reg:
maxItems: 1
interrupts:
items:
- description:
The interrupt used by the Display Engine (DE). Can be shared with
the interrupt for the Scaling Engine (SE), but it will have to be
listed individually.
- description:
The interrupt used by the Scaling Engine (SE). Can be shared with
the interrupt for the Display Engine (DE), but it will have to be
listed individually.
interrupt-names:
items:
- const: DE
- const: SE
clock-names:
items:
- const: pxlclk
- const: mclk
- const: aclk
- const: pclk
clocks:
items:
- description: the pixel clock feeding the output PLL of the processor
- description: the main processor clock
- description: the AXI interface clock
- description: the APB interface clock
memory-region:
maxItems: 1
description:
Phandle to a node describing memory to be used for the framebuffer.
If not present, the framebuffer may be located anywhere in memory.
arm,malidp-output-port-lines:
$ref: /schemas/types.yaml#/definitions/uint8-array
description:
Number of output lines/bits for each colour channel.
items:
- description: number of output lines for the red channel (R)
- description: number of output lines for the green channel (G)
- description: number of output lines for the blue channel (B)
arm,malidp-arqos-value:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Quality-of-Service value for the display engine FIFOs, to write
into the RQOS register of the DP500.
See the ARM Mali-DP500 TRM for details on the encoding.
If omitted, the RQOS register will not be changed.
port:
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
description:
Output endpoint of the controller, connecting the LCD panel signals.
additionalProperties: false
required:
- compatible
- reg
- interrupts
- interrupt-names
- clocks
- clock-names
- port
- arm,malidp-output-port-lines
examples:
- |
dp0: malidp@6f200000 {
compatible = "arm,mali-dp650";
reg = <0x6f200000 0x20000>;
memory-region = <&display_reserved>;
interrupts = <168>, <168>;
interrupt-names = "DE", "SE";
clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
clock-names = "pxlclk", "mclk", "aclk", "pclk";
arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
arm,malidp-arqos-value = <0xd000d000>;
port {
dp0_output: endpoint {
remote-endpoint = <&tda998x_2_input>;
};
};
};
...
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/display/arm,malidp.yaml
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
""" LDAP datastore. """
import importlib
import logging
import tldap.backend
from tldap import Q
from tldap.database import (
Database,
LdapObject,
LdapObjectClass,
changeset,
delete,
get_one,
preload,
rename,
save,
)
from tldap.exceptions import ObjectDoesNotExist
import karaage.common.trace as trace
from karaage.datastores import base
from karaage.datastores.ldap_schemas import OpenldapAccount, OpenldapGroup
from karaage.machines.models import Account
logger = logging.getLogger(__name__)
def _str_or_none(string):
    """ Return the string unchanged, or None when it is empty or None. """
    return None if string is None or string == "" else string
def _lookup(cls: "str | LdapObjectClass") -> "LdapObjectClass":
    """ Resolve a "module.Class" dotted string to the class object.

    A value that is already a class (or any non-string) is returned
    unchanged, so callers may pass either form — hence the widened type
    hint (the old ``cls: str`` hint was wrong).

    :raises AttributeError: if the named attribute does not exist in the
        imported module.
    """
    if isinstance(cls, str):
        module_name, _, name = cls.rpartition(".")
        module = importlib.import_module(module_name)
        try:
            cls = getattr(module, name)
        except AttributeError:
            # re-raise with the full dotted path for easier debugging;
            # the original AttributeError adds no extra information
            raise AttributeError("%s reference cannot be found" % cls) from None
    return cls
class DataStore(base.DataStore):
    """ LDAP Account and group datastore.

    Mirrors Karaage accounts and groups into an LDAP directory, using the
    account/group classes named in the configuration.
    """

    def __init__(self, config: dict) -> None:
        super(DataStore, self).__init__(config)
        using = config['LDAP']
        connection = tldap.backend.connections[using]
        self._database = Database(connection, config)
        # ACCOUNT/GROUP are "module.Class" strings resolved to classes
        self._account_class = _lookup(config['ACCOUNT'])
        self._group_class = _lookup(config['GROUP'])
        # which group supplies the account's gidNumber:
        # 'institute' or 'default_project'
        self._primary_group = config.get('PRIMARY_GROUP', 'institute')
        # fallback group when PRIMARY_GROUP is 'default_project' but the
        # account has no default project
        self._default_primary_group = config.get('DEFAULT_PRIMARY_GROUP', 'dummy')
        self._settings = config

    def _get_account(self, uid: str) -> LdapObject:
        # fetch exactly one account by uid; raises ObjectDoesNotExist
        return get_one(
            table=self._account_class,
            query=Q(uid=uid),
            database=self._database,
        )

    def _get_group(self, cn: str) -> LdapObject:
        # fetch exactly one group by cn; raises ObjectDoesNotExist
        return get_one(
            table=self._group_class,
            query=Q(cn=cn),
            database=self._database,
        )

    def save_account(self, account: Account) -> None:
        """ Account was saved; create or update the LDAP entry. """
        person = account.person
        # pick the LDAP group that provides the account's gidNumber
        if self._primary_group == 'institute':
            lgroup = self._get_group(person.institute.group.name)
        elif self._primary_group == 'default_project':
            if account.default_project is None:
                lgroup = self._get_group(self._default_primary_group)
            else:
                lgroup = self._get_group(account.default_project.group.name)
        else:
            raise RuntimeError("Unknown value of PRIMARY_GROUP.")
        if account.default_project is None:
            default_project = "none"
        else:
            default_project = account.default_project.pid
        try:
            # existing entry: start from an empty changeset...
            luser = self._get_account(account.username)
            changes = changeset(luser, {})
            new_user = False
        except ObjectDoesNotExist:
            # ...or create a new entry keyed by uid
            new_user = True
            luser = self._account_class()
            changes = changeset(luser, {
                'uid': account.username
            })
        changes = changes.merge({
            'gidNumber': lgroup['gidNumber'],
            'givenName': person.first_name,
            'sn': person.last_name,
            'telephoneNumber': _str_or_none(person.telephone),
            'mail': _str_or_none(person.email),
            'title': _str_or_none(person.title),
            'o': person.institute.name,
            'cn': person.full_name,
            'default_project': default_project,
            'loginShell': account.shell,
            'locked': account.is_locked()
        })
        save(changes, database=self._database)
        if new_user:
            # add all groups
            for group in account.person.groups.all():
                self.add_account_to_group(account, group)

    def delete_account(self, account):
        """ Account was deleted; drop its memberships then the entry. """
        try:
            luser = self._get_account(account.username)
            # remove the account from every group before deleting it
            groups = luser['groups'].load(database=self._database)
            for group in groups:
                changes = changeset(group, {})
                changes = group.remove_member(changes, luser)
                save(changes, database=self._database)
            delete(luser, database=self._database)
        except ObjectDoesNotExist:
            # it doesn't matter if it doesn't exist
            pass

    def set_account_password(self, account, raw_password):
        """ Account's password was changed. """
        luser = self._get_account(account.username)
        changes = changeset(luser, {
            'password': raw_password,
        })
        save(changes, database=self._database)

    def set_account_username(self, account, old_username, new_username):
        """ Account's username was changed; rename the LDAP entry. """
        luser = self._get_account(old_username)
        rename(luser, database=self._database, uid=new_username)

    def add_account_to_group(self, account, group):
        """ Add account to group. """
        lgroup: OpenldapGroup = self._get_group(group.name)
        person: OpenldapAccount = self._get_account(account.username)
        changes = changeset(lgroup, {})
        changes = lgroup.add_member(changes, person)
        save(changes, database=self._database)

    def remove_account_from_group(self, account, group):
        """ Remove account from group. """
        lgroup: OpenldapGroup = self._get_group(group.name)
        person: OpenldapAccount = self._get_account(account.username)
        changes = changeset(lgroup, {})
        changes = lgroup.remove_member(changes, person)
        save(changes, database=self._database)

    def get_account_details(self, account):
        """ Get the account details as a plain dict (password excluded).

        Returns an empty dict when the account does not exist in LDAP.
        """
        result = {}
        try:
            luser = self._get_account(account.username)
            luser = preload(luser, database=self._database)
        except ObjectDoesNotExist:
            return result
        # copy all non-empty attributes except the password hash
        for i, j in luser.items():
            if i != 'userPassword' and j is not None:
                result[i] = j
        return result

    def account_exists(self, username):
        """ Does the account exist? """
        try:
            self._get_account(username)
            return True
        except ObjectDoesNotExist:
            return False

    def save_group(self, group):
        """ Group was saved; create or update the LDAP entry. """
        # If group already exists, take over existing group rather than error.
        try:
            lgroup = self._get_group(group.name)
            changes = changeset(lgroup, {})
        except ObjectDoesNotExist:
            lgroup = self._group_class()
            changes = changeset(lgroup, {
                'cn': group.name,
            })
        changes = changes.merge({
            'description': group.description
        })
        save(changes, database=self._database)

    def delete_group(self, group):
        """ Group was deleted. """
        try:
            lgroup = self._get_group(group.name)
            delete(lgroup, database=self._database)
        except ObjectDoesNotExist:
            # it doesn't matter if it doesn't exist
            pass

    def set_group_name(self, group, old_name, new_name):
        """ Group was renamed; rename the LDAP entry. """
        lgroup = self._get_group(old_name)
        rename(lgroup, database=self._database, cn=new_name)

    def get_group_details(self, group):
        """ Get the group details as a plain dict.

        Returns an empty dict when the group does not exist in LDAP.
        """
        result = {}
        try:
            lgroup = self._get_group(group.name)
            lgroup = preload(lgroup, database=self._database)
        except ObjectDoesNotExist:
            return result
        for i, j in lgroup.items():
            if j is not None:
                result[i] = j
        return result


# log every DataStore method call for debugging
trace.attach(trace.trace(logger), DataStore)
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
navigation_title: "Normalize for Stream"
applies_to:
stack: preview 9.1
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/normalize-for-stream-processor.html
---
# Normalize-for-Stream processor [normalize-for-stream-processor]
Detects whether a document is OpenTelemetry-compliant and, if not,
normalizes it as described below. If used in combination with the OTel-related
mappings such as the ones defined in `logs-otel@template`, the resulting
document can be queried seamlessly by clients that expect either [ECS](https://www.elastic.co/guide/en/ecs/current/index.html) or OpenTelemetry-[Semantic-Conventions](https://github.com/open-telemetry/semantic-conventions) formats.
::::{note}
This processor is in tech preview and is not available in our serverless offering.
::::
## Detecting OpenTelemetry compliance
The processor detects OpenTelemetry compliance by checking the following fields:
* `resource` exists as a key and the value is a map
* `resource` either doesn't contain an `attributes` field, or contains an `attributes` field of type map
* `scope` is either missing or a map
* `attributes` is either missing or a map
* `body` is either missing or a map
* `body` either doesn't contain a `text` field, or contains a `text` field of type `String`
* `body` either doesn't contain a `structured` field, or contains a `structured` field that is not of type `String`
If all of these conditions are met, the document is considered OpenTelemetry-compliant and is not modified by the processor.
## Normalization
If the document is not OpenTelemetry-compliant, the processor normalizes it as follows:
* Specific ECS fields are renamed to have their corresponding OpenTelemetry Semantic Conventions attribute names. These include the following:
| ECS Field | Semantic Conventions Attribute |
|-------------|--------------------------------|
| `span.id` | `span_id` |
| `trace.id` | `trace_id` |
| `message` | `body.text` |
| `log.level` | `severity_text` |
The processor first looks for the nested form of the ECS field and if such does not exist, it looks for a top-level field with the dotted field name.
* Other specific ECS fields that describe resources and have corresponding counterparts in the OpenTelemetry Semantic Conventions are moved to the `resource.attributes` map. Fields that are considered resource attributes are such that conform to the following conditions:
* They are ECS fields that have corresponding counterparts (either with
the same name or with a different name) in OpenTelemetry Semantic Conventions.
* The corresponding OpenTelemetry attribute is defined in
[Semantic Conventions](https://github.com/open-telemetry/semantic-conventions/tree/main/model)
within a group that is defined as `type: entity`.
* All other fields, except for `@timestamp`, are moved to the `attributes` map.
* All non-array entries of the `attributes` and `resource.attributes` maps are flattened. Flattening means that nested objects are merged into their parent object, and the keys are concatenated with a dot. See examples below.
## Examples
If an OpenTelemetry-compliant document is detected, the processor does nothing. For example, the following document will stay unchanged:
```js
{
"resource": {
"attributes": {
"service.name": "my-service"
}
},
"scope": {
"name": "my-library",
"version": "1.0.0"
},
"attributes": {
"http.method": "GET"
},
"body": {
"text": "Hello, world!"
}
}
```
% NOTCONSOLE
If a non-OpenTelemetry-compliant document is detected, the processor normalizes it. For example, the following document:
```js
{
"@timestamp": "2023-10-01T12:00:00Z",
"service": {
"name": "my-service",
"version": "1.0.0",
"environment": "production",
"language": {
"name": "python",
"version": "3.8"
}
},
"log": {
"level": "INFO"
},
"message": "Hello, world!",
"http": {
"method": "GET",
"url": {
"path": "/api/v1/resource"
},
"headers": [
{
"name": "Authorization",
"value": "Bearer token"
},
{
"name": "User-Agent",
"value": "my-client/1.0"
}
]
},
"span" : {
"id": "1234567890abcdef"
},
"span.id": "abcdef1234567890",
"trace.id": "abcdef1234567890abcdef1234567890"
}
```
% NOTCONSOLE
will be normalized into the following form:
```js
{
"@timestamp": "2023-10-01T12:00:00Z",
"resource": {
"attributes": {
"service.name": "my-service",
"service.version": "1.0.0",
"service.environment": "production"
}
},
"attributes": {
"service.language.name": "python",
"service.language.version": "3.8",
"http.method": "GET",
"http.url.path": "/api/v1/resource",
"http.headers": [
{
"name": "Authorization",
"value": "Bearer token"
},
{
"name": "User-Agent",
"value": "my-client/1.0"
}
]
},
"severity_text": "INFO",
"body": {
"text": "Hello, world!"
},
"span_id": "1234567890abcdef",
"trace_id": "abcdef1234567890abcdef1234567890"
}
```
% NOTCONSOLE
## Structured `message` field
If the `message` field in the ingested document is structured as a JSON, the
processor will determine whether it is in ECS format or not, based on the
existence or absence of the `@timestamp` field. If the `@timestamp` field is
present, the `message` field will be considered to be in ECS format, and its
contents will be merged into the root of the document and then normalized as
described above. The `@timestamp` from the `message` field will override the
root `@timestamp` field in the resulting document.
If the `@timestamp` field is absent, the `message` field will be moved to
the `body.structured` field as is, without any further normalization.
For example, if the `message` field is an ECS-JSON, as follows:
```js
{
"@timestamp": "2023-10-01T12:00:00Z",
"message": "{\"@timestamp\":\"2023-10-01T12:01:00Z\",\"log.level\":\"INFO\",\"service.name\":\"my-service\",\"message\":\"The actual log message\",\"http\":{\"method\":\"GET\",\"url\":{\"path\":\"/api/v1/resource\"}}}"
}
```
% NOTCONSOLE
it will be normalized into the following form:
```js
{
"@timestamp": "2023-10-01T12:01:00Z",
"severity_text": "INFO",
"body": {
"text": "The actual log message"
},
"resource": {
"attributes": {
"service.name": "my-service"
}
},
"attributes": {
"http.method": "GET",
"http.url.path": "/api/v1/resource"
}
}
```
% NOTCONSOLE
However, if the `message` field is not recognized as ECS format, as follows:
```js
{
"@timestamp": "2023-10-01T12:00:00Z",
"log": {
"level": "INFO"
},
"service": {
"name": "my-service"
},
"tags": ["user-action", "api-call"],
"message": "{\"root_cause\":\"Network error\",\"http\":{\"method\":\"GET\",\"url\":{\"path\":\"/api/v1/resource\"}}}"
}
```
% NOTCONSOLE
it will be normalized into the following form:
```js
{
"@timestamp": "2023-10-01T12:00:00Z",
"severity_text": "INFO",
"resource": {
"attributes": {
"service.name": "my-service"
}
},
"attributes": {
"tags": ["user-action", "api-call"]
},
"body": {
"structured": {
"root_cause": "Network error",
"http": {
"method": "GET",
"url": {
"path": "/api/v1/resource"
}
}
}
}
}
```
% NOTCONSOLE
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/reference/enrich-processor/normalize-for-stream.md
|
from django.db import connection
from django.db.models import CharField
from django.db.models.functions import MD5
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class MD5Tests(TestCase):
    """Tests for the MD5() database function via annotate() and lookups."""

    @classmethod
    def setUpTestData(cls):
        # fixture rows cover ASCII, accented text, CJK, empty string and NULL
        Author.objects.bulk_create(
            [
                Author(alias="John Smith"),
                Author(alias="Jordan Élena"),
                Author(alias="皇帝"),
                Author(alias=""),
                Author(alias=None),
            ]
        )

    def test_basic(self):
        # MD5 of each alias, in primary-key (insertion) order
        authors = (
            Author.objects.annotate(
                md5_alias=MD5("alias"),
            )
            .values_list("md5_alias", flat=True)
            .order_by("pk")
        )
        self.assertSequenceEqual(
            authors,
            [
                "6117323d2cabbc17d44c2b44587f682c",
                "ca6d48f6772000141e66591aee49d56c",
                "bf2c13bc1154e3d2e7df848cbc8be73d",
                "d41d8cd98f00b204e9800998ecf8427e",
                (
                    # backends that treat "" as NULL hash the NULL row
                    # the same as the empty string
                    "d41d8cd98f00b204e9800998ecf8427e"
                    if connection.features.interprets_empty_strings_as_nulls
                    else None
                ),
            ],
        )

    def test_transform(self):
        # registering MD5 as a lookup allows filtering by hashed value
        with register_lookup(CharField, MD5):
            authors = Author.objects.filter(
                alias__md5="6117323d2cabbc17d44c2b44587f682c",
            ).values_list("alias", flat=True)
            self.assertSequenceEqual(authors, ["John Smith"])
|
python
|
github
|
https://github.com/django/django
|
tests/db_functions/text/test_md5.py
|
@import "mixins/banner";
@include bsBanner(Grid);
$include-column-box-sizing: true !default;
@import "functions";
@import "variables";
@import "variables-dark";
@import "maps";
@import "mixins/breakpoints";
@import "mixins/container";
@import "mixins/grid";
@import "mixins/utilities";
@import "vendor/rfs";
@import "containers";
@import "grid";
@import "utilities";
// Only use the utilities we need
// map-get-multiple keeps just the listed keys from the full $utilities map
// stylelint-disable-next-line scss/dollar-variable-default
$utilities: map-get-multiple(
  $utilities,
  (
    "display",
    "order",
    "flex",
    "flex-direction",
    "flex-grow",
    "flex-shrink",
    "flex-wrap",
    "justify-content",
    "align-items",
    "align-content",
    "align-self",
    "margin",
    "margin-x",
    "margin-y",
    "margin-top",
    "margin-end",
    "margin-bottom",
    "margin-start",
    "negative-margin",
    "negative-margin-x",
    "negative-margin-y",
    "negative-margin-top",
    "negative-margin-end",
    "negative-margin-bottom",
    "negative-margin-start",
    "padding",
    "padding-x",
    "padding-y",
    "padding-top",
    "padding-end",
    "padding-bottom",
    "padding-start",
  )
);

// generate the utility classes from the trimmed map
@import "utilities/api";
|
unknown
|
github
|
https://github.com/twbs/bootstrap
|
scss/bootstrap-grid.scss
|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.context.config;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.springframework.boot.context.config.LocationResourceLoader.ResourceType;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
import org.springframework.util.Assert;
/**
* {@link ConfigDataLocationResolver} for config tree locations.
*
* @author Madhura Bhave
* @author Phillip Webb
* @since 2.4.0
*/
public class ConfigTreeConfigDataLocationResolver implements ConfigDataLocationResolver<ConfigTreeConfigDataResource> {

	private static final String PREFIX = "configtree:";

	private final LocationResourceLoader resourceLoader;

	public ConfigTreeConfigDataLocationResolver(ResourceLoader resourceLoader) {
		this.resourceLoader = new LocationResourceLoader(resourceLoader);
	}

	@Override
	public boolean isResolvable(ConfigDataLocationResolverContext context, ConfigDataLocation location) {
		// only locations starting with "configtree:" are handled here
		return location.hasPrefix(PREFIX);
	}

	@Override
	public List<ConfigTreeConfigDataResource> resolve(ConfigDataLocationResolverContext context,
			ConfigDataLocation location) {
		try {
			return resolve(location.getNonPrefixedValue(PREFIX));
		}
		catch (IOException ex) {
			// surface I/O problems as "location not found" so the config
			// data machinery can apply its optional-location handling
			throw new ConfigDataLocationNotFoundException(location, ex);
		}
	}

	private List<ConfigTreeConfigDataResource> resolve(String location) throws IOException {
		// a config tree must refer to a directory, hence the trailing slash
		Assert.state(location.endsWith("/"),
				() -> String.format("Config tree location '%s' must end with '/'", location));
		if (!this.resourceLoader.isPattern(location)) {
			return Collections.singletonList(new ConfigTreeConfigDataResource(location));
		}
		// wildcard pattern: expand to one resource per matching directory
		Resource[] resources = this.resourceLoader.getResources(location, ResourceType.DIRECTORY);
		List<ConfigTreeConfigDataResource> resolved = new ArrayList<>(resources.length);
		for (Resource resource : resources) {
			resolved.add(new ConfigTreeConfigDataResource(resource.getFile().toPath()));
		}
		return resolved;
	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigTreeConfigDataLocationResolver.java
|
use std::path::PathBuf;
use super::split_extern_opt;
use crate::EarlyDiagCtxt;
use crate::config::UnstableOptions;
/// Verifies split_extern_opt handles the supported cases.
#[test]
fn test_split_extern_opt() {
    let early_dcx = EarlyDiagCtxt::new(<_>::default());
    let unstable_opts = &UnstableOptions::default();

    // options and path: "opts:name=path"
    let extern_opt =
        split_extern_opt(&early_dcx, unstable_opts, "priv,noprelude:foo=libbar.rlib").unwrap();
    assert_eq!(extern_opt.crate_name, "foo");
    assert_eq!(extern_opt.path, Some(PathBuf::from("libbar.rlib")));
    assert_eq!(extern_opt.options, Some("priv,noprelude".to_string()));

    // options without a path: "opts:name"
    let extern_opt = split_extern_opt(&early_dcx, unstable_opts, "priv,noprelude:foo").unwrap();
    assert_eq!(extern_opt.crate_name, "foo");
    assert_eq!(extern_opt.path, None);
    assert_eq!(extern_opt.options, Some("priv,noprelude".to_string()));

    // path without options: "name=path"
    let extern_opt = split_extern_opt(&early_dcx, unstable_opts, "foo=libbar.rlib").unwrap();
    assert_eq!(extern_opt.crate_name, "foo");
    assert_eq!(extern_opt.path, Some(PathBuf::from("libbar.rlib")));
    assert_eq!(extern_opt.options, None);

    // bare crate name
    let extern_opt = split_extern_opt(&early_dcx, unstable_opts, "foo").unwrap();
    assert_eq!(extern_opt.crate_name, "foo");
    assert_eq!(extern_opt.path, None);
    assert_eq!(extern_opt.options, None);
}
/// Tests some invalid cases for split_extern_opt.
#[test]
fn test_split_extern_opt_invalid() {
    let early_dcx = EarlyDiagCtxt::new(<_>::default());
    let unstable_opts = &UnstableOptions::default();

    // too many `:`s
    let result = split_extern_opt(&early_dcx, unstable_opts, "priv:noprelude:foo=libbar.rlib");
    assert!(result.is_err());
    // cancel the pending diagnostic so it is not emitted on drop
    let _ = result.map_err(|e| e.cancel());

    // can't nest externs without the unstable flag
    let result = split_extern_opt(&early_dcx, unstable_opts, "noprelude:foo::bar=libbar.rlib");
    assert!(result.is_err());
    let _ = result.map_err(|e| e.cancel());
}
/// Tests some cases for split_extern_opt with nested crates like `foo::bar`.
#[test]
fn test_split_extern_opt_nested() {
    let early_dcx = EarlyDiagCtxt::new(<_>::default());
    // nested crate names require the `namespaced_crates` unstable option
    let unstable_opts = &UnstableOptions { namespaced_crates: true, ..Default::default() };

    // options and path
    let extern_opt =
        split_extern_opt(&early_dcx, unstable_opts, "priv,noprelude:foo::bar=libbar.rlib").unwrap();
    assert_eq!(extern_opt.crate_name, "foo::bar");
    assert_eq!(extern_opt.path, Some(PathBuf::from("libbar.rlib")));
    assert_eq!(extern_opt.options, Some("priv,noprelude".to_string()));

    // options without a path
    let extern_opt =
        split_extern_opt(&early_dcx, unstable_opts, "priv,noprelude:foo::bar").unwrap();
    assert_eq!(extern_opt.crate_name, "foo::bar");
    assert_eq!(extern_opt.path, None);
    assert_eq!(extern_opt.options, Some("priv,noprelude".to_string()));

    // path without options
    let extern_opt = split_extern_opt(&early_dcx, unstable_opts, "foo::bar=libbar.rlib").unwrap();
    assert_eq!(extern_opt.crate_name, "foo::bar");
    assert_eq!(extern_opt.path, Some(PathBuf::from("libbar.rlib")));
    assert_eq!(extern_opt.options, None);

    // bare nested crate name
    let extern_opt = split_extern_opt(&early_dcx, unstable_opts, "foo::bar").unwrap();
    assert_eq!(extern_opt.crate_name, "foo::bar");
    assert_eq!(extern_opt.path, None);
    assert_eq!(extern_opt.options, None);
}
/// Tests some invalid cases for split_extern_opt with nested crates like `foo::bar`.
#[test]
fn test_split_extern_opt_nested_invalid() {
    let early_dcx = EarlyDiagCtxt::new(<_>::default());
    let unstable_opts = &UnstableOptions { namespaced_crates: true, ..Default::default() };

    // Crates can only be nested one level deep: `foo::bar::baz` is rejected
    // even with `namespaced_crates` enabled.
    match split_extern_opt(&early_dcx, unstable_opts, "priv,noprelude:foo::bar::baz=libbar.rlib") {
        Ok(_) => panic!("expected an error for a doubly-nested crate name"),
        // Cancel the diagnostic so it is not reported as an unemitted error.
        Err(e) => e.cancel(),
    }
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_session/src/config/externs/tests.rs
|
from kivy.adapters.dictadapter import DictAdapter
from kivy.adapters.models import SelectableDataItem
from kivy.uix.selectableview import SelectableView
from kivy.uix.listview import ListView, ListItemButton
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.factory import Factory
from fixtures import integers_dict
# [TODO] Will SelectableView be in the kivy/factory_registers.py,
# as a result of setup.py? ListItemButton? others?
Factory.register('SelectableView', cls=SelectableView)
Factory.register('ListItemButton', cls=ListItemButton)
# [TODO] SelectableView is subclassed here, yet, it is necessary to add the
# index property in the template. Same TODO in list_cascade_images.py.
Builder.load_string('''
[CustomListItem@SelectableView+BoxLayout]:
size_hint_y: ctx.size_hint_y
height: ctx.height
ListItemButton:
text: ctx.text
is_selected: ctx.is_selected
''')
class MainView(GridLayout):
    '''Implementation of a list view with a kv template used for the list
    item class.
    '''

    def __init__(self, **kwargs):
        kwargs['cols'] = 1
        super(MainView, self).__init__(**kwargs)

        # Convert one data record into the ctx attributes consumed by the
        # CustomListItem kv template.
        def args_converter(row_index, rec):
            return {'text': rec['text'],
                    'is_selected': rec['is_selected'],
                    'size_hint_y': None,
                    'height': 25}

        # Dict adapter keyed by the 100 integer strings '0'..'99', backed by
        # integers_dict from the fixtures, rendering each row through the
        # CustomListItem kv template.
        adapter = DictAdapter(sorted_keys=[str(i) for i in range(100)],
                              data=integers_dict,
                              args_converter=args_converter,
                              template='CustomListItem')

        self.add_widget(ListView(adapter=adapter))
if __name__ == '__main__':
    from kivy.base import runTouchApp
    # Run the demo standalone with an 800px-wide root widget.
    runTouchApp(MainView(width=800))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from commoncode.testcase import FileBasedTesting
from licensedcode import index
from licensedcode.match import get_texts
from licensedcode.models import Rule
from licensedcode.models import load_rules
from licensedcode import match_seq
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class TestMatchSeq(FileBasedTesting):
    """Tests for sequence-based license matching (the match_seq matcher).

    Fix applied: the first test asserted ``exp_qtext == qtext.split()``
    twice in a row; the accidental duplicate has been removed.
    """
    test_data_dir = TEST_DATA_DIR

    def test_match_template_with_few_tokens_around_gaps_is_wholly_seq_matched(self):
        # was failing when a gapped token (from a template) starts at a
        # beginning of an index doc. We may still skip that, but capture a
        # large match anyway.
        rule_text = u'''
Copyright {{some copyright}}
THIS IS FROM {{THE CODEHAUS}} AND CONTRIBUTORS
IN NO EVENT SHALL {{THE CODEHAUS}} OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE {{POSSIBILITY OF SUCH}} DAMAGE
'''
        rule = Rule(_text=rule_text, licenses=['test'],)
        idx = index.LicenseIndex([rule])

        querys = u'''
Copyright 2003 (C) James. All Rights Reserved.
THIS IS FROM THE CODEHAUS AND CONTRIBUTORS
IN NO EVENT SHALL THE CODEHAUS OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
        result = idx.match(query_string=querys)
        assert 1 == len(result)
        match = result[0]
        assert match_seq.MATCH_SEQ == match.matcher

        # Expected matched texts: [bracketed] tokens are unmatched/gap
        # tokens, <angled> tokens are matched-but-unknown positions.
        exp_qtext = u"""
Copyright [2003] [C] [James] [All] [Rights] [Reserved]
THIS IS FROM <THE> [CODEHAUS]
AND CONTRIBUTORS
IN NO EVENT SHALL <THE> [CODEHAUS] OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE [POSSIBILITY] <OF> [SUCH] DAMAGE
""".split()

        exp_itext = u"""
Copyright
THIS IS FROM
AND CONTRIBUTORS
IN NO EVENT SHALL OR ITS CONTRIBUTORS BE LIABLE
EVEN IF ADVISED OF THE DAMAGE
""".split()

        qtext, itext = get_texts(match, query_string=querys, idx=idx)
        # note: this assertion was accidentally duplicated in the original
        assert exp_qtext == qtext.split()
        assert exp_itext == itext.split()
        assert 99 <= match.coverage()

    def test_match_seq_are_correct_on_apache(self):
        rule_dir = self.get_test_loc('match_seq/rules')
        idx = index.LicenseIndex(load_rules(rule_dir))

        query_loc = self.get_test_loc('match_seq/query')
        matches = idx.match(location=query_loc)
        assert 1 == len(matches)
        match = matches[0]
        assert match_seq.MATCH_SEQ == match.matcher

        qtext, _itext = get_texts(match, location=query_loc, idx=idx)
        expected = u'''
The OpenSymphony Group All rights reserved Redistribution and use in source and
binary forms with or without modification are permitted provided that the following
conditions are met 1 Redistributions of source code must retain the above copyright
notice this list of conditions and the following disclaimer 2 Redistributions in
binary form must reproduce the above copyright notice this list of conditions and the
following disclaimer in the documentation and or other materials provided with the
distribution 3 The end user documentation included with the redistribution if any
must include the following acknowledgment <4> <This> <product> <includes> <software>
<developed> <by> <the> <OpenSymphony> <Group> <http> <www> <opensymphony> <com> <5>
Alternately this acknowledgment may appear in the software itself if and wherever
such third party acknowledgments normally appear The names OpenSymphony and The
OpenSymphony Group must not be used to endorse or promote products derived from this
software without prior written permission For written permission please contact
license opensymphony com Products derived from this software may not be called
OpenSymphony or [OsCore] nor may OpenSymphony or [OsCore] appear in their name
without prior written permission of the OpenSymphony Group THIS SOFTWARE IS PROVIDED
AS IS AND ANY EXPRESSED OR IMPLIED WARRANTIES INCLUDING BUT NOT LIMITED TO THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR ITS CONTRIBUTORS BE
LIABLE FOR ANY DIRECT INDIRECT INCIDENTAL SPECIAL EXEMPLARY OR CONSEQUENTIAL DAMAGES
INCLUDING BUT NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE
DATA OR PROFITS OR BUSINESS INTERRUPTION HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY WHETHER IN CONTRACT STRICT LIABILITY OR TORT INCLUDING NEGLIGENCE OR
OTHERWISE ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE
'''
        assert expected.split() == qtext.split()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <nginx.h>
static void ngx_show_version_info(void);
static ngx_int_t ngx_add_inherited_sockets(ngx_cycle_t *cycle);
static void ngx_cleanup_environment(void *data);
static void ngx_cleanup_environment_variable(void *data);
static ngx_int_t ngx_get_options(int argc, char *const *argv);
static ngx_int_t ngx_process_options(ngx_cycle_t *cycle);
static ngx_int_t ngx_save_argv(ngx_cycle_t *cycle, int argc, char *const *argv);
static void *ngx_core_module_create_conf(ngx_cycle_t *cycle);
static char *ngx_core_module_init_conf(ngx_cycle_t *cycle, void *conf);
static char *ngx_set_user(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
static char *ngx_set_env(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
static char *ngx_set_priority(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
static char *ngx_set_cpu_affinity(ngx_conf_t *cf, ngx_command_t *cmd,
void *conf);
static char *ngx_set_worker_processes(ngx_conf_t *cf, ngx_command_t *cmd,
void *conf);
static char *ngx_load_module(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
#if (NGX_HAVE_DLOPEN)
static void ngx_unload_module(void *data);
#endif
/* legal values for the "debug_points" directive */

static ngx_conf_enum_t  ngx_debug_points[] = {
    { ngx_string("stop"), NGX_DEBUG_POINTS_STOP },
    { ngx_string("abort"), NGX_DEBUG_POINTS_ABORT },
    { ngx_null_string, 0 }
};
/*
 * Directives of the core module.  Most store straight into an
 * ngx_core_conf_t field through a generic ngx_conf_set_* slot handler;
 * the remainder use the dedicated ngx_set_* handlers defined below.
 */

static ngx_command_t  ngx_core_commands[] = {

    { ngx_string("daemon"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_core_conf_t, daemon),
      NULL },

    { ngx_string("master_process"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_core_conf_t, master),
      NULL },

    { ngx_string("timer_resolution"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      0,
      offsetof(ngx_core_conf_t, timer_resolution),
      NULL },

    { ngx_string("pid"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_str_slot,
      0,
      offsetof(ngx_core_conf_t, pid),
      NULL },

    { ngx_string("lock_file"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_str_slot,
      0,
      offsetof(ngx_core_conf_t, lock_file),
      NULL },

    { ngx_string("worker_processes"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_set_worker_processes,
      0,
      0,
      NULL },

    { ngx_string("debug_points"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_enum_slot,
      0,
      offsetof(ngx_core_conf_t, debug_points),
      &ngx_debug_points },

    { ngx_string("user"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE12,
      ngx_set_user,
      0,
      0,
      NULL },

    { ngx_string("worker_priority"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_set_priority,
      0,
      0,
      NULL },

    { ngx_string("worker_cpu_affinity"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_1MORE,
      ngx_set_cpu_affinity,
      0,
      0,
      NULL },

    { ngx_string("worker_rlimit_nofile"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_num_slot,
      0,
      offsetof(ngx_core_conf_t, rlimit_nofile),
      NULL },

    { ngx_string("worker_rlimit_core"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_off_slot,
      0,
      offsetof(ngx_core_conf_t, rlimit_core),
      NULL },

    { ngx_string("worker_shutdown_timeout"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      0,
      offsetof(ngx_core_conf_t, shutdown_timeout),
      NULL },

    { ngx_string("working_directory"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_str_slot,
      0,
      offsetof(ngx_core_conf_t, working_directory),
      NULL },

    { ngx_string("env"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_set_env,
      0,
      0,
      NULL },

    { ngx_string("load_module"),
      NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
      ngx_load_module,
      0,
      0,
      NULL },

      ngx_null_command
};
/* core module context: only creates and initializes ngx_core_conf_t */

static ngx_core_module_t  ngx_core_module_ctx = {
    ngx_string("core"),
    ngx_core_module_create_conf,
    ngx_core_module_init_conf
};


ngx_module_t  ngx_core_module = {
    NGX_MODULE_V1,
    &ngx_core_module_ctx,                  /* module context */
    ngx_core_commands,                     /* module directives */
    NGX_CORE_MODULE,                       /* module type */
    NULL,                                  /* init master */
    NULL,                                  /* init module */
    NULL,                                  /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};
/* command line state, filled in by ngx_get_options() */

static ngx_uint_t   ngx_show_help;         /* -h / -? */
static ngx_uint_t   ngx_show_version;      /* -v / -V */
static ngx_uint_t   ngx_show_configure;    /* -V */
static u_char      *ngx_prefix;            /* -p prefix */
static u_char      *ngx_error_log;         /* -e filename */
static u_char      *ngx_conf_file;         /* -c filename */
static u_char      *ngx_conf_params;       /* -g directives */
static char        *ngx_signal;            /* -s signal */


/* the process environment as seen at startup, saved by ngx_save_argv() */
static char **ngx_os_environ;
/*
 * Entry point: parse options, initialize OS/time/log subsystems, build the
 * initial cycle from the configuration, then either act on -v/-V/-t/-T/-s
 * and exit, or daemonize (unless disabled) and run the single- or
 * master-process cycle.  Returns 1 on any initialization failure.
 */
int ngx_cdecl
main(int argc, char *const *argv)
{
    ngx_buf_t        *b;
    ngx_log_t        *log;
    ngx_uint_t        i;
    ngx_cycle_t      *cycle, init_cycle;
    ngx_conf_dump_t  *cd;
    ngx_core_conf_t  *ccf;

    ngx_debug_init();

    if (ngx_strerror_init() != NGX_OK) {
        return 1;
    }

    /* parse command line switches into the static ngx_* option variables */
    if (ngx_get_options(argc, argv) != NGX_OK) {
        return 1;
    }

    if (ngx_show_version) {
        ngx_show_version_info();

        /* plain -v/-V: print and exit; combined with -t, keep going so the
           configuration is also tested */
        if (!ngx_test_config) {
            return 0;
        }
    }

    /* TODO */ ngx_max_sockets = -1;

    ngx_time_init();

#if (NGX_PCRE)
    ngx_regex_init();
#endif

    ngx_pid = ngx_getpid();
    ngx_parent = ngx_getppid();

    log = ngx_log_init(ngx_prefix, ngx_error_log);
    if (log == NULL) {
        return 1;
    }

    /* STUB */
#if (NGX_OPENSSL)
    ngx_ssl_init(log);
#endif

    /*
     * init_cycle->log is required for signal handlers and
     * ngx_process_options()
     */

    ngx_memzero(&init_cycle, sizeof(ngx_cycle_t));
    init_cycle.log = log;
    ngx_cycle = &init_cycle;

    init_cycle.pool = ngx_create_pool(1024, log);
    if (init_cycle.pool == NULL) {
        return 1;
    }

    /* keep a heap copy of argv; see ngx_save_argv() */
    if (ngx_save_argv(&init_cycle, argc, argv) != NGX_OK) {
        return 1;
    }

    /* resolve -p/-c/-e/-g into the cycle's prefix/conf/log paths */
    if (ngx_process_options(&init_cycle) != NGX_OK) {
        return 1;
    }

    if (ngx_os_init(log) != NGX_OK) {
        return 1;
    }

    /*
     * ngx_crc32_table_init() requires ngx_cacheline_size set in ngx_os_init()
     */

    if (ngx_crc32_table_init() != NGX_OK) {
        return 1;
    }

    /*
     * ngx_slab_sizes_init() requires ngx_pagesize set in ngx_os_init()
     */

    ngx_slab_sizes_init();

    /* pick up listening sockets passed via the NGINX environment variable
       by an old binary during an in-place upgrade */
    if (ngx_add_inherited_sockets(&init_cycle) != NGX_OK) {
        return 1;
    }

    if (ngx_preinit_modules() != NGX_OK) {
        return 1;
    }

    cycle = ngx_init_cycle(&init_cycle);
    if (cycle == NULL) {
        if (ngx_test_config) {
            ngx_log_stderr(0, "configuration file %s test failed",
                           init_cycle.conf_file.data);
        }

        return 1;
    }

    if (ngx_test_config) {
        if (!ngx_quiet_mode) {
            ngx_log_stderr(0, "configuration file %s test is successful",
                           cycle->conf_file.data);
        }

        /* -T: also dump every parsed configuration file to stdout */
        if (ngx_dump_config) {
            cd = cycle->config_dump.elts;

            for (i = 0; i < cycle->config_dump.nelts; i++) {

                ngx_write_stdout("# configuration file ");
                (void) ngx_write_fd(ngx_stdout, cd[i].name.data,
                                    cd[i].name.len);
                ngx_write_stdout(":" NGX_LINEFEED);

                b = cd[i].buffer;

                (void) ngx_write_fd(ngx_stdout, b->pos, b->last - b->pos);
                ngx_write_stdout(NGX_LINEFEED);
            }
        }

        return 0;
    }

    /* -s signal: act only as a signaller to the running master process */
    if (ngx_signal) {
        return ngx_signal_process(cycle, ngx_signal);
    }

    ngx_os_status(cycle->log);

    ngx_cycle = cycle;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    if (ccf->master && ngx_process == NGX_PROCESS_SINGLE) {
        ngx_process = NGX_PROCESS_MASTER;
    }

#if !(NGX_WIN32)

    if (ngx_init_signals(cycle->log) != NGX_OK) {
        return 1;
    }

    /* daemonize unless "daemon off" or we were exec'ed by an old binary
       (which already runs detached) */
    if (!ngx_inherited && ccf->daemon) {
        if (ngx_daemon(cycle->log) != NGX_OK) {
            return 1;
        }

        ngx_daemonized = 1;
    }

    if (ngx_inherited) {
        ngx_daemonized = 1;
    }

#endif

    if (ngx_create_pidfile(&ccf->pid, cycle->log) != NGX_OK) {
        return 1;
    }

    if (ngx_log_redirect_stderr(cycle) != NGX_OK) {
        return 1;
    }

    /* the preliminary log from ngx_log_init() is no longer needed */
    if (log->file->fd != ngx_stderr) {
        if (ngx_close_file(log->file->fd) == NGX_FILE_ERROR) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          ngx_close_file_n " built-in log failed");
        }
    }

    ngx_use_stderr = 0;

    if (ngx_process == NGX_PROCESS_SINGLE) {
        ngx_single_process_cycle(cycle);

    } else {
        ngx_master_process_cycle(cycle);
    }

    return 0;
}
/*
 * Print the version banner to stderr and, depending on the -h / -V
 * switches, also the usage text and the build configuration
 * (compiler, OpenSSL version, configure arguments).
 */
static void
ngx_show_version_info(void)
{
    ngx_write_stderr("nginx version: " NGINX_VER_BUILD NGX_LINEFEED);

    if (ngx_show_help) {
        ngx_write_stderr(
            "Usage: nginx [-?hvVtTq] [-s signal] [-p prefix]" NGX_LINEFEED
            "             [-e filename] [-c filename] [-g directives]"
                          NGX_LINEFEED NGX_LINEFEED
            "Options:" NGX_LINEFEED
            "  -?,-h         : this help" NGX_LINEFEED
            "  -v            : show version and exit" NGX_LINEFEED
            "  -V            : show version and configure options then exit"
                               NGX_LINEFEED
            "  -t            : test configuration and exit" NGX_LINEFEED
            "  -T            : test configuration, dump it and exit"
                               NGX_LINEFEED
            "  -q            : suppress non-error messages "
                               "during configuration testing" NGX_LINEFEED
            "  -s signal     : send signal to a master process: "
                               "stop, quit, reopen, reload" NGX_LINEFEED
#ifdef NGX_PREFIX
            "  -p prefix     : set prefix path (default: " NGX_PREFIX ")"
                               NGX_LINEFEED
#else
            "  -p prefix     : set prefix path (default: NONE)" NGX_LINEFEED
#endif
            "  -e filename   : set error log file (default: "
#ifdef NGX_ERROR_LOG_STDERR
                               "stderr)" NGX_LINEFEED
#else
                               NGX_ERROR_LOG_PATH ")" NGX_LINEFEED
#endif
            "  -c filename   : set configuration file (default: " NGX_CONF_PATH
                               ")" NGX_LINEFEED
            "  -g directives : set global directives out of configuration "
                               "file" NGX_LINEFEED NGX_LINEFEED
        );
    }

    if (ngx_show_configure) {

#ifdef NGX_COMPILER
        ngx_write_stderr("built by " NGX_COMPILER NGX_LINEFEED);
#endif

#if (NGX_SSL)
        /* report a runtime/build OpenSSL mismatch if there is one */
        if (ngx_strcmp(ngx_ssl_version(), OPENSSL_VERSION_TEXT) == 0) {
            ngx_write_stderr("built with " OPENSSL_VERSION_TEXT NGX_LINEFEED);
        } else {
            ngx_write_stderr("built with " OPENSSL_VERSION_TEXT
                             " (running with ");
            ngx_write_stderr((char *) (uintptr_t) ngx_ssl_version());
            ngx_write_stderr(")" NGX_LINEFEED);
        }
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
        ngx_write_stderr("TLS SNI support enabled" NGX_LINEFEED);
#else
        ngx_write_stderr("TLS SNI support disabled" NGX_LINEFEED);
#endif
#endif

        ngx_write_stderr("configure arguments:" NGX_CONFIGURE NGX_LINEFEED);
    }
}
/*
 * During a binary upgrade the old master passes its listening socket
 * descriptors in the NGINX environment variable as a ':' or ';' separated
 * list of numbers.  Parse that list into cycle->listening and mark the
 * sockets (and the process) as inherited.
 */
static ngx_int_t
ngx_add_inherited_sockets(ngx_cycle_t *cycle)
{
    u_char           *p, *v, *inherited;
    ngx_int_t         s;
    ngx_listening_t  *ls;

    inherited = (u_char *) getenv(NGINX_VAR);

    if (inherited == NULL) {
        return NGX_OK;
    }

    ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
                  "using inherited sockets from \"%s\"", inherited);

    if (ngx_array_init(&cycle->listening, cycle->pool, 10,
                       sizeof(ngx_listening_t))
        != NGX_OK)
    {
        return NGX_ERROR;
    }

    /* v tracks the start of the current number, p scans ahead */
    for (p = inherited, v = p; *p; p++) {
        if (*p == ':' || *p == ';') {
            s = ngx_atoi(v, p - v);
            if (s == NGX_ERROR) {
                ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                              "invalid socket number \"%s\" in " NGINX_VAR
                              " environment variable, ignoring the rest"
                              " of the variable", v);
                break;
            }

            v = p + 1;

            ls = ngx_array_push(&cycle->listening);
            if (ls == NULL) {
                return NGX_ERROR;
            }

            ngx_memzero(ls, sizeof(ngx_listening_t));

            ls->fd = (ngx_socket_t) s;
            ls->inherited = 1;
        }
    }

    /* trailing garbage after the last separator */
    if (v != p) {
        ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                      "invalid socket number \"%s\" in " NGINX_VAR
                      " environment variable, ignoring", v);
    }

    ngx_inherited = 1;

    return ngx_set_inherited_sockets(cycle);
}
/*
 * Build the environment for worker processes from the "env" directives,
 * always keeping TZ.  Two modes:
 *
 *   last != NULL  - caller (ngx_exec_new_binary) wants *last extra slots in
 *                   the returned malloc'ed array and gets the count of
 *                   filled entries back in *last; no cleanup is registered.
 *   last == NULL  - the result is cached in ccf->environment, installed as
 *                   the process environ, and freed via pool cleanups.
 *
 * Returns NULL on allocation failure.
 */
char **
ngx_set_environment(ngx_cycle_t *cycle, ngx_uint_t *last)
{
    char                **p, **env, *str;
    size_t                len;
    ngx_str_t            *var;
    ngx_uint_t            i, n;
    ngx_core_conf_t      *ccf;
    ngx_pool_cleanup_t   *cln;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    if (last == NULL && ccf->environment) {
        return ccf->environment;
    }

    var = ccf->env.elts;

    /* make sure TZ is always passed through, adding it if not configured */
    for (i = 0; i < ccf->env.nelts; i++) {
        if (ngx_strcmp(var[i].data, "TZ") == 0
            || ngx_strncmp(var[i].data, "TZ=", 3) == 0)
        {
            goto tz_found;
        }
    }

    var = ngx_array_push(&ccf->env);
    if (var == NULL) {
        return NULL;
    }

    var->len = 2;
    var->data = (u_char *) "TZ";

    var = ccf->env.elts;

tz_found:

    /* first pass: count the entries that will actually be emitted */

    n = 0;

    for (i = 0; i < ccf->env.nelts; i++) {

        /* "env NAME=value" entries are emitted verbatim */
        if (var[i].data[var[i].len] == '=') {
            n++;
            continue;
        }

        /* bare "env NAME" entries are emitted only if present in environ */
        for (p = ngx_os_environ; *p; p++) {

            if (ngx_strncmp(*p, var[i].data, var[i].len) == 0
                && (*p)[var[i].len] == '=')
            {
                n++;
                break;
            }
        }
    }

    if (last) {
        env = ngx_alloc((*last + n + 1) * sizeof(char *), cycle->log);
        if (env == NULL) {
            return NULL;
        }

        *last = n;

    } else {
        cln = ngx_pool_cleanup_add(cycle->pool, 0);
        if (cln == NULL) {
            return NULL;
        }

        env = ngx_alloc((n + 1) * sizeof(char *), cycle->log);
        if (env == NULL) {
            return NULL;
        }

        cln->handler = ngx_cleanup_environment;
        cln->data = env;
    }

    /* second pass: fill the array */

    n = 0;

    for (i = 0; i < ccf->env.nelts; i++) {

        if (var[i].data[var[i].len] == '=') {

            if (last) {
                env[n++] = (char *) var[i].data;
                continue;
            }

            /* copy the string so it survives a configuration reload */
            cln = ngx_pool_cleanup_add(cycle->pool, 0);
            if (cln == NULL) {
                return NULL;
            }

            len = ngx_strlen(var[i].data) + 1;

            str = ngx_alloc(len, cycle->log);
            if (str == NULL) {
                return NULL;
            }

            ngx_memcpy(str, var[i].data, len);

            cln->handler = ngx_cleanup_environment_variable;
            cln->data = str;

            env[n++] = str;

            continue;
        }

        for (p = ngx_os_environ; *p; p++) {

            if (ngx_strncmp(*p, var[i].data, var[i].len) == 0
                && (*p)[var[i].len] == '=')
            {
                env[n++] = *p;
                break;
            }
        }
    }

    env[n] = NULL;

    if (last == NULL) {
        ccf->environment = env;
        environ = env;
    }

    return env;
}
/*
 * Pool cleanup handler for an environment array allocated by
 * ngx_set_environment().
 */
static void
ngx_cleanup_environment(void *data)
{
    char  **env = data;

    if (environ != env) {
        ngx_free(env);
        return;
    }

    /*
     * if the environment is still used, as it happens on exit,
     * the only option is to leak it
     */
}
/*
 * Pool cleanup handler for a single environment string allocated by
 * ngx_set_environment().
 */
static void
ngx_cleanup_environment_variable(void *data)
{
    char   *var = data;
    char  **e;

    /*
     * if an environment variable is still used, as it happens on exit,
     * the only option is to leak it
     */

    e = environ;

    while (*e) {
        if (*e == var) {
            return;
        }

        e++;
    }

    ngx_free(var);
}
/*
 * Execute a new nginx binary for an in-place upgrade: pass the listening
 * socket fds through the NGINX environment variable, temporarily rename
 * the pid file to its .oldbin name, and exec argv[0].  On exec failure
 * the pid file is renamed back.  Returns the new pid or NGX_INVALID_PID.
 */
ngx_pid_t
ngx_exec_new_binary(ngx_cycle_t *cycle, char *const *argv)
{
    char             **env, *var;
    u_char            *p;
    ngx_uint_t         i, n;
    ngx_pid_t          pid;
    ngx_exec_ctx_t     ctx;
    ngx_core_conf_t   *ccf;
    ngx_listening_t   *ls;

    ngx_memzero(&ctx, sizeof(ngx_exec_ctx_t));

    ctx.path = argv[0];
    ctx.name = "new binary process";
    ctx.argv = argv;

    /* reserve two extra env slots: NGINX_VAR and the SPARE padding below */
    n = 2;
    env = ngx_set_environment(cycle, &n);
    if (env == NULL) {
        return NGX_INVALID_PID;
    }

    var = ngx_alloc(sizeof(NGINX_VAR)
                    + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2,
                    cycle->log);
    if (var == NULL) {
        ngx_free(env);
        return NGX_INVALID_PID;
    }

    p = ngx_cpymem(var, NGINX_VAR "=", sizeof(NGINX_VAR));

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {
        if (ls[i].ignore) {
            continue;
        }
        p = ngx_sprintf(p, "%ud;", ls[i].fd);
    }

    *p = '\0';

    env[n++] = var;

#if (NGX_SETPROCTITLE_USES_ENV)

    /* allocate the spare 300 bytes for the new binary process title */

    env[n++] = "SPARE=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
               "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
               "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
               "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
               "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";

#endif

    env[n] = NULL;

#if (NGX_DEBUG)
    {
    char  **e;
    for (e = env; *e; e++) {
        ngx_log_debug1(NGX_LOG_DEBUG_CORE, cycle->log, 0, "env: %s", *e);
    }
    }
#endif

    ctx.envp = (char *const *) env;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    /* free the pid file name for the new binary */
    if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) == NGX_FILE_ERROR) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      ngx_rename_file_n " %s to %s failed "
                      "before executing new binary process \"%s\"",
                      ccf->pid.data, ccf->oldpid.data, argv[0]);

        ngx_free(env);
        ngx_free(var);

        return NGX_INVALID_PID;
    }

    pid = ngx_execute(cycle, &ctx);

    if (pid == NGX_INVALID_PID) {
        /* exec failed: restore the pid file name */
        if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data)
            == NGX_FILE_ERROR)
        {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          ngx_rename_file_n " %s back to %s failed after "
                          "an attempt to execute new binary process \"%s\"",
                          ccf->oldpid.data, ccf->pid.data, argv[0]);
        }
    }

    ngx_free(env);
    ngx_free(var);

    return pid;
}
/*
 * Parse the command line into the static ngx_show_*/ngx_prefix/... state.
 * Flags may be bundled ("-vt"); options taking an argument accept it
 * either attached ("-cfile") or as the next argv element ("-c file").
 * Returns NGX_ERROR on any unknown option or missing argument.
 */
static ngx_int_t
ngx_get_options(int argc, char *const *argv)
{
    u_char     *p;
    ngx_int_t   i;

    for (i = 1; i < argc; i++) {

        p = (u_char *) argv[i];

        if (*p++ != '-') {
            ngx_log_stderr(0, "invalid option: \"%s\"", argv[i]);
            return NGX_ERROR;
        }

        while (*p) {

            switch (*p++) {

            case '?':
            case 'h':
                ngx_show_version = 1;
                ngx_show_help = 1;
                break;

            case 'v':
                ngx_show_version = 1;
                break;

            case 'V':
                ngx_show_version = 1;
                ngx_show_configure = 1;
                break;

            case 't':
                ngx_test_config = 1;
                break;

            case 'T':
                ngx_test_config = 1;
                ngx_dump_config = 1;
                break;

            case 'q':
                ngx_quiet_mode = 1;
                break;

            case 'p':
                /* argument attached ("-pdir") or in the next argv slot */
                if (*p) {
                    ngx_prefix = p;
                    goto next;
                }

                if (argv[++i]) {
                    ngx_prefix = (u_char *) argv[i];
                    goto next;
                }

                ngx_log_stderr(0, "option \"-p\" requires directory name");
                return NGX_ERROR;

            case 'e':
                if (*p) {
                    ngx_error_log = p;

                } else if (argv[++i]) {
                    ngx_error_log = (u_char *) argv[i];

                } else {
                    ngx_log_stderr(0, "option \"-e\" requires file name");
                    return NGX_ERROR;
                }

                /* "-e stderr" maps to an empty name, meaning stderr */
                if (ngx_strcmp(ngx_error_log, "stderr") == 0) {
                    ngx_error_log = (u_char *) "";
                }

                goto next;

            case 'c':
                if (*p) {
                    ngx_conf_file = p;
                    goto next;
                }

                if (argv[++i]) {
                    ngx_conf_file = (u_char *) argv[i];
                    goto next;
                }

                ngx_log_stderr(0, "option \"-c\" requires file name");
                return NGX_ERROR;

            case 'g':
                if (*p) {
                    ngx_conf_params = p;
                    goto next;
                }

                if (argv[++i]) {
                    ngx_conf_params = (u_char *) argv[i];
                    goto next;
                }

                ngx_log_stderr(0, "option \"-g\" requires parameter");
                return NGX_ERROR;

            case 's':
                if (*p) {
                    ngx_signal = (char *) p;

                } else if (argv[++i]) {
                    ngx_signal = argv[i];

                } else {
                    ngx_log_stderr(0, "option \"-s\" requires parameter");
                    return NGX_ERROR;
                }

                if (ngx_strcmp(ngx_signal, "stop") == 0
                    || ngx_strcmp(ngx_signal, "quit") == 0
                    || ngx_strcmp(ngx_signal, "reopen") == 0
                    || ngx_strcmp(ngx_signal, "reload") == 0)
                {
                    ngx_process = NGX_PROCESS_SIGNALLER;
                    goto next;
                }

                ngx_log_stderr(0, "invalid option: \"-s %s\"", ngx_signal);
                return NGX_ERROR;

            default:
                ngx_log_stderr(0, "invalid option: \"%c\"", *(p - 1));
                return NGX_ERROR;
            }
        }

    next:

        continue;
    }

    return NGX_OK;
}
/*
 * Save argc/argv (and environ) in globals.  On most platforms argv is
 * deep-copied to the heap because ngx_setproctitle() may overwrite the
 * original argv memory; FreeBSD has setproctitle(3) and needs no copy.
 */
static ngx_int_t
ngx_save_argv(ngx_cycle_t *cycle, int argc, char *const *argv)
{
#if (NGX_FREEBSD)

    ngx_os_argv = (char **) argv;
    ngx_argc = argc;
    ngx_argv = (char **) argv;

#else
    size_t     len;
    ngx_int_t  i;

    ngx_os_argv = (char **) argv;
    ngx_argc = argc;

    ngx_argv = ngx_alloc((argc + 1) * sizeof(char *), cycle->log);
    if (ngx_argv == NULL) {
        return NGX_ERROR;
    }

    for (i = 0; i < argc; i++) {
        len = ngx_strlen(argv[i]) + 1;

        ngx_argv[i] = ngx_alloc(len, cycle->log);
        if (ngx_argv[i] == NULL) {
            return NGX_ERROR;
        }

        (void) ngx_cpystrn((u_char *) ngx_argv[i], (u_char *) argv[i], len);
    }

    ngx_argv[i] = NULL;

#endif

    ngx_os_environ = environ;

    return NGX_OK;
}
/*
 * Turn the parsed -p/-c/-e/-g options into the cycle's prefix,
 * conf_prefix, conf_file, error_log and conf_param fields, applying the
 * compiled-in defaults when an option was not given.
 */
static ngx_int_t
ngx_process_options(ngx_cycle_t *cycle)
{
    u_char  *p;
    size_t   len;

    if (ngx_prefix) {
        len = ngx_strlen(ngx_prefix);
        p = ngx_prefix;

        /* ensure the prefix ends with a path separator */
        if (len && !ngx_path_separator(p[len - 1])) {
            p = ngx_pnalloc(cycle->pool, len + 1);
            if (p == NULL) {
                return NGX_ERROR;
            }

            ngx_memcpy(p, ngx_prefix, len);
            p[len++] = '/';
        }

        cycle->conf_prefix.len = len;
        cycle->conf_prefix.data = p;
        cycle->prefix.len = len;
        cycle->prefix.data = p;

    } else {

#ifndef NGX_PREFIX

        /* no compiled-in prefix: fall back to the current directory */
        p = ngx_pnalloc(cycle->pool, NGX_MAX_PATH);
        if (p == NULL) {
            return NGX_ERROR;
        }

        if (ngx_getcwd(p, NGX_MAX_PATH) == 0) {
            ngx_log_stderr(ngx_errno, "[emerg]: " ngx_getcwd_n " failed");
            return NGX_ERROR;
        }

        len = ngx_strlen(p);

        p[len++] = '/';

        cycle->conf_prefix.len = len;
        cycle->conf_prefix.data = p;
        cycle->prefix.len = len;
        cycle->prefix.data = p;

#else

#ifdef NGX_CONF_PREFIX
        ngx_str_set(&cycle->conf_prefix, NGX_CONF_PREFIX);
#else
        ngx_str_set(&cycle->conf_prefix, NGX_PREFIX);
#endif
        ngx_str_set(&cycle->prefix, NGX_PREFIX);

#endif
    }

    if (ngx_conf_file) {
        cycle->conf_file.len = ngx_strlen(ngx_conf_file);
        cycle->conf_file.data = ngx_conf_file;

    } else {
        ngx_str_set(&cycle->conf_file, NGX_CONF_PATH);
    }

    if (ngx_conf_full_name(cycle, &cycle->conf_file, 0) != NGX_OK) {
        return NGX_ERROR;
    }

    /* conf_prefix is the directory part of the (absolute) conf_file path */
    for (p = cycle->conf_file.data + cycle->conf_file.len - 1;
         p > cycle->conf_file.data;
         p--)
    {
        if (ngx_path_separator(*p)) {
            cycle->conf_prefix.len = p - cycle->conf_file.data + 1;
            cycle->conf_prefix.data = cycle->conf_file.data;
            break;
        }
    }

    if (ngx_error_log) {
        cycle->error_log.len = ngx_strlen(ngx_error_log);
        cycle->error_log.data = ngx_error_log;

    } else {
        ngx_str_set(&cycle->error_log, NGX_ERROR_LOG_PATH);
    }

    if (ngx_conf_params) {
        cycle->conf_param.len = ngx_strlen(ngx_conf_params);
        cycle->conf_param.data = ngx_conf_params;
    }

    /* -t/-T: be more verbose on the preliminary log */
    if (ngx_test_config) {
        cycle->log->log_level = NGX_LOG_INFO;
    }

    return NGX_OK;
}
/*
 * Allocate ngx_core_conf_t and preset every field to its "unset" marker
 * so ngx_core_module_init_conf() can tell configured from default values.
 * Returns NULL on allocation failure.
 */
static void *
ngx_core_module_create_conf(ngx_cycle_t *cycle)
{
    ngx_core_conf_t  *ccf;

    ccf = ngx_pcalloc(cycle->pool, sizeof(ngx_core_conf_t));
    if (ccf == NULL) {
        return NULL;
    }

    /*
     * set by ngx_pcalloc()
     *
     *     ccf->pid = NULL;
     *     ccf->oldpid = NULL;
     *     ccf->priority = 0;
     *     ccf->cpu_affinity_auto = 0;
     *     ccf->cpu_affinity_n = 0;
     *     ccf->cpu_affinity = NULL;
     */

    ccf->daemon = NGX_CONF_UNSET;
    ccf->master = NGX_CONF_UNSET;
    ccf->timer_resolution = NGX_CONF_UNSET_MSEC;
    ccf->shutdown_timeout = NGX_CONF_UNSET_MSEC;

    ccf->worker_processes = NGX_CONF_UNSET;
    ccf->debug_points = NGX_CONF_UNSET;

    ccf->rlimit_nofile = NGX_CONF_UNSET;
    ccf->rlimit_core = NGX_CONF_UNSET;

    ccf->user = (ngx_uid_t) NGX_CONF_UNSET_UINT;
    ccf->group = (ngx_gid_t) NGX_CONF_UNSET_UINT;

    if (ngx_array_init(&ccf->env, cycle->pool, 1, sizeof(ngx_str_t))
        != NGX_OK)
    {
        return NULL;
    }

    return ccf;
}
/*
 * Fill in defaults for any core directive left unset, derive the .oldbin
 * pid file name, resolve the default user/group when running as root, and
 * set up the accept-mutex lock file name (which cannot change on reload).
 */
static char *
ngx_core_module_init_conf(ngx_cycle_t *cycle, void *conf)
{
    ngx_core_conf_t  *ccf = conf;

    ngx_conf_init_value(ccf->daemon, 1);
    ngx_conf_init_value(ccf->master, 1);
    ngx_conf_init_msec_value(ccf->timer_resolution, 0);
    ngx_conf_init_msec_value(ccf->shutdown_timeout, 0);

    ngx_conf_init_value(ccf->worker_processes, 1);
    ngx_conf_init_value(ccf->debug_points, 0);

#if (NGX_HAVE_CPU_AFFINITY)

    if (!ccf->cpu_affinity_auto
        && ccf->cpu_affinity_n
        && ccf->cpu_affinity_n != 1
        && ccf->cpu_affinity_n != (ngx_uint_t) ccf->worker_processes)
    {
        ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                      "the number of \"worker_processes\" is not equal to "
                      "the number of \"worker_cpu_affinity\" masks, "
                      "using last mask for remaining worker processes");
    }

#endif

    if (ccf->pid.len == 0) {
        ngx_str_set(&ccf->pid, NGX_PID_PATH);
    }

    if (ngx_conf_full_name(cycle, &ccf->pid, 0) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    /* oldpid = pid path + NGX_OLDPID_EXT, used during binary upgrade */
    ccf->oldpid.len = ccf->pid.len + sizeof(NGX_OLDPID_EXT);

    ccf->oldpid.data = ngx_pnalloc(cycle->pool, ccf->oldpid.len);
    if (ccf->oldpid.data == NULL) {
        return NGX_CONF_ERROR;
    }

    ngx_memcpy(ngx_cpymem(ccf->oldpid.data, ccf->pid.data, ccf->pid.len),
               NGX_OLDPID_EXT, sizeof(NGX_OLDPID_EXT));

#if !(NGX_WIN32)

    /* default to the built-in NGX_USER/NGX_GROUP when running as root */
    if (ccf->user == (uid_t) NGX_CONF_UNSET_UINT && geteuid() == 0) {
        struct group   *grp;
        struct passwd  *pwd;

        ngx_set_errno(0);
        pwd = getpwnam(NGX_USER);
        if (pwd == NULL) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
                          "getpwnam(\"" NGX_USER "\") failed");
            return NGX_CONF_ERROR;
        }

        ccf->username = NGX_USER;
        ccf->user = pwd->pw_uid;

        ngx_set_errno(0);
        grp = getgrnam(NGX_GROUP);
        if (grp == NULL) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
                          "getgrnam(\"" NGX_GROUP "\") failed");
            return NGX_CONF_ERROR;
        }

        ccf->group = grp->gr_gid;
    }

    if (ccf->lock_file.len == 0) {
        ngx_str_set(&ccf->lock_file, NGX_LOCK_PATH);
    }

    if (ngx_conf_full_name(cycle, &ccf->lock_file, 0) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    {
    ngx_str_t  lock_file;

    lock_file = cycle->old_cycle->lock_file;

    if (lock_file.len) {
        /* a previous cycle exists: the lock file name must stay the same */
        lock_file.len--;

        if (ccf->lock_file.len != lock_file.len
            || ngx_strncmp(ccf->lock_file.data, lock_file.data, lock_file.len)
               != 0)
        {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                          "\"lock_file\" could not be changed, ignored");
        }

        cycle->lock_file.len = lock_file.len + 1;
        lock_file.len += sizeof(".accept");

        cycle->lock_file.data = ngx_pstrdup(cycle->pool, &lock_file);
        if (cycle->lock_file.data == NULL) {
            return NGX_CONF_ERROR;
        }

    } else {
        cycle->lock_file.len = ccf->lock_file.len + 1;
        cycle->lock_file.data = ngx_pnalloc(cycle->pool,
                                      ccf->lock_file.len + sizeof(".accept"));
        if (cycle->lock_file.data == NULL) {
            return NGX_CONF_ERROR;
        }

        ngx_memcpy(ngx_cpymem(cycle->lock_file.data, ccf->lock_file.data,
                              ccf->lock_file.len),
                   ".accept", sizeof(".accept"));
    }
    }

#endif

    return NGX_CONF_OK;
}
/*
 * Handler for the "user" directive: resolves the configured user name
 * (and optional group name) to numeric uid/gid via getpwnam()/getgrnam()
 * and stores them in the core conf.  On win32 the directive is accepted
 * but ignored with a warning.
 */
static char *
ngx_set_user(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
#if (NGX_WIN32)

    ngx_conf_log_error(NGX_LOG_WARN, cf, 0,
                       "\"user\" is not supported, ignored");

    return NGX_CONF_OK;

#else

    ngx_core_conf_t  *ccf = conf;

    char             *group;
    struct passwd    *pwd;
    struct group     *grp;
    ngx_str_t        *value;

    /* reject a second "user" directive */
    if (ccf->user != (uid_t) NGX_CONF_UNSET_UINT) {
        return "is duplicate";
    }

    /* only root can actually switch credentials; warn and ignore otherwise */
    if (geteuid() != 0) {
        ngx_conf_log_error(NGX_LOG_WARN, cf, 0,
                           "the \"user\" directive makes sense only "
                           "if the master process runs "
                           "with super-user privileges, ignored");
        return NGX_CONF_OK;
    }

    value = cf->args->elts;

    ccf->username = (char *) value[1].data;

    /* clear errno so a NULL return can be told apart from a lookup error */
    ngx_set_errno(0);
    pwd = getpwnam((const char *) value[1].data);
    if (pwd == NULL) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, ngx_errno,
                           "getpwnam(\"%s\") failed", value[1].data);
        return NGX_CONF_ERROR;
    }

    ccf->user = pwd->pw_uid;

    /* if no group argument is given, the user name doubles as group name */
    group = (char *) ((cf->args->nelts == 2) ? value[1].data : value[2].data);

    ngx_set_errno(0);
    grp = getgrnam(group);
    if (grp == NULL) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, ngx_errno,
                           "getgrnam(\"%s\") failed", group);
        return NGX_CONF_ERROR;
    }

    ccf->group = grp->gr_gid;

    return NGX_CONF_OK;

#endif
}
/*
 * Handler for the "env" directive: appends the argument to ccf->env.
 * For "env NAME=VALUE" the stored length is truncated to the NAME part
 * only, while var->data still points at the full "NAME=VALUE" string.
 */
static char *
ngx_set_env(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_core_conf_t  *ccf = conf;

    ngx_str_t   *value, *var;
    ngx_uint_t   i;

    var = ngx_array_push(&ccf->env);
    if (var == NULL) {
        return NGX_CONF_ERROR;
    }

    value = cf->args->elts;
    *var = value[1];

    /* look for '=': its position becomes the stored (name-only) length */
    for (i = 0; i < value[1].len; i++) {

        if (value[1].data[i] == '=') {

            var->len = i;

            return NGX_CONF_OK;
        }
    }

    return NGX_CONF_OK;
}
/*
 * Handler for the "worker_priority" directive: parses an optional
 * leading '+' or '-' sign followed by a number into ccf->priority.
 */
static char *
ngx_set_priority(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_core_conf_t  *ccf = conf;

    ngx_str_t        *value;
    ngx_uint_t        n, minus;

    if (ccf->priority != 0) {
        return "is duplicate";
    }

    value = cf->args->elts;

    /* n skips a sign character if present; minus records a leading '-' */
    if (value[1].data[0] == '-') {
        n = 1;
        minus = 1;

    } else if (value[1].data[0] == '+') {
        n = 1;
        minus = 0;

    } else {
        n = 0;
        minus = 0;
    }

    ccf->priority = ngx_atoi(&value[1].data[n], value[1].len - n);
    if (ccf->priority == NGX_ERROR) {
        return "invalid number";
    }

    if (minus) {
        ccf->priority = -ccf->priority;
    }

    return NGX_CONF_OK;
}
/*
 * Handler for the "worker_cpu_affinity" directive.  Accepts either
 * "auto" (optionally followed by one mask restricting the eligible
 * CPUs) or one explicit bitmask per worker.  Each mask is a string of
 * '0'/'1' characters (spaces are ignored) scanned right to left, so
 * the last character corresponds to CPU 0.
 */
static char *
ngx_set_cpu_affinity(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
#if (NGX_HAVE_CPU_AFFINITY)
    ngx_core_conf_t  *ccf = conf;

    u_char            ch, *p;
    ngx_str_t        *value;
    ngx_uint_t        i, n;
    ngx_cpuset_t     *mask;

    if (ccf->cpu_affinity) {
        return "is duplicate";
    }

    /* one cpuset per directive argument */
    mask = ngx_palloc(cf->pool, (cf->args->nelts - 1) * sizeof(ngx_cpuset_t));
    if (mask == NULL) {
        return NGX_CONF_ERROR;
    }

    ccf->cpu_affinity_n = cf->args->nelts - 1;
    ccf->cpu_affinity = mask;

    value = cf->args->elts;

    if (ngx_strcmp(value[1].data, "auto") == 0) {

        /* "auto" allows at most one extra mask argument */
        if (cf->args->nelts > 3) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "invalid number of arguments in "
                               "\"worker_cpu_affinity\" directive");
            return NGX_CONF_ERROR;
        }

        ccf->cpu_affinity_auto = 1;

        /* mask[0] defaults to all CPUs known to be online */
        CPU_ZERO(&mask[0]);
        for (i = 0; i < (ngx_uint_t) ngx_min(ngx_ncpu, CPU_SETSIZE); i++) {
            CPU_SET(i, &mask[0]);
        }

        n = 2;

    } else {
        n = 1;
    }

    for ( /* void */ ; n < cf->args->nelts; n++) {

        if (value[n].len > CPU_SETSIZE) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "\"worker_cpu_affinity\" supports up to %d CPUs only",
                               CPU_SETSIZE);
            return NGX_CONF_ERROR;
        }

        i = 0;
        CPU_ZERO(&mask[n - 1]);

        /* scan right to left: i counts mask characters from the end */
        for (p = value[n].data + value[n].len - 1;
             p >= value[n].data;
             p--)
        {
            ch = *p;

            if (ch == ' ') {
                continue;
            }

            i++;

            if (ch == '0') {
                continue;
            }

            if (ch == '1') {
                CPU_SET(i - 1, &mask[n - 1]);
                continue;
            }

            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                          "invalid character \"%c\" in \"worker_cpu_affinity\"",
                          ch);
            return NGX_CONF_ERROR;
        }
    }

#else

    ngx_conf_log_error(NGX_LOG_WARN, cf, 0,
                       "\"worker_cpu_affinity\" is not supported "
                       "on this platform, ignored");
#endif

    return NGX_CONF_OK;
}
/*
 * Return the cpuset for worker number n, or NULL if no affinity is
 * configured.  In "auto" mode a single-CPU set is built by picking the
 * n-th set bit (cycling through the configured mask); note the result
 * lives in a static buffer and is overwritten by the next call.
 * With explicit masks, workers beyond the configured count share the
 * last mask.
 */
ngx_cpuset_t *
ngx_get_cpu_affinity(ngx_uint_t n)
{
#if (NGX_HAVE_CPU_AFFINITY)
    ngx_uint_t        i, j;
    ngx_cpuset_t     *mask;
    ngx_core_conf_t  *ccf;

    static ngx_cpuset_t  result;

    ccf = (ngx_core_conf_t *) ngx_get_conf(ngx_cycle->conf_ctx,
                                           ngx_core_module);

    if (ccf->cpu_affinity == NULL) {
        return NULL;
    }

    if (ccf->cpu_affinity_auto) {
        mask = &ccf->cpu_affinity[ccf->cpu_affinity_n - 1];

        /* locate the n-th set bit, wrapping around modulo CPU_SETSIZE */
        for (i = 0, j = n; /* void */ ; i++) {

            if (CPU_ISSET(i % CPU_SETSIZE, mask) && j-- == 0) {
                break;
            }

            /* a full scan with j untouched means no bit is set at all */
            if (i == CPU_SETSIZE && j == n) {
                /* empty mask */
                return NULL;
            }

            /* void */
        }

        CPU_ZERO(&result);
        CPU_SET(i % CPU_SETSIZE, &result);

        return &result;
    }

    if (ccf->cpu_affinity_n > n) {
        return &ccf->cpu_affinity[n];
    }

    /* fewer masks than workers: reuse the last configured mask */
    return &ccf->cpu_affinity[ccf->cpu_affinity_n - 1];

#else

    return NULL;

#endif
}
/*
 * Handler for the "worker_processes" directive: either the literal
 * "auto" (one worker per detected CPU) or an explicit number.
 */
static char *
ngx_set_worker_processes(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_str_t        *value;
    ngx_core_conf_t  *ccf;

    ccf = (ngx_core_conf_t *) conf;

    if (ccf->worker_processes != NGX_CONF_UNSET) {
        return "is duplicate";
    }

    value = cf->args->elts;

    if (ngx_strcmp(value[1].data, "auto") == 0) {
        ccf->worker_processes = ngx_ncpu;
        return NGX_CONF_OK;
    }

    ccf->worker_processes = ngx_atoi(value[1].data, value[1].len);

    if (ccf->worker_processes == NGX_ERROR) {
        return "invalid value";
    }

    return NGX_CONF_OK;
}
/*
 * Handler for the "load_module" directive: dlopen()s a dynamic module
 * binary, looks up its ngx_modules / ngx_module_names / ngx_module_order
 * symbols, and registers every module the object contains.  A pool
 * cleanup dlclose()s the handle when the cycle pool is destroyed.
 */
static char *
ngx_load_module(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
#if (NGX_HAVE_DLOPEN)
    void                *handle;
    char               **names, **order;
    ngx_str_t           *value, file;
    ngx_uint_t           i;
    ngx_module_t        *module, **modules;
    ngx_pool_cleanup_t  *cln;

    /* modules cannot be added once the module list has been used */
    if (cf->cycle->modules_used) {
        return "is specified too late";
    }

    value = cf->args->elts;

    file = value[1];

    if (ngx_conf_full_name(cf->cycle, &file, 0) != NGX_OK) {
        return NGX_CONF_ERROR;
    }

    /*
     * the cleanup slot is reserved before dlopen(); its handler is only
     * set once the open succeeds, so a failed open leaks nothing
     */
    cln = ngx_pool_cleanup_add(cf->cycle->pool, 0);
    if (cln == NULL) {
        return NGX_CONF_ERROR;
    }

    handle = ngx_dlopen(file.data);
    if (handle == NULL) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           ngx_dlopen_n " \"%s\" failed (%s)",
                           file.data, ngx_dlerror());
        return NGX_CONF_ERROR;
    }

    cln->handler = ngx_unload_module;
    cln->data = handle;

    modules = ngx_dlsym(handle, "ngx_modules");
    if (modules == NULL) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           ngx_dlsym_n " \"%V\", \"%s\" failed (%s)",
                           &value[1], "ngx_modules", ngx_dlerror());
        return NGX_CONF_ERROR;
    }

    names = ngx_dlsym(handle, "ngx_module_names");
    if (names == NULL) {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           ngx_dlsym_n " \"%V\", \"%s\" failed (%s)",
                           &value[1], "ngx_module_names", ngx_dlerror());
        return NGX_CONF_ERROR;
    }

    /* ngx_module_order is optional: no error check on purpose */
    order = ngx_dlsym(handle, "ngx_module_order");

    /* the modules array is NULL-terminated */
    for (i = 0; modules[i]; i++) {
        module = modules[i];
        module->name = names[i];

        if (ngx_add_module(cf, &file, module, order) != NGX_OK) {
            return NGX_CONF_ERROR;
        }

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, cf->log, 0, "module: %s i:%ui",
                       module->name, module->index);
    }

    return NGX_CONF_OK;

#else

    ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                       "\"load_module\" is not supported "
                       "on this platform");
    return NGX_CONF_ERROR;

#endif
}
#if (NGX_HAVE_DLOPEN)

/*
 * Pool cleanup handler for ngx_load_module(): dlclose() the dynamic
 * module handle; a failure is logged but otherwise ignored.
 */
static void
ngx_unload_module(void *data)
{
    void  *handle = data;

    if (ngx_dlclose(handle) != 0) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                      ngx_dlclose_n " failed (%s)", ngx_dlerror());
    }
}

#endif
|
c
|
github
|
https://github.com/nginx/nginx
|
src/core/nginx.c
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.statistics;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics;
/**
* An extension of {@code BufferedOutputStream} which implements
* {@link IOStatisticsSource} and forwards requests for the
* {@link IOStatistics} to the wrapped stream.
*
* This should be used when any output stream needs buffering while
* allowing the inner stream to be a source of statistics.
*
* It also implements {@link StreamCapabilities}
 * and {@link Syncable} and forwards them to the inner stream,
* if possible.
*/
public class BufferedIOStatisticsOutputStream
    extends BufferedOutputStream
    implements IOStatisticsSource, Syncable, StreamCapabilities {

  /**
   * Should calls to Syncable downgrade to flush if the underlying
   * stream does not support it?
   * While that breaks a core contract requirement of Syncable:
   * "Sync.sync() guarantees durability", downgrading is
   * the default behavior of FsDataOutputStream.
   */
  private final boolean downgradeSyncable;

  /**
   * Construct with default buffer size.
   * @param out output stream to buffer
   * @param downgradeSyncable should Syncable calls downgrade?
   */
  public BufferedIOStatisticsOutputStream(
      final OutputStream out,
      final boolean downgradeSyncable) {
    super(out);
    this.downgradeSyncable = downgradeSyncable;
  }

  /**
   * Construct with custom buffer size.
   *
   * @param out output stream to buffer
   * @param size buffer.
   * @param downgradeSyncable should Syncable calls downgrade?
   */
  public BufferedIOStatisticsOutputStream(
      final OutputStream out,
      final int size,
      final boolean downgradeSyncable) {
    super(out, size);
    this.downgradeSyncable = downgradeSyncable;
  }

  /**
   * Ask the inner stream for their IOStatistics.
   * @return any IOStatistics offered by the inner stream.
   */
  @Override
  public IOStatistics getIOStatistics() {
    return retrieveIOStatistics(out);
  }

  /**
   * If the inner stream supports {@link StreamCapabilities},
   * forward the probe to it; otherwise report no capability.
   *
   * @param capability string to query the stream support for.
   * @return true if a capability is known to be supported.
   */
  @Override
  public boolean hasCapability(final String capability) {
    return out instanceof StreamCapabilities
        && ((StreamCapabilities) out).hasCapability(capability);
  }

  /**
   * If the inner stream is Syncable, flush the buffer and then
   * invoke the inner stream's hflush() operation.
   *
   * Otherwise: throw an exception, unless the stream was constructed with
   * {@link #downgradeSyncable} set to true, in which case the stream
   * is just flushed.
   * @throws IOException IO Problem
   * @throws UnsupportedOperationException if the inner class is not syncable
   */
  @Override
  public void hflush() throws IOException {
    if (out instanceof Syncable) {
      flush();
      ((Syncable) out).hflush();
      return;
    }
    if (downgradeSyncable) {
      // downgrade to a simple flush of the buffer.
      flush();
    } else {
      throw new UnsupportedOperationException("hflush not supported by "
          + out);
    }
  }

  /**
   * If the inner stream is Syncable, flush the buffer and then
   * invoke the inner stream's hsync() operation.
   *
   * Otherwise: throw an exception, unless the stream was constructed with
   * {@link #downgradeSyncable} set to true, in which case the stream
   * is just flushed.
   * @throws IOException IO Problem
   * @throws UnsupportedOperationException if the inner class is not syncable
   */
  @Override
  public void hsync() throws IOException {
    if (out instanceof Syncable) {
      flush();
      ((Syncable) out).hsync();
      return;
    }
    if (downgradeSyncable) {
      // downgrade to a simple flush of the buffer.
      flush();
    } else {
      throw new UnsupportedOperationException("hsync not supported by "
          + out);
    }
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsOutputStream.java
|
from tempfile import NamedTemporaryFile
import pytest
import numpy as np
from numpy._core._multiarray_umath import (
_discover_array_parameters as discover_array_params,
_get_sfloat_dtype,
)
from numpy.testing import assert_array_equal
SF = _get_sfloat_dtype()
class TestSFloat:
    """Tests for the experimental scaled-float user DType ``SF``
    (``_ScaledFloatTestDType``), exercising casting, promotion, ufuncs,
    sorting, and array-creation paths of the new DType API.
    """

    def _get_array(self, scaling, aligned=True):
        # Build [1., 2., 3.] viewed as SF(scaling); optionally misalign
        # the buffer by one byte to hit the unaligned code paths.
        if not aligned:
            a = np.empty(3 * 8 + 1, dtype=np.uint8)[1:]
            a = a.view(np.float64)
            a[:] = [1., 2., 3.]
        else:
            a = np.array([1., 2., 3.])
        a *= 1. / scaling  # the casting code also uses the reciprocal.
        return a.view(SF(scaling))

    def test_sfloat_rescaled(self):
        sf = SF(1.)
        sf2 = sf.scaled_by(2.)
        assert sf2.get_scaling() == 2.
        sf6 = sf2.scaled_by(3.)
        assert sf6.get_scaling() == 6.

    def test_class_discovery(self):
        # This does not test much, since we always discover the scaling as 1.
        # But most of NumPy (when writing) does not understand DType classes
        dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
        assert dt == SF(1.)

    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_scaled_float_from_floats(self, scaling):
        a = np.array([1., 2., 3.], dtype=SF(scaling))
        assert a.dtype.get_scaling() == scaling
        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])

    def test_repr(self):
        # Check the repr, mainly to cover the code paths:
        assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"

    def test_dtype_str(self):
        assert SF(1.).str == "_ScaledFloatTestDType(scaling=1.0)"

    def test_dtype_name(self):
        assert SF(1.).name == "_ScaledFloatTestDType64"

    def test_sfloat_structured_dtype_printing(self):
        dt = np.dtype([("id", int), ("value", SF(0.5))])
        # repr of structured dtypes need special handling because the
        # implementation bypasses the object repr
        assert "('value', '_ScaledFloatTestDType64')" in repr(dt)

    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_sfloat_from_float(self, scaling):
        a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
        assert a.dtype.get_scaling() == scaling
        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])

    @pytest.mark.parametrize("aligned", [True, False])
    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_sfloat_getitem(self, aligned, scaling):
        a = self._get_array(1., aligned)
        assert a.tolist() == [1., 2., 3.]

    @pytest.mark.parametrize("aligned", [True, False])
    def test_sfloat_casts(self, aligned):
        a = self._get_array(1., aligned)

        # a sign flip is value-preserving, hence "equiv" but not "no":
        assert np.can_cast(a, SF(-1.), casting="equiv")
        assert not np.can_cast(a, SF(-1.), casting="no")
        na = a.astype(SF(-1.))
        assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))

        assert np.can_cast(a, SF(2.), casting="same_kind")
        assert not np.can_cast(a, SF(2.), casting="safe")
        a2 = a.astype(SF(2.))
        assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))

    @pytest.mark.parametrize("aligned", [True, False])
    def test_sfloat_cast_internal_errors(self, aligned):
        a = self._get_array(2e300, aligned)
        with pytest.raises(TypeError,
                match="error raised inside the core-loop: non-finite factor!"):
            a.astype(SF(2e-300))

    def test_sfloat_promotion(self):
        assert np.result_type(SF(2.), SF(3.)) == SF(3.)
        assert np.result_type(SF(3.), SF(2.)) == SF(3.)
        # Float64 -> SF(1.) and then promotes normally, so both of this work:
        assert np.result_type(SF(3.), np.float64) == SF(3.)
        assert np.result_type(np.float64, SF(0.5)) == SF(1.)

        # Test an undefined promotion:
        with pytest.raises(TypeError):
            np.result_type(SF(1.), np.int64)

    def test_basic_multiply(self):
        a = self._get_array(2.)
        b = self._get_array(4.)

        res = a * b
        # multiplies dtype scaling and content separately:
        assert res.dtype.get_scaling() == 8.
        expected_view = a.view(np.float64) * b.view(np.float64)
        assert_array_equal(res.view(np.float64), expected_view)

    def test_possible_and_impossible_reduce(self):
        # For reductions to work, the first and last operand must have the
        # same dtype.  For this parametric DType that is not necessarily true.
        a = self._get_array(2.)
        # Addition reduction works (as of writing requires to pass initial
        # because setting a scaled-float from the default `0` fails).
        res = np.add.reduce(a, initial=0.)
        assert res == a.astype(np.float64).sum()

        # But each multiplication changes the factor, so a reduction is not
        # possible (the relaxed version of the old refusal to handle any
        # flexible dtype).
        with pytest.raises(TypeError,
                match="the resolved dtypes are not compatible"):
            np.multiply.reduce(a)

    def test_basic_ufunc_at(self):
        float_a = np.array([1., 2., 3.])
        b = self._get_array(2.)

        # in-place multiply on the SF view must match the float64 reference:
        float_b = b.view(np.float64).copy()
        np.multiply.at(float_b, [1, 1, 1], float_a)
        np.multiply.at(b, [1, 1, 1], float_a)
        assert_array_equal(b.view(np.float64), float_b)

    def test_basic_multiply_promotion(self):
        float_a = np.array([1., 2., 3.])
        b = self._get_array(2.)

        res1 = float_a * b
        res2 = b * float_a
        # one factor is one, so we get the factor of b:
        assert res1.dtype == res2.dtype == b.dtype
        expected_view = float_a * b.view(np.float64)
        assert_array_equal(res1.view(np.float64), expected_view)
        assert_array_equal(res2.view(np.float64), expected_view)

        # Check that promotion works when `out` is used:
        np.multiply(b, float_a, out=res2)
        with pytest.raises(TypeError):
            # The promoter accepts this (maybe it should not), but the SFloat
            # result cannot be cast to integer:
            np.multiply(b, float_a, out=np.arange(3))

    def test_basic_addition(self):
        a = self._get_array(2.)
        b = self._get_array(4.)

        res = a + b
        # addition uses the type promotion rules for the result:
        assert res.dtype == np.result_type(a.dtype, b.dtype)
        expected_view = (a.astype(res.dtype).view(np.float64) +
                         b.astype(res.dtype).view(np.float64))
        assert_array_equal(res.view(np.float64), expected_view)

    def test_addition_cast_safety(self):
        """The addition method is special for the scaled float, because it
        includes the "cast" between different factors, thus cast-safety
        is influenced by the implementation.
        """
        a = self._get_array(2.)
        b = self._get_array(-2.)
        c = self._get_array(3.)

        # sign change is "equiv":
        np.add(a, b, casting="equiv")
        with pytest.raises(TypeError):
            np.add(a, b, casting="no")

        # Different factor is "same_kind" (default) so check that "safe" fails
        with pytest.raises(TypeError):
            np.add(a, c, casting="safe")

        # Check that casting the output fails also (done by the ufunc here)
        with pytest.raises(TypeError):
            np.add(a, a, out=c, casting="safe")

    @pytest.mark.parametrize("ufunc",
            [np.logical_and, np.logical_or, np.logical_xor])
    def test_logical_ufuncs_casts_to_bool(self, ufunc):
        a = self._get_array(2.)
        a[0] = 0.  # make sure first element is considered False.

        float_equiv = a.astype(float)
        expected = ufunc(float_equiv, float_equiv)
        res = ufunc(a, a)
        assert_array_equal(res, expected)

        # also check that the same works for reductions:
        expected = ufunc.reduce(float_equiv)
        res = ufunc.reduce(a)
        assert_array_equal(res, expected)

        # The output casting does not match the bool, bool -> bool loop:
        with pytest.raises(TypeError):
            ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")

    def test_wrapped_and_wrapped_reductions(self):
        a = self._get_array(2.)
        float_equiv = a.astype(float)

        expected = np.hypot(float_equiv, float_equiv)
        res = np.hypot(a, a)
        assert res.dtype == a.dtype
        res_float = res.view(np.float64) * 2
        assert_array_equal(res_float, expected)

        # Also check reduction (keepdims, due to incorrect getitem)
        res = np.hypot.reduce(a, keepdims=True)
        assert res.dtype == a.dtype
        expected = np.hypot.reduce(float_equiv, keepdims=True)
        assert res.view(np.float64) * 2 == expected

    def test_sort(self):
        # in-place sort on a contiguous reversed array:
        a = self._get_array(1.)
        a = a[::-1]  # reverse it
        a.sort()
        assert_array_equal(a.view(np.float64), [1., 2., 3.])

        a = self._get_array(1.)
        a = a[::-1]  # reverse it
        sorted_a = np.sort(a)
        assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [3., 2., 1.])

        a = self._get_array(0.5)  # different factor
        a = a[::2][::-1]  # non-contiguous
        sorted_a = np.sort(a)
        assert_array_equal(sorted_a.view(np.float64), [2., 6.])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 2.])

        a = self._get_array(0.5, aligned=False)
        a = a[::-1]  # reverse it
        sorted_a = np.sort(a)
        assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 4., 2.])
        sorted_a = np.sort(a, stable=True)
        assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 4., 2.])
        sorted_a = np.sort(a, stable=False)
        assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 4., 2.])

    def test_argsort(self):
        a = self._get_array(1.)
        a = a[::-1]  # reverse it
        indices = np.argsort(a)
        assert_array_equal(indices, [2, 1, 0])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [3., 2., 1.])

        a = self._get_array(0.5)
        a = a[::2][::-1]  # reverse it
        indices = np.argsort(a)
        assert_array_equal(indices, [1, 0])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 2.])

        a = self._get_array(0.5, aligned=False)
        a = a[::-1]  # reverse it
        indices = np.argsort(a)
        assert_array_equal(indices, [2, 1, 0])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 4., 2.])
        sorted_indices = np.argsort(a, stable=True)
        assert_array_equal(sorted_indices, [2, 1, 0])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 4., 2.])
        sorted_indices = np.argsort(a, stable=False)
        assert_array_equal(sorted_indices, [2, 1, 0])
        # original is unchanged
        assert_array_equal(a.view(np.float64), [6., 4., 2.])

    def test_astype_class(self):
        # Very simple test that we accept `.astype()` also on the class.
        # ScaledFloat always returns the default descriptor, but it does
        # check the relevant code paths.
        arr = np.array([1., 2., 3.], dtype=object)

        res = arr.astype(SF)  # passing the class class
        expected = arr.astype(SF(1.))  # above will have discovered 1. scaling
        assert_array_equal(res.view(np.float64), expected.view(np.float64))

    def test_creation_class(self):
        # passing in a dtype class should return
        # the default descriptor
        arr1 = np.array([1., 2., 3.], dtype=SF)
        assert arr1.dtype == SF(1.)
        arr2 = np.array([1., 2., 3.], dtype=SF(1.))
        assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
        assert arr1.dtype == arr2.dtype

        assert np.empty(3, dtype=SF).dtype == SF(1.)
        assert np.empty_like(arr1, dtype=SF).dtype == SF(1.)
        assert np.zeros(3, dtype=SF).dtype == SF(1.)
        assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.)

    @pytest.mark.thread_unsafe(
        reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)"
    )
    def test_np_save_load(self):
        # this monkeypatch is needed because pickle
        # uses the repr of a type to reconstruct it
        np._ScaledFloatTestDType = SF

        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))

        # adapted from RoundtripTest.roundtrip in np.save tests
        with NamedTemporaryFile("wb", delete=False, suffix=".npz") as f:
            with pytest.warns(UserWarning) as record:
                np.savez(f.name, arr)

        assert len(record) == 1

        with np.load(f.name, allow_pickle=True) as data:
            larr = data["arr_0"]
        assert_array_equal(arr.view(np.float64), larr.view(np.float64))
        assert larr.dtype == arr.dtype == SF(1.0)

        del np._ScaledFloatTestDType

    def test_flatiter(self):
        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))

        for i, val in enumerate(arr.flat):
            assert arr[i] == val

    @pytest.mark.parametrize(
        "index", [
            [1, 2], ..., slice(None, 2, None),
            np.array([True, True, False]), np.array([0, 1])
        ], ids=["int_list", "ellipsis", "slice", "bool_array", "int_array"])
    def test_flatiter_index(self, index):
        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))
        np.testing.assert_array_equal(
            arr[index].view(np.float64), arr.flat[index].view(np.float64))

        arr2 = arr.copy()
        arr[index] = 5.0
        arr2.flat[index] = 5.0
        np.testing.assert_array_equal(
            arr.view(np.float64), arr2.view(np.float64))
@pytest.mark.thread_unsafe(
    reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)"
)
def test_type_pickle():
    """The SF DType class pickles and unpickles to the identical object."""
    # can't actually unpickle a descriptor, but the class itself pickles
    # as long as it is reachable from the ``np`` namespace.
    import pickle

    np._ScaledFloatTestDType = SF
    round_tripped = pickle.loads(pickle.dumps(SF))
    assert round_tripped is SF
    del np._ScaledFloatTestDType
def test_is_numeric():
    """The SF DType advertises itself as numeric."""
    assert SF._is_numeric, "expected SF to be flagged as numeric"
|
python
|
github
|
https://github.com/numpy/numpy
|
numpy/_core/tests/test_custom_dtypes.py
|
from __future__ import print_function
import os
import numpy as np
try:
import vtk
from vtk.util.numpy_support import vtk_to_numpy
except BaseException:
pass
def calculate_vtk_max_pointwise_difference(file1, file2, tol=1e-6):
    """Compare the point data of two VTK structured-points files.

    Returns a tuple ``(match, max_diff)``: ``match`` is True when every
    point-data array agrees within absolute tolerance ``tol``, and
    ``max_diff`` is the largest absolute pointwise difference.
    ``(False, np.inf)`` is returned when the arrays cannot be compared
    (e.g. differing shapes).
    """
    arrays = [0] * 2
    reader = vtk.vtkStructuredPointsReader()
    for i, fname in enumerate([file1, file2]):
        reader.SetFileName(fname)
        reader.Update()
        data = reader.GetOutput().GetPointData()
        arrays[i] = np.array([vtk_to_numpy(data.GetArray(n))
                              for n in range(data.GetNumberOfArrays())])
    try:
        return np.allclose(
            arrays[0], arrays[1], rtol=0, atol=tol), np.max(
            np.abs(
                arrays[0] - arrays[1]))
    except BaseException:
        return False, np.inf
# Re-import vtk and re-define the comparison helper inside a guard so that
# importing this module still succeeds when the optional ``vtk`` package is
# missing.  NOTE(review): this duplicates the unguarded definition above
# verbatim; the two should be deduplicated.
try:
    import vtk
    from vtk.util.numpy_support import vtk_to_numpy

    def calculate_vtk_max_pointwise_difference(file1, file2, tol=1e-6):
        # Returns (all_close, max_abs_difference) over all point-data
        # arrays of the two VTK structured-points files; (False, inf)
        # when the arrays cannot be compared.
        arrays = [0] * 2
        reader = vtk.vtkStructuredPointsReader()
        for i, fname in enumerate([file1, file2]):
            reader.SetFileName(fname)
            reader.Update()
            data = reader.GetOutput().GetPointData()
            arrays[i] = np.array([vtk_to_numpy(data.GetArray(n))
                                  for n in range(data.GetNumberOfArrays())])
        try:
            return np.allclose(
                arrays[0], arrays[1], rtol=0, atol=tol), np.max(
                np.abs(
                    arrays[0] - arrays[1]))
        except BaseException:
            return False, np.inf
except BaseException:
    pass
def params_match(inParams, outParams):
    """Check, if the parameters set and gotten back match.
    Only check keys present in inParams.
    """
    for k in inParams:
        if k not in outParams:
            print(k, "missing from returned parameters")
            return False
        expected = inParams[k]
        actual = outParams[k]
        if type(expected) == float:
            # floats are compared with an absolute tolerance
            if abs(actual - expected) >= 1E-14:
                print("Mismatch in parameter ", k, expected, actual,
                      type(expected), type(actual), abs(expected - actual))
                return False
        else:
            # everything else must match exactly
            if actual != expected:
                print("Mismatch in parameter ", k, expected, actual,
                      type(expected), type(actual))
                return False
    return True
def generate_test_for_class(_system, _interClass, _params):
    """Return a test method which verifies that interaction parameters
    survive a set/get round trip through ``_interClass``.

    Only keys present in ``_params`` are compared (see ``params_match``).

    :param _system: espressomd system the interaction is added to.
    :param _interClass: interaction class, e.g. an espressomd actor type.
    :param _params: parameter dict, e.g. ``{"k": 1., "r_0": 0.}``.
    """
    params = _params
    interClass = _interClass
    system = _system

    def func(self):
        # The generated test captures params/interClass/system from the
        # enclosing call via closure.  Set the parameters ...
        inter = interClass(**params)
        inter.validate_params()
        system.actors.add(inter)
        # ... read them out again ...
        outParams = inter.get_params()
        # ... and clean up before asserting.
        del system.actors[0]
        self.assertTrue(
            params_match(params, outParams),
            "Missmatch of parameters.\nParameters set "
            + params.__str__()
            + " vs. output parameters "
            + outParams.__str__())

    return func
def lj_force_vector(v_d, d, lj_params):
    """Return the Lennard-Jones force vector for distance ``d`` and
    distance vector ``v_d`` based on the given ``lj_params``.
    Supports epsilon and cutoff (sigma is implicitly 1)."""
    if d >= lj_params["cutoff"]:
        return np.zeros(3)
    prefactor = 4. * lj_params["epsilon"]
    return prefactor * v_d * (6.0 * d**-8 - 12.0 * d**-14)
def verify_lj_forces(system, tolerance, ids_to_skip=()):
    """Go over all pairs of particles in ``system`` and compare the forces
    on them to what would be expected based on the system's LJ parameters.

    Particle ids listed in ``ids_to_skip`` are not checked.
    Do not run this with a thermostat enabled.

    :param system: espressomd system to verify.
    :param tolerance: maximum allowed per-particle deviation (Euclidean norm).
    :param ids_to_skip: iterable of particle ids to exclude from the check.
        (Default changed from a mutable ``[]`` to an immutable ``()`` —
        a mutable default argument is shared between calls.)
    :raises Exception: if any particle's force deviates by >= tolerance.
    """
    skip = set(ids_to_skip)

    # Initialize dict with expected forces, one accumulator per particle.
    f_expected = {pid: np.zeros(3) for pid in system.part[:].id}

    # Cache some lookups to speed up the O(N^2) pair loop.
    dist_vec = system.distance_vec
    norm = np.linalg.norm
    non_bonded_inter = system.non_bonded_inter

    # LJ parameters for every (type, type) combination.
    all_types = np.unique(system.part[:].type)
    lj_params = {
        (i, j): non_bonded_inter[int(i), int(j)].lennard_jones.get_params()
        for i in all_types for j in all_types}

    # Go over all pairs of particles.
    for p0, p1 in system.part.pairs():
        if p0.id in skip or p1.id in skip:
            continue
        # Distance and distance vector.
        v_d = dist_vec(p0, p1)
        d = norm(v_d)
        # Calculate and add the expected LJ force; Newton's third law
        # gives the opposite contribution to the partner particle.
        f = lj_force_vector(v_d, d, lj_params[p0.type, p1.type])
        f_expected[p0.id] += f
        f_expected[p1.id] -= f

    # Check actual forces against expected.
    for pid in f_expected:
        if pid in skip:
            continue
        if norm(system.part[pid].f - f_expected[pid]) >= tolerance:
            raise Exception("LJ force verification failed on particle " +
                            str(pid) +
                            ". Got " +
                            str(system.part[pid].f) +
                            ", expected " +
                            str(f_expected[pid]))
def abspath(path):
    """Return ``path`` resolved relative to this file's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, path)
def transform_pos_from_cartesian_to_polar_coordinates(pos):
    """Transform the given cartesian coordinates to polar coordinates.

    Parameters
    ----------
    pos : array_like :obj:`float`
        ``x``, ``y``, and ``z``-component of the cartesian position.

    Returns
    -------
    array_like
        The given position as ``[r, phi, z]``.
    """
    x, y, z = pos[0], pos[1], pos[2]
    radial = np.sqrt(x**2.0 + y**2.0)
    azimuth = np.arctan2(y, x)
    return np.array([radial, azimuth, z])
def transform_vel_from_cartesian_to_polar_coordinates(pos, vel):
    """Transform the given cartesian velocities to polar velocities.

    Parameters
    ----------
    pos : array_like :obj:`float`
        ``x``, ``y``, and ``z``-component of the cartesian position.
    vel : array_like :obj:`float`
        ``x``, ``y``, and ``z``-component of the cartesian velocity.

    Returns
    -------
    array_like
        The velocity as ``[v_r, v_phi, v_z]``.
    """
    x, y = pos[0], pos[1]
    vx, vy = vel[0], vel[1]
    r_sq = x**2.0 + y**2.0
    v_radial = (x * vx + y * vy) / np.sqrt(r_sq)
    v_azimuthal = (x * vy - y * vx) / r_sq
    return np.array([v_radial, v_azimuthal, vel[2]])
def rotation_matrix(axis, theta):
    """
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians (Euler–Rodrigues formula).

    Parameters
    ----------
    axis : array_like :obj:`float`
        Axis to rotate around.
    theta : :obj:`float`
        Rotation angle.
    """
    axis = np.asarray(axis)
    axis = axis / np.sqrt(np.dot(axis, axis))
    # quaternion components (a, b, c, d)
    a = np.cos(theta / 2.0)
    b, c, d = -axis * np.sin(theta / 2.0)
    return np.array([
        [a * a + b * b - c * c - d * d, 2 * (b * c + a * d), 2 * (b * d - a * c)],
        [2 * (b * c - a * d), a * a + c * c - b * b - d * d, 2 * (c * d + a * b)],
        [2 * (b * d + a * c), 2 * (c * d - a * b), a * a + d * d - b * b - c * c]])
def get_cylindrical_bin_volume(
        n_r_bins,
        n_phi_bins,
        n_z_bins,
        min_r,
        max_r,
        min_phi,
        max_phi,
        min_z,
        max_z):
    """
    Return the bin volumes for a cylindrical histogram.

    Parameters
    ----------
    n_r_bins : :obj:`float`
        Number of bins in ``r`` direction.
    n_phi_bins : :obj:`float`
        Number of bins in ``phi`` direction.
    n_z_bins : :obj:`float`
        Number of bins in ``z`` direction.
    min_r : :obj:`float`
        Minimum considered value in ``r`` direction.
    max_r : :obj:`float`
        Maximum considered value in ``r`` direction.
    min_phi : :obj:`float`
        Minimum considered value in ``phi`` direction.
    max_phi : :obj:`float`
        Maximum considered value in ``phi`` direction.
    min_z : :obj:`float`
        Minimum considered value in ``z`` direction.
    max_z : :obj:`float`
        Maximum considered value in ``z`` direction.

    Returns
    -------
    array_like : Bin volumes (one entry per radial bin).
    """
    r_bin_size = (max_r - min_r) / n_r_bins
    phi_bin_size = (max_phi - min_phi) / n_phi_bins
    z_bin_size = (max_z - min_z) / n_z_bins
    # annulus area per radial bin, scaled by the phi fraction and z height
    i = np.arange(n_r_bins)
    bin_volume = np.pi * ((min_r + r_bin_size * (i + 1))**2.0 -
                          (min_r + r_bin_size * i)**2.0) * \
        phi_bin_size / (2.0 * np.pi) * z_bin_size
    return bin_volume
#
# Analytical Expressions for interactions
#
# Harmonic bond
def harmonic_potential(scalar_r, k, r_0, r_cut):
    """Harmonic bond potential 0.5*k*(r - r_0)^2 (``r_cut`` is unused)."""
    displacement = scalar_r - r_0
    return 0.5 * k * displacement**2
def harmonic_force(scalar_r, k, r_0, r_cut):
    """Harmonic bond force -k*(r - r_0) (``r_cut`` is unused)."""
    return -(scalar_r - r_0) * k
# FENE bond
def fene_potential(scalar_r, k, d_r_max, r_0):
    """FENE bond potential for scalar distance ``scalar_r``."""
    stretch = (scalar_r - r_0) / d_r_max
    return -0.5 * k * d_r_max**2 * np.log(1 - stretch**2)
def fene_force(scalar_r, k, d_r_max, r_0):
    """FENE bond force for scalar distance ``scalar_r``."""
    dr = scalar_r - r_0
    return k * dr * d_r_max**2 / (dr**2 - d_r_max**2)
def fene_force2(bond_vector, k, d_r_max, r_0):
    """FENE force vector for the given bond vector."""
    r = np.linalg.norm(bond_vector)
    magnitude = k * (r - r_0) / (r * (1 - ((r - r_0) / d_r_max)**2))
    return magnitude * np.array(bond_vector)
# Generic Lennard-Jones
def lj_generic_potential(r, eps, sig, cutoff, offset=0., shift=0., e1=12., e2=6., b1=4., b2=4., delta=0., lam=1.):
    """Generic (softcore) Lennard-Jones potential; zero beyond the cutoff."""
    if r >= offset + cutoff:
        return 0.
    # LJGEN_SOFTCORE distance transformation.
    rroff = np.sqrt(np.power(r - offset, 2) + (1 - lam) * delta * sig**2)
    frac = sig / rroff
    return eps * lam * (b1 * np.power(frac, e1) - b2 * np.power(frac, e2) + shift)
def lj_generic_force(espressomd, r, eps, sig, cutoff, offset=0., e1=12, e2=6, b1=4., b2=4., delta=0., lam=1., generic=True):
    """Generic Lennard-Jones force magnitude (softcore-aware).

    ``espressomd`` is queried for the LJGEN_SOFTCORE feature: without it the
    generic force is an odd function of (r - offset), hence the sign factor.
    """
    if r >= offset + cutoff:
        return 0.
    h = (r - offset)**2 + delta * (1. - lam) * sig**2
    frac = sig / np.sqrt(h)
    f = (r - offset) * eps * lam * (
        b1 * e1 * np.power(frac, e1) - b2 * e2 * np.power(frac, e2)) / h
    if (not espressomd.has_features("LJGEN_SOFTCORE")) and generic:
        f = f * np.sign(r - offset)
    return f
# Lennard-Jones
def lj_potential(r, eps, sig, cutoff, shift, offset=0.):
    """Classic 12-6 Lennard-Jones potential; ``shift`` is given in units of 4*eps."""
    return lj_generic_potential(r, eps, sig, cutoff, offset=offset, shift=shift * 4.)
def lj_force(espressomd, r, eps, sig, cutoff, offset=0.):
    """Classic 12-6 Lennard-Jones force (the non-generic sign convention)."""
    return lj_generic_force(espressomd, r, eps, sig, cutoff, offset=offset, generic=False)
# Lennard-Jones Cosine
def lj_cos_potential(r, eps, sig, cutoff, offset):
    """Lennard-Jones cosine potential: plain LJ up to the LJ minimum,
    then a cosine tail that goes to zero at cutoff + offset."""
    r_min = offset + 2.**(1. / 6.) * sig
    r_cut = cutoff + offset
    if r < r_min:
        return lj_potential(r, eps=eps, sig=sig, cutoff=cutoff, offset=offset, shift=0.)
    if r < r_cut:
        alpha = np.pi / ((r_cut - offset)**2 - (r_min - offset)**2)
        beta = np.pi - (r_min - offset)**2 * alpha
        return 0.5 * eps * (np.cos(alpha * (r - offset)**2 + beta) - 1.)
    return 0.
def lj_cos_force(espressomd, r, eps, sig, cutoff, offset):
    """Lennard-Jones cosine force: plain LJ force up to the LJ minimum,
    then the derivative of the cosine tail."""
    r_min = offset + 2.**(1. / 6.) * sig
    r_cut = cutoff + offset
    if r < r_min:
        return lj_force(espressomd, r, eps=eps, sig=sig, cutoff=cutoff, offset=offset)
    if r < r_cut:
        alpha = np.pi / ((r_cut - offset)**2 - (r_min - offset)**2)
        beta = np.pi - (r_min - offset)**2 * alpha
        return (r - offset) * alpha * eps * np.sin(alpha * (r - offset)**2 + beta)
    return 0.
# Lennard-Jones Cosine^2
def lj_cos2_potential(r, eps, sig, offset, width):
    """Lennard-Jones cos^2 potential: plain LJ up to the LJ minimum,
    then a -eps*cos^2 well of the given width."""
    r_min = offset + 2.**(1. / 6.) * sig
    r_cut = r_min + width
    if r < r_min:
        return lj_potential(r, eps=eps, sig=sig, offset=offset, cutoff=r_cut, shift=0.)
    if r < r_cut:
        return -eps * np.cos(np.pi / (2. * width) * (r - r_min))**2
    return 0.
def lj_cos2_force(espressomd, r, eps, sig, offset, width):
    """Lennard-Jones cos^2 force: plain LJ force up to the LJ minimum,
    then the derivative of the cos^2 well."""
    r_min = offset + 2.**(1. / 6.) * sig
    r_cut = r_min + width
    if r < r_min:
        return lj_force(espressomd, r, eps=eps, sig=sig, cutoff=r_cut, offset=offset)
    if r < r_cut:
        return -np.pi * eps * np.sin(np.pi * (r - r_min) / width) / (2. * width)
    return 0.
# Smooth-Step
def smooth_step_potential(r, eps, sig, cutoff, d, n, k0):
    """Smooth-step potential: a (d/r)^n core plus a Fermi-like step of
    height eps centered at sig; zero at and beyond the cutoff."""
    if r >= cutoff:
        return 0.
    return np.power(d / r, n) + eps / (1 + np.exp(2 * k0 * (r - sig)))
def smooth_step_force(r, eps, sig, cutoff, d, n, k0):
    """Smooth-step force magnitude, i.e. -dV/dr of ``smooth_step_potential``.

    Zero at and beyond the cutoff. The step term's denominator must be
    (1 + exp(2*k0*(r - sig)))**2 to be the derivative of the potential's
    eps / (1 + exp(2*k0*(r - sig))) term; the previous code computed
    (1 + exp(...)**2), which is not that derivative.
    """
    if r >= cutoff:
        return 0.
    step = np.exp(2 * k0 * (r - sig))
    return n * d / r**2 * np.power(d / r, n - 1) + \
        2 * k0 * eps * step / (1 + step)**2
# BMHTF
def bmhtf_potential(r, a, b, c, d, sig, cutoff):
    """BMHTF potential, shifted so that it vanishes at the cutoff.

    The ``r == cutoff`` branch intentionally returns the *unshifted* value:
    the ``r < cutoff`` branch recurses with r = cutoff to obtain that value
    as the shift, and the equality branch terminates the recursion.
    """
    def raw(x):
        return a * np.exp(b * (sig - x)) - c * np.power(x, -6) - d * np.power(x, -8)
    if r == cutoff:
        return raw(r)
    if r < cutoff:
        return raw(r) - raw(cutoff)
    return 0.
def bmhtf_force(r, a, b, c, d, sig, cutoff):
    """BMHTF force magnitude; zero at and beyond the cutoff."""
    if r >= cutoff:
        return 0.
    return a * b * np.exp(b * (sig - r)) \
        - 6 * c * np.power(r, -7) - 8 * d * np.power(r, -9)
# Morse
def morse_potential(r, eps, alpha, cutoff, rmin=0):
    """Morse potential, shifted so that it vanishes at the cutoff."""
    if r >= cutoff:
        return 0.
    def well(x):
        return eps * (np.exp(-2. * alpha * (x - rmin)) - 2 * np.exp(-alpha * (x - rmin)))
    return well(r) - well(cutoff)
def morse_force(r, eps, alpha, cutoff, rmin=0):
    """Morse force magnitude; zero at and beyond the cutoff."""
    if r >= cutoff:
        return 0.
    decay = np.exp((rmin - r) * alpha)
    return 2. * decay * (decay - 1) * alpha * eps
# Buckingham
def buckingham_potential(r, a, b, c, d, cutoff, discont, shift):
    """Buckingham potential; linearly continued below the discontinuity radius."""
    if r < discont:
        # Linear continuation matching value and slope at r = discont.
        slope = -buckingham_force(discont, a, b, c, d, cutoff, discont, shift)
        intercept = buckingham_potential(
            discont, a, b, c, d, cutoff, discont, shift) - slope * discont
        return slope * r + intercept
    if r < cutoff:
        return a * np.exp(-b * r) - c * np.power(r, -6) - d * np.power(r, -4) + shift
    return 0.
def buckingham_force(r, a, b, c, d, cutoff, discont, shift):
    """Buckingham force; held constant at its discont value below discont,
    zero at and beyond the cutoff."""
    if r < discont:
        return buckingham_force(discont, a, b, c, d, cutoff, discont, shift)
    if r < cutoff:
        return a * b * np.exp(-b * r) - 6 * c * np.power(r, -7) - 4 * d * np.power(r, -5)
    return 0.
# Soft-sphere
def soft_sphere_potential(r, a, n, cutoff, offset=0):
    """Soft-sphere potential a*(r - offset)^-n inside offset + cutoff."""
    if r >= offset + cutoff:
        return 0.
    return a * np.power(r - offset, -n)
def soft_sphere_force(r, a, n, cutoff, offset=0):
    """Soft-sphere force magnitude; nonzero only for offset < r < offset + cutoff."""
    if offset < r < offset + cutoff:
        return n * a * np.power(r - offset, -(n + 1))
    return 0.
# Hertzian
def hertzian_potential(r, eps, sig):
    """Hertzian potential eps*(1 - r/sig)^(5/2); zero at and beyond sig."""
    if r >= sig:
        return 0.
    return eps * np.power(1 - r / sig, 5. / 2.)
def hertzian_force(r, eps, sig):
    """Hertzian force magnitude, the derivative of ``hertzian_potential``."""
    if r >= sig:
        return 0.
    return 5. / 2. * eps / sig * np.power(1 - r / sig, 3. / 2.)
# Gaussian
def gaussian_potential(r, eps, sig, cutoff):
    """Gaussian potential eps*exp(-(r/sig)^2 / 2); zero at and beyond the cutoff."""
    if r >= cutoff:
        return 0.
    return eps * np.exp(-np.power(r / sig, 2) / 2)
def gaussian_force(r, eps, sig, cutoff):
    """Gaussian force magnitude, the derivative of ``gaussian_potential``."""
    if r >= cutoff:
        return 0.
    return eps * r / sig**2 * np.exp(-np.power(r / sig, 2) / 2)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# frozen_string_literal: true
require "abstract_unit"
# Unit tests for ActionView::Template::HTML#format: the value is a symbol
# when the MIME type registers one, otherwise the raw MIME/format string.
class HTMLTest < ActiveSupport::TestCase
  test "formats returns symbol for recognized MIME type" do
    # text/html is registered and carries the :html symbol.
    assert_equal :html, ActionView::Template::HTML.new("", :html).format
  end

  test "formats returns string for recognized MIME type when MIME does not have symbol" do
    foo = Mime::Type.lookup("text/foo")
    # A registered type without a symbol falls back to its type string.
    assert_nil foo.to_sym
    assert_equal "text/foo", ActionView::Template::HTML.new("", foo).format
  end

  test "formats returns string for unknown MIME type" do
    assert_equal "foo", ActionView::Template::HTML.new("", "foo").format
  end
end
|
ruby
|
github
|
https://github.com/rails/rails
|
actionview/test/template/html_test.rb
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "MisleadingIdentifierCheck.h"
#include "llvm/Support/ConvertUTF.h"
namespace clang::tidy::misc {
// See https://www.unicode.org/Public/14.0.0/ucd/extracted/DerivedBidiClass.txt
// Unassigned code point ranges whose default bidi class is AL (Arabic Letter).
static bool isUnassignedAL(llvm::UTF32 CP) {
  static const llvm::UTF32 Ranges[][2] = {
      {0x0600, 0x07BF},         {0x0860, 0x08FF},
      {0xFB50, 0xFDCF},         {0xFDF0, 0xFDFF},
      {0xFE70, 0xFEFF},         {0x00010D00, 0x00010D3F},
      {0x00010F30, 0x00010F6F}, {0x0001EC70, 0x0001ECBF},
      {0x0001ED00, 0x0001ED4F}, {0x0001EE00, 0x0001EEFF}};
  for (const auto &Range : Ranges)
    if (Range[0] <= CP && CP <= Range[1])
      return true;
  return false;
}
// See https://www.unicode.org/Public/14.0.0/ucd/extracted/DerivedBidiClass.txt
// Unassigned code point ranges whose default bidi class is R (Right-to-Left).
static bool isUnassignedR(llvm::UTF32 CP) {
  static const llvm::UTF32 Ranges[][2] = {
      {0x0590, 0x05FF},         {0x07C0, 0x085F},
      {0xFB1D, 0xFB4F},         {0x00010800, 0x00010CFF},
      {0x00010D40, 0x00010F2F}, {0x00010F70, 0x00010FFF},
      {0x0001E800, 0x0001EC6F}, {0x0001ECC0, 0x0001ECFF},
      {0x0001ED50, 0x0001EDFF}, {0x0001EF00, 0x0001EFFF}};
  for (const auto &Range : Ranges)
    if (Range[0] <= CP && CP <= Range[1])
      return true;
  return false;
}
// See https://www.unicode.org/Public/14.0.0/ucd/extracted/DerivedBidiClass.txt
// Assigned code points whose bidi class is R (Right-to-Left): Hebrew,
// Samaritan, Mandaic, and the historic RTL scripts in the SMP. The range
// list is transcribed verbatim from DerivedBidiClass.txt, one clause per
// UCD entry, so it can be diffed against the data file when updating.
static bool isR(llvm::UTF32 CP) {
  return (CP == 0x0590) || (CP == 0x05BE) || (CP == 0x05C0) || (CP == 0x05C3) ||
         (CP == 0x05C6) || (0x05C8 <= CP && CP <= 0x05CF) ||
         (0x05D0 <= CP && CP <= 0x05EA) || (0x05EB <= CP && CP <= 0x05EE) ||
         (0x05EF <= CP && CP <= 0x05F2) || (0x05F3 <= CP && CP <= 0x05F4) ||
         (0x05F5 <= CP && CP <= 0x05FF) || (0x07C0 <= CP && CP <= 0x07C9) ||
         (0x07CA <= CP && CP <= 0x07EA) || (0x07F4 <= CP && CP <= 0x07F5) ||
         (CP == 0x07FA) || (0x07FB <= CP && CP <= 0x07FC) ||
         (0x07FE <= CP && CP <= 0x07FF) || (0x0800 <= CP && CP <= 0x0815) ||
         (CP == 0x081A) || (CP == 0x0824) || (CP == 0x0828) ||
         (0x082E <= CP && CP <= 0x082F) || (0x0830 <= CP && CP <= 0x083E) ||
         (CP == 0x083F) || (0x0840 <= CP && CP <= 0x0858) ||
         (0x085C <= CP && CP <= 0x085D) || (CP == 0x085E) || (CP == 0x085F) ||
         (CP == 0x200F) || (CP == 0xFB1D) || (0xFB1F <= CP && CP <= 0xFB28) ||
         (0xFB2A <= CP && CP <= 0xFB36) || (CP == 0xFB37) ||
         (0xFB38 <= CP && CP <= 0xFB3C) || (CP == 0xFB3D) || (CP == 0xFB3E) ||
         (CP == 0xFB3F) || (0xFB40 <= CP && CP <= 0xFB41) || (CP == 0xFB42) ||
         (0xFB43 <= CP && CP <= 0xFB44) || (CP == 0xFB45) ||
         (0xFB46 <= CP && CP <= 0xFB4F) || (0x10800 <= CP && CP <= 0x10805) ||
         (0x10806 <= CP && CP <= 0x10807) || (CP == 0x10808) ||
         (CP == 0x10809) || (0x1080A <= CP && CP <= 0x10835) ||
         (CP == 0x10836) || (0x10837 <= CP && CP <= 0x10838) ||
         (0x10839 <= CP && CP <= 0x1083B) || (CP == 0x1083C) ||
         (0x1083D <= CP && CP <= 0x1083E) || (0x1083F <= CP && CP <= 0x10855) ||
         (CP == 0x10856) || (CP == 0x10857) ||
         (0x10858 <= CP && CP <= 0x1085F) || (0x10860 <= CP && CP <= 0x10876) ||
         (0x10877 <= CP && CP <= 0x10878) || (0x10879 <= CP && CP <= 0x1087F) ||
         (0x10880 <= CP && CP <= 0x1089E) || (0x1089F <= CP && CP <= 0x108A6) ||
         (0x108A7 <= CP && CP <= 0x108AF) || (0x108B0 <= CP && CP <= 0x108DF) ||
         (0x108E0 <= CP && CP <= 0x108F2) || (CP == 0x108F3) ||
         (0x108F4 <= CP && CP <= 0x108F5) || (0x108F6 <= CP && CP <= 0x108FA) ||
         (0x108FB <= CP && CP <= 0x108FF) || (0x10900 <= CP && CP <= 0x10915) ||
         (0x10916 <= CP && CP <= 0x1091B) || (0x1091C <= CP && CP <= 0x1091E) ||
         (0x10920 <= CP && CP <= 0x10939) || (0x1093A <= CP && CP <= 0x1093E) ||
         (CP == 0x1093F) || (0x10940 <= CP && CP <= 0x1097F) ||
         (0x10980 <= CP && CP <= 0x109B7) || (0x109B8 <= CP && CP <= 0x109BB) ||
         (0x109BC <= CP && CP <= 0x109BD) || (0x109BE <= CP && CP <= 0x109BF) ||
         (0x109C0 <= CP && CP <= 0x109CF) || (0x109D0 <= CP && CP <= 0x109D1) ||
         (0x109D2 <= CP && CP <= 0x109FF) || (CP == 0x10A00) ||
         (CP == 0x10A04) || (0x10A07 <= CP && CP <= 0x10A0B) ||
         (0x10A10 <= CP && CP <= 0x10A13) || (CP == 0x10A14) ||
         (0x10A15 <= CP && CP <= 0x10A17) || (CP == 0x10A18) ||
         (0x10A19 <= CP && CP <= 0x10A35) || (0x10A36 <= CP && CP <= 0x10A37) ||
         (0x10A3B <= CP && CP <= 0x10A3E) || (0x10A40 <= CP && CP <= 0x10A48) ||
         (0x10A49 <= CP && CP <= 0x10A4F) || (0x10A50 <= CP && CP <= 0x10A58) ||
         (0x10A59 <= CP && CP <= 0x10A5F) || (0x10A60 <= CP && CP <= 0x10A7C) ||
         (0x10A7D <= CP && CP <= 0x10A7E) || (CP == 0x10A7F) ||
         (0x10A80 <= CP && CP <= 0x10A9C) || (0x10A9D <= CP && CP <= 0x10A9F) ||
         (0x10AA0 <= CP && CP <= 0x10ABF) || (0x10AC0 <= CP && CP <= 0x10AC7) ||
         (CP == 0x10AC8) || (0x10AC9 <= CP && CP <= 0x10AE4) ||
         (0x10AE7 <= CP && CP <= 0x10AEA) || (0x10AEB <= CP && CP <= 0x10AEF) ||
         (0x10AF0 <= CP && CP <= 0x10AF6) || (0x10AF7 <= CP && CP <= 0x10AFF) ||
         (0x10B00 <= CP && CP <= 0x10B35) || (0x10B36 <= CP && CP <= 0x10B38) ||
         (0x10B40 <= CP && CP <= 0x10B55) || (0x10B56 <= CP && CP <= 0x10B57) ||
         (0x10B58 <= CP && CP <= 0x10B5F) || (0x10B60 <= CP && CP <= 0x10B72) ||
         (0x10B73 <= CP && CP <= 0x10B77) || (0x10B78 <= CP && CP <= 0x10B7F) ||
         (0x10B80 <= CP && CP <= 0x10B91) || (0x10B92 <= CP && CP <= 0x10B98) ||
         (0x10B99 <= CP && CP <= 0x10B9C) || (0x10B9D <= CP && CP <= 0x10BA8) ||
         (0x10BA9 <= CP && CP <= 0x10BAF) || (0x10BB0 <= CP && CP <= 0x10BFF) ||
         (0x10C00 <= CP && CP <= 0x10C48) || (0x10C49 <= CP && CP <= 0x10C7F) ||
         (0x10C80 <= CP && CP <= 0x10CB2) || (0x10CB3 <= CP && CP <= 0x10CBF) ||
         (0x10CC0 <= CP && CP <= 0x10CF2) || (0x10CF3 <= CP && CP <= 0x10CF9) ||
         (0x10CFA <= CP && CP <= 0x10CFF) || (0x10D40 <= CP && CP <= 0x10E5F) ||
         (CP == 0x10E7F) || (0x10E80 <= CP && CP <= 0x10EA9) ||
         (CP == 0x10EAA) || (CP == 0x10EAD) ||
         (0x10EAE <= CP && CP <= 0x10EAF) || (0x10EB0 <= CP && CP <= 0x10EB1) ||
         (0x10EB2 <= CP && CP <= 0x10EFF) || (0x10F00 <= CP && CP <= 0x10F1C) ||
         (0x10F1D <= CP && CP <= 0x10F26) || (CP == 0x10F27) ||
         (0x10F28 <= CP && CP <= 0x10F2F) || (0x10F70 <= CP && CP <= 0x10F81) ||
         (0x10F86 <= CP && CP <= 0x10F89) || (0x10F8A <= CP && CP <= 0x10FAF) ||
         (0x10FB0 <= CP && CP <= 0x10FC4) || (0x10FC5 <= CP && CP <= 0x10FCB) ||
         (0x10FCC <= CP && CP <= 0x10FDF) || (0x10FE0 <= CP && CP <= 0x10FF6) ||
         (0x10FF7 <= CP && CP <= 0x10FFF) || (0x1E800 <= CP && CP <= 0x1E8C4) ||
         (0x1E8C5 <= CP && CP <= 0x1E8C6) || (0x1E8C7 <= CP && CP <= 0x1E8CF) ||
         (0x1E8D7 <= CP && CP <= 0x1E8FF) || (0x1E900 <= CP && CP <= 0x1E943) ||
         (CP == 0x1E94B) || (0x1E94C <= CP && CP <= 0x1E94F) ||
         (0x1E950 <= CP && CP <= 0x1E959) || (0x1E95A <= CP && CP <= 0x1E95D) ||
         (0x1E95E <= CP && CP <= 0x1E95F) || (0x1E960 <= CP && CP <= 0x1EC6F) ||
         (0x1ECC0 <= CP && CP <= 0x1ECFF) || (0x1ED50 <= CP && CP <= 0x1EDFF);
}
// Returns true if Buffer contains any code point whose bidi class is R or AL
// (including the unassigned default ranges). Scanning stops at the first
// malformed UTF-8 sequence.
static bool hasRTLCharacters(StringRef Buffer) {
  const auto *Cur = reinterpret_cast<const llvm::UTF8 *>(Buffer.begin());
  const auto *End = reinterpret_cast<const llvm::UTF8 *>(Buffer.end());
  while (Cur < End) {
    llvm::UTF32 CodePoint = 0;
    if (llvm::convertUTF8Sequence(&Cur, End, &CodePoint,
                                  llvm::strictConversion) !=
        llvm::conversionOK)
      break;
    if (isR(CodePoint) || isUnassignedR(CodePoint) ||
        isUnassignedAL(CodePoint))
      return true;
  }
  return false;
}
// Standard clang-tidy check boilerplate: no options, no extra state.
MisleadingIdentifierCheck::MisleadingIdentifierCheck(StringRef Name,
                                                     ClangTidyContext *Context)
    : ClangTidyCheck(Name, Context) {}

MisleadingIdentifierCheck::~MisleadingIdentifierCheck() = default;
void MisleadingIdentifierCheck::check(
    const ast_matchers::MatchFinder::MatchResult &Result) {
  const auto *ND = Result.Nodes.getNodeAs<NamedDecl>("nameddecl");
  if (!ND)
    return;
  // Anonymous declarations carry no identifier to inspect.
  const IdentifierInfo *Ident = ND->getIdentifier();
  if (!Ident)
    return;
  if (hasRTLCharacters(Ident->getName()))
    diag(ND->getBeginLoc(), "identifier has right-to-left codepoints");
}
void MisleadingIdentifierCheck::registerMatchers(
    ast_matchers::MatchFinder *Finder) {
  // Match every named declaration; the RTL filtering happens in check().
  Finder->addMatcher(ast_matchers::namedDecl().bind("nameddecl"), this);
}
} // namespace clang::tidy::misc
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/clang-tidy/misc/MisleadingIdentifierCheck.cpp
|
import azure.cosmos.documents as documents
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.errors as errors
import samples.Shared.config as cfg
# ----------------------------------------------------------------------------------------------------------
# Prerequistes -
#
# 1. An Azure Cosmos account -
# https://docs.microsoft.com/azure/cosmos-db/create-sql-api-python#create-a-database-account
#
# 2. Microsoft Azure Cosmos PyPi package -
# https://pypi.python.org/pypi/azure-cosmos/
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic CRUD operations on a Database resource for Azure Cosmos
#
# 1. Query for Database (QueryDatabases)
#
# 2. Create Database (CreateDatabase)
#
# 3. Get a Database by its Id property (ReadDatabase)
#
# 4. List all Database resources on an account (ReadDatabases)
#
# 5. Delete a Database given its Id property (DeleteDatabase)
# ----------------------------------------------------------------------------------------------------------
# Cosmos account connection settings, loaded from the shared sample config.
HOST = cfg.settings['host']
MASTER_KEY = cfg.settings['master_key']
DATABASE_ID = cfg.settings['database_id']
class IDisposable:
    """Context manager that closes (when possible) and releases a wrapped object.

    Usage: ``with IDisposable(obj) as o: ...`` — on exit, the wrapped
    object's ``close()`` method is invoked if it has one, and the
    reference is dropped.
    """

    def __init__(self, obj):
        # The wrapped resource; bound as the target of the with statement.
        self.obj = obj

    def __enter__(self):
        return self.obj  # bound to target

    def __exit__(self, exception_type, exception_val, trace):
        # Previously this only dropped the reference despite the docstring
        # advertising close semantics; actually close when supported.
        close = getattr(self.obj, 'close', None)
        if callable(close):
            close()
        self.obj = None
class DatabaseManagement:
    """Static helpers demonstrating CRUD operations on Cosmos databases."""

    @staticmethod
    def find_database(client, id):
        """Query the account for a database with the given id."""
        print('1. Query for Database')
        query_spec = {
            "query": "SELECT * FROM r WHERE r.id=@id",
            "parameters": [
                {"name": "@id", "value": id}
            ]
        }
        matches = list(client.QueryDatabases(query_spec))
        if matches:
            print('Database with id \'{0}\' was found'.format(id))
        else:
            print('No database with id \'{0}\' was found'.format(id))

    @staticmethod
    def create_database(client, id):
        """Create a database, tolerating the case where it already exists."""
        print("\n2. Create Database")
        try:
            client.CreateDatabase({"id": id})
            print('Database with id \'{0}\' created'.format(id))
        except errors.HTTPFailure as e:
            # 409 Conflict: a database with that id is already present.
            if e.status_code != 409:
                raise
            print('A database with id \'{0}\' already exists'.format(id))

    @staticmethod
    def read_database(client, id):
        """Fetch a single database by id via its resource link."""
        print("\n3. Get a Database by id")
        # All Azure Cosmos resources are addressable via a link built from
        # the resource hierarchy and the resource id, e.g. dbs/Foo.
        database_link = 'dbs/' + id
        try:
            database = client.ReadDatabase(database_link)
            print('Database with id \'{0}\' was found, it\'s _self is {1}'.format(id, database['_self']))
        except errors.HTTPFailure as e:
            # 404 Not Found: no database with that id.
            if e.status_code != 404:
                raise
            print('A database with id \'{0}\' does not exist'.format(id))

    @staticmethod
    def list_databases(client):
        """Print the id of every database on the account."""
        print("\n4. List all Databases on an account")
        print('Databases:')
        for database in client.ReadDatabases():
            print(database['id'])

    @staticmethod
    def delete_database(client, id):
        """Delete a database by id, reporting when it does not exist."""
        print("\n5. Delete Database")
        try:
            client.DeleteDatabase('dbs/' + id)
            print('Database with id \'{0}\' was deleted'.format(id))
        except errors.HTTPFailure as e:
            if e.status_code != 404:
                raise
            print('A database with id \'{0}\' does not exist'.format(id))
def run_sample():
    """Exercise each database operation against the configured Cosmos account."""
    raw_client = cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY})
    with IDisposable(raw_client) as client:
        try:
            DatabaseManagement.find_database(client, DATABASE_ID)    # query for a database
            DatabaseManagement.create_database(client, DATABASE_ID)  # create a database
            DatabaseManagement.read_database(client, DATABASE_ID)    # get a database using its id
            DatabaseManagement.list_databases(client)                # list all databases on an account
            DatabaseManagement.delete_database(client, DATABASE_ID)  # delete database by id
        except errors.HTTPFailure as e:
            print('\nrun_sample has caught an error. {0}'.format(e))
        finally:
            print("\nrun_sample done")
if __name__ == '__main__':
    try:
        run_sample()
    except Exception as e:
        # Catch-all so the sample reports the failure instead of a traceback.
        print("Top level Error: args:{0}, message:{1}".format(e.args,e))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.api import common
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = 'share_networks'
    _detail_version_modifiers = ["add_gateway", "add_mtu"]

    def build_share_network(self, request, share_network):
        """View of a share network."""
        view = self._build_share_network_view(request, share_network)
        return {'share_network': view}

    def build_share_networks(self, request, share_networks, is_detail=True):
        """View of a list of share networks (summary or detail)."""
        views = [self._build_share_network_view(request, share_network,
                                                is_detail)
                 for share_network in share_networks]
        return {'share_networks': views}

    def _build_share_network_view(self, request, share_network,
                                  is_detail=True):
        """Build a single share-network dict; summary view has id/name only."""
        sn = {key: share_network.get(key) for key in ('id', 'name')}
        if is_detail:
            detail_keys = (
                'project_id', 'created_at', 'updated_at', 'neutron_net_id',
                'neutron_subnet_id', 'nova_net_id', 'network_type',
                'segmentation_id', 'cidr', 'ip_version', 'description',
            )
            sn.update({key: share_network.get(key) for key in detail_keys})
        # Apply microversion-specific additions (gateway, mtu, ...).
        self.update_versioned_resource_dict(request, sn, share_network)
        return sn

    @common.ViewBuilder.versioned_method("2.18")
    def add_gateway(self, context, network_dict, network):
        network_dict['gateway'] = network.get('gateway')

    @common.ViewBuilder.versioned_method("2.20")
    def add_mtu(self, context, network_dict, network):
        network_dict['mtu'] = network.get('mtu')
|
unknown
|
codeparrot/codeparrot-clean
| ||
import unittest
from doctest import DocTestSuite
from test import support
import threading
import weakref
import gc
class Weak(object):
    """Trivial class whose instances can be the target of weak references."""
    pass
def target(local, weaklist):
    """Thread body: store a fresh Weak instance on the thread-local and
    record a weak reference to it."""
    obj = Weak()
    local.weak = obj
    weaklist.append(weakref.ref(obj))
class ThreadingLocalTest(unittest.TestCase):
    """Tests for reference management of threading.local values."""

    def test_local_refs(self):
        self._local_refs(20)
        self._local_refs(50)
        self._local_refs(100)

    def _local_refs(self, n):
        """Run ``target`` on *n* sequential threads and check that the
        weakly-referenced per-thread objects die with their threads."""
        local = threading.local()
        weaklist = []
        for i in range(n):
            t = threading.Thread(target=target, args=(local, weaklist))
            t.start()
            t.join()
        del t
        gc.collect()
        self.assertEqual(len(weaklist), n)
        # XXX threading.local keeps the local of the last stopped thread alive.
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertEqual(len(deadlist), n-1)
        # Assignment to the same thread local frees it sometimes (!)
        local.someothervar = None
        gc.collect()
        deadlist = [weak for weak in weaklist if weak() is None]
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue(len(deadlist) in (n-1, n), (n, len(deadlist)))

    def test_derived(self):
        # Issue 3088: if there is a threads switch inside the __init__
        # of a threading.local derived class, the per-thread dictionary
        # is created but not correctly set on the object.
        # The first member set may be bogus.
        import time
        class Local(threading.local):
            def __init__(self):
                time.sleep(0.01)
        local = Local()
        def f(i):
            local.x = i
            # Simply check that the variable is correctly set
            self.assertEqual(local.x, i)
        threads = []
        for i in range(10):
            t = threading.Thread(target=f, args=(i,))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
def test_main():
    """Build and run the test suite, additionally running the doctests
    against the C ``_local`` implementation when it is available."""
    suite = unittest.TestSuite()
    suite.addTest(DocTestSuite('_threading_local'))
    suite.addTest(unittest.makeSuite(ThreadingLocalTest))

    try:
        # The C implementation lives in _thread; "thread" was the Python 2
        # module name and never exists on Python 3, which made this branch
        # unconditionally dead before.
        from _thread import _local
    except ImportError:
        pass
    else:
        import _threading_local
        local_orig = _threading_local.local
        def setUp(test):
            _threading_local.local = _local
        def tearDown(test):
            _threading_local.local = local_orig
        suite.addTest(DocTestSuite('_threading_local',
                                   setUp=setUp, tearDown=tearDown))

    support.run_unittest(suite)
if __name__ == '__main__':
    # Allow running this test module directly.
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import pytest
from pandas import DataFrame
import pandas._testing as tm
# Module-wide mark: silence the DeprecationWarning emitted when a
# BlockManager is passed to DataFrame during these parser round-trips.
pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)
def test_usecols_with_unicode_strings(all_parsers):
    # see gh-13219
    parser = all_parsers
    data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""

    result = parser.read_csv(StringIO(data), usecols=["AAA", "BBB"])

    # Only the two requested columns survive.
    expected = DataFrame(
        {
            "AAA": {
                0: 0.056674972999999997,
                1: 2.6132309819999997,
                2: 3.5689350380000002,
            },
            "BBB": {0: 8, 1: 2, 2: 7},
        }
    )
    tm.assert_frame_equal(result, expected)
def test_usecols_with_single_byte_unicode_strings(all_parsers):
    # see gh-13219
    parser = all_parsers
    data = """A,B,C,D
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""

    result = parser.read_csv(StringIO(data), usecols=["A", "B"])

    # Single-character column names behave the same as longer ones.
    expected = DataFrame(
        {
            "A": {
                0: 0.056674972999999997,
                1: 2.6132309819999997,
                2: 3.5689350380000002,
            },
            "B": {0: 8, 1: 2, 2: 7},
        }
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [["AAA", b"BBB"], [b"AAA", "BBB"]])
def test_usecols_with_mixed_encoding_strings(all_parsers, usecols):
    parser = all_parsers
    data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""

    expected_msg = (
        "'usecols' must either be list-like "
        "of all strings, all unicode, all "
        "integers or a callable."
    )
    # Mixing str and bytes entries in usecols must be rejected outright.
    with pytest.raises(ValueError, match=expected_msg):
        parser.read_csv(StringIO(data), usecols=usecols)
def test_usecols_with_multi_byte_characters(all_parsers):
    parser = all_parsers
    data = """あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""

    result = parser.read_csv(StringIO(data), usecols=["あああ", "いい"])

    # Multi-byte (non-ASCII) column names are valid usecols entries.
    expected = DataFrame(
        {
            "あああ": {
                0: 0.056674972999999997,
                1: 2.6132309819999997,
                2: 3.5689350380000002,
            },
            "いい": {0: 8, 1: 2, 2: 7},
        }
    )
    tm.assert_frame_equal(result, expected)
|
python
|
github
|
https://github.com/pandas-dev/pandas
|
pandas/tests/io/parser/usecols/test_strings.py
|
"""
Copyright 2008, 2009, 2011 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
import os
import logging
from gi.repository import Gtk, Gdk, GObject
from . import Bars, Actions, Utils
from .BlockTreeWindow import BlockTreeWindow
from .Console import Console
from .VariableEditor import VariableEditor
from .Constants import \
NEW_FLOGRAPH_TITLE, DEFAULT_CONSOLE_WINDOW_WIDTH
from .Dialogs import TextDisplay, MessageDialogWrapper
from .Notebook import Notebook, Page
from ..core import Messages
log = logging.getLogger(__name__)
############################################################
# Main window
############################################################
class MainWindow(Gtk.ApplicationWindow):
"""The topmost window with menus, the tool bar, and other major windows."""
# Constants the action handler can use to indicate which panel visibility to change.
BLOCKS = 0
CONSOLE = 1
VARIABLES = 2
    def __init__(self, app, platform):
        """
        MainWindow constructor

        Setup the menu, toolbar, flow graph editor notebook, block selection window...

        Args:
            app: the Gtk.Application that owns this window
            platform: GRC platform object; supplies config, block library and
                generate options
        """
        Gtk.ApplicationWindow.__init__(self, title="GNU Radio Companion", application=app)
        log.debug("__init__()")
        self._platform = platform
        self.app = app
        self.config = platform.config
        # Add all "win" actions to the local
        for x in Actions.get_actions():
            if x.startswith("win."):
                self.add_action(Actions.actions[x])
        # Setup window
        vbox = Gtk.VBox()
        self.add(vbox)
        icon_theme = Gtk.IconTheme.get_default()
        icon = icon_theme.lookup_icon("gnuradio-grc", 48, 0)
        if not icon:
            # Set default window icon
            self.set_icon_from_file(os.path.dirname(os.path.abspath(__file__)) + "/icon.png")
        else:
            # Use gnuradio icon
            self.set_icon(icon.load_icon())
        # Create the menu bar and toolbar
        generate_modes = platform.get_generate_options()
        # This needs to be replaced
        # Have an option for either the application menu or this menu
        self.menu = Bars.Menu()
        self.menu_bar = Gtk.MenuBar.new_from_model(self.menu)
        vbox.pack_start(self.menu_bar, False, False, 0)
        self.tool_bar = Bars.Toolbar()
        self.tool_bar.set_hexpand(True)
        # Show the toolbar
        self.tool_bar.show()
        vbox.pack_start(self.tool_bar, False, False, 0)
        # Main parent container for the different panels
        self.main = Gtk.HPaned() #(orientation=Gtk.Orientation.HORIZONTAL)
        vbox.pack_start(self.main, True, True, 0)
        # Create the notebook
        self.notebook = Notebook()
        self.page_to_be_closed = None
        self.current_page = None # type: Page
        # Create the console window
        self.console = Console()
        # Create the block tree and variable panels
        self.btwin = BlockTreeWindow(platform)
        self.btwin.connect('create_new_block', self._add_block_to_current_flow_graph)
        self.vars = VariableEditor()
        self.vars.connect('create_new_block', self._add_block_to_current_flow_graph)
        self.vars.connect('remove_block', self._remove_block_from_current_flow_graph)
        # Figure out which place to put the variable editor
        self.left = Gtk.VPaned() #orientation=Gtk.Orientation.VERTICAL)
        self.right = Gtk.VPaned() #orientation=Gtk.Orientation.VERTICAL)
        self.left_subpanel = Gtk.HPaned() #orientation=Gtk.Orientation.HORIZONTAL)
        self.variable_panel_sidebar = self.config.variable_editor_sidebar()
        if self.variable_panel_sidebar:
            # Sidebar layout: variable editor shares the right pane with the block tree.
            self.left.pack1(self.notebook)
            self.left.pack2(self.console, False)
            self.right.pack1(self.btwin)
            self.right.pack2(self.vars, False)
        else:
            # Put the variable editor in a panel with the console
            self.left.pack1(self.notebook)
            self.left_subpanel.pack1(self.console, shrink=False)
            self.left_subpanel.pack2(self.vars, resize=False, shrink=True)
            self.left.pack2(self.left_subpanel, False)
            # Create the right panel
            self.right.pack1(self.btwin)
        self.main.pack1(self.left)
        self.main.pack2(self.right, False)
        # Load preferences and show the main window
        self.resize(*self.config.main_window_size())
        self.main.set_position(self.config.blocks_window_position())
        self.left.set_position(self.config.console_window_position())
        if self.variable_panel_sidebar:
            self.right.set_position(self.config.variable_editor_position(sidebar=True))
        else:
            self.left_subpanel.set_position(self.config.variable_editor_position())
        self.show_all()
        log.debug("Main window ready")
############################################################
# Event Handlers
############################################################
    def _add_block_to_current_flow_graph(self, widget, key):
        """Signal handler: add the block identified by *key* to the current flow graph."""
        self.current_flow_graph.add_new_block(key)
def _remove_block_from_current_flow_graph(self, widget, key):
block = self.current_flow_graph.get_block(key)
self.current_flow_graph.remove_element(block)
    def _quit(self, window, event):
        """
        Handle the delete event from the main window.
        Generated by pressing X to close, alt+f4, or right click+close.
        This method in turns calls the state handler to quit.

        Returns:
            true
        """
        Actions.APPLICATION_QUIT()
        # Returning True stops GTK's default destroy; the quit action decides
        # whether the window actually closes (e.g. after save prompts).
        return True
def update_panel_visibility(self, panel, visibility=True):
"""
Handles changing visibility of panels.
"""
# Set the visibility for the requested panel, then update the containers if they need
# to be hidden as well.
if panel == self.BLOCKS:
if visibility:
self.btwin.show()
else:
self.btwin.hide()
elif panel == self.CONSOLE:
if visibility:
self.console.show()
else:
self.console.hide()
elif panel == self.VARIABLES:
if visibility:
self.vars.show()
else:
self.vars.hide()
else:
return
if self.variable_panel_sidebar:
# If both the variable editor and block panels are hidden, hide the right container
if not (self.btwin.get_property('visible')) and not (self.vars.get_property('visible')):
self.right.hide()
else:
self.right.show()
else:
if not (self.btwin.get_property('visible')):
self.right.hide()
else:
self.right.show()
if not (self.vars.get_property('visible')) and not (self.console.get_property('visible')):
self.left_subpanel.hide()
else:
self.left_subpanel.show()
############################################################
# Console Window
############################################################
    @property
    def current_page(self):
        """The active notebook page (delegates to the Notebook widget)."""
        return self.notebook.current_page

    @current_page.setter
    def current_page(self, page):
        self.notebook.current_page = page
    def add_console_line(self, line):
        """
        Place line at the end of the text buffer, then scroll its window all the way down.

        Args:
            line: the new text
        """
        # Delegates entirely to the Console widget.
        self.console.add_line(line)
############################################################
# Pages: create and close
############################################################
    def new_page(self, file_path='', show=False):
        """
        Create a new notebook page.
        Set the tab to be selected.

        Args:
            file_path: optional file to load into the flow graph
            show: true if the page should be shown after loading
        """
        #if the file is already open, show the open page and return
        if file_path and file_path in self._get_files(): #already open
            page = self.notebook.get_nth_page(self._get_files().index(file_path))
            self._set_page(page)
            return
        try: #try to load from file
            if file_path: Messages.send_start_load(file_path)
            flow_graph = self._platform.make_flow_graph()
            flow_graph.grc_file_path = file_path
            #print flow_graph
            page = Page(
                self,
                flow_graph=flow_graph,
                file_path=file_path,
            )
            # Surface any error recorded on the Messages module while loading.
            if getattr(Messages, 'flowgraph_error') is not None:
                Messages.send(
                    ">>> Check: {}\n>>> FlowGraph Error: {}\n".format(
                        str(Messages.flowgraph_error_file),
                        str(Messages.flowgraph_error)
                    )
                )
            if file_path: Messages.send_end_load()
        except Exception as e: #return on failure
            Messages.send_fail_load(e)
            if isinstance(e, KeyError) and str(e) == "'options'":
                # This error is unrecoverable, so crash gracefully
                exit(-1)
            return
        #add this page to the notebook
        self.notebook.append_page(page, page.tab)
        self.notebook.set_tab_reorderable(page, True)
        #only show if blank or manual
        if not file_path or show: self._set_page(page)
    def close_pages(self):
        """
        Close all the pages in this notebook.

        Returns:
            true if all closed
        """
        open_files = [file for file in self._get_files() if file]  # filter blank files
        open_file = self.current_page.file_path
        # Close unsaved pages first (False sorts before True) so the user is
        # prompted up front; stop as soon as a close is cancelled.
        for page in sorted(self.get_pages(), key=lambda p: p.saved):
            self.page_to_be_closed = page
            closed = self.close_page(False)
            if not closed:
                break
        if self.notebook.get_n_pages(): return False
        # Persist session state (open files, geometry, pane positions) only
        # once every page has actually been closed.
        self.config.set_open_files(open_files)
        self.config.file_open(open_file)
        self.config.main_window_size(self.get_size())
        self.config.console_window_position(self.left.get_position())
        self.config.blocks_window_position(self.main.get_position())
        if self.variable_panel_sidebar:
            self.config.variable_editor_position(self.right.get_position(), sidebar=True)
        else:
            self.config.variable_editor_position(self.left_subpanel.get_position())
        self.config.save()
        return True
    def close_page(self, ensure=True):
        """
        Close the current page.
        If the notebook becomes empty, and ensure is true,
        call new page upon exit to ensure that at least one page exists.

        Args:
            ensure: boolean

        Returns:
            true if the page was closed, false if the user cancelled
        """
        if not self.page_to_be_closed: self.page_to_be_closed = self.current_page
        # show the page if it has an executing flow graph or is unsaved
        if self.page_to_be_closed.process or not self.page_to_be_closed.saved:
            self._set_page(self.page_to_be_closed)
        # unsaved? ask the user
        if not self.page_to_be_closed.saved:
            response = self._save_changes()  # return value is either OK, CLOSE, or CANCEL
            if response == Gtk.ResponseType.OK:
                Actions.FLOW_GRAPH_SAVE()  # try to save
                if not self.page_to_be_closed.saved:  # still unsaved? (save failed/cancelled)
                    self.page_to_be_closed = None  # set the page to be closed back to None
                    return False
            elif response == Gtk.ResponseType.CANCEL:
                self.page_to_be_closed = None
                return False
        # stop the flow graph if executing
        if self.page_to_be_closed.process:
            Actions.FLOW_GRAPH_KILL()
        # remove the page
        self.notebook.remove_page(self.notebook.page_num(self.page_to_be_closed))
        if ensure and self.notebook.get_n_pages() == 0:
            self.new_page()  # no pages, make a new one
        self.page_to_be_closed = None  # set the page to be closed back to None
        return True
############################################################
# Misc
############################################################
    def update(self):
        """
        Set the title of the main window.
        Set the titles on the page tabs.
        Show/hide the console window.
        """
        page = self.current_page
        basename = os.path.basename(page.file_path)
        dirname = os.path.dirname(page.file_path)
        # Window title: "*name(read only) - dir", with fallbacks for a new,
        # unsaved flow graph.
        Gtk.Window.set_title(self, ''.join((
            '*' if not page.saved else '', basename if basename else NEW_FLOGRAPH_TITLE,
            '(read only)' if page.get_read_only() else '', ' - ',
            dirname if dirname else self._platform.config.name,
        )))
        # set tab titles: unsaved pages render red, read-only pages get ' (ro)'
        for page in self.get_pages():
            file_name = os.path.splitext(os.path.basename(page.file_path))[0]
            page.set_markup('<span foreground="{foreground}">{title}{ro}</span>'.format(
                foreground='black' if page.saved else 'red', ro=' (ro)' if page.get_read_only() else '',
                title=Utils.encode(file_name or NEW_FLOGRAPH_TITLE),
            ))
            fpath = page.file_path
            if not fpath:
                fpath = '(unsaved)'
            page.set_tooltip(fpath)
        # show/hide notebook tabs: only useful when more than one page is open
        self.notebook.set_show_tabs(len(self.get_pages()) > 1)
        # Need to update the variable window when changing
        self.vars.update_gui(self.current_flow_graph.blocks)
    def update_pages(self):
        """
        Forces a reload of all the pages in this notebook.
        """
        for page in self.get_pages():
            success = page.flow_graph.reload()
            # NOTE(review): the comment below (from the original) contradicts
            # the code -- the page is marked unsaved when reload() reports
            # success. Presumably reload() returning truthy means the graph
            # changed and needs saving; confirm against flow_graph.reload().
            if success:  # Only set saved if errors occurred during import
                page.saved = False
    @property
    def current_flow_graph(self):
        """The flow graph of the currently selected page."""
        return self.current_page.flow_graph
def get_focus_flag(self):
"""
Get the focus flag from the current page.
Returns:
the focus flag
"""
return self.current_page.drawing_area.get_focus_flag()
############################################################
# Helpers
############################################################
def _set_page(self, page):
"""
Set the current page.
Args:
page: the page widget
"""
self.current_page = page
self.notebook.set_current_page(self.notebook.page_num(self.current_page))
def _save_changes(self):
"""
Save changes to flow graph?
Returns:
the response_id (see buttons variable below)
"""
buttons = (
'Close without saving', Gtk.ResponseType.CLOSE,
Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK
)
return MessageDialogWrapper(
self, Gtk.MessageType.QUESTION, Gtk.ButtonsType.NONE, 'Unsaved Changes!',
'Would you like to save changes before closing?', Gtk.ResponseType.OK, buttons
).run_and_destroy()
def _get_files(self):
"""
Get the file names for all the pages, in order.
Returns:
list of file paths
"""
return [page.file_path for page in self.get_pages()]
def get_pages(self):
"""
Get a list of all pages in the notebook.
Returns:
list of pages
"""
return [self.notebook.get_nth_page(page_num)
for page_num in range(self.notebook.get_n_pages())]
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Generated TypeScript compiler baseline: the original .ts source followed by
// the emitted .js for the async-arrow-without-parentheses conformance case.
//// [tests/cases/conformance/async/es6/asyncArrowFunction/asyncUnParenthesizedArrowFunction_es6.ts] ////

//// [asyncUnParenthesizedArrowFunction_es6.ts]
declare function someOtherFunction(i: any): Promise<void>;
const x = async i => await someOtherFunction(i)
const x1 = async (i) => await someOtherFunction(i);

//// [asyncUnParenthesizedArrowFunction_es6.js]
"use strict";
// NOTE(review): the emitted code references the __awaiter helper without
// defining it -- presumably the baseline elides helpers; confirm.
const x = (i) => __awaiter(void 0, void 0, void 0, function* () { return yield someOtherFunction(i); });
const x1 = (i) => __awaiter(void 0, void 0, void 0, function* () { return yield someOtherFunction(i); });
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/asyncUnParenthesizedArrowFunction_es6.js
|
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_QUEUE = 'queues/%s/%s'
API_QUEUE_PURGE = 'queues/%s/%s/contents'
API_QUEUES = 'queues'
API_QUEUES_VIRTUAL_HOST = 'queues/%s'
API_QUEUE_BINDINGS = 'queues/%s/%s/bindings'
API_QUEUE_BIND = 'bindings/%s/e/%s/q/%s'
API_QUEUE_UNBIND = 'bindings/%s/e/%s/q/%s/%s'
class Queue(ManagementHandler):
    """Queue operations against the RabbitMQ HTTP management API."""

    def get(self, queue, virtual_host='/'):
        """Fetch the details of a single Queue.

        :param queue: Queue name
        :param str virtual_host: Virtual host name

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: dict
        """
        vhost = quote(virtual_host, '')
        return self.http_client.get(API_QUEUE % (vhost, queue))

    def list(self, virtual_host='/', show_all=False):
        """List Queues, for one virtual host or for all of them.

        :param str virtual_host: Virtual host name
        :param bool show_all: List all Queues

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: list
        """
        if show_all:
            return self.http_client.get(API_QUEUES)
        vhost = quote(virtual_host, '')
        return self.http_client.get(API_QUEUES_VIRTUAL_HOST % vhost)

    def declare(self, queue='', virtual_host='/', passive=False, durable=False,
                auto_delete=False, arguments=None):
        """Declare a Queue (with passive=True, only look it up).

        :param str queue: Queue name
        :param str virtual_host: Virtual host name
        :param bool passive: Do not create
        :param bool durable: Durable queue
        :param bool auto_delete: Automatically delete when not in use
        :param dict|None arguments: Queue key/value arguments

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: dict
        """
        if passive:
            # Passive declare is a plain lookup; nothing is created.
            return self.get(queue, virtual_host=virtual_host)
        # The payload carries the raw (unquoted) virtual host name.
        declare_payload = json.dumps({
            'durable': durable,
            'auto_delete': auto_delete,
            'arguments': arguments or {},
            'vhost': virtual_host
        })
        return self.http_client.put(
            API_QUEUE % (quote(virtual_host, ''), queue),
            payload=declare_payload)

    def delete(self, queue, virtual_host='/'):
        """Delete a Queue.

        :param queue: Queue name
        :param str virtual_host: Virtual host name

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: dict
        """
        vhost = quote(virtual_host, '')
        return self.http_client.delete(API_QUEUE % (vhost, queue))

    def purge(self, queue, virtual_host='/'):
        """Purge all messages from a Queue.

        :param queue: Queue name
        :param str virtual_host: Virtual host name

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: None
        """
        vhost = quote(virtual_host, '')
        return self.http_client.delete(API_QUEUE_PURGE % (vhost, queue))

    def bindings(self, queue, virtual_host='/'):
        """Get the bindings of a Queue.

        :param str queue: Queue name
        :param str virtual_host: Virtual host name

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: list
        """
        vhost = quote(virtual_host, '')
        return self.http_client.get(API_QUEUE_BINDINGS % (vhost, queue))

    def bind(self, queue='', exchange='', routing_key='', virtual_host='/',
             arguments=None):
        """Bind a Queue to an Exchange.

        :param str queue: Queue name
        :param str exchange: Exchange name
        :param str routing_key: The routing key to use
        :param str virtual_host: Virtual host name
        :param dict|None arguments: Bind key/value arguments

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: None
        """
        # Build the payload with the raw virtual host before URL-quoting it.
        bind_payload = json.dumps({
            'destination': queue,
            'destination_type': 'q',
            'routing_key': routing_key,
            'source': exchange,
            'arguments': arguments or {},
            'vhost': virtual_host
        })
        vhost = quote(virtual_host, '')
        return self.http_client.post(
            API_QUEUE_BIND % (vhost, exchange, queue),
            payload=bind_payload)

    def unbind(self, queue='', exchange='', routing_key='', virtual_host='/',
               properties_key=None):
        """Remove a binding between a Queue and an Exchange.

        :param str queue: Queue name
        :param str exchange: Exchange name
        :param str routing_key: The routing key to use
        :param str virtual_host: Virtual host name
        :param str properties_key:

        :raises ApiError: Raises if the remote server encountered an error.
        :raises ApiConnectionError: Raises if there was a connectivity issue.

        :rtype: None
        """
        # The properties key falls back to the routing key, both in the
        # payload and in the URL.
        key = properties_key or routing_key
        unbind_payload = json.dumps({
            'destination': queue,
            'destination_type': 'q',
            'properties_key': key,
            'source': exchange,
            'vhost': virtual_host
        })
        vhost = quote(virtual_host, '')
        return self.http_client.delete(
            API_QUEUE_UNBIND % (vhost, exchange, queue, key),
            payload=unbind_payload)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.expressionInfoProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.expressionInfoProvider.AbstractReturnTargetSymbolTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/expressionInfoProvider/returnExpressionTargetSymbol")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleReturnTargetSymbolTestGenerated extends AbstractReturnTargetSymbolTest {
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // FIR frontend, plain source module, normal analysis session, IDE mode.
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    // Fails when a test-data file exists on disk without a generated test method,
    // i.e. when this file is out of date and needs regeneration.
    @Test
    public void testAllFilesPresentInReturnExpressionTargetSymbol() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/expressionInfoProvider/returnExpressionTargetSymbol"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("labeledReturn.kt")
    public void testLabeledReturn() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/returnExpressionTargetSymbol/labeledReturn.kt");
    }

    @Test
    @TestMetadata("normalReturn.kt")
    public void testNormalReturn() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/returnExpressionTargetSymbol/normalReturn.kt");
    }

    @Test
    @TestMetadata("unresolvedReturn.kt")
    public void testUnresolvedReturn() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/returnExpressionTargetSymbol/unresolvedReturn.kt");
    }
}
|
java
|
github
|
https://github.com/JetBrains/kotlin
|
analysis/analysis-api-fir/tests-gen/org/jetbrains/kotlin/analysis/api/fir/test/cases/generated/cases/components/expressionInfoProvider/FirIdeNormalAnalysisSourceModuleReturnTargetSymbolTestGenerated.java
|
{
"favorite": {
"favoriteDags_many": "Primeros {{count}} Dags Favoritos",
"favoriteDags_one": "Primer {{count}} Dag Favorito",
"favoriteDags_other": "Primeros {{count}} Dags Favoritos",
"noDagRuns": "Aún no hay Ejecuciones de Dag para este Dag.",
"noFavoriteDags": "Aún no hay favoritos. Haz clic en el ícono de estrella junto a un Dag en la lista para agregarlo a tus favoritos."
},
"group": "Grupo",
"health": {
"dagProcessor": "Procesador de Dags",
"health": "Salud",
"healthy": "Saludable",
"lastHeartbeat": "Último Heartbeat",
"metaDatabase": "Base de datos de la metadata",
"scheduler": "Programador",
"status": "Estado",
"triggerer": "Triggerer",
"unhealthy": "No Saludable"
},
"history": "Historial",
"importErrors": {
"dagImportError_many": "Errores de Importación de Dags",
"dagImportError_one": "Error de Importación de Dag",
"dagImportError_other": "Errores de Importación de Dags",
"searchByFile": "Buscar por archivo",
"timestamp": "Timestamp"
},
"managePools": "Gestionar Pools",
"noAssetEvents": "No se encontraron Eventos de Asset.",
"poolSlots": "Slots del Pool",
"sortBy": {
"newestFirst": "Más Recientes",
"oldestFirst": "Más Antiguos"
},
"source": "Origen",
"stats": {
"activeDags": "Dags Activos",
"failedDags": "Dags Fallidos",
"queuedDags": "Dags en Cola",
"requiredActions": "Acciones Requeridas",
"runningDags": "Dags en Ejecución",
"stats": "Estadísticas"
},
"uri": "URI",
"welcome": "Te damos la bienvenida"
}
|
json
|
github
|
https://github.com/apache/airflow
|
airflow-core/src/airflow/ui/public/i18n/locales/es/dashboard.json
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Module maturity/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_interface
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages physical attributes of interfaces.
description:
- Manages physical attributes of interfaces of NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- This module is also used to create logical interfaces such as
svis and loopbacks.
- Be cautious of platform specific idiosyncrasies. For example,
when you default a loopback interface, the admin state toggles
on certain versions of NX-OS.
- The M(nxos_overlay_global) C(anycast_gateway_mac) attribute must be
set before setting the C(fabric_forwarding_anycast_gateway) property.
options:
name:
description:
- Full name of interface, i.e. Ethernet1/1, port-channel10.
required: true
aliases: [interface]
interface_type:
description:
- Interface type to be unconfigured from the device.
choices: ['loopback', 'portchannel', 'svi', 'nve']
version_added: 2.2
speed:
description:
- Interface link speed. Applicable for ethernet interface only.
version_added: 2.5
admin_state:
description:
- Administrative state of the interface.
default: up
choices: ['up','down']
description:
description:
- Interface description.
mode:
description:
- Manage Layer 2 or Layer 3 state of the interface.
This option is supported for ethernet and portchannel interface.
Applicable for ethernet and portchannel interface only.
choices: ['layer2','layer3']
mtu:
description:
- MTU for a specific interface. Must be an even number between 576 and 9216.
Applicable for ethernet interface only.
version_added: 2.5
ip_forward:
description:
- Enable/Disable ip forward feature on SVIs.
choices: ['enable','disable']
version_added: 2.2
fabric_forwarding_anycast_gateway:
description:
- Associate SVI with anycast gateway under VLAN configuration mode.
Applicable for SVI interface only.
type: bool
version_added: 2.2
duplex:
description:
- Interface link status. Applicable for ethernet interface only.
default: auto
choices: ['full', 'half', 'auto']
version_added: 2.5
tx_rate:
description:
- Transmit rate in bits per second (bps).
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
version_added: 2.5
rx_rate:
description:
- Receiver rate in bits per second (bps).
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
version_added: 2.5
neighbors:
description:
- Check the operational state of given interface C(name) for LLDP neighbor.
- The following suboptions are available. This is state check parameter only.
suboptions:
host:
description:
- "LLDP neighbor host for given interface C(name)."
port:
description:
- "LLDP neighbor port to which given interface C(name) is connected."
version_added: 2.5
aggregate:
description: List of Interfaces definitions.
version_added: 2.5
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent','default']
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
"""
EXAMPLES = """
- name: Ensure an interface is a Layer 3 port and that it has the proper description
nxos_interface:
name: Ethernet1/1
description: 'Configured by Ansible'
mode: layer3
- name: Admin down an interface
nxos_interface:
name: Ethernet2/1
admin_state: down
- name: Remove all loopback interfaces
nxos_interface:
name: loopback
state: absent
- name: Remove all logical interfaces
nxos_interface:
interface_type: "{{ item }} "
state: absent
loop:
- loopback
- portchannel
- svi
- nve
- name: Admin up all loopback interfaces
nxos_interface:
name: loopback 0-1023
admin_state: up
- name: Admin down all loopback interfaces
nxos_interface:
name: looback 0-1023
admin_state: down
- name: Check neighbors intent arguments
nxos_interface:
name: Ethernet2/3
neighbors:
- port: Ethernet2/3
host: abc.mycompany.com
- name: Add interface using aggregate
nxos_interface:
aggregate:
- { name: Ethernet0/1, mtu: 256, description: test-interface-1 }
- { name: Ethernet0/2, mtu: 516, description: test-interface-2 }
duplex: full
speed: 100
state: present
- name: Delete interface using aggregate
nxos_interface:
aggregate:
- name: Loopback9
- name: Loopback10
state: absent
- name: Check intent arguments
nxos_interface:
name: Ethernet0/2
state: up
tx_rate: ge(0)
rx_rate: le(0)
"""
RETURN = """
commands:
description: command list sent to the device
returned: always
type: list
sample:
- interface Ethernet2/3
- mtu 1500
- speed 10
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, normalize_interface
from ansible.module_utils.network.nxos.nxos import get_interface_type
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import conditional, remove_default_spec
def execute_show_command(command, module):
    """Run a single show command on the device.

    JSON output is requested except for 'show run' commands, which only
    support text. Returns the command output list, or [] when the device
    reports the command as invalid.
    """
    output_format = 'text' if 'show run' in command else 'json'
    cmds = [{
        'command': command,
        'output': output_format,
    }]
    body = run_commands(module, cmds, check_rc=False)
    # An invalid command yields a body containing "Invalid"; normalize to [].
    if body and "Invalid" in body[0]:
        return []
    return body
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' equals *name*, else None."""
    return next((entry for entry in lst if entry['name'] == name), None)
def get_interfaces_dict(module):
    """Gets all active interfaces on a given switch, grouped by type.

    Returns a dict mapping each known type name to a list of interface
    names, or {} when the show command returns nothing.
    """
    try:
        body = execute_show_command('show interface', module)[0]
    except IndexError:
        return {}
    interfaces = {key: [] for key in (
        'ethernet', 'svi', 'loopback', 'management',
        'portchannel', 'nve', 'unknown',
    )}
    if body:
        # NOTE(review): assumes ROW_interface is a list of rows; a
        # single-interface reply may be a bare dict -- confirm against the
        # device output format.
        for row in body['TABLE_interface']['ROW_interface']:
            intf_name = row['interface']
            interfaces[get_interface_type(intf_name)].append(intf_name)
    return interfaces
def get_vlan_interface_attributes(name, intf_type, module):
    """Parse description and admin state from an SVI's running config.

    Returns {'description': ..., 'admin_state': ...} or None when the
    config cannot be read. (*intf_type* is accepted for call-site
    compatibility but not used.)
    """
    command = 'show run interface {0} all'.format(name)
    try:
        body = execute_show_command(command, module)[0]
    except (IndexError, TypeError):
        return None
    if not body:
        return None
    description = None
    admin_state = 'down'
    for config_line in body.split('\n'):
        if 'description' in config_line:
            description = config_line.lstrip().split("description")[1].lstrip()
        elif 'no shutdown' in config_line:
            admin_state = 'up'
    return dict(description=description, admin_state=admin_state)
def get_interface_type_removed_cmds(interfaces):
    """Build 'no interface X' commands for every interface except Vlan1,
    which cannot be removed."""
    return ['no interface {0}'.format(intf)
            for intf in interfaces if intf != 'Vlan1']
def get_admin_state(admin_state):
    """Map an admin_state value to its NX-OS config command ('' if unknown)."""
    return {'up': 'no shutdown', 'down': 'shutdown'}.get(admin_state, '')
def is_default_interface(name, module):
    """Report whether *name* exists and carries only default config.

    Returns True when the interface section has no extra config lines,
    False when it does, and the string 'DNE' when the interface is absent.
    """
    command = 'show run interface {0}'.format(name)
    try:
        body = execute_show_command(command, module)[0]
    except (IndexError, TypeError):
        body = ''
    if not body:
        return 'DNE'
    in_section = False
    for line in body.split('\n'):
        if line.startswith('interface'):
            in_section = True
        # Any non-empty line after the 'interface' header is extra config.
        if in_section and line and not line.startswith('interface'):
            return False
    return True
def add_command_to_interface(interface, cmd, commands):
    """Append *cmd* to *commands*, emitting the 'interface X' header line
    the first time this interface receives a command."""
    if interface not in commands:
        commands.append(interface)
    commands.append(cmd)
def map_obj_to_commands(updates, module):
    """Diff desired state (want) against device state (have) into CLI commands.

    Args:
        updates: tuple (want, have) of lists of interface parameter dicts
        module: AnsibleModule, used for fail_json/exit_json

    Returns:
        tuple (commands, commands2): the main config commands, plus
        follow-up admin-state commands for ethernet interfaces whose
        layer2/layer3 mode changed (a mode flip can toggle admin state).
    """
    commands = list()
    commands2 = list()
    want, have = updates
    # Simple attributes emitted generically as '<attr> <value>' lines.
    args = ('speed', 'description', 'duplex', 'mtu')
    for w in want:
        name = w['name']
        mode = w['mode']
        ip_forward = w['ip_forward']
        fabric_forwarding_anycast_gateway = w['fabric_forwarding_anycast_gateway']
        admin_state = w['admin_state']
        state = w['state']
        interface_type = w['interface_type']
        del w['state']
        if name:
            w['interface_type'] = None
        if interface_type:
            obj_in_have = {}
            if state in ('present', 'default'):
                module.fail_json(msg='The interface_type param can be used only with state absent.')
        else:
            obj_in_have = search_obj_in_list(name, have)
            is_default = is_default_interface(name, module)
        if name:
            interface = 'interface ' + name
        if state == 'absent':
            if obj_in_have:
                commands.append('no interface {0}'.format(name))
            elif interface_type and not obj_in_have:
                # Remove every interface of the requested logical type.
                intfs = get_interfaces_dict(module)[interface_type]
                cmds = get_interface_type_removed_cmds(intfs)
                commands.extend(cmds)
        elif state == 'present':
            if obj_in_have:
                # Don't run switchport command for loopback and svi interfaces
                if get_interface_type(name) in ('ethernet', 'portchannel'):
                    if mode == 'layer2' and mode != obj_in_have.get('mode'):
                        add_command_to_interface(interface, 'switchport', commands)
                    elif mode == 'layer3' and mode != obj_in_have.get('mode'):
                        add_command_to_interface(interface, 'no switchport', commands)
                if admin_state == 'up' and admin_state != obj_in_have.get('admin_state'):
                    add_command_to_interface(interface, 'no shutdown', commands)
                elif admin_state == 'down' and admin_state != obj_in_have.get('admin_state'):
                    add_command_to_interface(interface, 'shutdown', commands)
                if ip_forward == 'enable' and ip_forward != obj_in_have.get('ip_forward'):
                    add_command_to_interface(interface, 'ip forward', commands)
                elif ip_forward == 'disable' and ip_forward != obj_in_have.get('ip_forward'):
                    # BUG FIX: previously compared against obj_in_have.get('ip forward')
                    # (space instead of underscore) -- a key that never exists -- so
                    # 'no ip forward' was re-sent on every run, breaking idempotence.
                    add_command_to_interface(interface, 'no ip forward', commands)
                if (fabric_forwarding_anycast_gateway is True and
                        obj_in_have.get('fabric_forwarding_anycast_gateway') is False):
                    add_command_to_interface(interface, 'fabric forwarding mode anycast-gateway', commands)
                elif (fabric_forwarding_anycast_gateway is False and
                        obj_in_have.get('fabric_forwarding_anycast_gateway') is True):
                    add_command_to_interface(interface, 'no fabric forwarding mode anycast-gateway', commands)
                for item in args:
                    candidate = w.get(item)
                    if candidate and candidate != obj_in_have.get(item):
                        cmd = item + ' ' + str(candidate)
                        add_command_to_interface(interface, cmd, commands)
                if name and get_interface_type(name) == 'ethernet':
                    if mode != obj_in_have.get('mode'):
                        # Re-assert admin state in a second batch, since the
                        # mode change above may toggle it on some platforms.
                        admin_state = w.get('admin_state') or obj_in_have.get('admin_state')
                        if admin_state:
                            c1 = 'interface {0}'.format(normalize_interface(w['name']))
                            c2 = get_admin_state(admin_state)
                            commands2.append(c1)
                            commands2.append(c2)
            else:
                # Interface does not exist yet: emit the full desired config.
                commands.append(interface)
                # Don't run switchport command for loopback and svi interfaces
                if get_interface_type(name) in ('ethernet', 'portchannel'):
                    if mode == 'layer2':
                        commands.append('switchport')
                    elif mode == 'layer3':
                        commands.append('no switchport')
                if admin_state == 'up':
                    commands.append('no shutdown')
                elif admin_state == 'down':
                    commands.append('shutdown')
                if ip_forward == 'enable':
                    commands.append('ip forward')
                elif ip_forward == 'disable':
                    commands.append('no ip forward')
                if fabric_forwarding_anycast_gateway is True:
                    commands.append('fabric forwarding mode anycast-gateway')
                elif fabric_forwarding_anycast_gateway is False:
                    commands.append('no fabric forwarding mode anycast-gateway')
                for item in args:
                    candidate = w.get(item)
                    if candidate:
                        commands.append(item + ' ' + str(candidate))
        elif state == 'default':
            if is_default is False:
                commands.append('default interface {0}'.format(name))
            elif is_default == 'DNE':
                module.exit_json(msg='interface you are trying to default does not exist')
    return commands, commands2
def map_params_to_obj(module):
    """Build the list of desired-state interface dicts from module params.

    With an aggregate, unset per-item values fall back to the top-level
    params; otherwise a single dict is built straight from the params.
    """
    aggregate = module.params.get('aggregate')
    if not aggregate:
        params = module.params
        return [{
            'name': normalize_interface(params['name']),
            'description': params['description'],
            'speed': params['speed'],
            'mode': params['mode'],
            'mtu': params['mtu'],
            'duplex': params['duplex'],
            'ip_forward': params['ip_forward'],
            'fabric_forwarding_anycast_gateway': params['fabric_forwarding_anycast_gateway'],
            'admin_state': params['admin_state'],
            'state': params['state'],
            'interface_type': params['interface_type'],
            'tx_rate': params['tx_rate'],
            'rx_rate': params['rx_rate'],
            'neighbors': params['neighbors'],
        }]
    obj = []
    for item in aggregate:
        # Fill unset per-item keys from the top-level module params.
        for key in item:
            if item.get(key) is None:
                item[key] = module.params[key]
        entry = item.copy()
        entry['name'] = normalize_interface(entry['name'])
        obj.append(entry)
    return obj
def map_config_to_obj(want, module):
    """Query the device and build 'have' dicts mirroring each wanted interface.

    Args:
        want: list of desired-state interface dicts (only 'name' is read)
        module: AnsibleModule used to run show commands / fail

    Returns:
        list of per-interface state dicts; an empty list when the show
        command yields nothing parseable.
    """
    objs = list()
    for w in want:
        obj = dict(name=None, description=None, admin_state=None, speed=None,
                   mtu=None, mode=None, duplex=None, interface_type=None,
                   ip_forward=None, fabric_forwarding_anycast_gateway=None)
        if not w['name']:
            # NOTE(review): returns a bare dict here while every other path
            # returns a list -- presumably only interface_type-style requests
            # (no name) reach this; confirm at the call site.
            return obj
        command = 'show interface {0}'.format(w['name'])
        try:
            body = execute_show_command(command, module)[0]
        except IndexError:
            return list()
        if body:
            try:
                interface_table = body['TABLE_interface']['ROW_interface']
            except (KeyError, TypeError):
                return list()
            if interface_table:
                if interface_table.get('eth_mode') == 'fex-fabric':
                    module.fail_json(msg='nxos_interface does not support interfaces with mode "fex-fabric"')
                intf_type = get_interface_type(w['name'])
                if intf_type in ['portchannel', 'ethernet']:
                    # Map switchport mode names onto the module's layer2/layer3 vocabulary.
                    mode = interface_table.get('eth_mode')
                    if mode in ('access', 'trunk', 'dot1q-tunnel'):
                        obj['mode'] = 'layer2'
                    elif mode in ('routed', 'layer3'):
                        obj['mode'] = 'layer3'
                    else:
                        obj['mode'] = 'layer3'
                if intf_type == 'ethernet':
                    obj['name'] = normalize_interface(interface_table.get('interface'))
                    obj['admin_state'] = interface_table.get('admin_state')
                    obj['description'] = interface_table.get('desc')
                    obj['mtu'] = interface_table.get('eth_mtu')
                    obj['duplex'] = interface_table.get('eth_duplex')
                    speed = interface_table.get('eth_speed')  # captured but not used below
                    # speed/duplex from the running config override the show
                    # output when explicitly configured; 'auto' otherwise.
                    command = 'show run interface {0}'.format(obj['name'])
                    body = execute_show_command(command, module)[0]
                    speed_match = re.search(r'speed (\d+)', body)
                    if speed_match is None:
                        obj['speed'] = 'auto'
                    else:
                        obj['speed'] = speed_match.group(1)
                    duplex_match = re.search(r'duplex (\S+)', body)
                    if duplex_match is None:
                        obj['duplex'] = 'auto'
                    else:
                        obj['duplex'] = duplex_match.group(1)
                    if 'ip forward' in body:
                        obj['ip_forward'] = 'enable'
                    else:
                        obj['ip_forward'] = 'disable'
                elif intf_type == 'svi':
                    obj['name'] = normalize_interface(interface_table.get('interface'))
                    attributes = get_vlan_interface_attributes(obj['name'], intf_type, module)
                    # NOTE(review): get_vlan_interface_attributes() can return
                    # None, which would raise AttributeError here; also the
                    # 'nxapibug' / 'nxapi_bug' fallback spellings are
                    # inconsistent -- confirm intent.
                    obj['admin_state'] = str(attributes.get('admin_state',
                                                            'nxapibug'))
                    obj['description'] = str(attributes.get('description',
                                                            'nxapi_bug'))
                    obj['mtu'] = interface_table.get('svi_mtu')
                    command = 'show run interface {0}'.format(obj['name'])
                    body = execute_show_command(command, module)[0]
                    if 'ip forward' in body:
                        obj['ip_forward'] = 'enable'
                    else:
                        obj['ip_forward'] = 'disable'
                    if 'fabric forwarding mode anycast-gateway' in body:
                        obj['fabric_forwarding_anycast_gateway'] = True
                    else:
                        obj['fabric_forwarding_anycast_gateway'] = False
                elif intf_type in ('loopback', 'management', 'nve'):
                    obj['name'] = normalize_interface(interface_table.get('interface'))
                    obj['admin_state'] = interface_table.get('admin_state')
                    obj['description'] = interface_table.get('desc')
                elif intf_type == 'portchannel':
                    obj['name'] = normalize_interface(interface_table.get('interface'))
                    obj['admin_state'] = interface_table.get('admin_state')
                    obj['description'] = interface_table.get('desc')
                    obj['mtu'] = interface_table.get('eth_mtu')
        objs.append(obj)
    return objs
def check_declarative_intent_params(module, want):
    """Verify declarative intent params (tx_rate, rx_rate, neighbors).

    For each wanted interface that carries intent parameters, wait the
    configured delay, query the device, and compare the observed
    output/input rates and LLDP neighbors against the requested
    conditions.

    :param module: the AnsibleModule instance (provides params and I/O)
    :param want: list of desired-state dicts built by map_params_to_obj
    :returns: list of human-readable unmet conditions (empty if all met)
    """
    failed_conditions = []
    # LLDP neighbor output is fetched lazily and cached across interfaces.
    have_neighbors = None
    for w in want:
        want_tx_rate = w.get('tx_rate')
        want_rx_rate = w.get('rx_rate')
        want_neighbors = w.get('neighbors')

        # Fix: the original slept for every entry, even when there was
        # nothing to verify.  Skip early to avoid a pointless delay.
        if not (want_tx_rate or want_rx_rate or want_neighbors):
            continue

        # Give the device time to update counters / the neighbor table.
        time.sleep(module.params['delay'])

        # Entries identified only by interface_type have no concrete
        # interface name to query, so intent checking is impossible.
        # Fix: return the accumulated list instead of a bare None so the
        # return type is consistent for callers.
        if w['interface_type']:
            return failed_conditions

        cmd = [{'command': 'show interface {0}'.format(w['name']), 'output': 'text'}]
        try:
            out = run_commands(module, cmd, check_rc=False)[0]
        except (AttributeError, IndexError, TypeError):
            out = ''

        if want_tx_rate:
            match = re.search(r'output rate (\d+)', out, re.M)
            have_tx_rate = None
            if match:
                have_tx_rate = match.group(1)
            if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int):
                failed_conditions.append('tx_rate ' + want_tx_rate)

        if want_rx_rate:
            match = re.search(r'input rate (\d+)', out, re.M)
            have_rx_rate = None
            if match:
                have_rx_rate = match.group(1)
            if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int):
                failed_conditions.append('rx_rate ' + want_rx_rate)

        if want_neighbors:
            have_host = []
            have_port = []
            if have_neighbors is None:
                cmd = [{'command': 'show lldp neighbors interface {0} detail'.format(w['name']), 'output': 'text'}]
                output = run_commands(module, cmd, check_rc=False)
                if output:
                    have_neighbors = output[0]
                else:
                    have_neighbors = ''
            if have_neighbors and 'Total entries displayed: 0' not in have_neighbors:
                for line in have_neighbors.strip().split('\n'):
                    if line.startswith('Port Description'):
                        have_port.append(line.split(': ')[1])
                    if line.startswith('System Name'):
                        have_host.append(line.split(': ')[1])
            for item in want_neighbors:
                host = item.get('host')
                port = item.get('port')
                if host and host not in have_host:
                    failed_conditions.append('host ' + host)
                if port and port not in have_port:
                    failed_conditions.append('port ' + port)
    return failed_conditions
def main():
    """Main entry point for module execution.

    Builds the argument spec, collects the desired and current state,
    pushes the required configuration commands, and finally verifies any
    declarative intent parameters before exiting.
    """
    # Spec for entries of the 'neighbors' intent parameter.
    neighbors_spec = dict(
        host=dict(),
        port=dict()
    )
    # Per-interface options, shared by the top level and 'aggregate'.
    element_spec = dict(
        name=dict(aliases=['interface']),
        admin_state=dict(default='up', choices=['up', 'down']),
        description=dict(),
        speed=dict(),
        mode=dict(choices=['layer2', 'layer3']),
        mtu=dict(),
        duplex=dict(choices=['full', 'half', 'auto']),
        interface_type=dict(choices=['loopback', 'portchannel', 'svi', 'nve']),
        ip_forward=dict(choices=['enable', 'disable']),
        fabric_forwarding_anycast_gateway=dict(type='bool'),
        tx_rate=dict(),
        rx_rate=dict(),
        neighbors=dict(type='list', elements='dict', options=neighbors_spec),
        delay=dict(default=10, type='int'),
        state=dict(choices=['absent', 'present', 'default'], default='present')
    )
    aggregate_spec = deepcopy(element_spec)
    # Inside 'aggregate' each entry must name its interface explicitly.
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec,
                       mutually_exclusive=[['name', 'interface_type']])
    )
    argument_spec.update(element_spec)
    argument_spec.update(nxos_argument_spec)
    required_one_of = [['name', 'aggregate', 'interface_type']]
    mutually_exclusive = [['name', 'aggregate'],
                          ['name', 'interface_type']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Desired state from params vs. current state from the device.
    want = map_params_to_obj(module)
    have = map_config_to_obj(want, module)
    commands = []
    commands1, commands2 = map_obj_to_commands((want, have), module)
    commands.extend(commands1)
    if commands:
        if not module.check_mode:
            load_config(module, commands)
            result['changed'] = True
            # if the mode changes from L2 to L3, the admin state
            # seems to change after the API call, so adding a second API
            # call to ensure it's in the desired state.
            if commands2:
                load_config(module, commands2)
                commands.extend(commands2)
            # Drop the bare 'configure' marker from the reported commands.
            commands = [cmd for cmd in commands if cmd != 'configure']
    result['commands'] = commands
    if result['changed']:
        # Only verify declarative intent after a real change was applied.
        failed_conditions = check_declarative_intent_params(module, want)
        if failed_conditions:
            msg = 'One or more conditional statements have not been satisfied'
            module.fail_json(msg=msg, failed_conditions=failed_conditions)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.util.string import return_ascii
class BlockingPrincipal(PrincipalMixin, db.Model):
    """ACL entry granting a principal access to a room-booking blocking."""

    __tablename__ = 'blocking_principals'
    # Name of the backref created on the principal object by PrincipalMixin.
    principal_backref_name = 'in_blocking_acls'
    # A principal may appear at most once per blocking.
    unique_columns = ('blocking_id',)

    @declared_attr
    def __table_args__(cls):
        # Combine mixin table args and place the table in 'roombooking'.
        return auto_table_args(cls, schema='roombooking')

    # Surrogate primary key.
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Blocking this ACL entry belongs to.
    blocking_id = db.Column(
        db.Integer,
        db.ForeignKey('roombooking.blockings.id'),
        nullable=False
    )

    # relationship backrefs:
    # - blocking (Blocking._allowed)

    @return_ascii
    def __repr__(self):
        return '<BlockingPrincipal({}, {}, {})>'.format(
            self.id,
            self.blocking_id,
            self.principal
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
import org.junit.jupiter.api.Named;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.Parameter;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.FieldSource;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.toList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.SoftAssertions.assertSoftly;
import static org.junit.jupiter.api.Named.named;
/**
 * Unit tests for property descriptor resolution via
 * {@link PropertyDescriptorUtils#determineBasicProperties(Class)}.
 *
 * <p>Results are compared to the behavior of the standard {@link Introspector}.
 *
 * @author Sam Brannen
 * @since 6.2.16
 */
@ParameterizedClass(name = "{0}")
@FieldSource("resolvers")
class PropertyDescriptorUtilsPropertyResolutionTests {

    // Every test in this class runs twice: once against Spring's basic
    // property resolution and once against the standard JavaBeans Introspector.
    static final List<Named<PropertiesResolver>> resolvers = List.of(
            named("Basic Properties", new BasicPropertiesResolver()),
            named("Standard Properties", new StandardPropertiesResolver()));

    @Parameter
    PropertiesResolver resolver;

    @Nested
    class NonGenericTypesTests {

        @Test
        void classWithOnlyGetter() {
            var pdMap = resolver.resolve(ClassWithOnlyGetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Number.class, null);
        }

        @Test
        void classWithOnlySetter() {
            var pdMap = resolver.resolve(ClassWithOnlySetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, null, Long.class);
        }

        @Test
        void classWithMatchingGetterAndSetter() {
            var pdMap = resolver.resolve(ClassWithMatchingGetterAndSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Long.class, Long.class);
        }

        @Test
        void classWithOneUnrelatedSetter() {
            var pdMap = resolver.resolve(ClassWithOneUnrelatedSetter.class);
            // java.beans.Introspector never resolves unrelated write methods.
            Class<?> writeType = null;
            if (resolver instanceof BasicPropertiesResolver) {
                // Spring resolves a single write method even if its type is not
                // related to the read type.
                writeType = String.class;
            }
            assertReadAndWriteMethodsForClassAndId(pdMap, Integer.class, writeType);
        }

        @Test
        void classWithUnrelatedSettersInSameTypeHierarchy() {
            var pdMap = resolver.resolve(ClassWithUnrelatedSettersInSameTypeHierarchy.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Integer.class, null);
        }

        @Test
        void classWithOneSubtypeSetter() {
            var pdMap = resolver.resolve(ClassWithOneSubtypeSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Number.class, Long.class);
        }

        @Test
        void classWithTwoSubtypeSetters() {
            var pdMap = resolver.resolve(ClassWithTwoSubtypeSetters.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Serializable.class, Long.class);
        }

        @Test
        void classWithTwoSubtypeSettersAndOneUnrelatedSetter() {
            var pdMap = resolver.resolve(ClassWithTwoSubtypeSettersAndOneUnrelatedSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Serializable.class, Long.class);
        }

        // Fixture classes: each models one getter/setter combination whose
        // exact shape drives the expected resolution results above.

        static class ClassWithOnlyGetter {
            public Number getId() {
                return 42;
            }
        }

        static class ClassWithOnlySetter {
            public void setId(Long id) {
            }
        }

        static class ClassWithMatchingGetterAndSetter {
            public Long getId() {
                return 42L;
            }
            public void setId(Long id) {
            }
        }

        static class ClassWithOneUnrelatedSetter {
            public Integer getId() {
                return 42;
            }
            public void setId(String id) {
            }
        }

        static class ClassWithUnrelatedSettersInSameTypeHierarchy {
            public Integer getId() {
                return 42;
            }
            public void setId(CharSequence id) {
            }
            public void setId(String id) {
            }
        }

        static class ClassWithOneSubtypeSetter {
            public Number getId() {
                return 42;
            }
            public void setId(Long id) {
            }
        }

        static class ClassWithTwoSubtypeSetters {
            public Serializable getId() {
                return 42;
            }
            public void setId(Number id) {
            }
            public void setId(Long id) {
            }
        }

        static class ClassWithTwoSubtypeSettersAndOneUnrelatedSetter {
            public Serializable getId() {
                return 42;
            }
            public void setId(Number id) {
            }
            public void setId(Long id) {
            }
            public void setId(String id) {
            }
        }
    }

    @Nested
    class UnboundedGenericsTests {

        @Test
        void determineBasicPropertiesWithUnresolvedGenericsInInterface() {
            var pdMap = resolver.resolve(GenericService.class);
            assertThat(pdMap).containsOnlyKeys("id");
            assertReadAndWriteMethodsForId(pdMap.get("id"), Object.class, Object.class);
        }

        @Test
        void determineBasicPropertiesWithUnresolvedGenericsInSubInterface() {
            var pdMap = resolver.resolve(SubGenericService.class);
            if (resolver instanceof StandardPropertiesResolver) {
                // java.beans.Introspector does not resolve properties for sub-interfaces.
                assertThat(pdMap).isEmpty();
            }
            else {
                assertThat(pdMap).containsOnlyKeys("id");
                assertReadAndWriteMethodsForId(pdMap.get("id"), Object.class, Object.class);
            }
        }

        @Test
        void resolvePropertiesWithUnresolvedGenericsInClass() {
            var pdMap = resolver.resolve(BaseService.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Object.class, Object.class);
        }

        @Test // gh-36019
        void resolvePropertiesInSubclassWithOverriddenGetterAndSetter() {
            var pdMap = resolver.resolve(ServiceWithOverriddenGetterAndSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, String.class, String.class);
        }

        @Test // gh-36019
        void resolvePropertiesWithUnresolvedGenericsInSubclassWithOverloadedSetter() {
            var pdMap = resolver.resolve(ServiceWithOverloadedSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Object.class, Object.class);
        }

        @Test // gh-36019
        void resolvePropertiesWithPartiallyUnresolvedGenericsInSubclassWithOverriddenGetter() {
            var pdMap = resolver.resolve(ServiceWithOverriddenGetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, String.class, Object.class);
        }

        @Test // gh-36019
        void resolvePropertiesWithPartiallyUnresolvedGenericsInSubclassWithOverriddenGetterAndOverloadedSetter() {
            var pdMap = resolver.resolve(ServiceWithOverriddenGetterAndOverloadedSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, String.class, Object.class);
        }

        interface GenericService<T> {
            void setId(T id);
            T getId();
        }

        interface SubGenericService<T> extends GenericService<T> {
        }

        static class BaseService<T> {
            private T id;
            public T getId() {
                return id;
            }
            public void setId(T id) {
                this.id = id;
            }
        }

        static class ServiceWithOverriddenGetterAndSetter extends BaseService<String>
                implements SubGenericService<String> {
            @Override
            public String getId() {
                return super.getId();
            }
            @Override
            public void setId(String id) {
                super.setId(id);
            }
        }

        static class ServiceWithOverloadedSetter extends BaseService<String>
                implements SubGenericService<String> {
            public void setId(int id) {
                setId(String.valueOf(id));
            }
        }

        static class ServiceWithOverriddenGetter extends BaseService<String>
                implements SubGenericService<String> {
            @Override
            public String getId() {
                return super.getId();
            }
        }

        static class ServiceWithOverriddenGetterAndOverloadedSetter extends BaseService<String>
                implements SubGenericService<String> {
            @Override
            public String getId() {
                return super.getId();
            }
            public void setId(int id) {
                setId(String.valueOf(id));
            }
        }
    }

    @Nested
    class BoundedGenericsTests {

        @Test
        void determineBasicPropertiesWithUnresolvedGenericsInInterface() {
            var pdMap = resolver.resolve(Entity.class);
            assertThat(pdMap).containsOnlyKeys("id");
            assertReadAndWriteMethodsForId(pdMap.get("id"), Serializable.class, Serializable.class);
        }

        @Test
        void resolvePropertiesWithUnresolvedGenericsInClass() {
            var pdMap = resolver.resolve(BaseEntity.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Number.class, Number.class);
        }

        @Test
        void resolvePropertiesWithUnresolvedGenericsInSubclass() {
            var pdMap = resolver.resolve(Person.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Number.class, Number.class);
        }

        @Test // gh-36019
        void resolvePropertiesWithUnresolvedGenericsInSubclassWithOverriddenGetter() {
            var pdMap = resolver.resolve(PersonWithOverriddenGetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Long.class, Number.class);
        }

        @Test // gh-36019
        void resolvePropertiesWithUnresolvedGenericsInSubclassWithOverriddenSetter() {
            var pdMap = resolver.resolve(PersonWithOverriddenSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Number.class, Long.class);
        }

        @Test
        void resolvePropertiesWithUnresolvedGenericsInSubclassWithOverloadedSetter() {
            var pdMap = resolver.resolve(PersonWithOverloadedSetter.class);
            assertReadAndWriteMethodsForClassAndId(pdMap, Number.class, Number.class);
        }

        interface Entity<T extends Serializable> {
            T getId();
            void setId(T id);
        }

        abstract static class BaseEntity<T extends Number> implements Entity<T> {
            private T id;
            @Override
            public T getId() {
                return this.id;
            }
            @Override
            public void setId(T id) {
                this.id = id;
            }
        }

        static class Person extends BaseEntity<Long> {
        }

        static class PersonWithOverriddenGetter extends BaseEntity<Long> {
            /**
             * Overrides super implementation to ensure that the JavaBeans read method
             * is of type {@link Long}, while leaving the type for the write method
             * ({@link #setId}) set to {@link Number}.
             */
            @Override
            public Long getId() {
                return super.getId();
            }
        }

        static class PersonWithOverriddenSetter extends BaseEntity<Long> {
            /**
             * Overrides super implementation to ensure that the JavaBeans write method
             * is of type {@link Long}, while leaving the type for the read method
             * ({@link #getId()}) set to {@link Number}.
             */
            @Override
            public void setId(Long id) {
                super.setId(id);
            }
        }

        static class PersonWithOverloadedSetter extends BaseEntity<Long> {
            // Intentionally chose Integer, since it's a subtype of Long and Number.
            public void setId(Integer id) {
                setId(id.longValue());
            }
        }
    }

    // Asserts that exactly the "class" and "id" properties were resolved,
    // with the given read/write types for "id" (null means "no such method").
    private static void assertReadAndWriteMethodsForClassAndId(Map<String, List<PropertyDescriptor>> pdMap,
            Class<?> readType, Class<?> writeType) {
        assertThat(pdMap).containsOnlyKeys("class", "id");
        assertReadAndWriteMethodsForClass(pdMap.get("class"));
        assertReadAndWriteMethodsForId(pdMap.get("id"), readType, writeType);
    }

    // The implicit "class" property (from Object#getClass) is read-only.
    private static void assertReadAndWriteMethodsForClass(List<PropertyDescriptor> pds) {
        assertThat(pds).hasSize(1);
        var pd = pds.get(0);
        assertThat(pd.getName()).isEqualTo("class");
        var readMethod = pd.getReadMethod();
        assertThat(readMethod.getName()).isEqualTo("getClass");
        assertThat(readMethod.getReturnType()).as("read type").isEqualTo(Class.class);
        assertThat(readMethod.getParameterCount()).isZero();
        assertThat(pd.getWriteMethod()).as("write method").isNull();
    }

    private static void assertReadAndWriteMethodsForId(List<PropertyDescriptor> pds, Class<?> readType, Class<?> writeType) {
        assertThat(pds).hasSize(1);
        var pd = pds.get(0);
        assertThat(pd.getName()).isEqualTo("id");
        var readMethod = pd.getReadMethod();
        var writeMethod = pd.getWriteMethod();
        // Soft assertions so that all mismatches are reported at once.
        assertSoftly(softly -> {
            if (readType == null) {
                softly.assertThat(readMethod).as("readmethod").isNull();
            }
            else {
                softly.assertThat(readMethod.getName()).isEqualTo("getId");
                softly.assertThat(readMethod.getReturnType()).as("read type").isEqualTo(readType);
                softly.assertThat(readMethod.getParameterCount()).isZero();
            }
            if (writeType == null) {
                softly.assertThat(writeMethod).as("write method").isNull();
            }
            else {
                softly.assertThat(writeMethod).as("write method").isNotNull();
                if (writeMethod != null) {
                    softly.assertThat(writeMethod.getName()).isEqualTo("setId");
                    softly.assertThat(writeMethod.getReturnType()).isEqualTo(void.class);
                    softly.assertThat(writeMethod.getParameterCount()).isEqualTo(1);
                    softly.assertThat(writeMethod.getParameterTypes()[0]).as("write type").isEqualTo(writeType);
                }
            }
        });
    }

    // Groups descriptors by property name; a name can map to several
    // descriptors if a resolver returns duplicates.
    private static Map<String, List<PropertyDescriptor>> toMap(Stream<? extends PropertyDescriptor> stream) {
        return stream.collect(groupingBy(PropertyDescriptor::getName, toList()));
    }

    // Common abstraction over the two resolution strategies under test.
    private interface PropertiesResolver {
        Map<String, List<PropertyDescriptor>> resolve(Class<?> beanClass);
    }

    private static class BasicPropertiesResolver implements PropertiesResolver {
        @Override
        public Map<String, List<PropertyDescriptor>> resolve(Class<?> beanClass) {
            try {
                var pds = PropertyDescriptorUtils.determineBasicProperties(beanClass);
                return toMap(pds.stream());
            }
            catch (Exception ex) {
                throw new RuntimeException(ex);
            }
        }
    }

    private static class StandardPropertiesResolver implements PropertiesResolver {
        @Override
        public Map<String, List<PropertyDescriptor>> resolve(Class<?> beanClass) {
            try {
                var beanInfo = Introspector.getBeanInfo(beanClass);
                return toMap(Arrays.stream(beanInfo.getPropertyDescriptors()));
            }
            catch (Exception ex) {
                throw new RuntimeException(ex);
            }
        }
    }
}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-beans/src/test/java/org/springframework/beans/PropertyDescriptorUtilsPropertyResolutionTests.java
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from blinker import Namespace
# Private blinker namespace holding all ACL/protection related signals.
_signals = Namespace()

# Fired during ProtectionMixin.can_access; lets plugins veto or grant access.
can_access = _signals.signal('can-access', """
Called when `ProtectionMixin.can_access` is used to determine if a
user can access something or not.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The `user` and `allow_admin`
arguments of `can_access` are passed as kwargs with the same name.
The `authorized` argument is ``None`` when this signal is called at
the beginning of the access check and ``True`` or ``False`` at the end
when regular access rights have already been checked. For expensive
checks (such as anything involving database queries) it is recommended
to skip the check while `authorized` is ``None`` since the regular
access check is likely to be cheaper (due to ACLs being preloaded etc).
If the signal returns ``True`` or ``False``, the access check succeeds
or fails immediately. If multiple subscribers to the signal return
contradictory results, ``False`` wins and access is denied.
""")

# Fired during ProtectionMixin.can_manage; same veto semantics as can_access.
can_manage = _signals.signal('can-manage', """
Called when `ProtectionMixin.can_manage` is used to determine if a
user can manage something or not.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The `user`, `role`, `allow_admin`,
`check_parent` and `explicit_role` arguments of `can_manage` are
passed as kwargs with the same name.
If the signal returns ``True`` or ``False``, the access check succeeds
or fails without any further checks. If multiple subscribers to the
signal return contradictory results, ``False`` wins and access is
denied.
""")

# Fired whenever an ACL entry is added, modified or removed.
entry_changed = _signals.signal('entry-changed', """
Called when an ACL entry is changed.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The `User`, `GroupProxy` or
`EmailPrincipal` is passed as `principal` and `entry` contains the
actual ACL entry (a `PrincipalMixin` instance) or ``None`` in case
the entry was deleted. `is_new` is a boolean indicating whether
the given principal was in the ACL before. If `quiet` is ``True``,
signal handlers should not perform noisy actions such as logging or
sending emails related to the change.
If the ACL uses roles, `old_data` will contain a dictionary of the
previous roles/permissions (see `PrincipalRolesMixin.current_data`).
""")

# Fired when an object's protection mode (public/inheriting/protected) changes.
protection_changed = _signals.signal('protection-changed', """
Called when the protection mode of an object is changed.
The `sender` is the type of the object that's using the mixin. The
actual instance is passed as `obj`. The old protection mode is passed
as `old_mode`, the new mode as `mode`.
""")

# Fired to collect the ManagementRole classes applicable to a given type.
get_management_roles = _signals.signal('get-management-roles', """
Expected to return `ManagementRole` subclasses. The `sender` is the
type of the object the roles may be used for. Functions subscribing
to this signal **MUST** check the sender by specifying it using the
first argument of `connect_via()` or by comparing it inside the
function.
""")
unknown
|
codeparrot/codeparrot-clean
| ||
from ryu.base.app_manager import RyuApp
from ryu.controller.ofp_event import EventOFPSwitchFeatures
from ryu.controller.ofp_event import EventOFPMeterStatsReply
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto.ofproto_v1_2 import OFPG_ANY
from ryu.ofproto.ofproto_v1_3 import OFP_VERSION
from ryu.lib.mac import haddr_to_bin
class App(RyuApp):
    """Sample OpenFlow 1.3 app that installs many drop meters.

    On switch connection it installs 3000 packet-rate drop meters and
    then requests (and logs) the meter statistics.
    """
    OFP_VERSIONS = [OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(App, self).__init__(*args, **kwargs)

    @set_ev_cls(EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Kick off meter installation once the switch reports features."""
        datapath = ev.msg.datapath
        # Fix: the original used a list comprehension purely for its side
        # effects, building a throwaway list; a plain loop is idiomatic.
        for n in [0]:
            self.install_sample(datapath, n)

    def create_meter_mod(self, datapath, command, flags_, meter_id, bands):
        """Build an OFPMeterMod message for the given datapath.

        :param command: e.g. ofproto.OFPMC_ADD
        :param flags_: meter flags, e.g. ofproto.OFPMF_PKTPS
        :param bands: list of meter band instances
        """
        # Removed an unused local ('ofproto') from the original.
        ofp_parser = datapath.ofproto_parser
        meter_mod = ofp_parser.OFPMeterMod(datapath, command, flags_,
                                           meter_id, bands)
        return meter_mod

    def install_sample(self, datapath, table_id):
        """Install 3000 drop meters, then request meter statistics."""
        parser = datapath.ofproto_parser
        ofproto = datapath.ofproto
        self.logger.info("=== start ===")
        for i in range(3000):
            # One drop band per meter: 10 packets/s, burst of 1.
            band = parser.OFPMeterBandDrop(rate=10, burst_size=1)
            meter_mod = self.create_meter_mod(datapath,
                                              ofproto.OFPMC_ADD,
                                              ofproto.OFPMF_PKTPS,
                                              i,
                                              [band])
            datapath.send_msg(meter_mod)
        self.logger.info("=== end ===")
        req = parser.OFPMeterStatsRequest(datapath, 0, ofproto.OFPM_ALL)
        datapath.send_msg(req)

    @set_ev_cls(EventOFPMeterStatsReply, MAIN_DISPATCHER)
    def meter_stats_reply_handler(self, ev):
        """Log a summary line per meter from a MeterStatsReply."""
        meters = []
        for stat in ev.msg.body:
            meters.append('meter_id=0x%08x len=%d flow_count=%d '
                          'packet_in_count=%d byte_in_count=%d '
                          'duration_sec=%d duration_nsec=%d '
                          'band_stats=%s' %
                          (stat.meter_id, stat.len, stat.flow_count,
                           stat.packet_in_count, stat.byte_in_count,
                           stat.duration_sec, stat.duration_nsec,
                           stat.band_stats))
        self.logger.info('MeterStats: %s', meters)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Item Loader
See documentation in docs/topics/loaders.rst
"""
from collections import defaultdict
import six
from scrapy.item import Item
from scrapy.selector import Selector
from scrapy.utils.decorators import deprecated
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.utils.misc import arg_to_iter, extract_regex
from scrapy.utils.python import flatten
from .common import wrap_loader_context
from .processors import Identity
class ItemLoader(object):
    """Populate an :class:`Item` by collecting values (optionally extracted
    via XPath/CSS selectors) and running them through per-field input and
    output processors.
    """

    default_item_class = Item
    default_input_processor = Identity()
    default_output_processor = Identity()
    default_selector_class = Selector

    def __init__(self, item=None, selector=None, response=None, **context):
        # Build a selector from the response when none was given explicitly.
        if selector is None and response is not None:
            selector = self.default_selector_class(response)
        self.selector = selector
        context.update(selector=selector, response=response)
        if item is None:
            item = self.default_item_class()
        self.item = context['item'] = item
        self.context = context
        # Collected (already input-processed) values, keyed by field name.
        self._values = defaultdict(list)

    def add_value(self, field_name, value, *processors, **kw):
        """Process ``value`` and append it to the field's collected values.

        When ``field_name`` is falsy, ``value`` must be a mapping of
        field names to values.
        """
        value = self.get_value(value, *processors, **kw)
        if value is None:
            return
        if not field_name:
            for k, v in six.iteritems(value):
                self._add_value(k, v)
        else:
            self._add_value(field_name, value)

    def replace_value(self, field_name, value, *processors, **kw):
        """Like :meth:`add_value`, but discard previously collected values."""
        value = self.get_value(value, *processors, **kw)
        if value is None:
            return
        if not field_name:
            for k, v in six.iteritems(value):
                self._replace_value(k, v)
        else:
            self._replace_value(field_name, value)

    def _add_value(self, field_name, value):
        # Run the input processor and only store non-empty results.
        value = arg_to_iter(value)
        processed_value = self._process_input_value(field_name, value)
        if processed_value:
            self._values[field_name] += arg_to_iter(processed_value)

    def _replace_value(self, field_name, value):
        self._values.pop(field_name, None)
        self._add_value(field_name, value)

    def get_value(self, value, *processors, **kw):
        """Run ``value`` through an optional ``re`` extraction and the given
        processors, returning the result without storing it.
        """
        regex = kw.get('re', None)
        if regex:
            value = arg_to_iter(value)
            value = flatten([extract_regex(regex, x) for x in value])
        # Processors are applied in order; a None result short-circuits.
        for proc in processors:
            if value is None:
                break
            proc = wrap_loader_context(proc, self.context)
            value = proc(value)
        return value

    def load_item(self):
        """Build and return the item from all collected values."""
        item = self.item
        for field_name in tuple(self._values):
            value = self.get_output_value(field_name)
            if value is not None:
                item[field_name] = value
        return item

    def get_output_value(self, field_name):
        """Return the field's collected values after output processing."""
        proc = self.get_output_processor(field_name)
        proc = wrap_loader_context(proc, self.context)
        try:
            return proc(self._values[field_name])
        except Exception as e:
            # Re-raise with context so the failing field is identifiable.
            raise ValueError("Error with output processor: field=%r value=%r error='%s: %s'" % \
                (field_name, self._values[field_name], type(e).__name__, str(e)))

    def get_collected_values(self, field_name):
        """Return the raw collected values for ``field_name``."""
        return self._values[field_name]

    def get_input_processor(self, field_name):
        # Lookup order: <field>_in attribute, item field metadata, default.
        proc = getattr(self, '%s_in' % field_name, None)
        if not proc:
            proc = self._get_item_field_attr(field_name, 'input_processor', \
                self.default_input_processor)
        return proc

    def get_output_processor(self, field_name):
        # Lookup order: <field>_out attribute, item field metadata, default.
        proc = getattr(self, '%s_out' % field_name, None)
        if not proc:
            proc = self._get_item_field_attr(field_name, 'output_processor', \
                self.default_output_processor)
        return proc

    def _process_input_value(self, field_name, value):
        proc = self.get_input_processor(field_name)
        proc = wrap_loader_context(proc, self.context)
        return proc(value)

    def _get_item_field_attr(self, field_name, key, default=None):
        # Field metadata only exists on real Item subclasses.
        if isinstance(self.item, Item):
            value = self.item.fields[field_name].get(key, default)
        else:
            value = default
        return value

    def _check_selector_method(self):
        # Selector-based methods require a selector or response at init time.
        if self.selector is None:
            raise RuntimeError("To use XPath or CSS selectors, "
                "%s must be instantiated with a selector "
                "or a response" % self.__class__.__name__)

    def add_xpath(self, field_name, xpath, *processors, **kw):
        """Extract values with ``xpath`` and add them to ``field_name``."""
        values = self._get_xpathvalues(xpath, **kw)
        self.add_value(field_name, values, *processors, **kw)

    def replace_xpath(self, field_name, xpath, *processors, **kw):
        """Extract values with ``xpath`` and replace ``field_name``'s values."""
        values = self._get_xpathvalues(xpath, **kw)
        self.replace_value(field_name, values, *processors, **kw)

    def get_xpath(self, xpath, *processors, **kw):
        """Extract and process values with ``xpath`` without storing them."""
        values = self._get_xpathvalues(xpath, **kw)
        return self.get_value(values, *processors, **kw)

    @deprecated(use_instead='._get_xpathvalues()')
    def _get_values(self, xpaths, **kw):
        # Deprecated alias kept for backwards compatibility.
        return self._get_xpathvalues(xpaths, **kw)

    def _get_xpathvalues(self, xpaths, **kw):
        self._check_selector_method()
        xpaths = arg_to_iter(xpaths)
        return flatten([self.selector.xpath(xpath).extract() for xpath in xpaths])

    def add_css(self, field_name, css, *processors, **kw):
        """Extract values with ``css`` and add them to ``field_name``."""
        values = self._get_cssvalues(css, **kw)
        self.add_value(field_name, values, *processors, **kw)

    def replace_css(self, field_name, css, *processors, **kw):
        """Extract values with ``css`` and replace ``field_name``'s values."""
        values = self._get_cssvalues(css, **kw)
        self.replace_value(field_name, values, *processors, **kw)

    def get_css(self, css, *processors, **kw):
        """Extract and process values with ``css`` without storing them."""
        values = self._get_cssvalues(css, **kw)
        return self.get_value(values, *processors, **kw)

    def _get_cssvalues(self, csss, **kw):
        self._check_selector_method()
        csss = arg_to_iter(csss)
        return flatten([self.selector.css(css).extract() for css in csss])
# Backwards-compatible alias: XPathItemLoader was renamed to ItemLoader.
XPathItemLoader = create_deprecated_class('XPathItemLoader', ItemLoader)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
import pytest
from pandas import (
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
class TestSearchSorted:
    def test_searchsorted_different_argument_classes(self, listlike_box):
        """Index and its backing array accept any list-like argument."""
        tdi = TimedeltaIndex(["1 day", "2 days", "3 days"])
        positions = tdi.searchsorted(listlike_box(tdi))
        expected = np.arange(len(tdi), dtype=positions.dtype)
        tm.assert_numpy_array_equal(positions, expected)
        # The underlying TimedeltaArray must behave identically.
        tm.assert_numpy_array_equal(
            tdi._data.searchsorted(listlike_box(tdi)), expected
        )

    @pytest.mark.parametrize(
        "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
    )
    def test_searchsorted_invalid_argument_dtype(self, arg):
        """Non-timedelta arguments must raise a TypeError."""
        tdi = TimedeltaIndex(["1 day", "2 days", "3 days"])
        msg = "value should be a 'Timedelta', 'NaT', or array of those. Got"
        with pytest.raises(TypeError, match=msg):
            tdi.searchsorted(arg)
|
python
|
github
|
https://github.com/pandas-dev/pandas
|
pandas/tests/indexes/timedeltas/test_searchsorted.py
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fontcacheproto(AutotoolsPackage):
    """X.org FontcacheProto protocol headers."""
    homepage = "http://cgit.freedesktop.org/xorg/proto/fontcacheproto"
    url = "https://www.x.org/archive/individual/proto/fontcacheproto-0.1.3.tar.gz"
    # Second argument is the checksum of the release tarball (MD5-length hex).
    version('0.1.3', '5a91ab914ffbfbc856e6fcde52e6f3e3')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""Status and configuration for SPIcam.
History:
2007-05-22 ROwen
2008-02-11 ROwen Modified to use new TUI.Inst.StatusConfigWdg.
2008-02-12 ROwen Bug fix: was using instName=Expose for the expose window.
2008-03-13 ROwen Simplified the test code (copying that for NICFPS).
2011-08-11 ROwen Modified to save state.
2014-02-03 ROwen Updated to use modernized TestData.
"""
import RO.Alg
import TUI.Inst.ExposeWdg
import TUI.Inst.StatusConfigWdg
import StatusConfigInputWdg
# Instrument name string taken from the input widget; used in window titles below.
InstName = StatusConfigInputWdg.StatusConfigInputWdg.InstName
def addWindow(tlSet):
    """Create the SPIcam exposure and status/configuration windows.

    Inputs:
    - tlSet: toplevel set on which createToplevel is called.

    Both windows are created hidden (visible = False); the status/config
    window additionally saves its state between sessions (doSaveState).
    """
    tlSet.createToplevel (
        name = "None.%s Expose" % (InstName,),
        defGeom = "+452+280",
        resizable = False,
        wdgFunc = RO.Alg.GenericCallback (
            TUI.Inst.ExposeWdg.ExposeWdg,
            instName = InstName,
        ),
        visible = False,
    )
    tlSet.createToplevel (
        name = "Inst.%s" % (InstName,),
        defGeom = "+676+280",
        resizable = False,
        wdgFunc = StatusConfigWdg,
        visible = False,
        doSaveState = True,
    )
class StatusConfigWdg(TUI.Inst.StatusConfigWdg.StatusConfigWdg):
    """Status/configuration widget for SPIcam.

    Thin subclass that plugs the SPIcam-specific input widget class into
    the generic TUI status/config widget.
    """
    def __init__(self, master):
        TUI.Inst.StatusConfigWdg.StatusConfigWdg.__init__(self,
            master = master,
            statusConfigInputClass = StatusConfigInputWdg.StatusConfigInputWdg,
        )
if __name__ == "__main__":
    # Test harness: show the status/config window driven by simulated data.
    import RO.Wdg
    import TestData
    root = TestData.tuiModel.tkRoot
    root.resizable(width=0, height=0)
    tlSet = TestData.tuiModel.tlSet
    addWindow(tlSet)
    tlSet.makeVisible("Inst.%s" % (InstName,))
    TestData.start()
    root.mainloop()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
import gc #<-- added 21 may 2009 by mm
from glob import glob
import numpy
from opus_core.logger import logger
from opus_core.store.storage import Storage
from opus_core.misc import is_file_in_directory
class file_flt_storage(Storage):
    """Storage backend where each table is a directory of flat binary files,
    one file per column ("flt" cache format).

    The column file extension encodes the numpy dtype: the byteorder symbol
    is replaced by a letter ('<' -> 'l', '>' -> 'b', '|' -> 'i'), so e.g.
    dtype '<i4' is stored in '<column_name>.li4'.
    """
    #there are weird issues with how this was ported over from the old storage classes
    class storage_file(object):
        """Path of a single column file plus the extension <-> dtype mapping."""
        def __init__(self, name):
            # name: full path of the column file, e.g. 'dir/table/city_id.li4'
            self._name = name
        def get_name(self):
            """Full path of the column file."""
            return self._name
        def get_type(self):
            """numpy dtype string decoded from the file extension
            (e.g. '.li4' -> '<i4'), or None if the byteorder letter is unknown."""
            extension_position = self._name.rfind('.')+1
            byteorder_symbol = self._name[extension_position:extension_position+1]
            dtype_str = self._map_extension_character_to_byteorder_symbol(byteorder_symbol)
            if dtype_str is None:
                return None
            dtype_str = dtype_str + self._name[extension_position+1:]
            # encode() normalizes unicode paths to a plain byte string
            # (see StorageFileTests.test_get_type_from_unicode_filename).
            return dtype_str.encode()
        def get_short_name(self):
            """Column name: file basename with the dtype extension stripped
            (whole basename if the extension is not recognized)."""
            dtype_str = self.get_type()
            if dtype_str is None:
                return os.path.basename(self._name)
            return os.path.basename(self._name)[:-len(dtype_str)-1]
        @classmethod
        def new_storage_file(cls, short_name, type, path):
            """Build a storage_file for column 'short_name' with numpy dtype
            string 'type' (e.g. '<i4') inside directory 'path'."""
            byteorder_symbol = type[0:1]
            byteorder_extension = cls._map_byteorder_symbol_to_extension_character(byteorder_symbol)
            extension = byteorder_extension + type[1:]
            name = '%s.%s' % (os.path.join(path, short_name), extension)
            return cls(name)
        @classmethod
        def _map_byteorder_symbol_to_extension_character(cls, byteorder_character):
            """Map a numpy byteorder symbol to the extension letter; '='
            (native) resolves to this machine's actual byte order.
            'array' here is numpy's, imported near the bottom of this module."""
            mapping = {
                '<': 'l', # little-endian
                '>': 'b', # big-endian
                '|': 'i', # irrelevant
            }
            if array([1], dtype='<i4').dtype.byteorder == '=':
                mapping['='] = mapping['<']
            else:
                mapping['='] = mapping['>']
            return mapping[byteorder_character]
        def _map_extension_character_to_byteorder_symbol(self, extension_character):
            """Inverse of _map_byteorder_symbol_to_extension_character;
            returns None for an unknown extension letter."""
            try:
                return {
                    'l': '<', # little-endian
                    'b': '>', # big-endian
                    'i': '|', # irrelevant
                }[extension_character]
            except KeyError:  # was a bare 'except:'; only a failed lookup is expected
                return None
    def __init__(self, storage_location):
        # storage_location: base directory holding one subdirectory per table.
        self._base_directory = storage_location
    def _extension_for_numpy_type(self, dtype):
        """Returns the file extension for this numpy type.
        NOTE(review): looks like dead code -- _map_byteorder_symbol_to_extension_character
        lives on the nested storage_file class, so calling this would raise
        AttributeError; left in place pending confirmation."""
        dtype_str = dtype.str
        return self._map_byteorder_symbol_to_extension_character(dtype_str[0]) + dtype_str[1:]
    def _get_native_endian_file_extension_character(self):
        """Extension letter for this machine's byte order.
        NOTE(review): same dead-code concern as _extension_for_numpy_type."""
        if array([1], dtype='<i4').dtype.byteorder == '=':
            return self._map_byteorder_symbol_to_extension_character('<')
        else:
            return self._map_byteorder_symbol_to_extension_character('>')
    def _write_to_file(self, directory, attribute_name, attribute_data):
        """Writes data to a file."""
        extension = self._extension_for_numpy_type(attribute_data.dtype)
        filename = '%s.%s' % (attribute_name, extension)
        file_path = os.path.join(directory, filename)
        f = file(file_path, mode="wb")
        try:
            try:
                attribute_data.tofile(f)
            except ValueError:
                logger.log_error(
                    "Unable to write attribute '%s' to disk. The disk may be "
                    "full or the location write-protected. (%s)"
                    % (attribute_name, file_path))
                raise
        finally:
            f.close()
    def has_table(self, table):
        """True if an entry named 'table' exists in the base directory."""
        return is_file_in_directory(table, self._get_base_directory())
    def get_storage_location(self):
        """Base directory this storage reads from and writes to."""
        return self._base_directory
    def load_table(self, table_name, column_names=Storage.ALL_COLUMNS, lowercase=True):
        """Load the requested columns of 'table_name' into a dict mapping
        column name -> numpy array (names lowercased when 'lowercase')."""
        gc.collect() #<-- added 21 may 2009 by mm
        files = self._get_files(table_name=table_name)
        result = {}
        for column_file in files:
            if lowercase:
                column_name = column_file.get_short_name().lower()
            else:
                column_name = column_file.get_short_name()
            if column_names == Storage.ALL_COLUMNS or column_name in column_names:
                result[column_name] = numpy.fromfile(column_file.get_name(), dtype=column_file.get_type())
        return result
    def get_column_names(self, table_name, lowercase=True):
        """Column names of 'table_name', derived from the file names on disk."""
        gc.collect() #<-- added 21 may 2009 by mm
        files = self._get_files(table_name)
        result = [column_file.get_short_name() for column_file in files]
        if lowercase:
            result = [name.lower() for name in result]
        return result
    def _get_base_directory(self):
        return self._base_directory
    def get_table_names(self):
        """Subdirectories of the base directory containing at least one column file."""
        gc.collect() #<-- added 21 may 2009 by mm
        dataset_path = self._get_base_directory()
        if os.path.exists(dataset_path):
            file_names = glob(os.path.join(dataset_path, '*'))
            return [os.path.basename(name) for name in file_names
                       if os.path.isdir(name) and len(self.get_column_names(name))>0]
        else:
            raise FltError("Cache directory '%s' does not exist!" % dataset_path)
    def write_table(self, table_name, table_data, mode = Storage.OVERWRITE):
        """
        'table_name' specifies the subdirectory relative to base directory.
        'table_data' is a dictionary where keys are the column names and values
        are value arrays of the corresponding columns.

        Raises FltError when a column already has files with several
        different extensions (ambiguous dtype on disk).
        """
        gc.collect() #<-- added 21 may 2009 by mm
        table_dir = os.path.join(self._get_base_directory(), table_name)
        if not os.path.exists(table_dir):
            logger.log_status("%s doesn't exist and is created" % table_dir)
            os.makedirs(table_dir)
        unused_column_size, column_names = self._get_column_size_and_names(table_data)
        for column_name in column_names:
            col_type = table_data[column_name].dtype.str
            column_file = self.storage_file.new_storage_file(column_name, col_type, table_dir)
            existing_files_of_this_name = glob(os.path.join(table_dir, '%s.*' % column_name))
            if (len (existing_files_of_this_name) > 1):
                message = "Column '%s' has multiple files with different file extensions:\n" % column_name
                for existing_file_name in existing_files_of_this_name:
                    message += existing_file_name + "\n"
                message += "Either the process of copying files into this directory is flawed, or there is a bug in Opus."
                raise FltError(message)
            if mode == Storage.OVERWRITE:
                # remove the single pre-existing file (possibly of another dtype)
                for existing_file_name in existing_files_of_this_name:
                    os.remove(existing_file_name)
            if mode == Storage.OVERWRITE or len(existing_files_of_this_name) == 0:
                table_data[column_name].tofile(column_file.get_name())
    def _get_files(self, table_name=''):
        """storage_file objects for the column files of 'table_name'.
        NOTE(review): the glob only matches '*.l*'; big-endian ('b*') and
        byteorder-irrelevant ('i*') extensions are skipped here -- confirm
        whether that is intentional."""
        dataset_path = os.path.join(self._get_base_directory(), table_name)
        if os.path.exists(dataset_path):
            file_names = glob(os.path.join(dataset_path, '*.l*')) # filter here for only the lf4 files - exclude all others
            return [self.storage_file(name) for name in file_names]
        else:
            raise FltError("Cache directory '%s' does not exist!" % dataset_path)
    def listdir_in_base_directory(self):
        """Raw directory listing of the base directory."""
        return os.listdir( self._get_base_directory() )
class FltError(Exception):
    """Error raised by the flt storage code; str() yields the stored message.

    Deliberately does not call Exception.__init__, matching the historical
    behavior: only the ``value`` attribute is populated.
    """
    def __str__(self):
        return self.value
    def __init__(self, value):
        self.value = value
import sys
from opus_core.tests import opus_unittest
from opus_core.opus_package import OpusPackage
from opus_core.store.storage import TestStorageInterface
from opus_core.tests.utils.cache_extension_replacements import replacements
from numpy import array, fromfile, int32
from shutil import rmtree
from tempfile import mkdtemp
class StorageFileTests(opus_unittest.OpusTestCase):
    """Unit tests for file_flt_storage.storage_file name/dtype mapping."""
    def test_get_short_name(self):
        # '.li4' decodes to little-endian int32 ('<i4'); '.iS11' to '|S11'.
        storage_file = file_flt_storage.storage_file('path/to/table/test.li4')
        self.assertEqual('<i4', storage_file.get_type())
        self.assertEqual('test', storage_file.get_short_name())
        storage_file = file_flt_storage.storage_file('path/to/table/foo.iS11')
        self.assertEqual('|S11', storage_file.get_type())
        self.assertEqual('foo', storage_file.get_short_name())
    def test_new_storage_file(self):
        # Round trip: (name, dtype, path) yields the expected file path.
        storage_file = file_flt_storage.storage_file.new_storage_file('test', '<i4', 'path/to/table')
        self.assertEqual('test', storage_file.get_short_name())
        self.assertEqual('<i4', storage_file.get_type())
        expected = os.path.join('path/to/table', 'test.li4')
        self.assertEqual(expected, storage_file.get_name())
    def test_get_type_from_unicode_filename(self):
        # get_type() must return a plain byte string even for unicode paths.
        storage_file = file_flt_storage.storage_file(u'path/to/table/test.li4')
        self.assertEquals('<i4', storage_file.get_type())
        self.assertEquals(type('<i4'), type(storage_file.get_type()))
class StorageTests(opus_unittest.OpusTestCase):
    """Read-only tests against the 'test_cache' fixture shipped with opus_core."""
    def setUp(self):
        # Base directory: <opus_core>/data/test_cache/1980
        opus_core_path = OpusPackage().get_opus_core_path()
        local_test_data_path = os.path.join(
            opus_core_path, 'data', 'test_cache', '1980')
        self.storage = file_flt_storage(local_test_data_path)
    def test_get_files(self):
        expected = ['city_id', 'city_name']
        expected.sort()
        actual = self.storage.get_column_names('cities')
        actual.sort()
        self.assertEqual(expected, actual)
    def test_load_table(self):
        expected = {
            'city_id': array([3, 1, 2], dtype='<i4'),
            'city_name': array(['Unknown', 'Eugene', 'Springfield']),
        }
        actual = self.storage.load_table('cities')
        self.assertDictsEqual(expected, actual)
    def test_get_table_names_1981(self):
        # Only directories containing at least one column file count as tables.
        opus_core_path = OpusPackage().get_opus_core_path()
        local_test_data_path = os.path.join(
            opus_core_path, 'data', 'test_cache', '1981')
        storage = file_flt_storage(local_test_data_path)
        expected = ['base_year', 'cities']
        actual = storage.get_table_names()
        expected.sort()
        actual.sort()
        self.assertEquals(expected, actual)
class StorageWriteTests(TestStorageInterface):
    """Write/read round-trip tests for file_flt_storage using a temp directory."""
    def setUp(self):
        # Fresh temporary base directory and storage per test.
        self.temp_dir = mkdtemp(prefix='opus_core_test_flt_storage')
        self.storage = file_flt_storage(self.temp_dir)
        self.table_name = 'testtable'
    def tearDown(self):
        if os.path.exists(self.temp_dir):
            rmtree(self.temp_dir)
    def test_write_char_array(self):
        # String columns are '|S<n>' dtypes, stored with the 'i'
        # (byteorder-irrelevant) extension letter, e.g. '.iS9'.
        expected = array(['string1', 'string227'])
        table_data = {
            'char_column': expected,
        }
        file_name = os.path.join(self.temp_dir, self.table_name, 'char_column.iS9')
        self.storage.write_table(self.table_name, table_data)
        self.assert_(os.path.exists(file_name))
        actual = numpy.fromfile(file_name, dtype='|S9')
        self.assert_((expected==actual).all())
    def test_write_int_array(self):
        expected = array([100, 70])
        table_data = {
            'int_column': expected,
        }
        # file_name is e.g. 'int_column.li4' for a little-endian 32 bit machine
        file_name = 'int_column.%(endian)si%(bytes)u' % replacements
        # numpy_dtype is e.g. '<i4' for a little-endian 32 bit machine
        numpy_dtype = '%(numpy_endian)si%(bytes)u' % replacements
        file_path = os.path.join(self.temp_dir, self.table_name, file_name)
        self.storage.write_table(self.table_name, table_data)
        self.assert_(os.path.exists(file_path))
        actual = numpy.fromfile(file_path, dtype=numpy_dtype)
        self.assert_((expected==actual).all())
    def test_write_float_and_boolean_array(self):
        expected_float = array([100.17, 70.00])
        expected_bool = array([True, False])
        table_data = {
            'float_column': expected_float,
            'bool_column': expected_bool,
        }
        # Float extension depends on host endianness; booleans are always '.ib1'.
        if sys.byteorder=='little':
            file_name = 'float_column.lf8'
            numpy_ext = '<f8'
        else:
            file_name = 'float_column.bf8'
            numpy_ext = '>f8'
        file_path = os.path.join(self.temp_dir, self.table_name, file_name)
        self.storage.write_table(self.table_name, table_data)
        self.assert_(os.path.exists(file_path))
        actual = fromfile(file_path, numpy_ext)
        self.assert_((expected_float == actual).all())
        file_path = os.path.join(self.temp_dir, self.table_name, 'bool_column.ib1')
        self.storage.write_table(self.table_name, table_data)
        self.assert_(os.path.exists(file_path))
        actual = fromfile(file_path, '|b1')
        self.assert_((expected_bool == actual).all())
    def test_writing_column_to_file_when_file_of_same_column_name_and_different_type_already_exists(self):
        # OVERWRITE mode must delete the single stale file with the old dtype.
        column_name= "some_column"
        os.mkdir(os.path.join(self.temp_dir, self.table_name))
        existing_file = file(os.path.join(self.temp_dir , self.table_name, column_name + ".li4"), "w")
        existing_file.close()
        storage = file_flt_storage(storage_location=self.temp_dir)
        # Test writing
        my_data = { column_name: array([9,99,999], dtype='<i8') }
        storage.write_table(table_name=self.table_name, table_data=my_data)
        self.assert_(not (os.path.exists(existing_file.name)))
        self.assert_(os.path.exists(os.path.join(self.temp_dir, self.table_name, column_name + ".li8")))
    def test_writing_column_to_file_when_two_files_of_same_column_name_and_different_type_already_exist(self):
        # Two conflicting files for one column is ambiguous -> FltError, no write.
        column_name= "some_column"
        os.mkdir(os.path.join(self.temp_dir, self.table_name))
        existing_file_1 = file(os.path.join(self.temp_dir , self.table_name, column_name + ".li4"), "w")
        existing_file_1.close()
        existing_file_2 = file(os.path.join(self.temp_dir , self.table_name, column_name + ".bi4"), "w")
        existing_file_2.close()
        storage = file_flt_storage(storage_location=self.temp_dir)
        # Test writing
        my_data = { column_name: array([9,99,999], dtype='<i8') }
        self.assertRaises(FltError, storage.write_table, self.table_name, my_data)
        self.assert_(not (os.path.exists(os.path.join(self.temp_dir, self.table_name, column_name + ".li8"))))
if __name__ == '__main__':
    # Run the test suite defined above.
    opus_unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package addrs
import (
"fmt"
"testing"
)
// TestInstanceKeyString verifies InstanceKey.String for both key kinds:
// integer keys render as [N], while string keys render quoted with quote
// characters, backslash escapes, and template/control sequences ("${",
// "%{") escaped so the rendered form parses back as the literal key.
func TestInstanceKeyString(t *testing.T) {
	tests := []struct {
		Key  InstanceKey
		Want string
	}{
		{
			IntKey(0),
			`[0]`,
		},
		{
			IntKey(5),
			`[5]`,
		},
		{
			StringKey(""),
			`[""]`,
		},
		{
			StringKey("hi"),
			`["hi"]`,
		},
		{
			StringKey("0"),
			`["0"]`, // intentionally distinct from IntKey(0)
		},
		{
			// Quotes must be escaped
			StringKey(`"`),
			`["\""]`,
		},
		{
			// Escape sequences must themselves be escaped
			StringKey(`\r\n`),
			`["\\r\\n"]`,
		},
		{
			// Template interpolation sequences "${" must be escaped.
			StringKey(`${hello}`),
			`["$${hello}"]`,
		},
		{
			// Template control sequences "%{" must be escaped.
			StringKey(`%{ for something in something }%{ endfor }`),
			`["%%{ for something in something }%%{ endfor }"]`,
		},
		{
			// Dollar signs that aren't followed by { are not interpolation sequences
			StringKey(`$hello`),
			`["$hello"]`,
		},
		{
			// Percent signs that aren't followed by { are not control sequences
			StringKey(`%hello`),
			`["%hello"]`,
		},
	}
	for _, test := range tests {
		testName := fmt.Sprintf("%#v", test.Key)
		t.Run(testName, func(t *testing.T) {
			got := test.Key.String()
			want := test.Want
			if got != want {
				// Fixed typo in failure message: "reciever" -> "receiver".
				t.Errorf("wrong result\nreceiver: %s\ngot: %s\nwant: %s", testName, got, want)
			}
		})
	}
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/addrs/instance_key_test.go
|
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support bind()
use tokio::net::TcpListener;
use tokio_test::assert_ok;
use std::io::prelude::*;
use std::net::TcpStream;
use std::thread;
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `socket` on miri.
async fn echo_server() {
    // Two blocking std TcpStream clients talk to one async TcpListener:
    // the first thread writes `msg` N times, the async side copies bytes
    // from the first accepted connection into the second, and a nested
    // thread reads everything back for comparison.
    const N: usize = 1024;
    let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = assert_ok!(srv.local_addr());
    let msg = "foo bar baz";
    let t = thread::spawn(move || {
        let mut s = assert_ok!(TcpStream::connect(addr));
        // Reader thread: expects all N copies of `msg`, byte for byte.
        let t2 = thread::spawn(move || {
            let mut s = assert_ok!(TcpStream::connect(addr));
            let mut b = vec![0; msg.len() * N];
            assert_ok!(s.read_exact(&mut b));
            b
        });
        let mut expected = Vec::<u8>::new();
        for _i in 0..N {
            expected.extend(msg.as_bytes());
            let res = assert_ok!(s.write(msg.as_bytes()));
            assert_eq!(res, msg.len());
        }
        (expected, t2)
    });
    // The writer connects before the reader thread is spawned, so the first
    // accepted connection should be the writer's (`a`); copy a -> b.
    let (mut a, _) = assert_ok!(srv.accept().await);
    let (mut b, _) = assert_ok!(srv.accept().await);
    let n = assert_ok!(tokio::io::copy(&mut a, &mut b).await);
    let (expected, t2) = t.join().unwrap();
    let actual = t2.join().unwrap();
    assert!(expected == actual);
    assert_eq!(n, msg.len() as u64 * 1024);
}
|
rust
|
github
|
https://github.com/tokio-rs/tokio
|
tokio/tests/buffered.rs
|
# --- import --------------------------------------------------------------------------------------
import os
import numpy as np
import WrightTools as wt
from . import _pulse
from ._scan import Scan
# --- define --------------------------------------------------------------------------------------
# Directory containing this module.
here = os.path.abspath(os.path.dirname(__file__))
# integration defaults
# NOTE(review): units are not stated here; presumably the simulation's native
# time units (copied onto each Experiment in __init__) -- confirm.
timestep = 4.0
early_buffer = 100.0
late_buffer = 400.0
# --- class ---------------------------------------------------------------------------------------
class Experiment:
    """A simulated multi-pulse experiment.

    Bundles the scan axes, the phase matching condition, integration
    settings (initialized from the module-level defaults above), and one
    pulse object per entry of ``pm``.
    """
    def __init__(self, axes, name, pm, pulse_class):
        """
        Parameters
        ----------
        axes : list
            Axis objects; each must have ``name`` and ``active`` attributes.
            Each axis is also bound to the instance as an attribute of its
            own name.
        name : string
            Name of the experiment.
        pm : list
            Phase matching condition; its length determines the number of
            pulses.
        pulse_class : class
            Pulse class, instantiated once (no arguments) per entry of ``pm``.
        """
        # basic attributes
        self.axes = axes
        for a in self.axes:
            setattr(self, a.name, a)
        self.name = name
        self.pm = pm
        self.npulses = len(pm)
        # integration settings start from the module-level defaults
        self.timestep = timestep
        self.early_buffer = early_buffer
        self.late_buffer = late_buffer
        # pulse
        self.pulse_class = pulse_class
        self.pulses = [self.pulse_class() for _ in self.pm]
    def __repr__(self):
        return '<WrightSim.Experiment object \'{0}\' at {1}>'.format(self.name, str(id(self)))
    @property
    def active_axes(self):
        # Axes activated via set_axis (those with a.active truthy).
        return [a for a in self.axes if a.active]
    @property
    def axis_names(self):
        # Names of all axes, active or not, in declaration order.
        return [a.name for a in self.axes]
    def run(self, hamiltonian, mp=True):
        """Run the experiment.
        Parameters
        ----------
        hamiltonian : WrightSim Hamiltonian
            Hamiltonian.
        mp : boolean (optional)
            Toggle CPU multiprocessing. Default is True.
        Returns
        -------
        WrightSim Scan
            Scan that was run."""
        out = Scan(self, hamiltonian)
        out.run(mp=mp)
        # finish
        return out
    def set_axis(self, axis_name, points):
        '''
        Activate and define points for one of the experimental axes.
        Parameters
        ----------
        axis_name : string
            Name of axis.
        points : 1D array-like
            Points (in native units) to scan over.

        Raises ValueError (from list.index) if axis_name is unknown.
        '''
        # TODO: is there a way to prevent incompatible axes being simultaniously activated?
        axis_index = self.axis_names.index(axis_name)
        axis = self.axes[axis_index]
        axis.points = points
        axis.active = True
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2014-2024 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.config.yaml
import com.charleskorn.kaml.Yaml
import com.charleskorn.kaml.YamlMap
import kotlinx.serialization.decodeFromString
import java.io.File
/**
 * Loads a configuration from the YAML file, if found.
 * On JVM the classpath resource is tried first; otherwise the file system is read.
 * On Native, always reads a configuration from a file.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.config.yaml.YamlConfig)
 */
@Suppress("ktlint:standard:function-naming")
public actual fun YamlConfig(path: String?): YamlConfig? {
    // No path -> default file name; a non-YAML path is rejected outright.
    val candidate = when {
        path == null -> DEFAULT_YAML_FILENAME
        path.endsWith(".yaml") || path.endsWith(".yml") -> path
        else -> return null
    }
    // Classpath resources take precedence over the working directory.
    val resource = Thread.currentThread().contextClassLoader.getResource(candidate)
    if (resource != null) {
        val text = resource.openStream().use { stream -> String(stream.readBytes()) }
        return configFromString(text)
    }
    val file = File(candidate)
    return if (file.exists()) configFromString(file.readText()) else null
}
private fun configFromString(content: String): YamlConfig {
    // Decode the raw document into a YamlMap, then wrap it in a YamlConfig.
    val parsed: YamlMap = Yaml.default.decodeFromString(content)
    return YamlConfig.from(parsed)
}
internal actual fun getSystemPropertyOrEnvironmentVariable(key: String): String? =
    // System property wins; the environment variable is the fallback.
    System.getProperty(key) ?: System.getenv(key)
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-server/ktor-server-config-yaml/jvm/src/io/ktor/server/config/yaml/YamlConfigJvm.kt
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation consumed by ansible-doc. Fixed two typos in the text:
# "A hypervisor type type" -> "A hypervisor type", and
# "attribute which with to sort" -> "attribute with which to sort".
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: '2.0'
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
  - Returns list of matching AMIs with AMI ID, along with other useful information
  - Can search AMIs with different owners
  - Can search by matching tag(s), by AMI name and/or other criteria
  - Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
  - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
  - See the example below for a suggestion of how to search by distro/release.
options:
  region:
    description:
      - The AWS region to use.
    required: true
    aliases: [ 'aws_region', 'ec2_region' ]
  owner:
    description:
      - Search AMIs owned by the specified owner
      - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
      - If not specified, all EC2 AMIs in the specified region will be searched.
      - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
    required: false
    default: null
  ami_id:
    description:
      - An AMI ID to match.
    default: null
    required: false
  ami_tags:
    description:
      - A hash/dictionary of tags to match for the AMI.
    default: null
    required: false
  architecture:
    description:
      - An architecture type to match (e.g. x86_64).
    default: null
    required: false
  hypervisor:
    description:
      - A hypervisor type to match (e.g. xen).
    default: null
    required: false
  is_public:
    description:
      - Whether or not the image(s) are public.
    choices: ['yes', 'no']
    default: null
    required: false
  name:
    description:
      - An AMI name to match.
    default: null
    required: false
  platform:
    description:
      - Platform type to match.
    default: null
    required: false
  sort:
    description:
      - Optional attribute with which to sort the results.
      - If specifying 'tag', the 'tag_name' parameter is required.
    choices: ['name', 'description', 'tag']
    default: null
    required: false
  sort_tag:
    description:
      - Tag name with which to sort results.
      - Required when specifying 'sort=tag'.
    default: null
    required: false
  sort_order:
    description:
      - Order in which to sort results.
      - Only used when the 'sort' parameter is specified.
    choices: ['ascending', 'descending']
    default: 'ascending'
    required: false
  sort_start:
    description:
      - Which result to start with (when sorting).
      - Corresponds to Python slice notation.
    default: null
    required: false
  sort_end:
    description:
      - Which result to end with (when sorting).
      - Corresponds to Python slice notation.
    default: null
    required: false
  state:
    description:
      - AMI state to match.
    default: 'available'
    required: false
  virtualization_type:
    description:
      - Virtualization type to match (e.g. hvm).
    default: null
    required: false
  no_result_action:
    description:
      - What to do when no results are found.
      - "'success' reports success and returns an empty array"
      - "'fail' causes the module to report failure"
    choices: ['success', 'fail']
    default: 'success'
    required: false
requirements:
  - "python >= 2.6"
  - boto
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Search for the AMI tagged "project:website"
- ec2_ami_find:
    owner: self
    ami_tags:
      project: website
    no_result_action: fail
  register: ami_find

# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
    name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
    owner: 099720109477
    sort: name
    sort_order: descending
    sort_end: 1
  register: ami_find

# Launch an EC2 instance
- ec2:
    image: "{{ ami_find.results[0].ami_id }}"
    instance_type: m3.medium
    key_name: mykey
    wait: yes
'''
try:
    import boto.ec2
    HAS_BOTO=True
except ImportError:
    # boto may be absent; main() reports a clean failure instead of crashing.
    HAS_BOTO=False
import json
def main():
    """Search EC2 for AMIs matching the module parameters and exit with the
    matches as a list of dicts (sorted and sliced if requested).

    Fails via module.fail_json when boto is missing, when no AMI matches and
    no_result_action=fail, when sort=tag is given without sort_tag, or when
    sort_start/sort_end are not numeric.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            region = dict(required=True,
                aliases = ['aws_region', 'ec2_region']),
            owner = dict(required=False, default=None),
            ami_id = dict(required=False),
            ami_tags = dict(required=False, type='dict',
                aliases = ['search_tags', 'image_tags']),
            architecture = dict(required=False),
            hypervisor = dict(required=False),
            is_public = dict(required=False),
            name = dict(required=False),
            platform = dict(required=False),
            sort = dict(required=False, default=None,
                choices=['name', 'description', 'tag']),
            sort_tag = dict(required=False),
            sort_order = dict(required=False, default='ascending',
                choices=['ascending', 'descending']),
            sort_start = dict(required=False),
            sort_end = dict(required=False),
            state = dict(required=False, default='available'),
            virtualization_type = dict(required=False),
            no_result_action = dict(required=False, default='success',
                choices = ['success', 'fail']),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module, install via pip or your package manager')

    ami_id = module.params.get('ami_id')
    ami_tags = module.params.get('ami_tags')
    architecture = module.params.get('architecture')
    hypervisor = module.params.get('hypervisor')
    is_public = module.params.get('is_public')
    name = module.params.get('name')
    owner = module.params.get('owner')
    platform = module.params.get('platform')
    sort = module.params.get('sort')
    sort_tag = module.params.get('sort_tag')
    sort_order = module.params.get('sort_order')
    sort_start = module.params.get('sort_start')
    sort_end = module.params.get('sort_end')
    state = module.params.get('state')
    virtualization_type = module.params.get('virtualization_type')
    no_result_action = module.params.get('no_result_action')

    # Build the EC2 API filter dict from the supplied parameters.
    # (Named 'search_filters' rather than 'filter' to avoid shadowing the builtin.)
    search_filters = {'state': state}

    if ami_id:
        search_filters['image_id'] = ami_id

    if ami_tags:
        for tag in ami_tags:
            search_filters['tag:'+tag] = ami_tags[tag]

    if architecture:
        search_filters['architecture'] = architecture

    if hypervisor:
        search_filters['hypervisor'] = hypervisor

    if is_public:
        search_filters['is_public'] = is_public

    if name:
        search_filters['name'] = name

    if platform:
        search_filters['platform'] = platform

    if virtualization_type:
        search_filters['virtualization_type'] = virtualization_type

    ec2 = ec2_connect(module)

    images_result = ec2.get_all_images(owners=owner, filters=search_filters)

    if no_result_action == 'fail' and len(images_result) == 0:
        module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(search_filters))

    results = []
    for image in images_result:
        data = {
            'ami_id': image.id,
            'architecture': image.architecture,
            'description': image.description,
            'is_public': image.is_public,
            'name': image.name,
            'owner_id': image.owner_id,
            'platform': image.platform,
            'root_device_name': image.root_device_name,
            'root_device_type': image.root_device_type,
            'state': image.state,
            'tags': image.tags,
            'virtualization_type': image.virtualization_type,
        }

        # Only present on some images.
        if image.kernel_id:
            data['kernel_id'] = image.kernel_id
        if image.ramdisk_id:
            data['ramdisk_id'] = image.ramdisk_id

        results.append(data)

    if sort == 'tag':
        if not sort_tag:
            module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
        results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
    elif sort:
        results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))

    try:
        if sort and sort_start and sort_end:
            results = results[int(sort_start):int(sort_end)]
        elif sort and sort_start:
            results = results[int(sort_start):]
        elif sort and sort_end:
            results = results[:int(sort_end)]
    except (TypeError, ValueError):
        # int() raises ValueError (not TypeError) for non-numeric strings;
        # the original only caught TypeError and so crashed on bad input.
        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")

    module.exit_json(results=results)
# import module snippets
# (star imports supply AnsibleModule, ec2_argument_spec, and ec2_connect used in main)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pytest
from diofant import (Derivative, E, I, O, PoleError, Rational, Symbol, acosh,
acoth, asin, asinh, atanh, besselk, cbrt, ceiling, cos,
cosh, cot, coth, exp, floor, limit, ln, log, pi, sign,
sin, sinh, sqrt, tan, tanh)
from diofant.abc import a, b, l, w, x, y, z
__all__ = ()
# Basic sanity checks of Expr.nseries on atoms, products, powers, and the
# geometric series.
def test_simple_1():
    assert x.nseries(x, n=5) == x
    assert y.nseries(x, n=5) == y
    assert (1/(x*y)).nseries(y, n=5) == 1/(x*y)
    assert Rational(3, 4).nseries(x, n=5) == Rational(3, 4)
    assert x.nseries(x) == x
def test_mul_0():
    assert (x*ln(x)).nseries(x, n=5) == x*ln(x)
def test_mul_1():
    assert (x*ln(2 + x)).nseries(x, n=4) == x*log(2) + x**2/2 - x**3/8 + \
        x**4/24 + O(x**5)
    assert (x*ln(1 + x)).nseries(x, n=4) == x**2 - x**3/2 + x**4/3 + O(x**5)
def test_pow_0():
    assert (x**2).nseries(x, n=5) == x**2
    assert (1/x).nseries(x, n=5) == 1/x
    assert (1/x**2).nseries(x, n=5) == 1/x**2
    assert (x**Rational(2, 3)).nseries(x, n=5) == (x**Rational(2, 3))
    assert (sqrt(x)**3).nseries(x, n=5) == (sqrt(x)**3)
def test_pow_1():
    assert ((1 + x)**2).nseries(x, n=5) == 1 + 2*x + x**2
def test_geometric_1():
    assert (1/(1 - x)).nseries(x, n=5) == 1 + x + x**2 + x**3 + x**4 + O(x**5)
    assert (x/(1 - x)).nseries(x) == x + x**2 + x**3 + x**4 + x**5 + \
        x**6 + O(x**7)
    assert (x**3/(1 - x)).nseries(x, n=5) == x**3 + x**4 + x**5 + x**6 + \
        x**7 + O(x**8)
# Expansions of sqrt/exp and x**x style expressions, and series of
# expressions whose expansions contain log(x) terms.
def test_sqrt_1():
    assert sqrt(1 + x).nseries(x, n=5) == 1 + x/2 - x**2/8 + x**3/16 - 5*x**4/128 + O(x**5)
def test_exp_1():
    assert exp(x).nseries(x, n=5) == 1 + x + x**2/2 + x**3/6 + x**4/24 + O(x**5)
    assert exp(x).nseries(x, n=12) == 1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + \
        x**6/720 + x**7/5040 + x**8/40320 + x**9/362880 + x**10/3628800 + \
        x**11/39916800 + O(x**12)
    assert exp(1/x).nseries(x, n=5) == exp(1/x)
    assert exp(1/(1 + x)).nseries(x, n=4) == \
        (E*(1 - x - 13*x**3/6 + 3*x**2/2)).expand() + O(x**4)
    assert exp(2 + x).nseries(x, n=5) == \
        (exp(2)*(1 + x + x**2/2 + x**3/6 + x**4/24)).expand() + O(x**5)
def test_exp_sqrt_1():
    assert exp(1 + sqrt(x)).nseries(x, n=4) == \
        E + E*x/2 + E*sqrt(x) + E*x**Rational(3, 2)/6 + O(x**2)
def test_power_x_x1():
    assert (exp(x*ln(x))).nseries(x, n=4) == \
        1 + x*log(x) + x**2*log(x)**2/2 + x**3*log(x)**3/6 + O(x**4*log(x)**4)
def test_power_x_x2():
    assert (x**x).nseries(x, n=4) == \
        1 + x*log(x) + x**2*log(x)**2/2 + x**3*log(x)**3/6 + O(x**4*log(x)**4)
def test_log_singular1():
    assert log(1 + 1/x).nseries(x, n=5) == x - log(x) - x**2/2 + x**3/3 - \
        x**4/4 + O(x**5)
def test_log_power1():
    e = 1 / (1/x + x ** (log(3)/log(2)))
    assert e.nseries(x, n=2) == x - x**(2 + log(3)/log(2)) + \
        O(x**(3 + 2*log(3)/log(2)))
# nseries with logarithms -- including the 'logx' argument, which
# substitutes a plain symbol for log(x) -- plus historical regressions.
def test_log_series():
    e = 1/(1 - log(x))
    assert e.nseries(x, n=5, logx=l) == 1/(1 - l)
def test_log2():
    e = log(-1/x)
    assert e.nseries(x, n=5) == -log(x) + log(-1)
def test_log3():
    e = 1/log(-1/x)
    assert e.nseries(x, n=4, logx=l) == 1/(-l + log(-1))
def test_series1():
    e = sin(x)
    assert e.nseries(x, 0) != 0
    assert e.nseries(x, 0) == O(x)
    assert e.nseries(x, 1) == O(x)
    assert e.nseries(x, 2) == x + O(x**3)
    assert e.nseries(x, 3) == x + O(x**3)
    assert e.nseries(x, 4) == x - x**3/6 + O(x**5)
    e = (exp(x) - 1)/x
    assert e.nseries(x, 4) == 1 + x/2 + x**2/6 + O(x**3)
    assert x.nseries(x, 2) == x
@pytest.mark.xfail
def test_series1_failing():
    assert x.nseries(x, 0) == O(1, x)
    assert x.nseries(x, 1) == O(x)
def test_seriesbug1():
    assert (1/x).nseries(x, 3) == 1/x
    assert (x + 1/x).nseries(x, 3) == x + 1/x
def test_series2x():
    assert ((x + 1)**(-2)).nseries(x, 4) == 1 - 2*x + 3*x**2 - 4*x**3 + O(x**4)
    assert ((x + 1)**(-1)).nseries(x, 4) == 1 - x + x**2 - x**3 + O(x**4)
    assert ((x + 1)**0).nseries(x, 3) == 1
    assert ((x + 1)**1).nseries(x, 3) == 1 + x
    assert ((x + 1)**2).nseries(x, 3) == 1 + 2*x + x**2
    assert ((x + 1)**3).nseries(x, 3) == 1 + 3*x + 3*x**2 + O(x**3)
    assert (1/(1 + x)).nseries(x, 4) == 1 - x + x**2 - x**3 + O(x**4)
    assert (x + 3/(1 + 2*x)).nseries(x, 4) == 3 - 5*x + 12*x**2 - 24*x**3 + O(x**4)
    assert ((1/x + 1)**3).nseries(x, 4) == 1 + x**(-3) + 3*x**(-2) + 3/x
    assert (1/(1 + 1/x)).nseries(x, 3) == x - x**2 + x**3 - O(x**4)
    assert (1/(1 + 1/x**2)).nseries(x, 2) == x**2 - x**4 + O(x**6)
def test_bug2(): # 1/log(0) * log(0) problem
e = (w**(-1) + w**(
-log(3)*log(2)**(-1)))**(-1)*(3*w**(-log(3)*log(2)**(-1)) + 2*w**(-1))
e = e.expand()
assert e.nseries(w, 4).subs({w: 0}) == 3
def test_exp():
    e = (1 + x)**(1/x)
    assert e.nseries(x, n=3) == exp(1) - x*exp(1)/2 + O(x**2)


def test_exp2():
    # exponent depends on log(x); expansion in w cannot proceed
    e = w**(1 - log(x)/(log(2) + log(x)))
    assert e.nseries(w, n=1) == e


def test_bug3():
    e = (2/x + 3/x**2)/(1/x + 1/x**2)
    assert e.nseries(x, n=3) == 3 - x + x**2 + O(x**3)


def test_generalexponent():
    # same rational function with integer and half-integer pole orders
    p = 2
    e = (2/x + 3/x**p)/(1/x + 1/x**p)
    assert e.nseries(x, 3) == 3 - x + x**2 + O(x**3)
    p = Rational(1, 2)
    e = (2/x + 3/x**p)/(1/x + 1/x**p)
    assert e.nseries(x, 4) == 2 - x + sqrt(x) + x**Rational(3, 2) + O(x**2)
    e = 1 + sqrt(x)
    assert e.nseries(x, 4) == 1 + sqrt(x)


# more complicated example
def test_genexp_x():
    e = 1/(1 + sqrt(x))
    assert e.nseries(x, 4) == 1 + x - sqrt(x) - sqrt(x)**3 + O(x**2)


# more complicated example
def test_genexp_x2():
    p = Rational(3, 2)
    e = (2/x + 3/x**p)/(1/x + 1/x**p)
    assert e.nseries(x, 4) == 3 + x - sqrt(x) - x**p + O(x**2)


def test_seriesbug2():
    # simple case (1):
    e = ((2*w)/w)**(1 + w)
    assert e.nseries(w, 1) == 2 + O(w)
    assert e.nseries(w, 1).subs({w: 0}) == 2


def test_seriesbug2b():
    # test sin
    e = sin(2*w)/w
    assert e.nseries(w, 3) == 2 + O(w**2)


def test_seriesbug2d():
    w = Symbol('w', extended_real=True)
    e = log(sin(2*w)/w)
    assert e.series(w, n=5) == log(2) - 2*w**2/3 - 4*w**4/45 + O(w**5)


def test_seriesbug2c():
    w = Symbol('w', extended_real=True)
    # more complicated case, but sin(x)~x, so the result is the same as in (1)
    e = (sin(2*w)/w)**(1 + w)
    assert e.series(w, 0, 1) == 2 + O(w)
    assert e.series(w, 0, 3) == 2 + 2*w*log(2) + \
        w**2*(-Rational(4, 3) + log(2)**2) + O(w**3)
    assert e.series(w, 0, 2).subs({w: 0}) == 2


def test_expbug4():
    x = Symbol('x', extended_real=True)
    assert (log(
        sin(2*x)/x)*(1 + x)).series(x, 0, 2) == log(2) + x*log(2) + O(x**2)
    assert exp(
        log(sin(2*x)/x)*(1 + x)).series(x, 0, 2) == 2 + 2*x*log(2) + O(x**2)
    assert exp(log(2) + O(x)).nseries(x, 1) == 2 + O(x)
    assert ((2 + O(x))**(1 + x)).nseries(x, 1) == 2 + O(x)


def test_logbug4():
    assert log(2 + O(x)).nseries(x, 1) == log(2) + O(x)


def test_expbug5():
    assert exp(log(1 + x)/x).nseries(x, n=3) == exp(1) + -exp(1)*x/2 + O(x**2)
    assert exp(O(x)).nseries(x, 1) == 1 + O(x)


def test_sinsinbug():
    assert sin(sin(x)).nseries(x, 8) == x - x**3/3 + x**5/10 - 8*x**7/315 + O(x**9)


def test_sympyissue_3258():
    a = x/(exp(x) - 1)
    assert a.nseries(x) == 1 - x/2 - x**4/720 + x**2/12 + O(x**5)


def test_sympyissue_3204():
    x = Symbol('x', nonnegative=True)
    f = cbrt(sin(x**3))
    assert f.nseries(x) == x - x**7/18 - x**13/3240 + O(x**19)


def test_sympyissue_3224():
    f = sqrt(1 - sqrt(y))
    assert f.nseries(y, 4) == 1 - sqrt(y)/2 - y/8 - sqrt(y)**3/16 + O(y**2)


def test_sympyissue_3463():
    r = log(5)/log(3)
    p = w**(-1 + r)
    e = 1/x*(-log(w**(1 + r)) + log(w + w**r))
    e_ser = -r*log(w)/x + p/x - p**2/(2*x) + O(p**3)
    assert (e.nseries(w, n=3) - e_ser).removeO().simplify() == 0
def test_sin():
    assert sin(8*x).nseries(x, n=4) == 8*x - 256*x**3/3 + O(x**5)
    # expansion about x=0 with a symbolic offset y
    assert sin(x + y).nseries(x, n=1) == sin(y) + O(x)
    assert sin(x + y).nseries(x, n=2) == sin(y) + cos(y)*x + O(x**2)
    assert sin(x + y).nseries(x, n=5) == sin(y) + cos(y)*x - sin(y)*x**2/2 - \
        cos(y)*x**3/6 + sin(y)*x**4/24 + O(x**5)


def test_sympyissue_3515():
    e = sin(8*x)/x
    assert e.nseries(x, n=6) == 8 - 256*x**2/3 + 4096*x**4/15 + O(x**6)


def test_sympyissue_3505():
    e = sin(x)**(-4)*(sqrt(cos(x))*sin(x)**2 - cbrt(cos(x))*sin(x)**2)
    assert e.nseries(x, n=8) == -Rational(1, 12) - 7*x**2/288 - \
        43*x**4/10368 + O(x**6)


def test_sympyissue_3501():
    e = x**(-2)*(x*sin(a + x) - x*sin(a))
    assert e.nseries(x, n=5) == cos(a) - sin(a)*x/2 - cos(a)*x**2/6 + \
        sin(a)*x**3/24 + O(x**4)
    e = x**(-2)*(x*cos(a + x) - x*cos(a))
    assert e.nseries(x, n=5) == -sin(a) - cos(a)*x/2 + sin(a)*x**2/6 + \
        cos(a)*x**3/24 + O(x**4)


def test_sympyissue_3502():
    e = sin(5*x)/sin(2*x)
    assert e.nseries(x, n=2) == Rational(5, 2) + O(x**2)
    assert e.nseries(x, n=6) == \
        Rational(5, 2) - 35*x**2/4 + 329*x**4/48 + O(x**6)


def test_sympyissue_3503():
    e = sin(2 + x)/(2 + x)
    assert e.nseries(x, n=2) == sin(2)/2 + x*(-sin(2)/4 + cos(2)/2) + O(x**2)


def test_sympyissue_3506():
    e = (x + sin(3*x))**(-2)*(x*(x + sin(3*x)) - (x + sin(3*x))*sin(2*x))
    assert e.nseries(x) == -Rational(1, 4) + 5*x**2/96 + 91*x**4/768 + O(x**6)


def test_sympyissue_3508():
    x = Symbol('x', extended_real=True)
    assert log(sin(x)).series(x, n=5) == log(x) - x**2/6 - x**4/180 + O(x**5)
    e = -log(x) + x*(-log(x) + log(sin(2*x))) + log(sin(2*x))
    assert e.series(x, n=5) == \
        log(2) + log(2)*x - 2*x**2/3 - 2*x**3/3 - 4*x**4/45 + O(x**5)


def test_sympyissue_3507():
    e = x**(-4)*(x**2 - x**2*sqrt(cos(x)))
    assert e.nseries(x, n=7) == \
        Rational(1, 4) + x**2/96 + 19*x**4/5760 + O(x**6)


def test_sympyissue_3639():
    assert sin(cos(x)).nseries(x, n=5) == \
        sin(1) - x**2*cos(1)/2 + x**4*(-sin(1)/8 + cos(1)/24) + O(x**5)


def test_hyperbolic():
    # hyperbolic and inverse hyperbolic Maclaurin expansions
    assert sinh(x).nseries(x, n=6) == x + x**3/6 + x**5/120 + O(x**7)
    assert cosh(x).nseries(x, n=5) == 1 + x**2/2 + x**4/24 + O(x**6)
    assert tanh(x).nseries(x, n=6) == x - x**3/3 + 2*x**5/15 + O(x**7)
    assert coth(x).nseries(x, n=6) == \
        1/x - x**3/45 + x/3 + 2*x**5/945 + O(x**7)
    assert asinh(x).nseries(x, n=6) == x - x**3/6 + 3*x**5/40 + O(x**7)
    assert acosh(x).nseries(x, n=6) == \
        pi*I/2 - I*x - 3*I*x**5/40 - I*x**3/6 + O(x**7)
    assert atanh(x).nseries(x, n=6) == x + x**3/3 + x**5/5 + O(x**7)
    assert acoth(x).nseries(x, n=6) == x + x**3/3 + x**5/5 + pi*I/2 + O(x**7)


def test_series2():
    w = Symbol('w', extended_real=True)
    x = Symbol('x', extended_real=True)
    e = w**(-2)*(w*exp(1/x - w) - w*exp(1/x))
    assert e.nseries(w, n=3) == -exp(1/x) + w*exp(1/x)/2 + O(w**2)


def test_series3():
    w = Symbol('w', extended_real=True)
    e = w**(-6)*(w**3*tan(w) - w**3*sin(w))
    assert e.nseries(w, n=5) == Rational(1, 2) + O(w**2)


def test_bug4():
    e = x/(w**4 + x**2*w**4 + 2*x*w**4)*w**4
    # the simplified result may come back in any of these equivalent forms
    assert e.nseries(w, n=2).simplify() in [x/(1 + 2*x + x**2),
                                            1/(1 + x/2 + 1/x/2)/2,
                                            1/x/(1 + 2/x + x**(-2))]


def test_bug5():
    e = (-log(w) + log(1 + w*log(x)))**(-2)*w**(-2)*((-log(w) +
        log(1 + x*w))*(-log(w) + log(1 + w*log(x)))*w - x*(-log(w) +
        log(1 + w*log(x)))*w)
    assert e.nseries(w, n=1, logx=l) == (x/l + 1)/w + O(1, w)
    assert e.nseries(w, n=2, logx=l) == x*log(x)/l**2 + log(x)/l - \
        x/l + (1 + x/l)/w + O(w)


def test_sympyissue_4115():
    assert (sin(x)/(1 - cos(x))).nseries(x, n=4) == 2/x + O(x)
    assert (sin(x)**2/(1 - cos(x))).nseries(x, n=2) == 2 + O(x**2)
def test_pole():
    # expansions at an essential singularity must raise PoleError
    pytest.raises(PoleError, lambda: sin(1/x).series(x, 0, 5))
    pytest.raises(PoleError, lambda: sin(1 + 1/x).series(x, 0, 5))
    pytest.raises(PoleError, lambda: (x*sin(1/x)).series(x, 0, 5))
    pytest.raises(PoleError, lambda: besselk(0, x).series(x, 0, 2))


def test_expsinbug():
    assert exp(sin(x)).series(x, 0, 0) == O(1, x)
    assert exp(sin(x)).series(x, 0, 1) == 1 + O(x)
    assert exp(sin(x)).series(x, 0, 2) == 1 + x + O(x**2)
    assert exp(sin(x)).series(x, 0, 3) == 1 + x + x**2/2 + O(x**3)
    assert exp(sin(x)).series(x, 0, 4) == 1 + x + x**2/2 + O(x**4)
    assert exp(sin(x)).series(x, 0, 5) == 1 + x + x**2/2 - x**4/8 + O(x**5)


def test_floor():
    # floor "series" collapses to the constant one-sided value near the point
    x = Symbol('x')
    assert floor(x).series(x) == 0
    assert floor(-x).series(x) == -1
    assert floor(sin(x)).series(x) == 0
    assert floor(sin(-x)).series(x) == -1
    assert floor(x**3).series(x) == 0
    assert floor(-x**3).series(x) == -1
    assert floor(cos(x)).series(x) == 0
    assert floor(cos(-x)).series(x) == 0
    assert floor(5 + sin(x)).series(x) == 5
    assert floor(5 + sin(-x)).series(x) == 4
    assert floor(x).series(x, 2) == 2
    assert floor(-x).series(x, 2) == -3
    x = Symbol('x', negative=True)
    assert floor(x + 1.5).series(x) == 1


def test_ceiling():
    assert ceiling(x).series(x) == 1
    assert ceiling(-x).series(x) == 0
    assert ceiling(sin(x)).series(x) == 1
    assert ceiling(sin(-x)).series(x) == 0
    assert ceiling(1 - cos(x)).series(x) == 1
    assert ceiling(1 - cos(-x)).series(x) == 1
    assert ceiling(x).series(x, 2) == 3
    assert ceiling(-x).series(x, 2) == -2


def test_abs():
    assert abs(x).nseries(x, n=4) == x
    assert abs(-x).nseries(x, n=4) == x
    assert abs(x + 1).nseries(x, n=4) == x + 1
    assert abs(sin(x)).nseries(x, n=4) == x - Rational(1, 6)*x**3 + O(x**5)
    assert abs(sin(-x)).nseries(x, n=4) == x - Rational(1, 6)*x**3 + O(x**5)
    assert abs(x - a).series(x, 1) == (x - a)*sign(1 - a)


def test_dir():
    # dir selects the one-sided approach direction at discontinuities
    assert abs(x).series(x, 0, dir='+') == x
    assert abs(x).series(x, 0, dir='-') == -x
    assert floor(x + 2).series(x, 0, dir='+') == 2
    assert floor(x + 2).series(x, 0, dir='-') == 1
    assert floor(x + 2.2).series(x, 0, dir='-') == 2
    assert ceiling(x + 2.2).series(x, 0, dir='-') == 3
    assert sin(x + y).series(x, 0, dir='-') == sin(x + y).series(x, 0, dir='+')


def test_sympyissue_3504():
    # expansion about a nonzero point x0=4
    e = asin(a*x)/x
    assert e.series(x, 4, n=2).removeO() == \
        (x - 4)*(a/(4*sqrt(-16*a**2 + 1)) - asin(4*a)/16) + asin(4*a)/4


def test_sympyissue_4441():
    f = 1/(1 + a*x)
    assert f.series(x, 0, 5) == 1 - a*x + a**2*x**2 - a**3*x**3 + \
        a**4*x**4 + O(x**5)
    f = 1/(1 + (a + b)*x)
    assert f.series(x, 0, 3) == 1 + x*(-a - b) + x**2*(a**2 + 2*a*b + b**2) + O(x**3)


def test_sympyissue_4329():
    assert tan(x).series(x, pi/2, n=3).removeO() == \
        -pi/6 + x/3 - 1/(x - pi/2)
    assert cot(x).series(x, pi, n=3).removeO() == \
        -x/3 + pi/3 + 1/(x - pi)
    assert limit(tan(x)**tan(2*x), x, pi/4) == exp(-1)


def test_sympyissue_5183():
    assert abs(x + x**2).series(n=1) == O(x)
    assert abs(x + x**2).series(n=2) == x + O(x**2)
    assert ((1 + x)**2).series(x, n=6) == 1 + 2*x + x**2
    assert (1 + 1/x).series() == 1 + 1/x
    assert Derivative(exp(x).series(), x).doit() == \
        1 + x + x**2/2 + x**3/6 + x**4/24 + Derivative(O(x**6), x)


def test_sympyissue_5654():
    # expansion about the non-rational point x0 = I*a
    assert (1/(x**2+a**2)**2).series(x, x0=I*a, n=0) == \
        -I/(4*a**3*(-I*a + x)) - 1/(4*a**2*(-I*a + x)**2) + O(1, (x, I*a))
    assert (1/(x**2+a**2)**2).series(x, x0=I*a, n=1) == 3/(16*a**4) \
        - I/(4*a**3*(-I*a + x)) - 1/(4*a**2*(-I*a + x)**2) + O(-I*a + x, (x, I*a))


def test_sympyissue_5925():
    sx = sqrt(x + z).series(z, 0, 1)
    sxy = sqrt(x + y + z).series(z, 0, 1)
    s1, s2 = sx.subs({x: x + y}), sxy
    assert (s1 - s2).expand().removeO().simplify() == 0
    sx = sqrt(x + z).series(z, 0, 1)
    sxy = sqrt(x + y + z).series(z, 0, 1)
    assert sxy.subs({x: 1, y: 2}) == sx.subs({x: 3})


def test_sympyissues_6235_6236():
    q = Symbol('q', positive=True)
    assert (((x - 1)**q + 1)/(x**q - 1)).nseries(x, n=2).removeO() == \
        (-1 - x**q + (-1)**(q + 1) + (-1)**(q + 1)*x**q +
         (-1)**q*q*x**(q + 1) + (-1)**q*q*x)
    assert (((x - 1)**q)/(x**q - 1)).nseries(x, n=2).removeO() == \
        (-1)**(q + 1) + (-1)**(q + 1)*x**q + (-1)**q*q*x**(q + 1) + (-1)**q*q*x


def test_diofantissue_210():
    # nseries order bookkeeping for cos(x**6): n counts terms in the inner
    # variable, so the O() exponent scales by 12
    assert cos(x**6).nseries(x, n=2) == 1 + O(x**12)
    assert cos(x**6).nseries(x, n=3) == 1 - x**12/2 + O(x**24)
    assert cos(x**6).nseries(x, n=4) == 1 - x**12/2 + O(x**24)
    assert cos(x**6).nseries(x, n=5) == 1 - x**12/2 + x**24/24 + O(x**36)
    # issue sympy/sympy#10503
    f = exp(x**3)*cos(x**6)
    assert f.series(x, n=14) == (1 + x**3 + x**6/2 +
                                 x**9/6 - 11*x**12/24 + O(x**14))
    assert f.series(x, n=15) == (1 + x**3 + x**6/2 +
                                 x**9/6 - 11*x**12/24 + O(x**15))
    assert f.series(x, n=16) == (1 + x**3 + x**6/2 + x**9/6 - 11*x**12/24 -
                                 59*x**15/120 + O(x**16))
|
unknown
|
codeparrot/codeparrot-clean
| ||
try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import pytest
import argparse
import inspect
import collections
import os
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
    """Register SQLAlchemy's command-line options with pytest.

    The options themselves are declared by ``plugin_base``; ``make_option``
    adapts plugin_base's optparse-style ``callback`` keyword to argparse.
    """
    group = parser.getgroup("sqlalchemy")

    def make_option(name, **kw):
        callback_ = kw.pop("callback", None)
        if callback_:
            # argparse has no optparse-style callbacks; emulate one with a
            # custom Action that invokes the callback when the option is seen
            class CallableAction(argparse.Action):
                def __call__(self, parser, namespace,
                             values, option_string=None):
                    callback_(option_string, values, parser)
            kw["action"] = CallableAction
        group.addoption(name, **kw)

    plugin_base.setup_options(make_option)
    plugin_base.read_config()
def pytest_configure(config):
    """Configure either an xdist follower process or the master session."""
    if hasattr(config, "slaveinput"):
        # we are a pytest-xdist follower: restore the config captured by
        # the master in pytest_configure_node
        plugin_base.restore_important_follower_config(config.slaveinput)
        plugin_base.configure_follower(
            config.slaveinput["follower_ident"]
        )

        if config.option.write_idents:
            with open(config.option.write_idents, "a") as file_:
                file_.write(config.slaveinput["follower_ident"] + "\n")
    else:
        # master process: start with a fresh idents file
        if config.option.write_idents and \
                os.path.exists(config.option.write_idents):
            os.remove(config.option.write_idents)

        plugin_base.pre_begin(config.option)

        plugin_base.set_coverage_flag(bool(getattr(config.option,
                                                   "cov_source", False)))

        plugin_base.set_skip_test(pytest.skip.Exception)
def pytest_sessionstart(session):
    # session-level startup delegated to plugin_base
    plugin_base.post_begin()


if has_xdist:
    import uuid

    def pytest_configure_node(node):
        # the master for each node fills slaveinput dictionary
        # which pytest-xdist will transfer to the subprocess
        plugin_base.memoize_important_follower_config(node.slaveinput)

        node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]

        from sqlalchemy.testing import provision
        provision.create_follower_db(node.slaveinput["follower_ident"])

    def pytest_testnodedown(node, error):
        # drop the per-follower database when an xdist node exits
        from sqlalchemy.testing import provision
        provision.drop_follower_db(node.slaveinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
    """Expand backend-specific test classes into per-database test cases."""
    # look for all those classes that specify __backend__ and
    # expand them out into per-database test cases.

    # this is much easier to do within pytest_pycollect_makeitem, however
    # pytest is iterating through cls.__dict__ as makeitem is
    # called which causes a "dictionary changed size" error on py3k.
    # I'd submit a pullreq for them to turn it into a list first, but
    # it's to suit the rather odd use case here which is that we are adding
    # new classes to a module on the fly.

    rebuilt_items = collections.defaultdict(list)
    # drop non-class items and private ("_"-prefixed) test classes
    items[:] = [
        item for item in
        items if isinstance(item.parent, pytest.Instance)
        and not item.parent.parent.name.startswith("_")]
    test_classes = set(item.parent for item in items)
    for test_class in test_classes:
        for sub_cls in plugin_base.generate_sub_tests(
                test_class.cls, test_class.parent.module):
            if sub_cls is not test_class.cls:
                list_ = rebuilt_items[test_class.cls]

                # re-collect the generated class so its tests appear in
                # place of the original class's tests
                for inst in pytest.Class(
                        sub_cls.__name__,
                        parent=test_class.parent.parent).collect():
                    list_.extend(inst.collect())

    newitems = []
    for item in items:
        if item.parent.cls in rebuilt_items:
            newitems.extend(rebuilt_items[item.parent.cls])
            rebuilt_items[item.parent.cls][:] = []
        else:
            newitems.append(item)

    # seems like the functions attached to a test class aren't sorted already?
    # is that true and why's that? (when using unittest, they're sorted)
    items[:] = sorted(newitems, key=lambda item: (
        item.parent.parent.parent.name,
        item.parent.parent.name,
        item.name
    ))
def pytest_pycollect_makeitem(collector, name, obj):
    """Collect only the classes/functions plugin_base says it wants."""
    if inspect.isclass(obj) and plugin_base.want_class(obj):
        return pytest.Class(name, parent=collector)
    elif inspect.isfunction(obj) and \
            isinstance(collector, pytest.Instance) and \
            plugin_base.want_method(collector.cls, obj):
        return pytest.Function(name, parent=collector)
    else:
        # an empty list tells pytest not to collect this object
        return []
# tracks the test class currently executing, for class-level setup/teardown
_current_class = None


def pytest_runtest_setup(item):
    # here we seem to get called only based on what we collected
    # in pytest_collection_modifyitems. So to do class-based stuff
    # we have to tear that out.
    global _current_class

    if not isinstance(item, pytest.Function):
        return

    # ... so we're doing a little dance here to figure it out...
    if _current_class is None:
        class_setup(item.parent.parent)
        _current_class = item.parent.parent

        # this is needed for the class-level, to ensure that the
        # teardown runs after the class is completed with its own
        # class-level teardown...
        def finalize():
            global _current_class
            class_teardown(item.parent.parent)
            _current_class = None
        item.parent.parent.addfinalizer(finalize)

    test_setup(item)
def pytest_runtest_teardown(item):
    # ...but this works better as the hook here rather than
    # using a finalizer, as the finalizer seems to get in the way
    # of the test reporting failures correctly (you get a bunch of
    # py.test assertion stuff instead)
    test_teardown(item)


# thin wrappers delegating per-test and per-class lifecycle to plugin_base

def test_setup(item):
    plugin_base.before_test(item, item.parent.module.__name__,
                            item.parent.cls, item.name)


def test_teardown(item):
    plugin_base.after_test(item)


def class_setup(item):
    plugin_base.start_test_class(item.cls)


def class_teardown(item):
    plugin_base.stop_test_class(item.cls)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import {useRef} from 'react';
import {addOne} from 'shared-runtime';
// NOTE(review): fixture filename ("error.capture-ref-for-mutation") suggests
// this is an expected-compile-error case: handleKey(...)() is invoked
// immediately during render, reading and writing currentPosition.current.
function useKeyCommand() {
  const currentPosition = useRef(0);

  // curried handler: for 'left' it advances the stored position via addOne,
  // otherwise it writes the position back unchanged
  const handleKey = direction => () => {
    const position = currentPosition.current;
    const nextPosition = direction === 'left' ? addOne(position) : position;
    currentPosition.current = nextPosition;
  };

  const moveLeft = {
    handler: handleKey('left')(),
  };
  const moveRight = {
    handler: handleKey('right')(),
  };

  return [moveLeft, moveRight];
}

export const FIXTURE_ENTRYPOINT = {
  fn: useKeyCommand,
  params: [],
};
|
typescript
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.capture-ref-for-mutation.tsx
|
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.marketplace import models as marketplace_models
from .. import tasks
from . import fixtures
@freeze_time('2020-02-01')
class TaskTest(test.APITransactionTestCase):
    """Tests for the task that rejects bookings whose slots are in the past.

    Time is frozen at 2020-02-01, so any schedule ending before that date
    counts as past.
    """

    def setUp(self):
        self.fixture = fixtures.BookingFixture()
        self.fixture.order_item.state = marketplace_models.OrderItem.States.EXECUTING
        self.fixture.order_item.save()

    def test_reject_past_booking(self):
        # both slots end before the frozen "now" -> resource is terminated
        self.fixture.resource.attributes['schedules'] = [
            {
                'start': '2020-01-01T02:00:00+03:00',
                'end': '2020-01-15T02:00:00+03:00',
                'id': '1',
            },
            {
                'start': '2020-01-16T02:00:00+03:00',
                'end': '2020-01-17T02:00:00+03:00',
                'id': '2',
            },
        ]
        self.fixture.resource.save()
        tasks.reject_past_bookings()
        self.fixture.resource.refresh_from_db()
        self.assertEqual(
            self.fixture.resource.state, marketplace_models.Resource.States.TERMINATED
        )

    def test_do_not_reject_actual_booking(self):
        # the second slot is still in the future -> resource state unchanged
        self.fixture.resource.attributes['schedules'] = [
            {
                'start': '2020-01-01T02:00:00+03:00',
                'end': '2020-01-15T02:00:00+03:00',
                'id': '1',
            },
            {
                'start': '2020-03-01T02:00:00+03:00',
                'end': '2020-03-15T02:00:00+03:00',
                'id': '2',
            },
        ]
        self.fixture.resource.save()
        tasks.reject_past_bookings()
        self.fixture.resource.refresh_from_db()
        self.assertEqual(
            self.fixture.resource.state, marketplace_models.Resource.States.CREATING
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""distutils.core
The only module that needs to be imported to use the Distutils; provides
the 'setup' function (which is to be called from the setup script). Also
indirectly provides the Distribution and Command classes, although they are
really defined in distutils.dist and distutils.cmd.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: core.py 65806 2008-08-18 11:13:45Z marc-andre.lemburg $"
import sys, os
from types import *
from distutils.debug import DEBUG
from distutils.errors import *
from distutils.util import grok_environment_error
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.config import PyPIRCCommand
from distutils.extension import Extension
# This is a barebones help message generated displayed when the user
# runs the setup script with no arguments at all. More useful help
# is generated with various --help options: global help, list commands,
# and per-command help.
USAGE = """\
usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
or: %(script)s --help [cmd1 cmd2 ...]
or: %(script)s --help-commands
or: %(script)s cmd --help
"""
def gen_usage(script_name):
    """Return the barebones usage banner with *script_name*'s basename
    substituted for the %(script)s placeholder in USAGE."""
    script = os.path.basename(script_name)
    return USAGE % {'script': script}
# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
# _setup_stop_after is set by run_setup() to tell setup() after which phase
# ("init", "config", "commandline") to stop and return early.
_setup_stop_after = None
# the Distribution instance produced by the most recent setup() call
_setup_distribution = None

# Legal keyword arguments for the setup() function
setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
                  'name', 'version', 'author', 'author_email',
                  'maintainer', 'maintainer_email', 'url', 'license',
                  'description', 'long_description', 'keywords',
                  'platforms', 'classifiers', 'download_url',
                  'requires', 'provides', 'obsoletes',
                  )

# Legal keyword arguments for the Extension constructor
extension_keywords = ('name', 'sources', 'include_dirs',
                      'define_macros', 'undef_macros',
                      'library_dirs', 'libraries', 'runtime_library_dirs',
                      'extra_objects', 'extra_compile_args', 'extra_link_args',
                      'swig_opts', 'export_symbols', 'depends', 'language')
# NOTE: this module is Python 2 code (comma-form raise/except, print
# statements); keep that syntax when editing.
def setup (**attrs):
    """The gateway to the Distutils: do everything your setup script needs
    to do, in a highly flexible and user-driven way. Briefly: create a
    Distribution instance; find and parse config files; parse the command
    line; run each Distutils command found there, customized by the options
    supplied to 'setup()' (as keyword arguments), in config files, and on
    the command line.

    The Distribution instance might be an instance of a class supplied via
    the 'distclass' keyword argument to 'setup'; if no such class is
    supplied, then the Distribution class (in dist.py) is instantiated.
    All other arguments to 'setup' (except for 'cmdclass') are used to set
    attributes of the Distribution instance.

    The 'cmdclass' argument, if supplied, is a dictionary mapping command
    names to command classes. Each command encountered on the command line
    will be turned into a command class, which is in turn instantiated; any
    class found in 'cmdclass' is used in place of the default, which is
    (for command 'foo_bar') class 'foo_bar' in module
    'distutils.command.foo_bar'. The command class must provide a
    'user_options' attribute which is a list of option specifiers for
    'distutils.fancy_getopt'. Any command-line options between the current
    and the next command are used to set attributes of the current command
    object.

    When the entire command-line has been successfully parsed, calls the
    'run()' method on each command object in turn. This method will be
    driven entirely by the Distribution object (which each command object
    has a reference to, thanks to its constructor), and the
    command-specific options that became attributes of each command
    object.
    """

    global _setup_stop_after, _setup_distribution

    # Determine the distribution class -- either caller-supplied or
    # our Distribution (see below).
    klass = attrs.get('distclass')
    if klass:
        del attrs['distclass']
    else:
        klass = Distribution

    if 'script_name' not in attrs:
        attrs['script_name'] = os.path.basename(sys.argv[0])
    if 'script_args' not in attrs:
        attrs['script_args'] = sys.argv[1:]

    # Create the Distribution instance, using the remaining arguments
    # (ie. everything except distclass) to initialize it
    try:
        _setup_distribution = dist = klass(attrs)
    except DistutilsSetupError, msg:
        if 'name' in attrs:
            raise SystemExit, "error in %s setup command: %s" % \
                  (attrs['name'], msg)
        else:
            raise SystemExit, "error in setup command: %s" % msg

    # early-exit hook used by run_setup()
    if _setup_stop_after == "init":
        return dist

    # Find and parse the config file(s): they will override options from
    # the setup script, but be overridden by the command line.
    dist.parse_config_files()

    if DEBUG:
        print "options (after parsing config files):"
        dist.dump_option_dicts()

    if _setup_stop_after == "config":
        return dist

    # Parse the command line; any command-line errors are the end user's
    # fault, so turn them into SystemExit to suppress tracebacks.
    try:
        ok = dist.parse_command_line()
    except DistutilsArgError, msg:
        raise SystemExit, gen_usage(dist.script_name) + "\nerror: %s" % msg

    if DEBUG:
        print "options (after parsing command line):"
        dist.dump_option_dicts()

    if _setup_stop_after == "commandline":
        return dist

    # And finally, run all the commands found on the command line.
    if ok:
        try:
            dist.run_commands()
        except KeyboardInterrupt:
            raise SystemExit, "interrupted"
        except (IOError, os.error), exc:
            error = grok_environment_error(exc)

            if DEBUG:
                sys.stderr.write(error + "\n")
                raise
            else:
                raise SystemExit, error

        except (DistutilsError,
                CCompilerError), msg:
            if DEBUG:
                raise
            else:
                raise SystemExit, "error: " + str(msg)

    return dist

# setup ()
def run_setup (script_name, script_args=None, stop_after="run"):
    """Run a setup script in a somewhat controlled environment, and
    return the Distribution instance that drives things. This is useful
    if you need to find out the distribution meta-data (passed as
    keyword args from 'script' to 'setup()', or the contents of the
    config files or command-line.

    'script_name' is a file that will be run with 'execfile()';
    'sys.argv[0]' will be replaced with 'script' for the duration of the
    call. 'script_args' is a list of strings; if supplied,
    'sys.argv[1:]' will be replaced by 'script_args' for the duration of
    the call.

    'stop_after' tells 'setup()' when to stop processing; possible
    values:
      init
        stop after the Distribution instance has been created and
        populated with the keyword arguments to 'setup()'
      config
        stop after config files have been parsed (and their data
        stored in the Distribution instance)
      commandline
        stop after the command-line ('sys.argv[1:]' or 'script_args')
        have been parsed (and the data stored in the Distribution)
      run [default]
        stop after all commands have been run (the same as if 'setup()'
        had been called in the usual way

    Returns the Distribution instance, which provides all information
    used to drive the Distutils.
    """
    if stop_after not in ('init', 'config', 'commandline', 'run'):
        raise ValueError, "invalid value for 'stop_after': %r" % (stop_after,)

    global _setup_stop_after, _setup_distribution
    _setup_stop_after = stop_after

    save_argv = sys.argv
    g = {'__file__': script_name}
    l = {}
    try:
        try:
            sys.argv[0] = script_name
            if script_args is not None:
                sys.argv[1:] = script_args
            # the executed script is expected to call setup(), which records
            # its Distribution in the module-global _setup_distribution
            exec open(script_name, 'r').read() in g, l
        finally:
            # always restore argv and reset the stop-after flag
            sys.argv = save_argv
            _setup_stop_after = None
    except SystemExit:
        # Hmm, should we do something if exiting with a non-zero code
        # (ie. error)?
        pass
    except:
        raise

    if _setup_distribution is None:
        raise RuntimeError, \
              ("'distutils.core.setup()' was never called -- "
               "perhaps '%s' is not a Distutils setup script?") % \
              script_name

    # I wonder if the setup script's namespace -- g and l -- would be of
    # any interest to callers?
    #print "_setup_distribution:", _setup_distribution
    return _setup_distribution

# run_setup ()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const { getEntryRuntime, mergeRuntimeOwned } = require("./util/runtime");
/** @typedef {import("./Compiler")} Compiler */
/** @typedef {import("./Module").FactoryMeta} FactoryMeta */
/** @typedef {import("./util/runtime").RuntimeSpec} RuntimeSpec */
const PLUGIN_NAME = "FlagAllModulesAsUsedPlugin";
class FlagAllModulesAsUsedPlugin {
	/**
	 * @param {string} explanation explanation
	 */
	constructor(explanation) {
		this.explanation = explanation;
	}

	/**
	 * Apply the plugin
	 * @param {Compiler} compiler the compiler instance
	 * @returns {void}
	 */
	apply(compiler) {
		compiler.hooks.compilation.tap(PLUGIN_NAME, (compilation) => {
			const { moduleGraph } = compilation;
			compilation.hooks.optimizeDependencies.tap(PLUGIN_NAME, (modules) => {
				// Merge the runtimes of every entry into a single runtime spec.
				/** @type {RuntimeSpec} */
				let combinedRuntime;
				for (const [entryName, entryData] of compilation.entries) {
					const entryRuntime = getEntryRuntime(
						compilation,
						entryName,
						entryData.options
					);
					combinedRuntime = mergeRuntimeOwned(combinedRuntime, entryRuntime);
				}
				// Mark every module as used in an unknown way and record why.
				for (const module of modules) {
					moduleGraph.getExportsInfo(module).setUsedInUnknownWay(combinedRuntime);
					moduleGraph.addExtraReason(module, this.explanation);
					if (module.factoryMeta === undefined) {
						module.factoryMeta = {};
					}
					/** @type {FactoryMeta} */
					(module.factoryMeta).sideEffectFree = false;
				}
			});
		});
	}
}
module.exports = FlagAllModulesAsUsedPlugin;
|
javascript
|
github
|
https://github.com/webpack/webpack
|
lib/FlagAllModulesAsUsedPlugin.js
|
// run
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test closures in if conditions.
package main
func main() {
	// Each form below is a function literal called immediately and used as
	// an if condition; all three parenthesizations must parse.
	if func() bool { return true }() {} // gc used to say this was a syntax error
	if (func() bool { return true })() {}
	if (func() bool { return true }()) {}
}
|
go
|
github
|
https://github.com/golang/go
|
test/func6.go
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the implementation for the DirectoryWatcher class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.impl import io_wrapper
class DirectoryWatcher(object):
"""A DirectoryWatcher wraps a loader to load from a sequence of paths.
A loader reads a path and produces some kind of values as an iterator. A
DirectoryWatcher takes a directory, a factory for loaders, and optionally a
path filter and watches all the paths inside that directory.
This class is only valid under the assumption that only one path will be
written to by the data source at a time and that once the source stops writing
to a path, it will start writing to a new path that's lexicographically
greater and never come back. It uses some heuristics to check whether this is
true based on tracking changes to the files' sizes, but the check can have
false negatives. However, it should have no false positives.
"""
  def __init__(self, directory, loader_factory, path_filter=lambda x: True):
    """Constructs a new DirectoryWatcher.

    Args:
      directory: The directory to load files from.
      loader_factory: A factory for creating loaders. The factory should take a
        path and return an object that has a Load method returning an
        iterator that will yield all events that have not been yielded yet.
      path_filter: If specified, only paths matching this filter are loaded.

    Raises:
      ValueError: If path_provider or loader_factory are None.
    """
    if directory is None:
      raise ValueError('A directory is required')
    if loader_factory is None:
      raise ValueError('A loader factory is required')
    self._directory = directory
    # path currently being read; None until the first load initializes it
    self._path = None
    self._loader_factory = loader_factory
    self._loader = None
    self._path_filter = path_filter
    # out-of-order-write flag; starts False (presumably set by the
    # size-tracking checks elsewhere in this class -- not visible here)
    self._ooo_writes_detected = False
    # The file size for each file at the time it was finalized.
    self._finalized_sizes = {}
def Load(self):
"""Loads new values.
The watcher will load from one path at a time; as soon as that path stops
yielding events, it will move on to the next path. We assume that old paths
are never modified after a newer path has been written. As a result, Load()
can be called multiple times in a row without losing events that have not
been yielded yet. In other words, we guarantee that every event will be
yielded exactly once.
Yields:
All values that have not been yielded yet.
Raises:
DirectoryDeletedError: If the directory has been permanently deleted
(as opposed to being temporarily unavailable).
"""
try:
for event in self._LoadInternal():
yield event
except errors.OpError:
if not gfile.Exists(self._directory):
raise DirectoryDeletedError(
'Directory %s has been permanently deleted' % self._directory)
def _LoadInternal(self):
"""Internal implementation of Load().
The only difference between this and Load() is that the latter will throw
DirectoryDeletedError on I/O errors if it thinks that the directory has been
permanently deleted.
Yields:
All values that have not been yielded yet.
"""
# If the loader exists, check it for a value.
if not self._loader:
self._InitializeLoader()
while True:
# Yield all the new events in the path we're currently loading from.
for event in self._loader.Load():
yield event
next_path = self._GetNextPath()
if not next_path:
logging.info('No path found after %s', self._path)
# Current path is empty and there are no new paths, so we're done.
return
# There's a new path, so check to make sure there weren't any events
# written between when we finished reading the current path and when we
# checked for the new one. The sequence of events might look something
# like this:
#
# 1. Event #1 written to path #1.
# 2. We check for events and yield event #1 from path #1
# 3. We check for events and see that there are no more events in path #1.
# 4. Event #2 is written to path #1.
# 5. Event #3 is written to path #2.
# 6. We check for a new path and see that path #2 exists.
#
# Without this loop, we would miss event #2. We're also guaranteed by the
# loader contract that no more events will be written to path #1 after
# events start being written to path #2, so we don't have to worry about
# that.
for event in self._loader.Load():
yield event
logging.info('Directory watcher advancing from %s to %s', self._path,
next_path)
# Advance to the next path and start over.
self._SetPath(next_path)
# The number of paths before the current one to check for out of order writes.
_OOO_WRITE_CHECK_COUNT = 20
def OutOfOrderWritesDetected(self):
"""Returns whether any out-of-order writes have been detected.
Out-of-order writes are only checked as part of the Load() iterator. Once an
out-of-order write is detected, this function will always return true.
Note that out-of-order write detection is not performed on GCS paths, so
this function will always return false.
Returns:
Whether any out-of-order write has ever been detected by this watcher.
"""
return self._ooo_writes_detected
def _InitializeLoader(self):
path = self._GetNextPath()
if path:
self._SetPath(path)
else:
raise StopIteration
def _SetPath(self, path):
"""Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch.
"""
old_path = self._path
if old_path and not io_wrapper.IsGCSPath(old_path):
try:
# We're done with the path, so store its size.
size = gfile.Stat(old_path).length
logging.debug('Setting latest size of %s to %d', old_path, size)
self._finalized_sizes[old_path] = size
except errors.OpError as e:
logging.error('Unable to get size of %s: %s', old_path, e)
self._path = path
self._loader = self._loader_factory(path)
def _GetNextPath(self):
"""Gets the next path to load from.
This function also does the checking for out-of-order writes as it iterates
through the paths.
Returns:
The next path to load events from, or None if there are no more paths.
"""
paths = sorted(path
for path in io_wrapper.ListDirectoryAbsolute(self._directory)
if self._path_filter(path))
if not paths:
return None
if self._path is None:
return paths[0]
# Don't bother checking if the paths are GCS (which we can't check) or if
# we've already detected an OOO write.
if not io_wrapper.IsGCSPath(paths[0]) and not self._ooo_writes_detected:
# Check the previous _OOO_WRITE_CHECK_COUNT paths for out of order writes.
current_path_index = bisect.bisect_left(paths, self._path)
ooo_check_start = max(0, current_path_index - self._OOO_WRITE_CHECK_COUNT)
for path in paths[ooo_check_start:current_path_index]:
if self._HasOOOWrite(path):
self._ooo_writes_detected = True
break
next_paths = list(path
for path in paths
if self._path is None or path > self._path)
if next_paths:
return min(next_paths)
else:
return None
def _HasOOOWrite(self, path):
"""Returns whether the path has had an out-of-order write."""
# Check the sizes of each path before the current one.
size = gfile.Stat(path).length
old_size = self._finalized_sizes.get(path, None)
if size != old_size:
if old_size is None:
logging.error('File %s created after file %s even though it\'s '
'lexicographically earlier', path, self._path)
else:
logging.error('File %s updated even though the current file is %s',
path, self._path)
return True
else:
return False
class DirectoryDeletedError(Exception):
  """Raised by Load() when the watched directory is *permanently* gone.

  Keeping this distinct from transient errors lets callers discard their
  accumulated data only when a directory was deliberately removed, rather
  than whenever the filesystem hiccups.
  """
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
# (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: DOCUMENTATION/EXAMPLES are YAML consumed by
# ansible-doc. Fixed here: "ommitted" -> "omitted", "can specified" ->
# "can be specified", and the schema key "note:" -> "notes:" (Ansible's
# documentation schema expects a "notes" list).
DOCUMENTATION = '''
---
module: patch
author:
    - "Jakub Jirutka (@jirutka)"
    - "Luis Alberto Perez Lazaro (@luisperlaz)"
version_added: 1.9
description:
    - Apply patch files using the GNU patch tool.
short_description: Apply patch files using the GNU patch tool.
options:
  basedir:
    description:
      - Path of a base directory in which the patch file will be applied.
        May be omitted when C(dest) option is specified, otherwise required.
    required: false
  dest:
    description:
      - Path of the file on the remote machine to be patched.
      - The names of the files to be patched are usually taken from the patch
        file, but if there's just one file to be patched it can be specified with
        this option.
    required: false
    aliases: [ "originalfile" ]
  src:
    description:
      - Path of the patch file as accepted by the GNU patch tool. If
        C(remote_src) is 'no', the patch source file is looked up from the
        module's "files" directory.
    required: true
    aliases: [ "patchfile" ]
  remote_src:
    description:
      - If C(no), it will search for src at originating/master machine, if C(yes) it will
        go to the remote/target machine for the src. Default is C(no).
    choices: [ "yes", "no" ]
    required: false
    default: "no"
  strip:
    description:
      - Number that indicates the smallest prefix containing leading slashes
        that will be stripped from each file name found in the patch file.
        For more information see the strip parameter of the GNU patch tool.
    required: false
    type: "int"
    default: "0"
  backup:
    version_added: "2.0"
    description:
      - passes --backup --version-control=numbered to patch,
        producing numbered backup copies
    choices: [ 'yes', 'no' ]
    default: 'no'
  binary:
    version_added: "2.0"
    description:
      - Setting to C(yes) will disable patch's heuristic for transforming CRLF
        line endings into LF. Line endings of src and dest must match. If set to
        C(no), patch will replace CRLF in src files on POSIX.
    required: false
    type: "bool"
    default: "no"
notes:
  - This module requires GNU I(patch) utility to be installed on the remote host.
'''
EXAMPLES = '''
- name: apply patch to one file
  patch: >
    src=/tmp/index.html.patch
    dest=/var/www/index.html
- name: apply patch to multiple files under basedir
  patch: >
    src=/tmp/customize.patch
    basedir=/var/www
    strip=1
'''
import os
from os import path, R_OK, W_OK
class PatchError(Exception):
    """Raised when the GNU patch tool exits with a non-zero status.

    The message carries patch's stderr (or stdout when stderr is empty).
    """
    pass
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0):
    """Return True when *patch_file* has already been applied under *basedir*.

    Runs GNU patch in reverse + dry-run mode via *patch_func*; a zero exit
    status means the patch reverses cleanly, i.e. it is already applied.
    """
    arguments = [
        '--quiet',
        '--reverse',
        '--forward',
        '--dry-run',
        "--strip=%s" % strip,
        "--directory='%s'" % basedir,
        "--input='%s'" % patch_file,
    ]
    if binary:
        arguments.append('--binary')
    if dest_file:
        arguments.append("'%s'" % dest_file)
    rc = patch_func(arguments)[0]
    return rc == 0
def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False):
    """Apply *patch_file* under *basedir* using the supplied *patch_func*.

    Args:
        patch_func: callable taking a list of patch options and returning an
            ``(rc, stdout, stderr)`` tuple (see ``main`` for the real runner).
        patch_file: path to the patch file given to ``--input``.
        basedir: directory passed to ``--directory``.
        dest_file: optional single target file to patch.
        binary: pass ``--binary`` to disable CRLF heuristics.
        strip: value for ``--strip``.
        dry_run: pass ``--dry-run`` (check mode).
        backup: create numbered backup copies of patched files.

    Raises:
        PatchError: when patch exits non-zero; message is stderr, or stdout
            when stderr is empty.
    """
    opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
            "--strip=%s" % strip, "--directory='%s'" % basedir,
            "--input='%s'" % patch_file]
    if dry_run:
        opts.append('--dry-run')
    if binary:
        opts.append('--binary')
    if dest_file:
        opts.append("'%s'" % dest_file)
    if backup:
        # Bug fix: these two flags used to be glued into a single list element
        # ('--backup --version-control=numbered'), which only worked because
        # the caller space-joins opts. Keep one flag per element; the joined
        # command line is identical.
        opts.append('--backup')
        opts.append('--version-control=numbered')
    (rc, out, err) = patch_func(opts)
    if rc != 0:
        msg = err or out
        raise PatchError(msg)
def main():
    """Entry point for the Ansible ``patch`` module.

    Validates parameters, locates the GNU patch binary, checks whether the
    patch is already applied (idempotence), and applies it otherwise,
    reporting ``changed`` back to Ansible.
    """
    module = AnsibleModule(
        argument_spec={
            'src': {'required': True, 'aliases': ['patchfile']},
            'dest': {'aliases': ['originalfile']},
            'basedir': {},
            'strip': {'default': 0, 'type': 'int'},
            'remote_src': {'default': False, 'type': 'bool'},
            # NB: for 'backup' parameter, semantics is slightly different from standard
            # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
            'backup': {'default': False, 'type': 'bool'},
            'binary': {'default': False, 'type': 'bool'},
        },
        required_one_of=[['dest', 'basedir']],
        supports_check_mode=True
    )
    # Create type object as namespace for module params
    p = type('Params', (), module.params)
    p.src = os.path.expanduser(p.src)
    if not os.access(p.src, R_OK):
        module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
    if p.dest and not os.access(p.dest, W_OK):
        module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
    if p.basedir and not path.exists(p.basedir):
        module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
    if not p.basedir:
        p.basedir = path.dirname(p.dest)
    patch_bin = module.get_bin_path('patch')
    if patch_bin is None:
        module.fail_json(msg="patch command not found")
    patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts)))
    # patch need an absolute file name
    p.src = os.path.abspath(p.src)
    changed = False
    if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
        try:
            apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
                         dry_run=module.check_mode, backup=p.backup )
            changed = True
        # Bug fix: 'except PatchError, e' is Python-2-only syntax; the
        # 'as' form below is valid on Python 2.6+ and Python 3.
        except PatchError as e:
            module.fail_json(msg=str(e))
    module.exit_json(changed=changed)
# import module snippets
# NOTE: Ansible replaces this wildcard import at module-build time with the
# embedded module_utils code; it also provides AnsibleModule used in main().
from ansible.module_utils.basic import *
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
namespace Illuminate\Foundation\Console;
use Illuminate\Console\GeneratorCommand;
use Symfony\Component\Console\Attribute\AsCommand;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
use function Laravel\Prompts\select;
#[AsCommand(name: 'make:enum')]
class EnumMakeCommand extends GeneratorCommand
{
    /**
     * The console command name.
     *
     * @var string
     */
    protected $name = 'make:enum';
    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Create a new enum';
    /**
     * The type of class being generated.
     *
     * @var string
     */
    protected $type = 'Enum';
    /**
     * Pick the stub file used to generate the enum.
     *
     * @return string
     */
    protected function getStub()
    {
        $backed = $this->option('string') || $this->option('int');

        return $this->resolveStubPath(
            $backed ? '/stubs/enum.backed.stub' : '/stubs/enum.stub'
        );
    }
    /**
     * Resolve the stub path, preferring an application-level override.
     *
     * @param  string  $stub
     * @return string
     */
    protected function resolveStubPath($stub)
    {
        $customPath = $this->laravel->basePath(trim($stub, '/'));

        if (file_exists($customPath)) {
            return $customPath;
        }

        return __DIR__.$stub;
    }
    /**
     * Determine the default namespace for generated enums.
     *
     * @param  string  $rootNamespace
     * @return string
     */
    protected function getDefaultNamespace($rootNamespace)
    {
        if (is_dir(app_path('Enums'))) {
            return $rootNamespace.'\\Enums';
        }

        if (is_dir(app_path('Enumerations'))) {
            return $rootNamespace.'\\Enumerations';
        }

        return $rootNamespace;
    }
    /**
     * Build the enum class, filling in the backing type when requested.
     *
     * @param  string  $name
     * @return string
     *
     * @throws \Illuminate\Contracts\Filesystem\FileNotFoundException
     */
    protected function buildClass($name)
    {
        $rendered = parent::buildClass($name);

        if (! $this->option('string') && ! $this->option('int')) {
            return $rendered;
        }

        return str_replace(
            ['{{ type }}'],
            $this->option('string') ? 'string' : 'int',
            $rendered
        );
    }
    /**
     * Ask the user for the enum flavor when no options were supplied.
     *
     * @param  \Symfony\Component\Console\Input\InputInterface  $input
     * @param  \Symfony\Component\Console\Output\OutputInterface  $output
     * @return void
     */
    protected function afterPromptingForMissingArguments(InputInterface $input, OutputInterface $output)
    {
        if ($this->didReceiveOptions($input)) {
            return;
        }

        $choice = select('Which type of enum would you like?', [
            'pure' => 'Pure enum',
            'string' => 'Backed enum (String)',
            'int' => 'Backed enum (Integer)',
        ]);

        if ($choice !== 'pure') {
            $input->setOption($choice, true);
        }
    }
    /**
     * Get the console command options.
     *
     * @return array
     */
    protected function getOptions()
    {
        return [
            ['string', 's', InputOption::VALUE_NONE, 'Generate a string backed enum.'],
            ['int', 'i', InputOption::VALUE_NONE, 'Generate an integer backed enum.'],
            ['force', 'f', InputOption::VALUE_NONE, 'Create the enum even if the enum already exists'],
        ];
    }
}
|
php
|
github
|
https://github.com/laravel/framework
|
src/Illuminate/Foundation/Console/EnumMakeCommand.php
|
import ast
from sys import version_info
from pyflakes import messages as m, checker
from pyflakes.test.harness import TestCase, skipIf, skip
class Test(TestCase):
def test_undefined(self):
self.flakes('bar', m.UndefinedName)
def test_definedInListComp(self):
self.flakes('[a for a in range(10) if a]')
@skipIf(version_info < (3,),
'in Python 2 list comprehensions execute in the same scope')
def test_undefinedInListComp(self):
self.flakes('''
[a for a in range(10)]
a
''',
m.UndefinedName)
@skipIf(version_info < (3,),
'in Python 2 exception names stay bound after the except: block')
def test_undefinedExceptionName(self):
"""Exception names can't be used after the except: block.
The exc variable is unused inside the exception handler."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
pass
exc
''', m.UndefinedName, m.UnusedVariable)
def test_namesDeclaredInExceptBlocks(self):
"""Locals declared in except: blocks can be used after the block.
This shows the example in test_undefinedExceptionName is
different."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
e = exc
e
''')
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringLocalVariable(self):
"""Exception names obscure locals, can't be used after.
Last line will raise UnboundLocalError on Python 3 after exiting
the except: block. Note next two examples for false positives to
watch out for."""
self.flakes('''
exc = 'Original value'
try:
raise ValueError('ve')
except ValueError as exc:
pass
exc
''',
m.UndefinedName)
@skipIf(version_info < (3,),
'in Python 2 exception names stay bound after the except: block')
def test_undefinedExceptionNameObscuringLocalVariable2(self):
"""Exception names are unbound after the `except:` block.
Last line will raise UnboundLocalError on Python 3 but would print out
've' on Python 2. The exc variable is unused inside the exception
handler."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
pass
print(exc)
exc = 'Original value'
''', m.UndefinedName, m.UnusedVariable)
def test_undefinedExceptionNameObscuringLocalVariableFalsePositive1(self):
"""Exception names obscure locals, can't be used after. Unless.
Last line will never raise UnboundLocalError because it's only
entered if no exception was raised."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
try:
raise ValueError('ve')
except ValueError as exc:
print('exception logged')
raise
exc
''', *expected)
def test_delExceptionInExcept(self):
"""The exception name can be deleted in the except: block."""
self.flakes('''
try:
pass
except Exception as exc:
del exc
''')
def test_undefinedExceptionNameObscuringLocalVariableFalsePositive2(self):
"""Exception names obscure locals, can't be used after. Unless.
Last line will never raise UnboundLocalError because `error` is
only falsy if the `except:` block has not been entered."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
error = None
try:
raise ValueError('ve')
except ValueError as exc:
error = 'exception logged'
if error:
print(error)
else:
exc
''', *expected)
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringGlobalVariable(self):
"""Exception names obscure globals, can't be used after.
Last line will raise UnboundLocalError on both Python 2 and
Python 3 because the existence of that exception name creates
a local scope placeholder for it, obscuring any globals, etc."""
self.flakes('''
exc = 'Original value'
def func():
try:
pass # nothing is raised
except ValueError as exc:
pass # block never entered, exc stays unbound
exc
''',
m.UndefinedLocal)
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringGlobalVariable2(self):
"""Exception names obscure globals, can't be used after.
Last line will raise NameError on Python 3 because the name is
locally unbound after the `except:` block, even if it's
nonlocal. We should issue an error in this case because code
only working correctly if an exception isn't raised, is invalid.
Unless it's explicitly silenced, see false positives below."""
self.flakes('''
exc = 'Original value'
def func():
global exc
try:
raise ValueError('ve')
except ValueError as exc:
pass # block never entered, exc stays unbound
exc
''',
m.UndefinedLocal)
def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive1(self):
"""Exception names obscure globals, can't be used after. Unless.
Last line will never raise NameError because it's only entered
if no exception was raised."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
def func():
global exc
try:
raise ValueError('ve')
except ValueError as exc:
print('exception logged')
raise
exc
''', *expected)
def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive2(self):
"""Exception names obscure globals, can't be used after. Unless.
Last line will never raise NameError because `error` is only
falsy if the `except:` block has not been entered."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
def func():
global exc
error = None
try:
raise ValueError('ve')
except ValueError as exc:
error = 'exception logged'
if error:
print(error)
else:
exc
''', *expected)
def test_functionsNeedGlobalScope(self):
self.flakes('''
class a:
def b():
fu
fu = 1
''')
def test_builtins(self):
self.flakes('range(10)')
def test_builtinWindowsError(self):
"""
C{WindowsError} is sometimes a builtin name, so no warning is emitted
for using it.
"""
self.flakes('WindowsError')
@skipIf(version_info < (3, 6), 'new feature in 3.6')
def test_moduleAnnotations(self):
"""
Use of the C{__annotations__} in module scope should not emit
an undefined name warning when version is greater than or equal to 3.6.
"""
self.flakes('__annotations__')
def test_magicGlobalsFile(self):
"""
Use of the C{__file__} magic global should not emit an undefined name
warning.
"""
self.flakes('__file__')
def test_magicGlobalsBuiltins(self):
"""
Use of the C{__builtins__} magic global should not emit an undefined
name warning.
"""
self.flakes('__builtins__')
def test_magicGlobalsName(self):
"""
Use of the C{__name__} magic global should not emit an undefined name
warning.
"""
self.flakes('__name__')
def test_magicGlobalsPath(self):
"""
Use of the C{__path__} magic global should not emit an undefined name
warning, if you refer to it from a file called __init__.py.
"""
self.flakes('__path__', m.UndefinedName)
self.flakes('__path__', filename='package/__init__.py')
def test_magicModuleInClassScope(self):
"""
Use of the C{__module__} magic builtin should not emit an undefined
name warning if used in class scope.
"""
self.flakes('__module__', m.UndefinedName)
self.flakes('''
class Foo:
__module__
''')
self.flakes('''
class Foo:
def bar(self):
__module__
''', m.UndefinedName)
def test_globalImportStar(self):
"""Can't find undefined names with import *."""
self.flakes('from fu import *; bar',
m.ImportStarUsed, m.ImportStarUsage)
@skipIf(version_info >= (3,), 'obsolete syntax')
def test_localImportStar(self):
"""
A local import * still allows undefined names to be found
in upper scopes.
"""
self.flakes('''
def a():
from fu import *
bar
''', m.ImportStarUsed, m.UndefinedName, m.UnusedImport)
@skipIf(version_info >= (3,), 'obsolete syntax')
def test_unpackedParameter(self):
"""Unpacked function parameters create bindings."""
self.flakes('''
def a((bar, baz)):
bar; baz
''')
def test_definedByGlobal(self):
"""
"global" can make an otherwise undefined name in another function
defined.
"""
self.flakes('''
def a(): global fu; fu = 1
def b(): fu
''')
self.flakes('''
def c(): bar
def b(): global bar; bar = 1
''')
def test_definedByGlobalMultipleNames(self):
"""
"global" can accept multiple names.
"""
self.flakes('''
def a(): global fu, bar; fu = 1; bar = 2
def b(): fu; bar
''')
def test_globalInGlobalScope(self):
"""
A global statement in the global scope is ignored.
"""
self.flakes('''
global x
def foo():
print(x)
''', m.UndefinedName)
def test_global_reset_name_only(self):
"""A global statement does not prevent other names being undefined."""
# Only different undefined names are reported.
# See following test that fails where the same name is used.
self.flakes('''
def f1():
s
def f2():
global m
''', m.UndefinedName)
@skip("todo")
def test_unused_global(self):
"""An unused global statement does not define the name."""
self.flakes('''
def f1():
m
def f2():
global m
''', m.UndefinedName)
def test_del(self):
"""Del deletes bindings."""
self.flakes('a = 1; del a; a', m.UndefinedName)
def test_delGlobal(self):
"""Del a global binding from a function."""
self.flakes('''
a = 1
def f():
global a
del a
a
''')
def test_delUndefined(self):
"""Del an undefined name."""
self.flakes('del a', m.UndefinedName)
def test_delConditional(self):
"""
Ignores conditional bindings deletion.
"""
self.flakes('''
context = None
test = True
if False:
del(test)
assert(test)
''')
def test_delConditionalNested(self):
"""
Ignored conditional bindings deletion even if they are nested in other
blocks.
"""
self.flakes('''
context = None
test = True
if False:
with context():
del(test)
assert(test)
''')
def test_delWhile(self):
"""
Ignore bindings deletion if called inside the body of a while
statement.
"""
self.flakes('''
def test():
foo = 'bar'
while False:
del foo
assert(foo)
''')
def test_delWhileTestUsage(self):
"""
Ignore bindings deletion if called inside the body of a while
statement and name is used inside while's test part.
"""
self.flakes('''
def _worker():
o = True
while o is not True:
del o
o = False
''')
def test_delWhileNested(self):
"""
Ignore bindings deletions if node is part of while's test, even when
del is in a nested block.
"""
self.flakes('''
context = None
def _worker():
o = True
while o is not True:
while True:
with context():
del o
o = False
''')
def test_globalFromNestedScope(self):
"""Global names are available from nested scopes."""
self.flakes('''
a = 1
def b():
def c():
a
''')
def test_laterRedefinedGlobalFromNestedScope(self):
"""
Test that referencing a local name that shadows a global, before it is
defined, generates a warning.
"""
self.flakes('''
a = 1
def fun():
a
a = 2
return a
''', m.UndefinedLocal)
def test_laterRedefinedGlobalFromNestedScope2(self):
"""
Test that referencing a local name in a nested scope that shadows a
global declared in an enclosing scope, before it is defined, generates
a warning.
"""
self.flakes('''
a = 1
def fun():
global a
def fun2():
a
a = 2
return a
''', m.UndefinedLocal)
def test_intermediateClassScopeIgnored(self):
"""
If a name defined in an enclosing scope is shadowed by a local variable
and the name is used locally before it is bound, an unbound local
warning is emitted, even if there is a class scope between the enclosing
scope and the local scope.
"""
self.flakes('''
def f():
x = 1
class g:
def h(self):
a = x
x = None
print(x, a)
print(x)
''', m.UndefinedLocal)
def test_doubleNestingReportsClosestName(self):
"""
Test that referencing a local name in a nested scope that shadows a
variable declared in two different outer scopes before it is defined
in the innermost scope generates an UnboundLocal warning which
refers to the nearest shadowed name.
"""
exc = self.flakes('''
def a():
x = 1
def b():
x = 2 # line 5
def c():
x
x = 3
return x
return x
return x
''', m.UndefinedLocal).messages[0]
# _DoctestMixin.flakes adds two lines preceding the code above.
expected_line_num = 7 if self.withDoctest else 5
self.assertEqual(exc.message_args, ('x', expected_line_num))
def test_laterRedefinedGlobalFromNestedScope3(self):
"""
Test that referencing a local name in a nested scope that shadows a
global, before it is defined, generates a warning.
"""
self.flakes('''
def fun():
a = 1
def fun2():
a
a = 1
return a
return a
''', m.UndefinedLocal)
def test_undefinedAugmentedAssignment(self):
self.flakes(
'''
def f(seq):
a = 0
seq[a] += 1
seq[b] /= 2
c[0] *= 2
a -= 3
d += 4
e[any] = 5
''',
m.UndefinedName, # b
m.UndefinedName, # c
m.UndefinedName, m.UnusedVariable, # d
m.UndefinedName, # e
)
def test_nestedClass(self):
"""Nested classes can access enclosing scope."""
self.flakes('''
def f(foo):
class C:
bar = foo
def f(self):
return foo
return C()
f(123).f()
''')
def test_badNestedClass(self):
"""Free variables in nested classes must bind at class creation."""
self.flakes('''
def f():
class C:
bar = foo
foo = 456
return foo
f()
''', m.UndefinedName)
def test_definedAsStarArgs(self):
"""Star and double-star arg names are defined."""
self.flakes('''
def f(a, *b, **c):
print(a, b, c)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_definedAsStarUnpack(self):
"""Star names in unpack are defined."""
self.flakes('''
a, *b = range(10)
print(a, b)
''')
self.flakes('''
*a, b = range(10)
print(a, b)
''')
self.flakes('''
a, *b, c = range(10)
print(a, b, c)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_usedAsStarUnpack(self):
"""
Star names in unpack are used if RHS is not a tuple/list literal.
"""
self.flakes('''
def f():
a, *b = range(10)
''')
self.flakes('''
def f():
(*a, b) = range(10)
''')
self.flakes('''
def f():
[a, *b, c] = range(10)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_unusedAsStarUnpack(self):
"""
Star names in unpack are unused if RHS is a tuple/list literal.
"""
self.flakes('''
def f():
a, *b = any, all, 4, 2, 'un'
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
(*a, b) = [bool, int, float, complex]
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
[a, *b, c] = 9, 8, 7, 6, 5, 4
''', m.UnusedVariable, m.UnusedVariable, m.UnusedVariable)
@skipIf(version_info < (3,), 'new in Python 3')
def test_keywordOnlyArgs(self):
"""Keyword-only arg names are defined."""
self.flakes('''
def f(*, a, b=None):
print(a, b)
''')
self.flakes('''
import default_b
def f(*, a, b=default_b):
print(a, b)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_keywordOnlyArgsUndefined(self):
"""Typo in kwonly name."""
self.flakes('''
def f(*, a, b=default_c):
print(a, b)
''', m.UndefinedName)
@skipIf(version_info < (3,), 'new in Python 3')
def test_annotationUndefined(self):
"""Undefined annotations."""
self.flakes('''
from abc import note1, note2, note3, note4, note5
def func(a: note1, *args: note2,
b: note3=12, **kw: note4) -> note5: pass
''')
self.flakes('''
def func():
d = e = 42
def func(a: {1, d}) -> (lambda c: e): pass
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_metaClassUndefined(self):
self.flakes('''
from abc import ABCMeta
class A(metaclass=ABCMeta): pass
''')
def test_definedInGenExp(self):
"""
Using the loop variable of a generator expression results in no
warnings.
"""
self.flakes('(a for a in [1, 2, 3] if a)')
self.flakes('(b for b in (a for a in [1, 2, 3] if a) if b)')
def test_undefinedInGenExpNested(self):
"""
The loop variables of generator expressions nested together are
not defined in the other generator.
"""
self.flakes('(b for b in (a for a in [1, 2, 3] if b) if b)',
m.UndefinedName)
self.flakes('(b for b in (a for a in [1, 2, 3] if a) if a)',
m.UndefinedName)
def test_undefinedWithErrorHandler(self):
"""
Some compatibility code checks explicitly for NameError.
It should not trigger warnings.
"""
self.flakes('''
try:
socket_map
except NameError:
socket_map = {}
''')
self.flakes('''
try:
_memoryview.contiguous
except (NameError, AttributeError):
raise RuntimeError("Python >= 3.3 is required")
''')
# If NameError is not explicitly handled, generate a warning
self.flakes('''
try:
socket_map
except:
socket_map = {}
''', m.UndefinedName)
self.flakes('''
try:
socket_map
except Exception:
socket_map = {}
''', m.UndefinedName)
def test_definedInClass(self):
"""
Defined name for generator expressions and dict/set comprehension.
"""
self.flakes('''
class A:
T = range(10)
Z = (x for x in T)
L = [x for x in T]
B = dict((i, str(i)) for i in T)
''')
self.flakes('''
class A:
T = range(10)
X = {x for x in T}
Y = {x:x for x in T}
''')
def test_definedInClassNested(self):
"""Defined name for nested generator expressions in a class."""
self.flakes('''
class A:
T = range(10)
Z = (x for x in (a for a in T))
''')
def test_undefinedInLoop(self):
"""
The loop variable is defined after the expression is computed.
"""
self.flakes('''
for i in range(i):
print(i)
''', m.UndefinedName)
self.flakes('''
[42 for i in range(i)]
''', m.UndefinedName)
self.flakes('''
(42 for i in range(i))
''', m.UndefinedName)
    def test_definedFromLambdaInDictionaryComprehension(self):
        """
        Defined name referenced from a lambda function within a dict/set
        comprehension.
        """
        # `x` is the comprehension variable, so the lambda closes over it.
        self.flakes('''
        {lambda: id(x) for x in range(10)}
        ''')
    def test_definedFromLambdaInGenerator(self):
        """
        Defined name referenced from a lambda function within a generator
        expression.
        """
        self.flakes('''
        any(lambda: id(x) for x in range(10))
        ''')
    def test_undefinedFromLambdaInDictionaryComprehension(self):
        """
        Undefined name referenced from a lambda function within a dict/set
        comprehension.
        """
        # `y` is bound nowhere, so a warning is expected.
        self.flakes('''
        {lambda: id(y) for x in range(10)}
        ''', m.UndefinedName)
    def test_undefinedFromLambdaInComprehension(self):
        """
        Undefined name referenced from a lambda function within a generator
        expression.
        """
        self.flakes('''
        any(lambda: id(y) for x in range(10))
        ''', m.UndefinedName)
    def test_dunderClass(self):
        """
        `__class__` is defined in class scope under Python 3, but is not
        in Python 2.
        """
        code = '''
        class Test(object):
            def __init__(self):
                print(__class__.__name__)
                self.x = 1
        t = Test()
        '''
        # The same snippet is a warning on Python 2 and clean on Python 3.
        if version_info < (3,):
            self.flakes(code, m.UndefinedName)
        else:
            self.flakes(code)
class NameTests(TestCase):
    """
    Tests for some extra cases of name handling.
    """
    def test_impossibleContext(self):
        """
        A Name node with an unrecognized context results in a RuntimeError being
        raised.
        """
        syntax_tree = ast.parse("x = 10")
        tokens = checker.make_tokens("x = 10")
        # Replace the normal Load/Store context with a plain object that the
        # checker cannot classify.
        syntax_tree.body[0].targets[0].ctx = object()
        self.assertRaises(
            RuntimeError, checker.Checker, syntax_tree, file_tokens=tokens)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage.junitrunner;
import static org.junit.Assert.*;
import static org.mockito.Mockito.verify;
import static org.mockitousage.junitrunner.Filters.methodNameContains;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class JUnit45RunnerTest {
    // The runner should inject the @Mock list into this target.
    @InjectMocks private ListDependent listDependent = new ListDependent();
    @Mock private List<String> list;
    @Test
    public void shouldInitMocksUsingRunner() {
        // If the runner initialized the mock, verify() succeeds.
        list.add("test");
        verify(list).add("test");
    }
    @Test
    public void shouldInjectMocksUsingRunner() {
        // @InjectMocks must have wired the same mock into listDependent.
        assertNotNull(list);
        assertSame(list, listDependent.getList());
    }
    @Test
    public void shouldFilterTestMethodsCorrectly() throws Exception {
        // Filtering by method name should leave exactly one runnable test.
        MockitoJUnitRunner runner = new MockitoJUnitRunner(this.getClass());
        runner.filter(methodNameContains("shouldInitMocksUsingRunner"));
        assertEquals(1, runner.testCount());
    }
    // Simple holder class used as the injection target.
    class ListDependent {
        private List<?> list;
        public List<?> getList() {
            return list;
        }
    }
}
|
java
|
github
|
https://github.com/mockito/mockito
|
mockito-core/src/test/java/org/mockitousage/junitrunner/JUnit45RunnerTest.java
|
# -*- coding: utf-8 -*-
# Copyright © 2014–2015, Chris Warrick.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import io
import os
import nikola.post
from nikola.plugin_categories import Command
from nikola import utils
class UpgradeMetadata(Command):
    """Upgrade metadata from the old no-descriptions format to the new reST-esque format."""
    name = 'upgrade_metadata'
    doc_purpose = 'upgrade old-style metadata'
    cmd_options = [
        {
            'name': 'yes',
            'short': 'y',
            'long': 'yes',
            'type': bool,
            'default': False,
            'help': 'Proceed without confirmation',
        },
    ]
    # Order matters: old-style metadata files hold one value per line, in
    # exactly this positional order.
    fields = ('title', 'slug', 'date', 'tags', 'link', 'description', 'type')
    def _execute(self, options, args):
        """Find posts with old-style metadata and rewrite their meta files in place."""
        L = utils.get_logger('upgrade_metadata', utils.STDERR_HANDLER)
        nikola.post._UPGRADE_METADATA_ADVERTISED = True
        # scan posts
        self.site.scan_posts()
        flagged = []
        for post in self.site.timeline:
            if not post.newstylemeta:
                flagged.append(post)
        if flagged:
            if len(flagged) == 1:
                L.info('1 post (and/or its translations) contains old-style metadata:')
            else:
                L.info('{0} posts (and/or their translations) contain old-style metadata:'.format(len(flagged)))
            for post in flagged:
                L.info('    ' + post.metadata_path)
            if not options['yes']:
                yesno = utils.ask_yesno("Proceed with metadata upgrade?")
            # NOTE: `yesno` is only bound when confirmation was requested;
            # the short-circuit below keeps this safe when --yes is given.
            if options['yes'] or yesno:
                for post in flagged:
                    for lang in self.site.config['TRANSLATIONS'].keys():
                        if lang == post.default_lang:
                            fname = post.metadata_path
                        else:
                            # Metadata file of a translation of this post.
                            meta_path = os.path.splitext(post.source_path)[0] + '.meta'
                            fname = utils.get_translation_candidate(post.config, meta_path, lang)
                        if os.path.exists(fname):
                            with io.open(fname, 'r', encoding='utf-8') as fh:
                                meta = fh.readlines()
                            if not meta[min(1, len(meta) - 1)].startswith('.. '):
                                # check if we’re dealing with old style metadata
                                with io.open(fname, 'w', encoding='utf-8') as fh:
                                    for k, v in zip(self.fields, meta):
                                        fh.write('.. {0}: {1}'.format(k, v))
                                L.debug(fname)
                L.info('{0} posts upgraded.'.format(len(flagged)))
            else:
                L.info('Metadata not upgraded.')
        else:
            L.info('No old-style metadata posts found. No action is required.')
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* slru_io.c
*
* Routines for reading and writing SLRU files during upgrade.
*
* Copyright (c) 2025-2026, PostgreSQL Global Development Group
* src/bin/pg_upgrade/slru_io.c
*/
#include "postgres_fe.h"
#include <fcntl.h>
#include "common/fe_memutils.h"
#include "common/file_perm.h"
#include "common/file_utils.h"
#include "pg_upgrade.h"
#include "port/pg_iovec.h"
#include "slru_io.h"
static SlruSegState *AllocSlruSegState(const char *dir);
static char *SlruFileName(SlruSegState *state, int64 segno);
static void SlruFlush(SlruSegState *state);
/* common parts of AllocSlruRead and AllocSlruWrite */
static SlruSegState *
AllocSlruSegState(const char *dir)
{
	SlruSegState *state = pg_malloc(sizeof(*state));
	state->dir = pstrdup(dir);
	state->fn = NULL;
	/* fd/segno of -1 mean "no segment file open yet" */
	state->fd = -1;
	state->segno = -1;
	state->pageno = 0;
	/* state->writing and state->long_segment_names must be set by caller! */
	return state;
}
/*
 * Build the path of the segment file for 'segno', mirroring the backend
 * function of the same name.  The caller owns the returned string.
 */
static char *
SlruFileName(SlruSegState *state, int64 segno)
{
	if (!state->long_segment_names)
	{
		/* legacy short names: four upper-case hex digits */
		Assert(segno >= 0 && segno <= INT64CONST(0xFFFFFF));
		return psprintf("%s/%04X", state->dir, (unsigned int) segno);
	}

	/* long names: fifteen upper-case hex digits */
	Assert(segno >= 0 && segno <= INT64CONST(0xFFFFFFFFFFFFFFF));
	return psprintf("%s/%015" PRIX64, state->dir, segno);
}
/*
 * Create SLRU reader for dir.
 *
 * No file is opened here; the first SlruReadSwitchPage call opens the
 * segment lazily.
 */
SlruSegState *
AllocSlruRead(const char *dir, bool long_segment_names)
{
	SlruSegState *state = AllocSlruSegState(dir);
	state->writing = false;
	state->long_segment_names = long_segment_names;
	return state;
}
/*
 * Read the given page into memory buffer.
 *
 * Reading can be done in random order.
 *
 * If the file containing 'pageno' does not exist, a fatal error is raised.
 * If the file exists but is shorter than expected, the missing part is read
 * as zeros and a warning is logged. That is reasonable behavior for current
 * callers.
 *
 * This is the slow path of the inlineable SlruReadSwitchPage() function.
 */
char *
SlruReadSwitchPageSlow(SlruSegState *state, uint64 pageno)
{
	int64		segno;
	off_t		offset;
	ssize_t		bytes_read;
	Assert(!state->writing);	/* read only mode */
	/* Requested page already buffered? */
	if (state->segno != -1 && pageno == state->pageno)
		return state->buf.data;
	/* If the new page is on a different SLRU segment, open the new segment */
	segno = pageno / SLRU_PAGES_PER_SEGMENT;
	if (segno != state->segno)
	{
		if (state->segno != -1)
		{
			/* close and forget the previously open segment */
			close(state->fd);
			state->fd = -1;
			pg_free(state->fn);
			state->fn = NULL;
			state->segno = -1;
		}
		state->fn = SlruFileName(state, segno);
		if ((state->fd = open(state->fn, O_RDONLY | PG_BINARY, 0)) < 0)
			pg_fatal("could not open file \"%s\": %m", state->fn);
		state->segno = segno;
	}
	/* byte offset of the page within the segment file */
	offset = (pageno % SLRU_PAGES_PER_SEGMENT) * BLCKSZ;
	bytes_read = 0;
	while (bytes_read < BLCKSZ)
	{
		ssize_t		rc;
		rc = pg_pread(state->fd,
					  &state->buf.data[bytes_read],
					  BLCKSZ - bytes_read,
					  offset);
		if (rc < 0)
		{
			/* retry on signal interruption; anything else is fatal */
			if (errno == EINTR)
				continue;
			pg_fatal("could not read file \"%s\": %m", state->fn);
		}
		if (rc == 0)
		{
			/* unexpected EOF: treat the missing tail as zeros */
			pg_log(PG_WARNING, "unexpected EOF reading file \"%s\" at offset %u, reading as zeros",
				   state->fn, (unsigned int) offset);
			memset(&state->buf.data[bytes_read], 0, BLCKSZ - bytes_read);
			break;
		}
		bytes_read += rc;
		offset += rc;
	}
	state->pageno = pageno;
	return state->buf.data;
}
/*
 * Free the reader and all resources it holds.
 */
void
FreeSlruRead(SlruSegState *state)
{
	Assert(!state->writing);	/* read only mode */
	if (state->fd != -1)
		close(state->fd);
	/*
	 * Also release the strings allocated by AllocSlruSegState() and
	 * SlruFileName(); previously these were leaked.  free()/pg_free()
	 * accept NULL, so this is safe even when no segment was ever opened.
	 */
	pg_free(state->fn);
	pg_free(state->dir);
	pg_free(state);
}
/*
 * Create SLRU writer for dir.
 *
 * No file is created here; the first SlruWriteSwitchPage call creates the
 * segment lazily.
 */
SlruSegState *
AllocSlruWrite(const char *dir, bool long_segment_names)
{
	SlruSegState *state = AllocSlruSegState(dir);
	state->writing = true;
	state->long_segment_names = long_segment_names;
	return state;
}
/*
 * Open the given page for writing.
 *
 * NOTE: This uses O_EXCL when stepping to a new segment, so this assumes that
 * each segment is written in full before moving on to the next one. This
 * limitation would be easy to lift if needed, but it fits the usage pattern
 * of current callers.
 *
 * This is the slow path of the inlineable SlruWriteSwitchPage() function.
 */
char *
SlruWriteSwitchPageSlow(SlruSegState *state, uint64 pageno)
{
	int64		segno;
	off_t		offset;
	Assert(state->writing);
	/* Requested page already buffered? */
	if (state->segno != -1 && pageno == state->pageno)
		return state->buf.data;
	segno = pageno / SLRU_PAGES_PER_SEGMENT;
	offset = (pageno % SLRU_PAGES_PER_SEGMENT) * BLCKSZ;
	/* Flush the previously buffered page before switching away from it. */
	SlruFlush(state);
	memset(state->buf.data, 0, BLCKSZ);
	if (segno != state->segno)
	{
		if (state->segno != -1)
		{
			/* close and forget the previously open segment */
			close(state->fd);
			state->fd = -1;
			pg_free(state->fn);
			state->fn = NULL;
			state->segno = -1;
		}
		/* Create the segment */
		state->fn = SlruFileName(state, segno);
		if ((state->fd = open(state->fn, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
							  pg_file_create_mode)) < 0)
		{
			pg_fatal("could not create file \"%s\": %m", state->fn);
		}
		state->segno = segno;
		/* Zero-fill any pages preceding the first one written. */
		if (offset > 0)
		{
			if (pg_pwrite_zeros(state->fd, offset, 0) < 0)
				pg_fatal("could not write file \"%s\": %m", state->fn);
		}
	}
	state->pageno = pageno;
	return state->buf.data;
}
/*
 * Write out the currently buffered page, if a segment is open.
 */
static void
SlruFlush(SlruSegState *state)
{
	struct iovec iovec = {
		.iov_base = &state->buf,
		.iov_len = BLCKSZ,
	};
	off_t		offset;
	/* nothing buffered yet */
	if (state->segno == -1)
		return;
	offset = (state->pageno % SLRU_PAGES_PER_SEGMENT) * BLCKSZ;
	if (pg_pwritev_with_retry(state->fd, &iovec, 1, offset) < 0)
		pg_fatal("could not write file \"%s\": %m", state->fn);
}
/*
 * Flush pending output, then free the writer and all resources it holds.
 */
void
FreeSlruWrite(SlruSegState *state)
{
	Assert(state->writing);
	SlruFlush(state);
	if (state->fd != -1)
		close(state->fd);
	/*
	 * Also release the strings allocated by AllocSlruSegState() and
	 * SlruFileName(); previously these were leaked.  free()/pg_free()
	 * accept NULL, so this is safe even when no segment was ever created.
	 */
	pg_free(state->fn);
	pg_free(state->dir);
	pg_free(state);
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/bin/pg_upgrade/slru_io.c
|
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemRetrieveEvent"]
class ConversationItemRetrieveEvent(BaseModel):
    """
    Send this event when you want to retrieve the server's representation of a
    specific item in the conversation history. This is useful, for example, to
    inspect user audio after noise cancellation and VAD.

    The server will respond with a `conversation.item.retrieved` event,
    unless the item does not exist in the conversation history, in which case the
    server will respond with an error.
    """
    item_id: str
    """The ID of the item to retrieve."""
    type: Literal["conversation.item.retrieve"]
    """The event type, must be `conversation.item.retrieve`."""
    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/realtime/conversation_item_retrieve_event.py
|
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_DTENSOR_CC_DSTATUS_H_
#define TENSORFLOW_DTENSOR_CC_DSTATUS_H_
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "xla/tsl/platform/statusor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace dtensor {
template <typename T>
using StatusOr = tsl::StatusOr<T>;
// Annotate a non-OK absl::Status with the call site's file/line (and an
// optional context string); OK statuses pass through unchanged.
inline absl::Status WithContext(const absl::Status& ds, absl::string_view file,
                                int line_number,
                                absl::string_view context = "") {
  if (ds.ok()) {
    return ds;
  }
  // Preserve the error code; append "file:line :: context" to the message.
  return absl::Status(ds.code(), absl::StrCat(ds.message(), "\n", file, ":",
                                              line_number, " :: ", context));
}
// StatusOr overload: forward an OK StatusOr unchanged, otherwise return its
// status annotated with file/line (and optional context).  Note the value of
// a non-OK StatusOr is discarded — only the status survives.
template <class T>
inline StatusOr<T> WithContext(StatusOr<T>&& ds, absl::string_view file,
                               int line_number,
                               absl::string_view context = "") {
  if (ds.ok()) {
    return ds;
  }
  return absl::Status(ds.status().code(),
                      absl::StrCat(ds.status().message(), "\n", file, ":",
                                   line_number, " :: ", context));
}
// Annotate `dstatus` with the current file/line and a stringified context.
#define DT_CTX(dstatus, ...) \
  ::tensorflow::dtensor::WithContext(dstatus, __FILE__, __LINE__, #__VA_ARGS__);
// Redefine TF_RETURN_IF_ERROR so early returns carry DTensor file/line
// context via WithContext().
#undef TF_RETURN_IF_ERROR
#define TF_RETURN_IF_ERROR(...) \
  do { \
    ::tensorflow::Status _status = (__VA_ARGS__); \
    if (!_status.ok()) { \
      return ::tensorflow::dtensor::WithContext(_status, __FILE__, __LINE__); \
    } \
  } while (0);
// Like TF_RETURN_IF_ERROR, but lets the caller append extra context args.
#undef TF_RETURN_WITH_CONTEXT
#define TF_RETURN_WITH_CONTEXT(status, ...) \
  do { \
    ::tensorflow::Status _status = (status); \
    if (!_status.ok()) { \
      return ::tensorflow::dtensor::WithContext(_status, __FILE__, __LINE__, \
                                                ##__VA_ARGS__); \
    } \
  } while (0);
// Token-pasting helpers to build a unique temporary name per expansion.
#define DT_STATUS_MACROS_CONCAT_NAME(x, y) DT_STATUS_MACROS_CONCAT_IMPL(x, y)
#define DT_STATUS_MACROS_CONCAT_IMPL(x, y) x##y
#define DT_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr, ...) \
  auto statusor = (rexpr); \
  if (!statusor.ok()) { \
    return ::tensorflow::dtensor::WithContext(statusor.status(), __FILE__, \
                                              __LINE__, ##__VA_ARGS__); \
  } \
  lhs = std::move(statusor.value())
// Context-aware replacement for TF_ASSIGN_OR_RETURN; __COUNTER__ keeps the
// temporary StatusOr name unique within a translation unit.
#undef TF_ASSIGN_OR_RETURN
#define TF_ASSIGN_OR_RETURN(lhs, rexpr, ...) \
  DT_ASSIGN_OR_RETURN_IMPL( \
      DT_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr, \
      ##__VA_ARGS__)
// Undefine TF status macros to ensure users use the context macros instead
} // namespace dtensor
} // namespace tensorflow
#endif // TENSORFLOW_DTENSOR_CC_DSTATUS_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/dtensor/cc/dstatus.h
|
# (c) 2016, James Tanner
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import subprocess
from ansible.module_utils.common.text.converters import to_bytes
from ansible.utils.display import Display
display = Display()
_HAS_CONTROLPERSIST = {} # type: dict[str, bool]
def check_for_controlpersist(ssh_executable):
    """Return True if *ssh_executable* understands the ControlPersist option.

    The probe runs the binary once with ``-o ControlPersist`` and inspects
    stderr; the verdict is cached per executable path in _HAS_CONTROLPERSIST
    so each binary is only probed once.
    """
    if ssh_executable in _HAS_CONTROLPERSIST:
        # Already probed this executable — reuse the cached verdict.
        return _HAS_CONTROLPERSIST[ssh_executable]

    b_ssh_exec = to_bytes(ssh_executable, errors='surrogate_or_strict')
    has_cp = True
    try:
        proc = subprocess.Popen([b_ssh_exec, '-o', 'ControlPersist'],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (out, err) = proc.communicate()
        # Old ssh builds complain about the unknown option; a bare usage
        # message also signals the option is unsupported.
        if b"Bad configuration option" in err or b"Usage:" in err:
            has_cp = False
    except OSError:
        has_cp = False

    _HAS_CONTROLPERSIST[ssh_executable] = has_cp
    return has_cp
|
python
|
github
|
https://github.com/ansible/ansible
|
lib/ansible/utils/ssh_functions.py
|
from test import support
from test.support import bigmemtest, _4G
import array
import unittest
import io
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import random
import shutil
import subprocess
import threading
from test.support import import_helper
from test.support import threading_helper
from test.support.os_helper import unlink, FakePath
from compression._common import _streams
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = import_helper.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None

def ext_decompress(data):
    """Decompress *data* with the external bunzip2 tool when available,
    falling back to the bz2 module otherwise."""
    global has_cmdline_bunzip2
    # Probe for the command-line tool only once and cache the answer.
    if has_cmdline_bunzip2 is None:
        has_cmdline_bunzip2 = shutil.which('bunzip2') is not None
    if not has_cmdline_bunzip2:
        return bz2.decompress(data)
    return subprocess.check_output(['bunzip2'], input=data)
class BaseTest(unittest.TestCase):
    "Base for other testcases."
    # Lines of a small /etc/passwd-style fixture; TEXT is their concatenation.
    TEXT_LINES = [
        b'root:x:0:0:root:/root:/bin/bash\n',
        b'bin:x:1:1:bin:/bin:\n',
        b'daemon:x:2:2:daemon:/sbin:\n',
        b'adm:x:3:4:adm:/var/adm:\n',
        b'lp:x:4:7:lp:/var/spool/lpd:\n',
        b'sync:x:5:0:sync:/sbin:/bin/sync\n',
        b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
        b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
        b'mail:x:8:12:mail:/var/spool/mail:\n',
        b'news:x:9:13:news:/var/spool/news:\n',
        b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
        b'operator:x:11:0:operator:/root:\n',
        b'games:x:12:100:games:/usr/games:\n',
        b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
        b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
        b'nobody:x:65534:65534:Nobody:/home:\n',
        b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
        b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
        b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
        b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
        b'www:x:103:104::/var/www:/bin/false\n',
    ]
    TEXT = b''.join(TEXT_LINES)
    # DATA: a pre-built bzip2 stream that the read tests decompress and
    # compare against TEXT.
    DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
    # EMPTY_DATA: presumably a bzip2 stream of empty input — confirm against
    # the tests that use it.  BAD_DATA is deliberately not bzip2 at all.
    EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
    BAD_DATA = b'this is not a valid bzip2 file'
    # Some tests need more than one block of uncompressed data. Since one block
    # is at least 100,000 bytes, we gather some data dynamically and compress it.
    # Note that this assumes that compression works correctly, so we cannot
    # simply use the bigger test data for all tests.
    test_size = 0
    BIG_TEXT = bytearray(128*1024)
    for fname in glob.glob(os.path.join(glob.escape(os.path.dirname(__file__)), '*.py')):
        with open(fname, 'rb') as fh:
            test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
        if test_size > 128*1024:
            break
    BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
    def setUp(self):
        # Create a scratch file path; individual tests reopen it as needed.
        fd, self.filename = tempfile.mkstemp()
        os.close(fd)
    def tearDown(self):
        unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
    def createTempFile(self, streams=1, suffix=b""):
        """Write `streams` copies of DATA, then `suffix`, to self.filename."""
        with open(self.filename, "wb") as f:
            f.write(self.DATA * streams)
            f.write(suffix)
    def testBadArgs(self):
        """The constructor must reject bad filename types, modes and levels."""
        self.assertRaises(TypeError, BZ2File, 123.456)
        self.assertRaises(ValueError, BZ2File, os.devnull, "z")
        self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
        self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
        self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
        self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
        # compresslevel is keyword-only
        self.assertRaises(TypeError, BZ2File, os.devnull, "r", 3)
    def testRead(self):
        """Whole-file read() must return the original plaintext."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(), self.TEXT)
    def testReadBadFile(self):
        """read() on non-bzip2 data must raise OSError."""
        self.createTempFile(streams=0, suffix=self.BAD_DATA)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(OSError, bz2f.read)
    def testReadMultiStream(self):
        """read() must concatenate all streams of a multi-stream archive."""
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(), self.TEXT * 5)
    def testReadMonkeyMultiStream(self):
        # Test BZ2File.read() on a multi-stream archive where a stream
        # boundary coincides with the end of the raw read buffer.
        buffer_size = _streams.BUFFER_SIZE
        _streams.BUFFER_SIZE = len(self.DATA)
        try:
            self.createTempFile(streams=5)
            with BZ2File(self.filename) as bz2f:
                self.assertRaises(TypeError, bz2f.read, float())
                self.assertEqual(bz2f.read(), self.TEXT * 5)
        finally:
            # Restore the module-level buffer size even if the test fails.
            _streams.BUFFER_SIZE = buffer_size
    def testReadTrailingJunk(self):
        """Garbage after the stream must be ignored by read()."""
        self.createTempFile(suffix=self.BAD_DATA)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(), self.TEXT)
    def testReadMultiStreamTrailingJunk(self):
        """Garbage after the last of several streams must be ignored."""
        self.createTempFile(streams=5, suffix=self.BAD_DATA)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(), self.TEXT * 5)
    def testRead0(self):
        """read(0) must return an empty bytes object."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
    def testRead100(self):
        """A bounded read() must return exactly the first 100 bytes."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(100), self.TEXT[:100])
    def testPeek(self):
        """peek() must return data without advancing the read position."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            pdata = bz2f.peek()
            self.assertNotEqual(len(pdata), 0)
            self.assertStartsWith(self.TEXT, pdata)
            self.assertEqual(bz2f.read(), self.TEXT)
    def testReadInto(self):
        """readinto() must fill the buffer and report the byte count."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            n = 128
            b = bytearray(n)
            self.assertEqual(bz2f.readinto(b), n)
            self.assertEqual(b, self.TEXT[:n])
            # The second buffer is deliberately larger than the remaining data.
            n = len(self.TEXT) - n
            b = bytearray(len(self.TEXT))
            self.assertEqual(bz2f.readinto(b), n)
            self.assertEqual(b[:n], self.TEXT[-n:])
    def testReadLine(self):
        """readline() must return the stored lines one at a time."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readline, None)
            for line in self.TEXT_LINES:
                self.assertEqual(bz2f.readline(), line)
    def testReadLineMultiStream(self):
        """readline() must keep working across stream boundaries."""
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readline, None)
            for line in self.TEXT_LINES * 5:
                self.assertEqual(bz2f.readline(), line)
    def testReadLines(self):
        """readlines() must return every line of a single stream."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readlines, None)
            self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
    def testReadLinesMultiStream(self):
        """readlines() must return every line of all streams."""
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readlines, None)
            self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
    def testIterator(self):
        """Iterating a BZ2File must yield the decompressed lines."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
    def testIteratorMultiStream(self):
        """Iteration must continue across stream boundaries."""
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
    def testClosedIteratorDeadlock(self):
        # Issue #3309: Iteration on a closed BZ2File should release the lock.
        self.createTempFile()
        bz2f = BZ2File(self.filename)
        bz2f.close()
        self.assertRaises(ValueError, next, bz2f)
        # This call will deadlock if the above call failed to release the lock.
        self.assertRaises(ValueError, bz2f.readlines)
    def testWrite(self):
        """Data written through BZ2File must round-trip via ext_decompress()."""
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
    def testWriteNonDefaultCompressLevel(self):
        """Output must match bz2.compress() at the same compresslevel."""
        expected = bz2.compress(self.TEXT, compresslevel=5)
        with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
            bz2f.write(self.TEXT)
        with open(self.filename, "rb") as f:
            self.assertEqual(f.read(), expected)
    def testWriteLines(self):
        """writelines() must write every line; closed files must raise."""
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.writelines)
            bz2f.writelines(self.TEXT_LINES)
        # Issue #1535500: Calling writelines() on a closed BZ2File
        # should raise an exception.
        self.assertRaises(ValueError, bz2f.writelines, ["a"])
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT)
    def testWriteMethodsOnReadOnlyFile(self):
        """write()/writelines() on a read-mode file must raise OSError."""
        with BZ2File(self.filename, "w") as bz2f:
            bz2f.write(b"abc")
        with BZ2File(self.filename, "r") as bz2f:
            self.assertRaises(OSError, bz2f.write, b"a")
            self.assertRaises(OSError, bz2f.writelines, [b"a"])
    def testAppend(self):
        """Append mode must add a second stream after the first."""
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with BZ2File(self.filename, "a") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
    # --- seek()/tell() behavior, including multi-stream archives ---
    def testSeekForward(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.seek)
            bz2f.seek(150)
            self.assertEqual(bz2f.read(), self.TEXT[150:])
    def testSeekForwardAcrossStreams(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.seek)
            bz2f.seek(len(self.TEXT) + 150)
            self.assertEqual(bz2f.read(), self.TEXT[150:])
    def testSeekBackwards(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.read(500)
            # whence=1: relative to the current position
            bz2f.seek(-150, 1)
            self.assertEqual(bz2f.read(), self.TEXT[500-150:])
    def testSeekBackwardsAcrossStreams(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            # Read just past the first stream, then seek back before its end.
            readto = len(self.TEXT) + 100
            while readto > 0:
                readto -= len(bz2f.read(readto))
            bz2f.seek(-150, 1)
            self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
    def testSeekBackwardsFromEnd(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            # whence=2: relative to the end of the decompressed data
            bz2f.seek(-150, 2)
            self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
    def testSeekBackwardsFromEndAcrossStreams(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-1000, 2)
            self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
    def testSeekPostEnd(self):
        # Seeking past EOF leaves the position at the end of the data.
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT))
            self.assertEqual(bz2f.read(), b"")
    def testSeekPostEndMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
            self.assertEqual(bz2f.read(), b"")
    def testSeekPostEndTwice(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT))
            self.assertEqual(bz2f.read(), b"")
    def testSeekPostEndTwiceMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
            self.assertEqual(bz2f.read(), b"")
    def testSeekPreStart(self):
        # Seeking before the start leaves the position at 0.
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150)
            self.assertEqual(bz2f.tell(), 0)
            self.assertEqual(bz2f.read(), self.TEXT)
    def testSeekPreStartMultiStream(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150)
            self.assertEqual(bz2f.tell(), 0)
            self.assertEqual(bz2f.read(), self.TEXT * 2)
    def testFileno(self):
        """fileno() must proxy the underlying file and raise once closed."""
        self.createTempFile()
        with open(self.filename, 'rb') as rawf:
            bz2f = BZ2File(rawf)
            try:
                self.assertEqual(bz2f.fileno(), rawf.fileno())
            finally:
                bz2f.close()
            self.assertRaises(ValueError, bz2f.fileno)
    def testSeekable(self):
        """seekable() depends on mode and the wrapped object; closed raises."""
        bz2f = BZ2File(BytesIO(self.DATA))
        try:
            self.assertTrue(bz2f.seekable())
            bz2f.read()
            self.assertTrue(bz2f.seekable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.seekable)
        bz2f = BZ2File(BytesIO(), "w")
        try:
            self.assertFalse(bz2f.seekable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.seekable)
        # A non-seekable source makes the BZ2File non-seekable as well.
        src = BytesIO(self.DATA)
        src.seekable = lambda: False
        bz2f = BZ2File(src)
        try:
            self.assertFalse(bz2f.seekable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.seekable)
    def testReadable(self):
        """readable() is True in read mode, False in write mode."""
        bz2f = BZ2File(BytesIO(self.DATA))
        try:
            self.assertTrue(bz2f.readable())
            bz2f.read()
            self.assertTrue(bz2f.readable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.readable)
        bz2f = BZ2File(BytesIO(), "w")
        try:
            self.assertFalse(bz2f.readable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.readable)
    def testWritable(self):
        """writable() is False in read mode, True in write mode."""
        bz2f = BZ2File(BytesIO(self.DATA))
        try:
            self.assertFalse(bz2f.writable())
            bz2f.read()
            self.assertFalse(bz2f.writable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.writable)
        bz2f = BZ2File(BytesIO(), "w")
        try:
            self.assertTrue(bz2f.writable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.writable)
    def testOpenDel(self):
        """Repeatedly opening and discarding a BZ2File must not crash."""
        self.createTempFile()
        for i in range(10000):
            o = BZ2File(self.filename)
            del o
    def testOpenNonexistent(self):
        """Opening a missing path must raise OSError."""
        self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
    @threading_helper.requires_working_threading()
    def testThreading(self):
        """Concurrent writes to one BZ2File must not deadlock."""
        # Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
        data = b"1" * 2**20
        nthreads = 10
        with BZ2File(self.filename, 'wb') as f:
            def comp():
                for i in range(5):
                    f.write(data)
            threads = [threading.Thread(target=comp) for i in range(nthreads)]
            # start_threads() starts the workers and joins them on exit.
            with threading_helper.start_threads(threads):
                pass
    def testMixedIterationAndReads(self):
        """Iteration (next) and explicit read()/readline()/readlines()
        calls must share one consistent stream position."""
        self.createTempFile()
        linelen = len(self.TEXT_LINES[0])
        halflen = linelen // 2
        # read() then next() continues mid-line.
        with BZ2File(self.filename) as bz2f:
            bz2f.read(halflen)
            self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
            self.assertEqual(bz2f.read(), self.TEXT[linelen:])
        # readline() then next() advances one line at a time.
        with BZ2File(self.filename) as bz2f:
            bz2f.readline()
            self.assertEqual(next(bz2f), self.TEXT_LINES[1])
            self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
        # After readlines() the stream is exhausted.
        with BZ2File(self.filename) as bz2f:
            bz2f.readlines()
            self.assertRaises(StopIteration, next, bz2f)
            self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
    def testOpenFilename(self):
        """Opening by filename: name/fileno/mode/query methods while
        open, and their post-close behaviour (ValueError except .mode)."""
        with BZ2File(self.filename, "wb") as f:
            f.write(b'content')
            self.assertEqual(f.name, self.filename)
            self.assertIsInstance(f.fileno(), int)
            self.assertEqual(f.mode, 'wb')
            self.assertIs(f.readable(), False)
            self.assertIs(f.writable(), True)
            self.assertIs(f.seekable(), False)
            self.assertIs(f.closed, False)
        self.assertIs(f.closed, True)
        # Closed file: .name and the query methods raise; .mode survives.
        with self.assertRaises(ValueError):
            f.name
        self.assertRaises(ValueError, f.fileno)
        self.assertEqual(f.mode, 'wb')
        self.assertRaises(ValueError, f.readable)
        self.assertRaises(ValueError, f.writable)
        self.assertRaises(ValueError, f.seekable)

        # Append mode still reports mode 'wb'.
        with BZ2File(self.filename, "ab") as f:
            f.write(b'appendix')
            self.assertEqual(f.name, self.filename)
            self.assertIsInstance(f.fileno(), int)
            self.assertEqual(f.mode, 'wb')
            self.assertIs(f.readable(), False)
            self.assertIs(f.writable(), True)
            self.assertIs(f.seekable(), False)
            self.assertIs(f.closed, False)
        self.assertIs(f.closed, True)
        with self.assertRaises(ValueError):
            f.name
        self.assertRaises(ValueError, f.fileno)
        self.assertEqual(f.mode, 'wb')
        self.assertRaises(ValueError, f.readable)
        self.assertRaises(ValueError, f.writable)
        self.assertRaises(ValueError, f.seekable)

        with BZ2File(self.filename, 'rb') as f:
            self.assertEqual(f.read(), b'contentappendix')
            self.assertEqual(f.name, self.filename)
            self.assertIsInstance(f.fileno(), int)
            self.assertEqual(f.mode, 'rb')
            self.assertIs(f.readable(), True)
            self.assertIs(f.writable(), False)
            self.assertIs(f.seekable(), True)
            self.assertIs(f.closed, False)
        self.assertIs(f.closed, True)
        with self.assertRaises(ValueError):
            f.name
        self.assertRaises(ValueError, f.fileno)
        self.assertEqual(f.mode, 'rb')
        self.assertRaises(ValueError, f.readable)
        self.assertRaises(ValueError, f.writable)
        self.assertRaises(ValueError, f.seekable)
    def testOpenFileWithName(self):
        """Wrapping an already-open named file object: name/fileno are
        delegated to the wrapped object; post-close queries raise."""
        with open(self.filename, 'wb') as raw:
            with BZ2File(raw, 'wb') as f:
                f.write(b'content')
                self.assertEqual(f.name, raw.name)
                self.assertEqual(f.fileno(), raw.fileno())
                self.assertEqual(f.mode, 'wb')
                self.assertIs(f.readable(), False)
                self.assertIs(f.writable(), True)
                self.assertIs(f.seekable(), False)
                self.assertIs(f.closed, False)
            self.assertIs(f.closed, True)
            # Closed wrapper: .name and query methods raise; .mode survives.
            with self.assertRaises(ValueError):
                f.name
            self.assertRaises(ValueError, f.fileno)
            self.assertEqual(f.mode, 'wb')
            self.assertRaises(ValueError, f.readable)
            self.assertRaises(ValueError, f.writable)
            self.assertRaises(ValueError, f.seekable)
        # Append mode still reports mode 'wb'.
        with open(self.filename, 'ab') as raw:
            with BZ2File(raw, 'ab') as f:
                f.write(b'appendix')
                self.assertEqual(f.name, raw.name)
                self.assertEqual(f.fileno(), raw.fileno())
                self.assertEqual(f.mode, 'wb')
                self.assertIs(f.readable(), False)
                self.assertIs(f.writable(), True)
                self.assertIs(f.seekable(), False)
                self.assertIs(f.closed, False)
            self.assertIs(f.closed, True)
            with self.assertRaises(ValueError):
                f.name
            self.assertRaises(ValueError, f.fileno)
            self.assertEqual(f.mode, 'wb')
            self.assertRaises(ValueError, f.readable)
            self.assertRaises(ValueError, f.writable)
            self.assertRaises(ValueError, f.seekable)
        with open(self.filename, 'rb') as raw:
            with BZ2File(raw, 'rb') as f:
                self.assertEqual(f.read(), b'contentappendix')
                self.assertEqual(f.name, raw.name)
                self.assertEqual(f.fileno(), raw.fileno())
                self.assertEqual(f.mode, 'rb')
                self.assertIs(f.readable(), True)
                self.assertIs(f.writable(), False)
                self.assertIs(f.seekable(), True)
                self.assertIs(f.closed, False)
            self.assertIs(f.closed, True)
            with self.assertRaises(ValueError):
                f.name
            self.assertRaises(ValueError, f.fileno)
            self.assertEqual(f.mode, 'rb')
            self.assertRaises(ValueError, f.readable)
            self.assertRaises(ValueError, f.writable)
            self.assertRaises(ValueError, f.seekable)
    def testOpenFileWithoutName(self):
        """Wrapping a nameless stream (BytesIO): .name raises
        AttributeError while open, ValueError once closed; fileno()
        is unsupported."""
        bio = BytesIO()
        with BZ2File(bio, 'wb') as f:
            f.write(b'content')
            with self.assertRaises(AttributeError):
                f.name
            self.assertRaises(io.UnsupportedOperation, f.fileno)
            self.assertEqual(f.mode, 'wb')
        with self.assertRaises(ValueError):
            f.name
        self.assertRaises(ValueError, f.fileno)

        with BZ2File(bio, 'ab') as f:
            f.write(b'appendix')
            with self.assertRaises(AttributeError):
                f.name
            self.assertRaises(io.UnsupportedOperation, f.fileno)
            self.assertEqual(f.mode, 'wb')
        with self.assertRaises(ValueError):
            f.name
        self.assertRaises(ValueError, f.fileno)

        bio.seek(0)
        with BZ2File(bio, 'rb') as f:
            self.assertEqual(f.read(), b'contentappendix')
            with self.assertRaises(AttributeError):
                f.name
            self.assertRaises(io.UnsupportedOperation, f.fileno)
            self.assertEqual(f.mode, 'rb')
        with self.assertRaises(ValueError):
            f.name
        self.assertRaises(ValueError, f.fileno)
    def testOpenFileWithIntName(self):
        """Wrapping a file object opened from a raw file descriptor:
        name/fileno delegate to the wrapped object."""
        fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        with open(fd, 'wb') as raw:
            with BZ2File(raw, 'wb') as f:
                f.write(b'content')
                self.assertEqual(f.name, raw.name)
                self.assertEqual(f.fileno(), raw.fileno())
                self.assertEqual(f.mode, 'wb')
            # Once the BZ2File wrapper is closed, queries raise.
            with self.assertRaises(ValueError):
                f.name
            self.assertRaises(ValueError, f.fileno)
        fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
        with open(fd, 'ab') as raw:
            with BZ2File(raw, 'ab') as f:
                f.write(b'appendix')
                self.assertEqual(f.name, raw.name)
                self.assertEqual(f.fileno(), raw.fileno())
                self.assertEqual(f.mode, 'wb')
            with self.assertRaises(ValueError):
                f.name
            self.assertRaises(ValueError, f.fileno)
        fd = os.open(self.filename, os.O_RDONLY)
        with open(fd, 'rb') as raw:
            with BZ2File(raw, 'rb') as f:
                self.assertEqual(f.read(), b'contentappendix')
                self.assertEqual(f.name, raw.name)
                self.assertEqual(f.fileno(), raw.fileno())
                self.assertEqual(f.mode, 'rb')
            with self.assertRaises(ValueError):
                f.name
            self.assertRaises(ValueError, f.fileno)
def testOpenBytesFilename(self):
str_filename = self.filename
bytes_filename = os.fsencode(str_filename)
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
self.assertEqual(f.name, bytes_filename)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
self.assertEqual(f.name, bytes_filename)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
self.assertEqual(f.name, str_filename)
def testOpenPathLikeFilename(self):
filename = FakePath(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
self.assertEqual(f.name, self.filename)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
self.assertEqual(f.name, self.filename)
    def testDecompressLimited(self):
        """Decompressed data buffering should be limited"""
        # 2 MB of zeros compresses to a tiny "bomb" smaller than the
        # read buffer; reading 1 byte must not inflate the whole thing.
        bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
        self.assertLess(len(bomb), _streams.BUFFER_SIZE)
        decomp = BZ2File(BytesIO(bomb))
        self.assertEqual(decomp.read(1), b'\0')
        max_decomp = 1 + DEFAULT_BUFFER_SIZE
        # NOTE: reaches into the private _buffer to measure how much
        # was actually decompressed.
        self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
                             "Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
with self.assertRaises(AttributeError):
bz2.name
self.assertEqual(bz2f.mode, 'rb')
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertStartsWith(self.TEXT, pdata)
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with self.assertRaises(AttributeError):
bz2.name
self.assertEqual(bz2f.mode, 'wb')
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
    def test_read_truncated(self):
        """Reading a truncated stream raises EOFError, both for a full
        read and after the complete text has already been returned."""
        # Drop the eos_magic field (6 bytes) and CRC (4 bytes).
        truncated = self.DATA[:-10]
        with BZ2File(BytesIO(truncated)) as f:
            self.assertRaises(EOFError, f.read)
        with BZ2File(BytesIO(truncated)) as f:
            self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
            self.assertRaises(EOFError, f.read, 1)
        # Incomplete 4-byte file header, and block header of at least 146 bits.
        for i in range(22):
            with BZ2File(BytesIO(truncated[:i])) as f:
                self.assertRaises(EOFError, f.read, 1)
    def test_issue44439(self):
        """Issue #44439: write() must return the number of *bytes*
        written for buffers whose items are larger than one byte."""
        q = array.array('Q', [1, 2, 3, 4, 5])
        LENGTH = len(q) * q.itemsize
        with BZ2File(BytesIO(), 'w') as f:
            self.assertEqual(f.write(q), LENGTH)
            self.assertEqual(f.tell(), LENGTH)
class BZ2CompressorTest(BaseTest):
    """Tests for the incremental BZ2Compressor class."""

    def testCompress(self):
        """compress() + flush() round-trips via an external decompressor."""
        bz2c = BZ2Compressor()
        self.assertRaises(TypeError, bz2c.compress)
        data = bz2c.compress(self.TEXT)
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        """Compressing b'' yields the canonical empty-stream bytes."""
        bz2c = BZ2Compressor()
        data = bz2c.compress(b'')
        data += bz2c.flush()
        self.assertEqual(data, self.EMPTY_DATA)

    def testCompressChunks10(self):
        """Feeding the input in 10-byte chunks produces a valid stream."""
        bz2c = BZ2Compressor()
        n = 0
        data = b''
        while True:
            # Renamed from 'str' to stop shadowing the builtin.
            chunk = self.TEXT[n*10:(n+1)*10]
            if not chunk:
                break
            data += bz2c.compress(chunk)
            n += 1
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)

    @support.skip_if_pgo_task
    @bigmemtest(size=_4G + 100, memuse=2)
    def testCompress4G(self, size):
        # "Test BZ2Compressor.compress()/flush() with >4GiB input"
        bz2c = BZ2Compressor()
        data = b"x" * size
        try:
            compressed = bz2c.compress(data)
            compressed += bz2c.flush()
        finally:
            data = None  # Release memory
        data = bz2.decompress(compressed)
        try:
            self.assertEqual(len(data), size)
            self.assertEqual(len(data.strip(b"x")), 0)
        finally:
            data = None

    def testPickle(self):
        """Compressor objects must refuse to be pickled (stateful C object)."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
    """Tests for the incremental BZ2Decompressor class."""

    def test_Constructor(self):
        """The constructor takes no arguments."""
        self.assertRaises(TypeError, BZ2Decompressor, 42)

    def testDecompress(self):
        """A whole stream decompresses in a single call."""
        bz2d = BZ2Decompressor()
        self.assertRaises(TypeError, bz2d.decompress)
        text = bz2d.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressChunks10(self):
        """Feeding the input in 10-byte chunks yields the full text."""
        bz2d = BZ2Decompressor()
        text = b''
        n = 0
        while True:
            # Renamed from 'str' to stop shadowing the builtin.
            chunk = self.DATA[n*10:(n+1)*10]
            if not chunk:
                break
            text += bz2d.decompress(chunk)
            n += 1
        self.assertEqual(text, self.TEXT)

    def testDecompressUnusedData(self):
        """Bytes past the end of the stream land in .unused_data."""
        bz2d = BZ2Decompressor()
        unused_data = b"this is unused data"
        text = bz2d.decompress(self.DATA+unused_data)
        self.assertEqual(text, self.TEXT)
        self.assertEqual(bz2d.unused_data, unused_data)

    def testEOFError(self):
        """decompress() raises EOFError once the stream has ended."""
        bz2d = BZ2Decompressor()
        text = bz2d.decompress(self.DATA)
        self.assertRaises(EOFError, bz2d.decompress, b"anything")
        self.assertRaises(EOFError, bz2d.decompress, b"")

    @support.skip_if_pgo_task
    @bigmemtest(size=_4G + 100, memuse=3.3)
    def testDecompress4G(self, size):
        # "Test BZ2Decompressor.decompress() with >4GiB input"
        blocksize = min(10 * 1024 * 1024, size)
        block = random.randbytes(blocksize)
        try:
            data = block * ((size-1) // blocksize + 1)
            compressed = bz2.compress(data)
            bz2d = BZ2Decompressor()
            decompressed = bz2d.decompress(compressed)
            self.assertTrue(decompressed == data)
        finally:
            data = None
            compressed = None
            decompressed = None

    def testPickle(self):
        """Decompressor objects must refuse to be pickled."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Decompressor(), proto)

    def testDecompressorChunksMaxsize(self):
        """max_length caps the output of each decompress() call."""
        bzd = BZ2Decompressor()
        max_length = 100
        out = []
        # Feed some input
        len_ = len(self.BIG_DATA) - 64
        out.append(bzd.decompress(self.BIG_DATA[:len_],
                                  max_length=max_length))
        self.assertFalse(bzd.needs_input)
        self.assertEqual(len(out[-1]), max_length)
        # Retrieve more data without providing more input
        out.append(bzd.decompress(b'', max_length=max_length))
        self.assertFalse(bzd.needs_input)
        self.assertEqual(len(out[-1]), max_length)
        # Retrieve more data while providing more input
        out.append(bzd.decompress(self.BIG_DATA[len_:],
                                  max_length=max_length))
        self.assertLessEqual(len(out[-1]), max_length)
        # Retrieve remaining uncompressed data
        while not bzd.eof:
            out.append(bzd.decompress(b'', max_length=max_length))
            self.assertLessEqual(len(out[-1]), max_length)
        out = b"".join(out)
        self.assertEqual(out, self.BIG_TEXT)
        self.assertEqual(bzd.unused_data, b"")

    def test_decompressor_inputbuf_1(self):
        # Test reusing input buffer after moving existing
        # contents to beginning
        bzd = BZ2Decompressor()
        out = []
        # Create input buffer and fill it
        self.assertEqual(bzd.decompress(self.DATA[:100],
                                        max_length=0), b'')
        # Retrieve some results, freeing capacity at beginning
        # of input buffer
        out.append(bzd.decompress(b'', 2))
        # Add more data that fits into input buffer after
        # moving existing data to beginning
        out.append(bzd.decompress(self.DATA[100:105], 15))
        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[105:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_decompressor_inputbuf_2(self):
        # Test reusing input buffer by appending data at the
        # end right away
        bzd = BZ2Decompressor()
        out = []
        # Create input buffer and empty it
        self.assertEqual(bzd.decompress(self.DATA[:200],
                                        max_length=0), b'')
        out.append(bzd.decompress(b''))
        # Fill buffer with new data
        out.append(bzd.decompress(self.DATA[200:280], 2))
        # Append some more data, not enough to require resize
        out.append(bzd.decompress(self.DATA[280:300], 2))
        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[300:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_decompressor_inputbuf_3(self):
        # Test reusing input buffer after extending it
        bzd = BZ2Decompressor()
        out = []
        # Create almost full input buffer
        out.append(bzd.decompress(self.DATA[:200], 5))
        # Add even more data to it, requiring resize
        out.append(bzd.decompress(self.DATA[200:300], 5))
        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[300:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_failure(self):
        """A failed decompress must not corrupt internal state."""
        bzd = BZ2Decompressor()
        self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
        # Previously, a second call could crash due to internal inconsistency
        self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)

    @support.refcount_test
    def test_refleaks_in___init__(self):
        """Re-running __init__ repeatedly must not leak references."""
        gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
        bzd = BZ2Decompressor()
        refs_before = gettotalrefcount()
        for i in range(100):
            bzd.__init__()
        self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)

    def test_uninitialized_BZ2Decompressor_crash(self):
        """decompress() on a __new__-only instance must not crash."""
        self.assertEqual(BZ2Decompressor.__new__(BZ2Decompressor).
                         decompress(bytes()), b'')
class CompressDecompressTest(BaseTest):
    """Tests for the one-shot bz2.compress()/bz2.decompress() functions."""

    def testCompress(self):
        """A compressed payload round-trips via an external decompressor."""
        compressed = bz2.compress(self.TEXT)
        self.assertEqual(ext_decompress(compressed), self.TEXT)

    def testCompressEmptyString(self):
        """Compressing b'' yields the canonical empty-stream bytes."""
        self.assertEqual(bz2.compress(b''), self.EMPTY_DATA)

    def testDecompress(self):
        """Known compressed data decompresses to the known text."""
        self.assertEqual(bz2.decompress(self.DATA), self.TEXT)

    def testDecompressEmpty(self):
        """Decompressing b'' returns b''."""
        self.assertEqual(bz2.decompress(b""), b"")

    def testDecompressToEmptyString(self):
        """An empty stream decompresses to b''."""
        self.assertEqual(bz2.decompress(self.EMPTY_DATA), b'')

    def testDecompressIncomplete(self):
        """A truncated stream raises ValueError."""
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])

    def testDecompressBadData(self):
        """Corrupt input raises OSError."""
        self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)

    def testDecompressMultiStream(self):
        """Concatenated streams decompress to the concatenated texts."""
        self.assertEqual(bz2.decompress(self.DATA * 5), self.TEXT * 5)

    def testDecompressTrailingJunk(self):
        """Garbage after a valid stream is ignored."""
        self.assertEqual(bz2.decompress(self.DATA + self.BAD_DATA), self.TEXT)

    def testDecompressMultiStreamTrailingJunk(self):
        """Garbage after several valid streams is ignored."""
        self.assertEqual(bz2.decompress(self.DATA * 5 + self.BAD_DATA),
                         self.TEXT * 5)
class OpenTest(BaseTest):
    "Test the open function."
    def open(self, *args, **kwargs):
        # Indirection point so subclasses can substitute another opener.
        return bz2.open(*args, **kwargs)

    def test_binary_modes(self):
        """Explicit binary modes: wb/xb write, rb reads, ab appends."""
        for mode in ("wb", "xb"):
            if mode == "xb":
                # "x" refuses to overwrite, so remove the file first.
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "rb") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "ab") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_implicit_binary_modes(self):
        # Test implicit binary modes (no "b" or "t" in mode string).
        for mode in ("w", "x"):
            if mode == "x":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "r") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "a") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_text_modes(self):
        """Text modes decode/encode and translate newlines to os.linesep."""
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        for mode in ("wt", "xt"):
            if mode == "xt":
                unlink(self.filename)
            with self.open(self.filename, mode, encoding="ascii") as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol)
            with self.open(self.filename, "rt", encoding="ascii") as f:
                self.assertEqual(f.read(), text)
            with self.open(self.filename, "at", encoding="ascii") as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol * 2)

    def test_x_mode(self):
        """Exclusive-creation modes fail if the file already exists."""
        for mode in ("x", "xb", "xt"):
            unlink(self.filename)
            encoding = "utf-8" if "t" in mode else None
            with self.open(self.filename, mode, encoding=encoding) as f:
                pass
            with self.assertRaises(FileExistsError):
                with self.open(self.filename, mode) as f:
                    pass

    def test_fileobj(self):
        """open() also accepts an existing file object."""
        with self.open(BytesIO(self.DATA), "r") as f:
            self.assertEqual(f.read(), self.TEXT)
        with self.open(BytesIO(self.DATA), "rb") as f:
            self.assertEqual(f.read(), self.TEXT)
        text = self.TEXT.decode("ascii")
        with self.open(BytesIO(self.DATA), "rt", encoding="utf-8") as f:
            self.assertEqual(f.read(), text)

    def test_bad_params(self):
        # Test invalid parameter combinations.
        self.assertRaises(ValueError,
                          self.open, self.filename, "wbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "xbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", encoding="utf-8")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", errors="ignore")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", newline="\n")

    def test_encoding(self):
        # Test non-default encoding.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        with self.open(self.filename, "wt", encoding="utf-16-le") as f:
            f.write(text)
        with open(self.filename, "rb") as f:
            file_data = ext_decompress(f.read()).decode("utf-16-le")
            self.assertEqual(file_data, text_native_eol)
        with self.open(self.filename, "rt", encoding="utf-16-le") as f:
            self.assertEqual(f.read(), text)

    def test_encoding_error_handler(self):
        # Test with non-default encoding error handler.
        with self.open(self.filename, "wb") as f:
            f.write(b"foo\xffbar")
        with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
                as f:
            self.assertEqual(f.read(), "foobar")

    def test_newline(self):
        # Test with explicit newline (universal newline mode disabled).
        text = self.TEXT.decode("ascii")
        with self.open(self.filename, "wt", encoding="utf-8", newline="\n") as f:
            f.write(text)
        # Reading with newline="\r": no "\r" occurs, so all input is one line.
        with self.open(self.filename, "rt", encoding="utf-8", newline="\r") as f:
            self.assertEqual(f.readlines(), [text])
def tearDownModule():
    # Reap any child processes left behind by the tests.
    support.reap_children()
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_bz2.py
|
# Copyright 2012 Red Hat, Inc.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from testtools import matchers
from pbr.tests import base
from pbr import version
# Shorthand for parsing a pip/PEP-440 version string into a SemanticVersion.
from_pip_string = version.SemanticVersion.from_pip_string
class TestSemanticVersion(base.BaseTestCase):
def test_ordering(self):
ordered_versions = [
"1.2.3.dev6",
"1.2.3.dev7",
"1.2.3.a4.dev12",
"1.2.3.a4.dev13",
"1.2.3.a4",
"1.2.3.a5.dev1",
"1.2.3.a5",
"1.2.3.b3.dev1",
"1.2.3.b3",
"1.2.3.rc2.dev1",
"1.2.3.rc2",
"1.2.3.rc3.dev1",
"1.2.3",
"1.2.4",
"1.3.3",
"2.2.3",
]
for v in ordered_versions:
sv = version.SemanticVersion.from_pip_string(v)
self.expectThat(sv, matchers.Equals(sv))
for left, right in itertools.combinations(ordered_versions, 2):
l_pos = ordered_versions.index(left)
r_pos = ordered_versions.index(right)
if l_pos < r_pos:
m1 = matchers.LessThan
m2 = matchers.GreaterThan
else:
m1 = matchers.GreaterThan
m2 = matchers.LessThan
left_sv = version.SemanticVersion.from_pip_string(left)
right_sv = version.SemanticVersion.from_pip_string(right)
self.expectThat(left_sv, m1(right_sv))
self.expectThat(right_sv, m2(left_sv))
def test_from_pip_string_legacy_alpha(self):
expected = version.SemanticVersion(
1, 2, 0, prerelease_type='rc', prerelease=1)
parsed = from_pip_string('1.2.0rc1')
self.assertEqual(expected, parsed)
def test_from_pip_string_legacy_postN(self):
# When pbr trunk was incompatible with PEP-440, a stable release was
# made that used postN versions to represent developer builds. As
# we expect only to be parsing versions of our own, we map those
# into dev builds of the next version.
expected = version.SemanticVersion(1, 2, 4, dev_count=5)
parsed = from_pip_string('1.2.3.post5')
self.expectThat(expected, matchers.Equals(parsed))
expected = version.SemanticVersion(1, 2, 3, 'a', 5, dev_count=6)
parsed = from_pip_string('1.2.3.0a4.post6')
self.expectThat(expected, matchers.Equals(parsed))
# We can't define a mapping for .postN.devM, so it should raise.
self.expectThat(
lambda: from_pip_string('1.2.3.post5.dev6'),
matchers.raises(ValueError))
def test_from_pip_string_legacy_nonzero_lead_in(self):
# reported in bug 1361251
expected = version.SemanticVersion(
0, 0, 1, prerelease_type='a', prerelease=2)
parsed = from_pip_string('0.0.1a2')
self.assertEqual(expected, parsed)
def test_from_pip_string_legacy_short_nonzero_lead_in(self):
expected = version.SemanticVersion(
0, 1, 0, prerelease_type='a', prerelease=2)
parsed = from_pip_string('0.1a2')
self.assertEqual(expected, parsed)
def test_from_pip_string_legacy_no_0_prerelease(self):
expected = version.SemanticVersion(
2, 1, 0, prerelease_type='rc', prerelease=1)
parsed = from_pip_string('2.1.0.rc1')
self.assertEqual(expected, parsed)
def test_from_pip_string_legacy_no_0_prerelease_2(self):
expected = version.SemanticVersion(
2, 0, 0, prerelease_type='rc', prerelease=1)
parsed = from_pip_string('2.0.0.rc1')
self.assertEqual(expected, parsed)
def test_from_pip_string_legacy_non_440_beta(self):
expected = version.SemanticVersion(
2014, 2, prerelease_type='b', prerelease=2)
parsed = from_pip_string('2014.2.b2')
self.assertEqual(expected, parsed)
def test_from_pip_string_pure_git_hash(self):
self.assertRaises(ValueError, from_pip_string, '6eed5ae')
def test_from_pip_string_non_digit_start(self):
self.assertRaises(ValueError, from_pip_string,
'non-release-tag/2014.12.16-1')
def test_final_version(self):
semver = version.SemanticVersion(1, 2, 3)
self.assertEqual((1, 2, 3, 'final', 0), semver.version_tuple())
self.assertEqual("1.2.3", semver.brief_string())
self.assertEqual("1.2.3", semver.debian_string())
self.assertEqual("1.2.3", semver.release_string())
self.assertEqual("1.2.3", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.3"))
def test_parsing_short_forms(self):
semver = version.SemanticVersion(1, 0, 0)
self.assertEqual(semver, from_pip_string("1"))
self.assertEqual(semver, from_pip_string("1.0"))
self.assertEqual(semver, from_pip_string("1.0.0"))
def test_dev_version(self):
semver = version.SemanticVersion(1, 2, 4, dev_count=5)
self.assertEqual((1, 2, 4, 'dev', 4), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~dev5", semver.debian_string())
self.assertEqual("1.2.4.dev5", semver.release_string())
self.assertEqual("1.2.3.dev5", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.dev5"))
def test_dev_no_git_version(self):
semver = version.SemanticVersion(1, 2, 4, dev_count=5)
self.assertEqual((1, 2, 4, 'dev', 4), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~dev5", semver.debian_string())
self.assertEqual("1.2.4.dev5", semver.release_string())
self.assertEqual("1.2.3.dev5", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.dev5"))
def test_dev_zero_version(self):
semver = version.SemanticVersion(1, 2, 0, dev_count=5)
self.assertEqual((1, 2, 0, 'dev', 4), semver.version_tuple())
self.assertEqual("1.2.0", semver.brief_string())
self.assertEqual("1.2.0~dev5", semver.debian_string())
self.assertEqual("1.2.0.dev5", semver.release_string())
self.assertEqual("1.1.9999.dev5", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.0.dev5"))
def test_alpha_dev_version(self):
semver = version.SemanticVersion(1, 2, 4, 'a', 1, 12)
self.assertEqual((1, 2, 4, 'alphadev', 12), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~a1.dev12", semver.debian_string())
self.assertEqual("1.2.4.0a1.dev12", semver.release_string())
self.assertEqual("1.2.3.a1.dev12", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0a1.dev12"))
def test_alpha_version(self):
semver = version.SemanticVersion(1, 2, 4, 'a', 1)
self.assertEqual((1, 2, 4, 'alpha', 1), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~a1", semver.debian_string())
self.assertEqual("1.2.4.0a1", semver.release_string())
self.assertEqual("1.2.3.a1", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0a1"))
def test_alpha_zero_version(self):
semver = version.SemanticVersion(1, 2, 0, 'a', 1)
self.assertEqual((1, 2, 0, 'alpha', 1), semver.version_tuple())
self.assertEqual("1.2.0", semver.brief_string())
self.assertEqual("1.2.0~a1", semver.debian_string())
self.assertEqual("1.2.0.0a1", semver.release_string())
self.assertEqual("1.1.9999.a1", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.0.0a1"))
def test_alpha_major_zero_version(self):
semver = version.SemanticVersion(1, 0, 0, 'a', 1)
self.assertEqual((1, 0, 0, 'alpha', 1), semver.version_tuple())
self.assertEqual("1.0.0", semver.brief_string())
self.assertEqual("1.0.0~a1", semver.debian_string())
self.assertEqual("1.0.0.0a1", semver.release_string())
self.assertEqual("0.9999.9999.a1", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.0.0.0a1"))
def test_alpha_default_version(self):
semver = version.SemanticVersion(1, 2, 4, 'a')
self.assertEqual((1, 2, 4, 'alpha', 0), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~a0", semver.debian_string())
self.assertEqual("1.2.4.0a0", semver.release_string())
self.assertEqual("1.2.3.a0", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0a0"))
def test_beta_dev_version(self):
semver = version.SemanticVersion(1, 2, 4, 'b', 1, 12)
self.assertEqual((1, 2, 4, 'betadev', 12), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~b1.dev12", semver.debian_string())
self.assertEqual("1.2.4.0b1.dev12", semver.release_string())
self.assertEqual("1.2.3.b1.dev12", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0b1.dev12"))
def test_beta_version(self):
semver = version.SemanticVersion(1, 2, 4, 'b', 1)
self.assertEqual((1, 2, 4, 'beta', 1), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~b1", semver.debian_string())
self.assertEqual("1.2.4.0b1", semver.release_string())
self.assertEqual("1.2.3.b1", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0b1"))
def test_decrement_nonrelease(self):
# The prior version of any non-release is a release
semver = version.SemanticVersion(1, 2, 4, 'b', 1)
self.assertEqual(
version.SemanticVersion(1, 2, 3), semver.decrement())
def test_decrement_nonrelease_zero(self):
# We set an arbitrary max version of 9999 when decrementing versions
# - this is part of handling rpm support.
semver = version.SemanticVersion(1, 0, 0)
self.assertEqual(
version.SemanticVersion(0, 9999, 9999), semver.decrement())
def test_decrement_release(self):
# The next patch version of a release version requires a change to the
# patch level.
semver = version.SemanticVersion(2, 2, 5)
self.assertEqual(
version.SemanticVersion(2, 2, 4), semver.decrement())
def test_increment_nonrelease(self):
# The next patch version of a non-release version is another
# non-release version as the next release doesn't need to be
# incremented.
semver = version.SemanticVersion(1, 2, 4, 'b', 1)
self.assertEqual(
version.SemanticVersion(1, 2, 4, 'b', 2), semver.increment())
# Major and minor increments however need to bump things.
self.assertEqual(
version.SemanticVersion(1, 3, 0), semver.increment(minor=True))
self.assertEqual(
version.SemanticVersion(2, 0, 0), semver.increment(major=True))
def test_increment_release(self):
# The next patch version of a release version requires a change to the
# patch level.
semver = version.SemanticVersion(1, 2, 5)
self.assertEqual(
version.SemanticVersion(1, 2, 6), semver.increment())
self.assertEqual(
version.SemanticVersion(1, 3, 0), semver.increment(minor=True))
self.assertEqual(
version.SemanticVersion(2, 0, 0), semver.increment(major=True))
def test_rc_dev_version(self):
semver = version.SemanticVersion(1, 2, 4, 'rc', 1, 12)
self.assertEqual((1, 2, 4, 'candidatedev', 12), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~rc1.dev12", semver.debian_string())
self.assertEqual("1.2.4.0rc1.dev12", semver.release_string())
self.assertEqual("1.2.3.rc1.dev12", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0rc1.dev12"))
def test_rc_version(self):
semver = version.SemanticVersion(1, 2, 4, 'rc', 1)
self.assertEqual((1, 2, 4, 'candidate', 1), semver.version_tuple())
self.assertEqual("1.2.4", semver.brief_string())
self.assertEqual("1.2.4~rc1", semver.debian_string())
self.assertEqual("1.2.4.0rc1", semver.release_string())
self.assertEqual("1.2.3.rc1", semver.rpm_string())
self.assertEqual(semver, from_pip_string("1.2.4.0rc1"))
def test_to_dev(self):
        # to_dev() preserves all existing version components (including a
        # pre-release component) and attaches the given dev_count.
        self.assertEqual(
            version.SemanticVersion(1, 2, 3, dev_count=1),
            version.SemanticVersion(1, 2, 3).to_dev(1))
        self.assertEqual(
            version.SemanticVersion(1, 2, 3, 'rc', 1, dev_count=1),
            version.SemanticVersion(1, 2, 3, 'rc', 1).to_dev(1))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
# daemon/daemon.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2004–2005 Chad J. Schroeder
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
#
# Apache Phoenix note: this file is `daemon.py` from the package
# `python-daemon 2.0.5`, https://pypi.python.org/pypi/python-daemon/
#
# The class `PidFile` was added for adapting the `lockfile` package's interface
# without depending on yet another 3rd party package. Based on example from
# http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
#
""" Daemon process behaviour.
"""
from __future__ import (absolute_import, unicode_literals)
import os
import sys
import resource
import errno
import signal
import socket
import atexit
import fcntl
# Compatibility shim: bind `basestring` and `unicode` at module level so the
# same names work under both Python major versions. `unicode` is used by
# `DaemonContext._make_signal_handler` for its text-type check.
try:
    # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
    basestring = basestring
    unicode = unicode
except NameError:
    # Python 3 names the Unicode data type ‘str’.
    basestring = str
    unicode = str
class DaemonError(Exception):
    """ Base exception class for errors from this module. """
    def __init__(self, *args, **kwargs):
        # Capture any exception currently being handled as this error's
        # cause before normal initialisation, so the original failure is
        # preserved in tracebacks (PEP 3134 style chaining).
        self._chain_from_context()
        super(DaemonError, self).__init__(*args, **kwargs)
    def _chain_from_context(self):
        # Attach the active `sys.exc_info()` exception (if any) as __cause__.
        _chain_exception_from_existing_exception_context(self, as_cause=True)
# Mixes in OSError so callers may catch OS-level setup failures generically.
class DaemonOSEnvironmentError(DaemonError, OSError):
    """ Exception raised when daemon OS environment setup receives error. """
# Raised by `detach_process_context` when a fork fails; OSError mixin keeps
# it catchable alongside other OS errors.
class DaemonProcessDetachError(DaemonError, OSError):
    """ Exception raised when process detach fails. """
class DaemonContext:
    """ Context for turning the current program into a daemon process.
        A `DaemonContext` instance represents the behaviour settings and
        process context for the program when it becomes a daemon. The
        behaviour and environment is customised by setting options on the
        instance, before calling the `open` method.
        Each option can be passed as a keyword argument to the `DaemonContext`
        constructor, or subsequently altered by assigning to an attribute on
        the instance at any time prior to calling `open`. That is, for
        options named `wibble` and `wubble`, the following invocation::
            foo = daemon.DaemonContext(wibble=bar, wubble=baz)
            foo.open()
        is equivalent to::
            foo = daemon.DaemonContext()
            foo.wibble = bar
            foo.wubble = baz
            foo.open()
        The following options are defined.
        `files_preserve`
            :Default: ``None``
            List of files that should *not* be closed when starting the
            daemon. If ``None``, all open file descriptors will be closed.
            Elements of the list are file descriptors (as returned by a file
            object's `fileno()` method) or Python `file` objects. Each
            specifies a file that is not to be closed during daemon start.
        `chroot_directory`
            :Default: ``None``
            Full path to a directory to set as the effective root directory of
            the process. If ``None``, specifies that the root directory is not
            to be changed.
        `working_directory`
            :Default: ``'/'``
            Full path of the working directory to which the process should
            change on daemon start.
            Since a filesystem cannot be unmounted if a process has its
            current working directory on that filesystem, this should either
            be left at default or set to a directory that is a sensible “home
            directory” for the daemon while it is running.
        `umask`
            :Default: ``0``
            File access creation mask (“umask”) to set for the process on
            daemon start.
            A daemon should not rely on the parent process's umask value,
            which is beyond its control and may prevent creating a file with
            the required access mode. So when the daemon context opens, the
            umask is set to an explicit known value.
            If the conventional value of 0 is too open, consider setting a
            value such as 0o022, 0o027, 0o077, or another specific value.
            Otherwise, ensure the daemon creates every file with an
            explicit access mode for the purpose.
        `pidfile`
            :Default: ``None``
            Context manager for a PID lock file. When the daemon context opens
            and closes, it enters and exits the `pidfile` context manager.
        `detach_process`
            :Default: ``None``
            If ``True``, detach the process context when opening the daemon
            context; if ``False``, do not detach.
            If unspecified (``None``) during initialisation of the instance,
            this will be set to ``True`` by default, and ``False`` only if
            detaching the process is determined to be redundant; for example,
            in the case when the process was started by `init`, by `initd`, or
            by `inetd`.
        `signal_map`
            :Default: system-dependent
            Mapping from operating system signals to callback actions.
            The mapping is used when the daemon context opens, and determines
            the action for each signal's signal handler:
            * A value of ``None`` will ignore the signal (by setting the
              signal action to ``signal.SIG_IGN``).
            * A string value will be used as the name of an attribute on the
              ``DaemonContext`` instance. The attribute's value will be used
              as the action for the signal handler.
            * Any other value will be used as the action for the
              signal handler. See the ``signal.signal`` documentation
              for details of the signal handler interface.
            The default value depends on which signals are defined on the
            running system. Each item from the list below whose signal is
            actually defined in the ``signal`` module will appear in the
            default map:
            * ``signal.SIGTTIN``: ``None``
            * ``signal.SIGTTOU``: ``None``
            * ``signal.SIGTSTP``: ``None``
            * ``signal.SIGTERM``: ``'terminate'``
            Depending on how the program will interact with its child
            processes, it may need to specify a signal map that
            includes the ``signal.SIGCHLD`` signal (received when a
            child process exits). See the specific operating system's
            documentation for more detail on how to determine what
            circumstances dictate the need for signal handlers.
        `uid`
            :Default: ``os.getuid()``
        `gid`
            :Default: ``os.getgid()``
            The user ID (“UID”) value and group ID (“GID”) value to switch
            the process to on daemon start.
            The default values, the real UID and GID of the process, will
            relinquish any effective privilege elevation inherited by the
            process.
        `prevent_core`
            :Default: ``True``
            If true, prevents the generation of core files, in order to avoid
            leaking sensitive information from daemons run as `root`.
        `stdin`
            :Default: ``None``
        `stdout`
            :Default: ``None``
        `stderr`
            :Default: ``None``
            Each of `stdin`, `stdout`, and `stderr` is a file-like object
            which will be used as the new file for the standard I/O stream
            `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
            should therefore be open, with a minimum of mode 'r' in the case
            of `stdin`, and mimimum of mode 'w+' in the case of `stdout` and
            `stderr`.
            If the object has a `fileno()` method that returns a file
            descriptor, the corresponding file will be excluded from being
            closed during daemon start (that is, it will be treated as though
            it were listed in `files_preserve`).
            If ``None``, the corresponding system stream is re-bound to the
            file named by `os.devnull`.
        """
    __metaclass__ = type
    def __init__(
            self,
            chroot_directory=None,
            working_directory="/",
            umask=0,
            uid=None,
            gid=None,
            prevent_core=True,
            detach_process=None,
            files_preserve=None,
            pidfile=None,
            stdin=None,
            stdout=None,
            stderr=None,
            signal_map=None,
            ):
        """ Set up a new instance. """
        self.chroot_directory = chroot_directory
        self.working_directory = working_directory
        self.umask = umask
        self.prevent_core = prevent_core
        self.files_preserve = files_preserve
        self.pidfile = pidfile
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        # Default to the real IDs of the current process, which drops any
        # inherited effective privilege elevation on daemon start.
        if uid is None:
            uid = os.getuid()
        self.uid = uid
        if gid is None:
            gid = os.getgid()
        self.gid = gid
        if detach_process is None:
            detach_process = is_detach_process_context_required()
        self.detach_process = detach_process
        if signal_map is None:
            signal_map = make_default_signal_map()
        self.signal_map = signal_map
        self._is_open = False
    @property
    def is_open(self):
        """ ``True`` if the instance is currently open. """
        return self._is_open
    def open(self):
        """ Become a daemon process.
            :return: ``None``.
            Open the daemon context, turning the current program into a daemon
            process. This performs the following steps:
            * If this instance's `is_open` property is true, return
              immediately. This makes it safe to call `open` multiple times on
              an instance.
            * If the `prevent_core` attribute is true, set the resource limits
              for the process to prevent any core dump from the process.
            * If the `chroot_directory` attribute is not ``None``, set the
              effective root directory of the process to that directory (via
              `os.chroot`).
              This allows running the daemon process inside a “chroot gaol”
              as a means of limiting the system's exposure to rogue behaviour
              by the process. Note that the specified directory needs to
              already be set up for this purpose.
            * Set the process UID and GID to the `uid` and `gid` attribute
              values.
            * Close all open file descriptors. This excludes those listed in
              the `files_preserve` attribute, and those that correspond to the
              `stdin`, `stdout`, or `stderr` attributes.
            * Change current working directory to the path specified by the
              `working_directory` attribute.
            * Reset the file access creation mask to the value specified by
              the `umask` attribute.
            * If the `detach_process` option is true, detach the current
              process into its own process group, and disassociate from any
              controlling terminal.
            * Set signal handlers as specified by the `signal_map` attribute.
            * If any of the attributes `stdin`, `stdout`, `stderr` are not
              ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
              and/or `sys.stderr` to the files represented by the
              corresponding attributes. Where the attribute has a file
              descriptor, the descriptor is duplicated (instead of re-binding
              the name).
            * If the `pidfile` attribute is not ``None``, enter its context
              manager.
            * Mark this instance as open (for the purpose of future `open` and
              `close` calls).
            * Register the `close` method to be called during Python's exit
              processing.
            When the function returns, the running program is a daemon
            process.
            """
        if self.is_open:
            return
        if self.chroot_directory is not None:
            change_root_directory(self.chroot_directory)
        if self.prevent_core:
            prevent_core_dump()
        change_file_creation_mask(self.umask)
        change_working_directory(self.working_directory)
        change_process_owner(self.uid, self.gid)
        if self.detach_process:
            detach_process_context()
        signal_handler_map = self._make_signal_handler_map()
        set_signal_handlers(signal_handler_map)
        exclude_fds = self._get_exclude_file_descriptors()
        close_all_open_files(exclude=exclude_fds)
        redirect_stream(sys.stdin, self.stdin)
        redirect_stream(sys.stdout, self.stdout)
        redirect_stream(sys.stderr, self.stderr)
        if self.pidfile is not None:
            self.pidfile.__enter__()
        self._is_open = True
        register_atexit_function(self.close)
    def __enter__(self):
        """ Context manager entry point. """
        self.open()
        return self
    def close(self):
        """ Exit the daemon process context.
            :return: ``None``.
            Close the daemon context. This performs the following steps:
            * If this instance's `is_open` property is false, return
              immediately. This makes it safe to call `close` multiple times
              on an instance.
            * If the `pidfile` attribute is not ``None``, exit its context
              manager.
            * Mark this instance as closed (for the purpose of future `open`
              and `close` calls).
            """
        if not self.is_open:
            return
        if self.pidfile is not None:
            # Follow the interface for telling a context manager to exit,
            # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
            self.pidfile.__exit__(None, None, None)
        self._is_open = False
    def __exit__(self, exc_type, exc_value, traceback):
        """ Context manager exit point. """
        self.close()
    def terminate(self, signal_number, stack_frame):
        """ Signal handler for end-process signals.
            :param signal_number: The OS signal number received.
            :param stack_frame: The frame object at the point the
                signal was received.
            :return: ``None``.
            Signal handler for the ``signal.SIGTERM`` signal. Performs the
            following step:
            * Raise a ``SystemExit`` exception explaining the signal.
            """
        exception = SystemExit(
                "Terminating on signal {signal_number!r}".format(
                    signal_number=signal_number))
        raise exception
    def _get_exclude_file_descriptors(self):
        """ Get the set of file descriptors to exclude closing.
            :return: A set containing the file descriptors for the
                files to be preserved.
            The file descriptors to be preserved are those from the
            items in `files_preserve`, and also each of `stdin`,
            `stdout`, and `stderr`. For each item:
            * If the item is ``None``, it is omitted from the return
              set.
            * If the item's ``fileno()`` method returns a value, that
              value is in the return set.
            * Otherwise, the item is in the return set verbatim.
            """
        if self.files_preserve is None:
            files_preserve = []
        else:
            # Work on a copy: extending `self.files_preserve` directly would
            # mutate the caller-supplied list, and repeated `open()` calls
            # would accumulate duplicate stream entries in it.
            files_preserve = list(self.files_preserve)
        files_preserve.extend(
                item for item in [self.stdin, self.stdout, self.stderr]
                if hasattr(item, 'fileno'))
        exclude_descriptors = set()
        for item in files_preserve:
            if item is None:
                continue
            file_descriptor = _get_file_descriptor(item)
            if file_descriptor is not None:
                exclude_descriptors.add(file_descriptor)
            else:
                exclude_descriptors.add(item)
        return exclude_descriptors
    def _make_signal_handler(self, target):
        """ Make the signal handler for a specified target object.
            :param target: A specification of the target for the
                handler; see below.
            :return: The value for use by `signal.signal()`.
            If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
            is a text string, return the attribute of this instance named
            by that string. Otherwise, return `target` itself.
            """
        if target is None:
            result = signal.SIG_IGN
        elif isinstance(target, unicode):
            name = target
            result = getattr(self, name)
        else:
            result = target
        return result
    def _make_signal_handler_map(self):
        """ Make the map from signals to handlers for this instance.
            :return: The constructed signal map for this instance.
            Construct a map from signal numbers to handlers for this
            context instance, suitable for passing to
            `set_signal_handlers`.
            """
        signal_handler_map = dict(
                (signal_number, self._make_signal_handler(target))
                for (signal_number, target) in self.signal_map.items())
        return signal_handler_map
def _get_file_descriptor(obj):
""" Get the file descriptor, if the object has one.
:param obj: The object expected to be a file-like object.
:return: The file descriptor iff the file supports it; otherwise
``None``.
The object may be a non-file object. It may also be a
file-like object with no support for a file descriptor. In
either case, return ``None``.
"""
file_descriptor = None
if hasattr(obj, 'fileno'):
try:
file_descriptor = obj.fileno()
except ValueError:
# The item doesn't support a file descriptor.
pass
return file_descriptor
def change_working_directory(directory):
    """ Change the current working directory of this process.
        :param directory: The target directory path.
        :return: ``None``.
        :raise DaemonOSEnvironmentError: If the directory change fails.
        """
    try:
        os.chdir(directory)
    except Exception as exc:
        message = "Unable to change working directory ({exc})".format(exc=exc)
        raise DaemonOSEnvironmentError(message)
def change_root_directory(directory):
    """ Change the root directory of this process.
        :param directory: The target directory path.
        :return: ``None``.
        :raise DaemonOSEnvironmentError: If either change fails.
        Set the current working directory, then the process root
        directory, to the specified `directory`. Requires appropriate
        OS privileges for this process.
        """
    try:
        # Enter the directory first so the new root is also the cwd.
        os.chdir(directory)
        os.chroot(directory)
    except Exception as exc:
        message = "Unable to change root directory ({exc})".format(exc=exc)
        raise DaemonOSEnvironmentError(message)
def change_file_creation_mask(mask):
    """ Change the file creation mask ("umask") for this process.
        :param mask: The numeric file creation mask to set.
        :return: ``None``.
        :raise DaemonOSEnvironmentError: If the umask cannot be set.
        """
    try:
        os.umask(mask)
    except Exception as exc:
        message = "Unable to change file creation mask ({exc})".format(exc=exc)
        raise DaemonOSEnvironmentError(message)
def change_process_owner(uid, gid):
    """ Change the owning UID and GID of this process.
        :param uid: The target UID for the daemon process.
        :param gid: The target GID for the daemon process.
        :return: ``None``.
        :raise DaemonOSEnvironmentError: If either ID cannot be set.
        Set the GID then the UID of the process (in that order, to avoid
        permission errors) to the specified `gid` and `uid` values.
        Requires appropriate OS privileges for this process.
        """
    try:
        # Drop group privileges before user privileges; the reverse order
        # could fail once elevated user privileges are already gone.
        os.setgid(gid)
        os.setuid(uid)
    except Exception as exc:
        message = "Unable to change process owner ({exc})".format(exc=exc)
        raise DaemonOSEnvironmentError(message)
def prevent_core_dump():
    """ Prevent this process from generating a core dump.
        :return: ``None``.
        :raise DaemonOSEnvironmentError: If RLIMIT_CORE is unsupported.
        Set the soft and hard limits for core dump size to zero. On Unix,
        this entirely prevents the process from creating core dump.
        """
    core_resource = resource.RLIMIT_CORE
    try:
        # Probe for platform support by requesting the current value;
        # the result itself is not needed.
        resource.getrlimit(core_resource)
    except ValueError as exc:
        raise DaemonOSEnvironmentError(
                "System does not support RLIMIT_CORE resource limit"
                " ({exc})".format(exc=exc))
    # Hard and soft limits of zero: no core dump at all.
    resource.setrlimit(core_resource, (0, 0))
def detach_process_context():
    """ Detach the process context from parent and session.
        :return: ``None``.
        :raise DaemonProcessDetachError: If either fork fails.
        Detach from the parent process and session group, allowing the
        parent to exit while this process continues running.
        Reference: "Advanced Programming in the Unix Environment",
        section 13.3, by W. Richard Stevens, published 1993 by
        Addison-Wesley.
        """
    def _fork_and_exit_parent(error_message):
        """ Fork once; exit in the parent, continue in the child.
            :param error_message: Text for the exception on fork failure.
            :raise DaemonProcessDetachError: If the fork fails.
            """
        try:
            child_pid = os.fork()
        except OSError as exc:
            raise DaemonProcessDetachError(
                    "{message}: [{exc.errno:d}] {exc.strerror}".format(
                        message=error_message, exc=exc))
        if child_pid > 0:
            # Parent half: terminate immediately, skipping exit handlers.
            os._exit(0)
    # Double-fork with an intervening setsid(): the survivor is a session
    # leader's child, so it can never re-acquire a controlling terminal.
    _fork_and_exit_parent(error_message="Failed first fork")
    os.setsid()
    _fork_and_exit_parent(error_message="Failed second fork")
def is_process_started_by_init():
    """ Determine whether the current process is started by `init`.
        :return: ``True`` iff the parent process is `init`; otherwise
            ``False``.
        The `init` process is the one with process ID of 1.
        """
    init_pid = 1
    return os.getppid() == init_pid
def is_socket(fd):
""" Determine whether the file descriptor is a socket.
:param fd: The file descriptor to interrogate.
:return: ``True`` iff the file descriptor is a socket; otherwise
``False``.
Query the socket type of `fd`. If there is no error, the file is a
socket.
"""
result = False
file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
try:
socket_type = file_socket.getsockopt(
socket.SOL_SOCKET, socket.SO_TYPE)
except socket.error as exc:
exc_errno = exc.args[0]
if exc_errno == errno.ENOTSOCK:
# Socket operation on non-socket.
pass
else:
# Some other socket error.
result = True
else:
# No error getting socket type.
result = True
return result
def is_process_started_by_superserver():
    """ Determine whether the current process is started by the superserver.
        :return: ``True`` if this process was started by the internet
            superserver; otherwise ``False``.
        The internet superserver creates a network socket, and
        attaches it to the standard streams of the child process. If
        that is the case for this process, return ``True``, otherwise
        ``False``.
        """
    stdin_fd = sys.__stdin__.fileno()
    return is_socket(stdin_fd)
def is_detach_process_context_required():
    """ Determine whether detaching the process context is required.
        :return: ``False`` iff the process is already detached; otherwise
            ``True``.
        The process environment is interrogated for the following:
        * Process was started by `init`; or
        * Process was started by `inetd`.
        If any of the above are true, the process is deemed to be already
        detached.
        """
    already_detached = (
            is_process_started_by_init()
            or is_process_started_by_superserver())
    return not already_detached
def close_file_descriptor_if_open(fd):
    """ Close a file descriptor if already open.
        :param fd: The file descriptor to close.
        :return: ``None``.
        :raise DaemonOSEnvironmentError: If the close fails for any reason
            other than the descriptor not being open.
        Close the file descriptor `fd`, suppressing an error in the
        case the file was not open.
        """
    try:
        os.close(fd)
    except EnvironmentError as exc:
        if exc.errno != errno.EBADF:
            raise DaemonOSEnvironmentError(
                    "Failed to close file descriptor {fd:d} ({exc})".format(
                        fd=fd, exc=exc))
        # EBADF: the descriptor was not open; nothing to do.
# Fallback ceiling used when the OS reports an unlimited descriptor count.
MAXFD = 2048
def get_maximum_file_descriptors():
    """ Get the maximum number of open file descriptors for this process.
        :return: The number (integer) to use as the maximum number of open
            files for this process.
        The maximum is the process hard resource limit of maximum number of
        open file descriptors. If the limit is “infinity”, a default value
        of ``MAXFD`` is returned.
        """
    (_soft_limit, hard_limit) = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard_limit == resource.RLIM_INFINITY:
        return MAXFD
    return hard_limit
def close_all_open_files(exclude=frozenset()):
    """ Close all open file descriptors.
        :param exclude: Collection of file descriptors to skip when closing
            files.
        :return: ``None``.
        Closes every file descriptor (if open) of this process. If
        specified, `exclude` is a set of file descriptors to *not*
        close.
        """
    # An immutable frozenset default avoids the shared-mutable-default
    # pitfall; membership tests behave identically for callers passing
    # any set-like collection.
    maxfd = get_maximum_file_descriptors()
    for fd in reversed(range(maxfd)):
        if fd not in exclude:
            close_file_descriptor_if_open(fd)
def redirect_stream(system_stream, target_stream):
    """ Redirect a system stream to a specified file.
        :param system_stream: A file object representing a standard I/O
            stream.
        :param target_stream: The target file object for the redirected
            stream, or ``None`` to specify the null device.
        :return: ``None``.
        `system_stream` is a standard system stream such as
        ``sys.stdout``. `target_stream` is an open file object that
        should replace the corresponding system stream object.
        If `target_stream` is ``None``, defaults to opening the
        operating system's null device and using its file descriptor.
        """
    target_fd = (
            os.open(os.devnull, os.O_RDWR) if target_stream is None
            else target_stream.fileno())
    # Duplicate the target descriptor onto the system stream's slot.
    os.dup2(target_fd, system_stream.fileno())
def make_default_signal_map():
    """ Make the default signal map for this system.
        :return: A mapping from signal number to handler object.
        The signals available differ by system. The map will not contain
        any signals not defined on the running system.
        """
    candidate_signals = {
        'SIGTSTP': None,
        'SIGTTIN': None,
        'SIGTTOU': None,
        'SIGTERM': 'terminate',
        }
    signal_map = {}
    for (name, target) in candidate_signals.items():
        # Include only signals this platform actually defines.
        if hasattr(signal, name):
            signal_map[getattr(signal, name)] = target
    return signal_map
def set_signal_handlers(signal_handler_map):
    """ Set the signal handlers as specified.
        :param signal_handler_map: A map from signal number to handler
            object.
        :return: ``None``.
        See the `signal` module for details on signal numbers and signal
        handlers.
        """
    for signal_number in signal_handler_map:
        signal.signal(signal_number, signal_handler_map[signal_number])
def register_atexit_function(func):
    """ Register a function for processing at program exit.
        :param func: A callable function expecting no arguments.
        :return: ``None``.
        The function `func` is registered for a call with no arguments
        at program exit.
        """
    # Thin wrapper kept as a seam so the exit-registration mechanism can
    # be swapped or stubbed in one place.
    atexit.register(func)
def _chain_exception_from_existing_exception_context(exc, as_cause=False):
""" Decorate the specified exception with the existing exception context.
:param exc: The exception instance to decorate.
:param as_cause: If true, the existing context is declared to be
the cause of the exception.
:return: ``None``.
:PEP:`344` describes syntax and attributes (`__traceback__`,
`__context__`, `__cause__`) for use in exception chaining.
Python 2 does not have that syntax, so this function decorates
the exception with values from the current exception context.
"""
(existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
if as_cause:
exc.__cause__ = existing_exc
else:
exc.__context__ = existing_exc
exc.__traceback__ = existing_traceback
class PidFile(object):
    """
    Adapter between a file path string and the `lockfile` API [0]. Based example
    found at [1].
    [0]: https://pythonhosted.org/lockfile/lockfile.html
    [1]: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
    """
    def __init__(self, path, enter_err_msg=None):
        """ Set up the adapter.
        :param path: Filesystem path of the PID file to manage.
        :param enter_err_msg: Message passed to `sys.exit` when the lock
            cannot be acquired on entry.
        """
        self.path = path
        self.enter_err_msg = enter_err_msg
        self.pidfile = None
    def __enter__(self):
        """ Open and exclusively lock the PID file, then record our PID.
        :return: The open PID file object.
        Exits the process (via `sys.exit`) with `enter_err_msg` when
        another process already holds the flock on the file.
        """
        self.pidfile = open(self.path, 'a+')
        try:
            fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            sys.exit(self.enter_err_msg)
        # Replace any stale content with the current PID.
        self.pidfile.seek(0)
        self.pidfile.truncate()
        self.pidfile.write(str(os.getpid()))
        self.pidfile.flush()
        self.pidfile.seek(0)
        return self.pidfile
    def __exit__(self, exc_type, exc_value, exc_tb):
        """ Close and delete the PID file, releasing the lock. """
        try:
            self.pidfile.close()
        except IOError as err:
            # Tolerate an already-closed descriptor; use the named errno
            # constant instead of the magic number 9.
            if err.errno != errno.EBADF:
                raise
        os.remove(self.path)
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# User Role/Group Per Company
# Copyright 2014 wangbuke <wangbuke@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
def get_company(cr, uid):
    """Return the company_id of the given user, or False if no such user.
    :param cr: database cursor
    :param uid: id of the res_users row to look up
    """
    cr.execute("SELECT company_id FROM res_users WHERE id=%s", (uid,))
    row = cr.fetchone()
    return row[0] if row else False
def set(self, cr, model, id, name, values, user=None, context=None):
    """ Replacement for `fields.many2many.set` that scopes membership
    writes on the `res_groups_users_rel` table to the current user's
    company (adds/filters a `company_id` column on the relation rows).
    :param cr: Database cursor.
    :param model: The model owning the many2many field.
    :param id: Record id on the "self" side of the relation.
    :param name: Field name (not used by this implementation; kept for
        API compatibility with the original `many2many.set`).
    :param values: List of many2many command tuples
        (0=create, 1=write, 2=delete, 3=unlink, 4=link,
        5=unlink all, 6=replace).
    :param user: Optional user id passed through to ORM calls.
    :param context: Optional context dict.
    """
    if not context:
        context = {}
    if not values:
        return
    rel, id1, id2 = self._sql_names(model)
    obj = model.pool[self._obj]
    for act in values:
        # Skip anything that is not a non-empty command tuple/list.
        if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
            continue
        if act[0] == 0:
            # CREATE: make the related record, then link it.
            idnew = obj.create(cr, user, act[2], context=context)
            # NOTE(review): `idnew` (the freshly created related record's
            # id) is passed to get_company() as if it were a res_users id;
            # this is only meaningful when the related model is res.users —
            # confirm for the gid-side of the relation.
            company_id = get_company(cr, idnew) if rel == 'res_groups_users_rel' else False
            if rel == 'res_groups_users_rel' and company_id:
                cr.execute('insert into '+rel+' ('+id1+','+id2+', company_id) values (%s,%s,%s)', (id, idnew, company_id))
            else:
                cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
        elif act[0] == 1:
            # WRITE: update the related record in place; no relation change.
            obj.write(cr, user, [act[1]], act[2], context=context)
        elif act[0] == 2:
            # DELETE: remove the related record entirely (ORM cascades the
            # relation row).
            obj.unlink(cr, user, [act[1]], context=context)
        elif act[0] == 3:
            # UNLINK: drop only the relation row; restrict the delete to
            # the user's company when this is group membership.
            company_id = get_company(cr, id) if rel == 'res_groups_users_rel' and id1 == 'uid' else False
            company_id = get_company(cr, act[1]) if rel == 'res_groups_users_rel' and id1 == 'gid' else company_id or False
            if rel == 'res_groups_users_rel' and company_id:
                cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s and company_id=%s', (id, act[1], company_id))
            else:
                cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
        elif act[0] == 4:
            # LINK: insert a relation row only if not already present.
            # following queries are in the same transaction - so should be relatively safe
            company_id = get_company(cr, id) if rel == 'res_groups_users_rel' and id1 == 'uid' else False
            company_id = get_company(cr, act[1]) if rel == 'res_groups_users_rel' and id1 == 'gid' else company_id or False
            if rel == 'res_groups_users_rel' and company_id:
                cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s and company_id=%s', (id, act[1], company_id))
                if not cr.fetchone():
                    cr.execute('insert into '+rel+' ('+id1+','+id2+', company_id) values (%s,%s,%s)', (id, act[1], company_id))
            else:
                cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
                if not cr.fetchone():
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
        elif act[0] == 5:
            # UNLINK ALL: clear every relation row for this record,
            # company-scoped when this is group membership.
            company_id = get_company(cr, id) if rel == 'res_groups_users_rel' and id1 == 'uid' else False
            company_id = get_company(cr, act[1]) if rel == 'res_groups_users_rel' and id1 == 'gid' else company_id or False
            if rel == 'res_groups_users_rel' and company_id:
                cr.execute('delete from '+rel+' where ' + id1 + ' = %s and company_id=%s', (id, company_id))
            else:
                cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
        elif act[0] == 6:
            # REPLACE: delete the relation rows this user may see (record
            # rules applied via ir.rule domain), then link every id given
            # in act[2].
            d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
            if d1:
                d1 = ' and ' + ' and '.join(d1)
            else:
                d1 = ''
            cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
            for act_nbr in act[2]:
                company_id = get_company(cr, id) if rel == 'res_groups_users_rel' and id1 == 'uid' else False
                company_id = get_company(cr, act_nbr) if rel == 'res_groups_users_rel' and id1 == 'gid' else company_id or False
                if rel == 'res_groups_users_rel' and company_id:
                    cr.execute('insert into '+rel+' ('+id1+','+id2+', company_id) values (%s, %s, %s)', (id, act_nbr, company_id))
                else:
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
# Monkey-patch: globally replace the many2many write handler with the
# company-aware `set` above. NOTE(review): this affects EVERY many2many
# field in the registry, not only user/group relations — the non-group
# branches reproduce the stock behaviour, but verify against the OpenERP
# version in use.
openerp.osv.fields.many2many.set = set
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Contains the implementation for the DirectoryWatcher class."""
import os
from tensorflow.python.platform import gfile
from tensorflow.python.platform import logging
class DirectoryWatcher(object):
  """A DirectoryWatcher wraps a loader to load from a directory.

  A loader reads a file on disk and produces some kind of values as an
  iterator. A DirectoryWatcher takes a directory with one file at a time being
  written to and a factory for loaders and watches all the files at once.

  This class is *only* valid under the assumption that files are never removed
  and the only file ever changed is whichever one is lexicographically last.
  """

  def __init__(self, directory, loader_factory, path_filter=lambda x: True):
    """Constructs a new DirectoryWatcher.

    Args:
      directory: The directory to watch. The directory doesn't have to exist.
      loader_factory: A factory for creating loaders. The factory should take a
        file path and return an object that has a Load method returning an
        iterator that will yield all events that have not been yielded yet.
      path_filter: Only files whose full path matches this predicate will be
        loaded. If not specified, all files are loaded.

    Raises:
      ValueError: If directory or loader_factory is None.
    """
    if directory is None:
      raise ValueError('A directory is required')
    if loader_factory is None:
      raise ValueError('A loader factory is required')
    self._directory = directory
    self._loader_factory = loader_factory
    self._loader = None   # Loader for the file currently being read.
    self._path = None     # Path of the file currently being read.
    self._path_filter = path_filter

  def Load(self):
    """Loads new values from disk.

    The watcher will load from one file at a time; as soon as that file stops
    yielding events, it will move on to the next file. We assume that old files
    are never modified after a newer file has been written. As a result, Load()
    can be called multiple times in a row without losing events that have not
    been yielded yet. In other words, we guarantee that every event will be
    yielded exactly once.

    Yields:
      All values that were written to disk that have not been yielded yet.
    """
    # If no loader exists yet, try to create one for the first file.
    if not self._loader:
      # BUG FIX: the original raised StopIteration from _InitializeLoader to
      # terminate this generator. Under PEP 479 (Python 3.7+), a StopIteration
      # escaping a generator frame becomes a RuntimeError, so we use a boolean
      # return instead.
      if not self._InitializeLoader():
        return
    while True:
      # Yield all the new events in the file we're currently loading from.
      for event in self._loader.Load():
        yield event

      next_path = self._GetNextPath()
      if not next_path:
        logging.info('No more files in %s', self._directory)
        # Current file is empty and there are no new files, so we're done.
        return

      # There's a new file, so check to make sure there weren't any events
      # written between when we finished reading the current file and when we
      # checked for the new one. The sequence of events might look something
      # like this:
      #
      # 1. Event #1 written to file #1.
      # 2. We check for events and yield event #1 from file #1
      # 3. We check for events and see that there are no more events in file #1.
      # 4. Event #2 is written to file #1.
      # 5. Event #3 is written to file #2.
      # 6. We check for a new file and see that file #2 exists.
      #
      # Without this loop, we would miss event #2. We're also guaranteed by the
      # loader contract that no more events will be written to file #1 after
      # events start being written to file #2, so we don't have to worry about
      # that.
      for event in self._loader.Load():
        yield event

      logging.info('Directory watcher for %s advancing to file %s',
                   self._directory, next_path)

      # Advance to the next file and start over.
      self._SetPath(next_path)

  def _InitializeLoader(self):
    """Points the watcher at the first available file, if any.

    Returns:
      True if a file was found and a loader created, False otherwise.
    """
    path = self._GetNextPath()
    if path:
      self._SetPath(path)
      return True
    return False

  def _SetPath(self, path):
    """Records *path* as the current file and builds a loader for it."""
    self._path = path
    self._loader = self._loader_factory(path)

  def _GetNextPath(self):
    """Returns the path of the next file to use or None if no file exists."""
    sorted_paths = [os.path.join(self._directory, path)
                    for path in sorted(gfile.ListDirectory(self._directory))]
    # We filter here so the filter gets the full directory name.
    # BUG FIX: guard the ordering comparison — self._path starts as None, and
    # `path > None` raises TypeError on Python 3.
    filtered_paths = (path for path in sorted_paths
                      if self._path_filter(path)
                      and (self._path is None or path > self._path))
    return next(filtered_paths, None)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Criteria or rubric based evaluators.
These evaluators are useful for evaluating the
output of a language model or chain against
specified criteria or rubric.
Classes
-------
CriteriaEvalChain : Evaluates the output of a language model or
chain against specified criteria.
Examples:
--------
Using a predefined criterion:
>>> from langchain_openai import OpenAI
>>> from langchain_classic.evaluation.criteria import CriteriaEvalChain
>>> model = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=model, criteria=criteria)
>>> chain.evaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
Using a custom criterion:
>>> from langchain_openai import OpenAI
>>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain
>>> model = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=model,
criteria=criteria,
)
>>> chain.evaluate_strings(
prediction="The answer to life is 42.",
reference="It's commonly known that the answer to life is 42.",
input="Please summarize the following: The answer to life, the universe, and everything is unknowable.",
)
""" # noqa: E501
from langchain_classic.evaluation.criteria.eval_chain import (
Criteria,
CriteriaEvalChain,
LabeledCriteriaEvalChain,
)
__all__ = ["Criteria", "CriteriaEvalChain", "LabeledCriteriaEvalChain"]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/evaluation/criteria/__init__.py
|
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package transit
import (
"context"
"testing"
"github.com/hashicorp/vault/sdk/logical"
)
// BenchmarkTransit_BatchEncryption1 measures batch encryption with 1 item per request.
func BenchmarkTransit_BatchEncryption1(b *testing.B) {
	BTransit_BatchEncryption(b, 1)
}
// BenchmarkTransit_BatchEncryption10 measures batch encryption with 10 items per request.
func BenchmarkTransit_BatchEncryption10(b *testing.B) {
	BTransit_BatchEncryption(b, 10)
}
// BenchmarkTransit_BatchEncryption50 measures batch encryption with 50 items per request.
func BenchmarkTransit_BatchEncryption50(b *testing.B) {
	BTransit_BatchEncryption(b, 50)
}
// BenchmarkTransit_BatchEncryption100 measures batch encryption with 100 items per request.
func BenchmarkTransit_BatchEncryption100(b *testing.B) {
	BTransit_BatchEncryption(b, 100)
}
// BenchmarkTransit_BatchEncryption1000 measures batch encryption with 1,000 items per request.
func BenchmarkTransit_BatchEncryption1000(b *testing.B) {
	BTransit_BatchEncryption(b, 1_000)
}
// BenchmarkTransit_BatchEncryption10000 measures batch encryption with 10,000 items per request.
func BenchmarkTransit_BatchEncryption10000(b *testing.B) {
	BTransit_BatchEncryption(b, 10_000)
}
// BTransit_BatchEncryption is the shared benchmark body: it builds a single
// batch-encryption request containing bsize plaintext items, then times b.N
// round-trips of that request through the transit backend.
func BTransit_BatchEncryption(b *testing.B, bsize int) {
	// Exclude backend construction and payload assembly from the timing.
	b.StopTimer()
	var resp *logical.Response
	var err error

	backend, s := createBackendWithStorage(b)

	// One batch_input entry per item; every item reuses the same base64
	// plaintext so only batch size varies across benchmarks.
	batchEncryptionInput := make([]interface{}, 0, bsize)
	for i := 0; i < bsize; i++ {
		batchEncryptionInput = append(
			batchEncryptionInput,
			map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
		)
	}
	batchEncryptionData := map[string]interface{}{
		"batch_input": batchEncryptionInput,
	}

	// CreateOperation on encrypt/<name> upserts the key on first use, so no
	// separate key-creation request is needed.
	batchEncryptionReq := &logical.Request{
		Operation: logical.CreateOperation,
		Path:      "encrypt/upserted_key",
		Storage:   s,
		Data:      batchEncryptionData,
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		resp, err = backend.HandleRequest(context.Background(), batchEncryptionReq)
		if err != nil || (resp != nil && resp.IsError()) {
			b.Fatalf("err:%v resp:%#v", err, resp)
		}
	}
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
builtin/logical/transit/path_encrypt_bench_test.go
|
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import DocArrayInMemorySearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated name importable from this module to the package that
# now provides it.
DEPRECATED_LOOKUP = {"DocArrayInMemorySearch": "langchain_community.vectorstores"}

# Importer that resolves names via DEPRECATED_LOOKUP, emitting a deprecation
# warning when a relocated attribute is accessed.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "DocArrayInMemorySearch",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/vectorstores/docarray/in_memory.py
|
import {ChangeDetectorRef, Component, inject} from '@angular/core';
import {HousingLocation} from '../housing-location/housing-location';
import {HousingLocationInfo} from '../housinglocation';
import {HousingService} from '../housing.service';
// Home page: shows all housing locations and lets the user filter them by city.
@Component({
  selector: 'app-home',
  imports: [HousingLocation],
  template: `
    <section>
      <form>
        <input type="text" placeholder="Filter by city" #filter />
        <button class="primary" type="button" (click)="filterResults(filter.value)">Search</button>
      </form>
    </section>
    <section class="results">
      @for (housingLocation of filteredLocationList; track $index) {
        <app-housing-location [housingLocation]="housingLocation" />
      }
    </section>
  `,
  styleUrls: ['./home.css'],
})
export class Home {
  // NOTE(review): injected but never used in this class — presumably needed by
  // a later tutorial step; confirm before removing.
  private readonly changeDetectorRef = inject(ChangeDetectorRef);

  // Full list fetched from the service in the constructor.
  housingLocationList: HousingLocationInfo[] = [];
  housingService: HousingService = inject(HousingService);
  // Subset currently rendered; starts out as the full list.
  filteredLocationList: HousingLocationInfo[] = [];

  constructor() {
    this.housingLocationList = this.housingService.getAllHousingLocations();
    this.filteredLocationList = this.housingLocationList;
  }

  // Case-insensitive substring match on city; empty text restores the full list.
  filterResults(text: string) {
    if (!text) {
      this.filteredLocationList = this.housingLocationList;
      return;
    }
    this.filteredLocationList = this.housingLocationList.filter((housingLocation) =>
      housingLocation?.city.toLowerCase().includes(text.toLowerCase()),
    );
  }
}
|
typescript
|
github
|
https://github.com/angular/angular
|
adev/src/content/tutorials/first-app/steps/14-http/src/app/home/home.ts
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Nghia Tran
"""
Generate csv for predicting steering angles.
Usage:
usage: submission.py [-h] [--limit LIMIT] [--save SAVE] logdir test_folder
Create submission for Udacity.
positional arguments:
logdir Path to logdir.
test_folder Path to test folder.
optional arguments:
-h, --help show this help message and exit
--limit LIMIT, -l LIMIT
Number of files.
--save SAVE, -s SAVE Save file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import argparse
import os
import sys
import logging
from inputs.udacity_input import load_image
def load(logdir):
    """Rebuild the TensorVision model stored in *logdir* for inference.

    Args:
        logdir: directory containing the trained model's hypes and weights.

    Returns:
        (image_pl, prediction, sess, hypes): the input image placeholder, the
        prediction op dict from the model's decoder, a live tf.Session with
        the trained weights restored, and the loaded hypes dict.
    """
    # TensorFlow / TensorVision imported lazily so argparse --help works
    # without a TF install.
    import tensorflow as tf
    import tensorvision.utils as tv_utils
    import tensorvision.core as core

    tv_utils.set_gpus_to_use()

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')
    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    with tf.Graph().as_default():
        # Create placeholder for input; a single image, batched below.
        image_pl = tf.placeholder(tf.float32, shape=(hypes["image_height"], hypes["image_width"], 3))
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    return image_pl, prediction, sess, hypes
def main():
    """Run the trained model over every test image and write a submission CSV.

    Parses CLI args (logdir, test_folder, --limit, --save), restores the model
    via load(), predicts a steering angle per image, and writes
    `frame_id,steering_angle` rows to the save file.
    """
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.INFO,
                        stream=sys.stdout)

    parser = argparse.ArgumentParser(description='Create submission for Udacity.')
    parser.add_argument('logdir', type=str, help='Path to logdir.')
    parser.add_argument('test_folder', type=str, help='Path to test folder.')
    parser.add_argument('--limit', '-l', type=int, default=-1, help='Number of files.')
    parser.add_argument('--save', '-s', type=str, default='submission.csv', help='Save file.')
    args = parser.parse_args()

    image_pl, prediction, sess, hypes = load(args.logdir)

    save_file = args.save
    files = sorted(os.listdir(args.test_folder))
    # BUG FIX: the original sliced unconditionally with `[:args.limit]`, so the
    # default limit of -1 silently dropped the LAST image. A negative limit now
    # means "no limit".
    if args.limit >= 0:
        files = files[:args.limit]
    if not files:
        logging.warning('No image found at path %s' % args.test_folder)
        sys.exit(1)

    start = time.time()
    with open(save_file, 'w') as f:
        f.write('frame_id,steering_angle\n')
        for i, file in enumerate(files):
            # (typo fix: was "Processubg")
            sys.stdout.write('\r>> Processing %d/%d images' % (i + 1, len(files)))
            sys.stdout.flush()

            filepath = os.path.join(args.test_folder, file)
            img = load_image(path=filepath, hypes=hypes)
            feed = {image_pl: img}
            output = prediction['output']
            pred = sess.run(output,
                            feed_dict=feed)
            # First (and only) batch element, first output value.
            pred = pred[0][0]
            frame_id = os.path.splitext(file)[0]
            f.write('%s,%f\n' % (frame_id, pred))

    time_taken = time.time() - start
    # (message fix: this script writes a CSV, not a video)
    logging.info('Submission saved as %s' % save_file)
    logging.info('Number of images: %d' % len(files))
    logging.info('Time takes: %.2f s' % (time_taken))
    logging.info('Frequency: %.2f fps' % (len(files) / time_taken))
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_PROTO_DESCRIPTORS_H_
#define TENSORFLOW_CORE_UTIL_PROTO_DESCRIPTORS_H_
#include <memory>
#include <string>
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
namespace tsl {
class Env;
} // namespace tsl
namespace tensorflow {
using tsl::Env;
// Gets a `DescriptorPool` object from the `descriptor_source`. This may be:
//
// 1) An empty string or "local://", in which case the local descriptor pool
// created for proto definitions linked to the binary is returned.
//
// 2) A file path, in which case the descriptor pool is created from the
// contents of the file, which is expected to contain a `FileDescriptorSet`
// serialized as a string. The descriptor pool ownership is transferred to the
// caller via `owned_desc_pool`.
//
// 3) A "bytes://<bytes>", in which case the descriptor pool is created from
// `<bytes>`, which is expected to be a `FileDescriptorSet` serialized as a
// string. The descriptor pool ownership is transferred to the caller via
// `owned_desc_pool`.
//
// Custom schemas can be supported by registering a handler with the
// `DescriptorPoolRegistry`.
absl::Status GetDescriptorPool(
Env* env, const std::string& descriptor_source,
protobuf::DescriptorPool const** desc_pool,
std::unique_ptr<protobuf::DescriptorPool>* owned_desc_pool);
} // namespace tensorflow
#endif // TENSORFLOW_CORE_UTIL_PROTO_DESCRIPTORS_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/util/proto/descriptors.h
|
# -*- coding: utf-8 -*-
# @Author: ZwEin
# @Date: 2016-06-20 10:55:39
# @Last Modified by: ZwEin
# @Last Modified time: 2016-06-29 18:38:53
"""
spark-submit \
--conf "spark.yarn.executor.memoryOverhead=8192" \
--conf "spark.rdd.compress=true" \
--conf "spark.shuffle.compress=true" \
--driver-memory 6g \
--executor-memory 6g --executor-cores 4 --num-executors 20 \
--py-files /Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/spark_dependencies/python_main.zip,/Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/spark_dependencies/python_lib.zip \
/Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/spark_dependencies/spark_workflow.py \
--input_file /Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/tests/data/memex_data \
--output_dir /Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/tests/data/spark_output \
--seed_file /Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/spark_dependencies/seeds \
--labelled_data /Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/spark_dependencies/labelled_data
--files_dir /Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/spark_dependencies/python_files
"""
import json
import sys
import os
import argparse
from pyspark import SparkContext, SparkConf, SparkFiles
from digSparkUtil.fileUtil import FileUtil
# sys.path.insert(1, os.path.join(os.path.abspath(__file__), 'wedc', 'domain', 'vendor', 'en'))
# sys.path.append(os.path.join(os.path.dirname(__file__), 'wedc', 'domain', 'vendor'))
def load_jsonlines(sc, input, file_format='sequence', data_type='json', separator='\t'):
    """Load *input* into an RDD via FileUtil (tab-separated JSON sequence files by default)."""
    util = FileUtil(sc)
    return util.load_file(input, file_format=file_format, data_type=data_type, separator=separator)
def save_jsonlines(sc, rdd, output_dir, file_format='sequence', data_type='json', separator='\t'):
    """Persist *rdd* to *output_dir* via FileUtil (tab-separated JSON sequence files by default)."""
    FileUtil(sc).save_file(rdd, output_dir, file_format=file_format, data_type=data_type, separator=separator)
def load_labelled_data_file(path):
    """Read a tab-separated file of labelled examples.

    Args:
        path: path to a TSV file with at least two columns per line.

    Returns:
        A list of [column0, column1] pairs, one per non-blank line.
    """
    labelled_data = []
    # BUG FIX: the file was opened in binary mode ('rb') but parsed with str
    # operations, which fails on Python 3. Open in text mode and stream the
    # file instead of materializing it with readlines().
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines (original raised IndexError)
            parts = line.split('\t')
            labelled_data.append([parts[0], parts[1]])
    return labelled_data
def load_seed_file(path):
    """Read a tab-separated seed file into a dict.

    Args:
        path: path to a TSV file with at least two columns per line.

    Returns:
        A dict mapping column0 -> column1. The first occurrence of a key wins
        (setdefault), matching the original behavior.
    """
    seeds = {}
    # BUG FIX: open in text mode instead of 'rb' so str splitting works on
    # Python 3; iterate the file lazily rather than calling readlines().
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            parts = line.split('\t')
            seeds.setdefault(parts[0], parts[1])
    return seeds
def extract_content(raw):
    """Normalize a JSON field that may be a string, a list of strings, or falsy.

    Args:
        raw: None/''/falsy, a single string, or an iterable of strings.

    Returns:
        '' for falsy input, the string itself for a string, otherwise the
        space-joined elements.
    """
    if not raw:
        return ''
    # BUG FIX: the original tested `isinstance(raw, basestring)`, which is a
    # NameError on Python 3; `str` covers the text case there.
    if isinstance(raw, str):
        return raw
    return ' '.join(raw)
def run(sc, input_file, output_dir, seed_file, labelled_data_file):
    """Run the full WEDC pipeline on Spark.

    Stages: load posts -> flatten text fields -> clean -> vectorize against
    seeds -> label-propagate per partition -> join predictions with content.

    Side effects: writes the joined (prediction, content) RDD to output_dir.
    """
    # Broadcast read-only lookup data once so every executor shares a copy.
    seeds = load_seed_file(seed_file)
    broadcast_seeds = sc.broadcast(seeds)

    labelled_data = load_labelled_data_file(labelled_data_file)
    broadcast_labelled_data = sc.broadcast(labelled_data)

    def map_load_data(data):
        # Flatten the 'description' and 'name' fields of a record into one
        # text blob keyed by the record id.
        key, json_obj = data
        text_list = []
        if 'description' in json_obj:
            desc = extract_content(json_obj['description'])
            text_list.append(desc)
        if 'name' in json_obj:
            name = extract_content(json_obj['name'])
            text_list.append(name)
        return (str(key), ' '.join(text_list))

    def map_clean(data):
        # wedc imports live inside the closure so workers resolve them locally.
        from wedc.domain.core.data.loader import generate_extraction
        key, text = data
        return (key, generate_extraction(text))

    def map_vectorize(data):
        from wedc.domain.core.data.seed.seed_vector import generate_vector
        key, tokens = data
        seeds = broadcast_seeds.value
        return (key, generate_vector(tokens, seeds))

    def map_labelprop(iterator):
        # Partition-level map: label propagation needs the whole partition's
        # vectors at once, hence mapPartitions below.
        from wedc.domain.core.ml.classifier.label_propagation import labelprop
        labelled_data = broadcast_labelled_data.value
        ans = labelprop.run(list(iterator), labelled_data)
        for k, v in ans.items():
            yield (k, v)

    # for file_path in os.listdir(files_dir):
    #     if file_path[0] != '.':
    #         sc.addFile(os.path.join(files_dir, file_path))
    # print os.listdir(SparkFiles.getRootDirectory())
    # print os.listdir(os.path.join(SparkFiles.getRootDirectory(), 'python_files.zip'))
    # if os.path.isfile(SparkFiles.get(os.path.join('python_files.zip', 'en', 'lexnames'))):
    #     print 'exist'

    rdd_original = load_jsonlines(sc, input_file)
    rdd_content = rdd_original.map(map_load_data)
    rdd_extraction = rdd_content.map(map_clean)
    rdd_vector = rdd_extraction.map(map_vectorize)

    # rdd = sc.textFile(input_file)
    rdd_prediction = rdd_vector.mapPartitions(map_labelprop)

    # ans = rdd.collect()
    # Pair each key's predicted label with its original content text.
    rdd = rdd_prediction.join(rdd_content)
    rdd.saveAsTextFile(output_dir)
    # save_jsonlines(sc, rdd, output_dir, file_format='sequence', data_type='json')
# save_jsonlines(sc, rdd, output_dir, file_format='sequence', data_type='json')
if __name__ == '__main__':
    # CLI entry point: parse arguments, start Spark, and hand off to run().
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-i','--input_file', required=True)
    arg_parser.add_argument('-o','--output_dir')#, required=True)
    arg_parser.add_argument('-s','--seed_file', required=True)
    # NOTE(review): the usage text in the module docstring says --labelled_data,
    # but the actual flag is --labelled_data_file — confirm which is intended.
    arg_parser.add_argument('-l','--labelled_data_file', required=True)
    # arg_parser.add_argument('-f','--files_dir', required=True)
    args = arg_parser.parse_args()

    spark_config = SparkConf().setAppName('WEDC')
    sc = SparkContext(conf=spark_config)

    run(sc, args.input_file, args.output_dir, args.seed_file, args.labelled_data_file)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Copyright 2015 - Gurjant Kalsi <me@gurjantkalsi.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from fabric.api import local, lcd, task
# NOTE: Fill these in.
# Local checkout locations for the LK, SOD, and OpenOCD repositories.
LK_PROJECT_BASE = os.path.expanduser("~/code/lk")
SOD_PROJECT_BASE = os.path.expanduser("~/code/sod")
OPEN_OCD_BASE = os.path.expanduser("~/code/openocd")

# `make PROJECT=...` names for each supported board target.
DARTUINO_BUILD_TARGET = "dartuinoP0-test"
DARTUINO_SOD_BUILD_TARGET = "dartuino-p0-dartino"
DISCO_BUILD_TARGET = "stm32f746g-disco-test"
EVAL_BUILD_TARGET = "stm32746g-eval2-test"
class LKTarget:
    """Describes a buildable/flashable LK target and precomputes its OpenOCD
    flash command.

    Attributes:
        flash_command: full `openocd` invocation that programs the built
            lk.bin at 0x08000000 and resets the board.
        target_project: the `make PROJECT=` name.
        repo_root: checkout root to build from.
    """

    def __init__(self, repo_root, target_project, board_cfg, stlink_cfg, bin_dir):
        # The build drops lk.bin in <repo_root>/<bin_dir>/build-<project>/lk.bin.
        binary = os.path.join(repo_root, bin_dir, "build-" + target_project, "lk.bin")
        # OpenOCD's -c argument is a quoted mini-script: program, reset, exit.
        program = '"%s"' % " ".join(["program", binary, "reset", "exit", "0x08000000"])
        self.flash_command = " ".join(
            ["openocd", "-f", stlink_cfg, "-f", board_cfg, "-c", program])
        self.target_project = target_project
        self.repo_root = repo_root
# Preconfigured targets: LK builds flash from the repo root, SOD builds from "out".
# NOTE(review): every target uses tcl/board/stm32746g_eval.cfg, including the
# disco and dartuino boards — confirm they shouldn't use their own board files.
DiscoLKTarget = LKTarget(LK_PROJECT_BASE, DISCO_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2-1.cfg", "")
DartuinioTarget = LKTarget(LK_PROJECT_BASE, DARTUINO_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2.cfg", "")
EvalLKTarget = LKTarget(LK_PROJECT_BASE, EVAL_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2-1.cfg", "")
DiscoSODTarget = LKTarget(SOD_PROJECT_BASE, DISCO_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2-1.cfg", "out")
DartuinoSODTarget = LKTarget(SOD_PROJECT_BASE, DARTUINO_SOD_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2.cfg", "out")
@task
def disco_do():
    """Build and flash the STM32F746G-Disco LK test image."""
    build(DiscoLKTarget)
    flash(DiscoLKTarget)
@task
def dartuino_do():
    """Build and flash the Dartuino P0 LK test image."""
    build(DartuinioTarget)
    flash(DartuinioTarget)
@task
def eval_do():
    """Build and flash the STM32746G-Eval2 LK test image."""
    build(EvalLKTarget)
    flash(EvalLKTarget)
@task
def sod_do():
    """Build and flash the STM32F746G-Disco SOD image."""
    build(DiscoSODTarget)
    flash(DiscoSODTarget)
@task
def sod_dartuino_do():
    """Build and flash the Dartuino P0 SOD (dartino) image."""
    build(DartuinoSODTarget)
    flash(DartuinoSODTarget)
def flash(target):
    """Flash the target's lk.bin via its precomputed OpenOCD command.

    Runs from the OpenOCD checkout so the relative tcl/ config paths resolve.
    """
    with lcd(OPEN_OCD_BASE):
        local(target.flash_command)
def build(target):
    """Run `make PROJECT=<target_project>` in the target's repository root."""
    make_cmd = "make PROJECT=%s" % target.target_project
    with lcd(target.repo_root):
        local(make_cmd)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the type-hint objects and decorators."""
from __future__ import absolute_import
import functools
import inspect
import unittest
from builtins import next
from builtins import range
import apache_beam.typehints.typehints as typehints
from apache_beam.typehints import Any
from apache_beam.typehints import Tuple
from apache_beam.typehints import TypeCheckError
from apache_beam.typehints import Union
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import _interleave_type_check
from apache_beam.typehints.decorators import _positional_arg_hints
from apache_beam.typehints.decorators import get_type_hints
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.decorators import getfullargspec
from apache_beam.typehints.typehints import is_consistent_with
def check_or_interleave(hint, value, var):
  """Validates *value* against *hint*, returning the (possibly wrapped) value.

  A None hint is a no-op. Iterator hints can't be checked eagerly, so the
  value is wrapped in an element-by-element type-checking iterator instead.
  """
  if hint is None:
    return value
  if isinstance(hint, typehints.IteratorHint.IteratorTypeConstraint):
    checker = _interleave_type_check(hint, var)
    return checker(value)
  _check_instance_type(hint, value, var)
  return value
def check_type_hints(f):
  """Decorator that enforces f's declared type hints at call time.

  Input hints are validated (or, for iterator hints, wrapped with an
  interleaved checker) before invoking f; the simple output hint is
  validated on the result.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    hints = get_type_hints(f)
    if hints.input_types:  # pylint: disable=too-many-nested-blocks
      input_hints = getcallargs_forhints(
          f, *hints.input_types[0], **hints.input_types[1])
      inputs = inspect.getcallargs(f, *args, **kwargs)
      for var, hint in input_hints.items():
        value = inputs[var]
        new_value = check_or_interleave(hint, value, var)
        # Iterator hints return a wrapped value that must replace the
        # original argument (in kwargs or positionally) before calling f.
        if new_value is not value:
          if var in kwargs:
            kwargs[var] = new_value
          else:
            args = list(args)
            for ix, pvar in enumerate(getfullargspec(f).args):
              if pvar == var:
                args[ix] = new_value
                break
            else:
              raise NotImplementedError('Iterable in nested argument %s' % var)
    res = f(*args, **kwargs)
    return check_or_interleave(hints.simple_output_type('typecheck'), res, None)
  return wrapper
class DummyTestClass1(object):
  """Placeholder user-defined type for type-hint compatibility tests."""
  pass
class DummyTestClass2(object):
  """Second placeholder type, distinct from DummyTestClass1."""
  pass
class SuperClass(object):
  """Base class used to test subtype/supertype hint compatibility."""
  pass
class SubClass(SuperClass):
  """Subclass of SuperClass used to test subtype/supertype hint compatibility."""
  pass
class TypeHintTestCase(unittest.TestCase):
  """Base test case adding is_consistent_with-based compatibility asserts."""

  def assertCompatible(self, base, sub):  # pylint: disable=invalid-name
    """Asserts that *sub* is consistent with (usable where *base* is expected)."""
    self.assertTrue(
        is_consistent_with(sub, base),
        '%s is not consistent with %s' % (sub, base))

  def assertNotCompatible(self, base, sub):  # pylint: disable=invalid-name
    """Asserts that *sub* is NOT consistent with *base*."""
    self.assertFalse(
        is_consistent_with(sub, base),
        '%s is consistent with %s' % (sub, base))
class AnyTypeConstraintTestCase(TypeHintTestCase):
  """Tests for typehints.Any, which is compatible with everything,
  in both directions."""

  def test_any_compatibility(self):
    self.assertCompatible(typehints.Any, typehints.List[int])
    self.assertCompatible(typehints.Any, DummyTestClass1)
    self.assertCompatible(typehints.Union[int, bool], typehints.Any)
    self.assertCompatible(typehints.Optional[int], typehints.Any)
    self.assertCompatible(typehints.Tuple[int], typehints.Any)
    self.assertCompatible(typehints.KV[int, str], typehints.Any)
    self.assertCompatible(typehints.Dict[int, bool], typehints.Any)
    self.assertCompatible(typehints.Set[int], typehints.Any)
    self.assertCompatible(typehints.Iterable[int], typehints.Any)
    self.assertCompatible(typehints.Iterator[int], typehints.Any)
    self.assertCompatible(typehints.Generator[int], typehints.Any)
    self.assertCompatible(object, typehints.Any)
    self.assertCompatible(typehints.Any, object)

  def test_repr(self):
    self.assertEqual('Any', repr(typehints.Any))

  def test_type_check(self):
    # This test passes if the type_check call does not raise any exception.
    typehints.Any.type_check(1)
    typehints.Any.type_check([1, 2, 3])
    typehints.Any.type_check(DummyTestClass1())
class UnionHintTestCase(TypeHintTestCase):
  """Tests for Union construction, flattening, compatibility and type_check."""

  def test_getitem_must_be_valid_type_param_cant_be_object_instance(self):
    with self.assertRaises(TypeError) as e:
      typehints.Union[5]
    self.assertEqual('Cannot create Union without a sequence of types.',
                     e.exception.args[0])

  def test_getitem_must_be_valid_type_param(self):
    t = [2, 3]
    with self.assertRaises(TypeError) as e:
      typehints.Union[t]
    self.assertEqual('All parameters to a Union hint must be a non-sequence, '
                     'a type, or a TypeConstraint. 2 is an instance of int.',
                     e.exception.args[0])

  def test_getitem_duplicates_ignored(self):
    # Types should be de-duplicated.
    hint = typehints.Union[int, int, str]
    self.assertEqual(len(hint.union_types), 2)

  def test_getitem_nested_unions_flattened(self):
    # The two Union's should be merged into 1.
    hint = typehints.Union[typehints.Union[int, str],
                           typehints.Union[float, bool]]
    self.assertTrue(len(hint.union_types) == 4)
    self.assertTrue(all(t in hint.union_types for t in (int, str, float, bool)))

  def test_union_hint_compatibility(self):
    self.assertCompatible(typehints.Union[int, float], int)
    self.assertCompatible(typehints.Union[int, str], typehints.Union[str, int])
    self.assertCompatible(typehints.Union[int, float, str],
                          typehints.Union[str, int])
    self.assertCompatible(
        typehints.Union[DummyTestClass1, str],
        typehints.Union[DummyTestClass1, str])
    self.assertCompatible(typehints.Union[int, str],
                          typehints.Union[str, typehints.Union[int, str]])
    self.assertNotCompatible(typehints.Union[float, bool],
                             typehints.Union[int, bool])
    self.assertNotCompatible(typehints.Union[bool, str],
                             typehints.Union[float, bool, int])

  def test_nested_compatibility(self):
    self.assertCompatible(Union[int, Tuple[Any, int]], Tuple[int, int])
    self.assertCompatible(Union[int, Tuple[Any, Any]],
                          Union[Tuple[int, Any], Tuple[Any, int]])
    self.assertCompatible(Union[int, SuperClass], SubClass)
    self.assertCompatible(Union[int, float, SuperClass], Union[int, SubClass])
    self.assertNotCompatible(Union[int, SubClass], SuperClass)
    self.assertNotCompatible(Union[int, float, SubClass],
                             Union[int, SuperClass])
    self.assertNotCompatible(Union[int, SuperClass],
                             Union[int, float, SubClass])
    self.assertCompatible(Tuple[Any, Any],
                          Union[Tuple[str, int], Tuple[str, float]])

  def test_union_hint_repr(self):
    hint = typehints.Union[DummyTestClass1, str]
    self.assertIn(
        str(hint),
        # Uses frozen set internally, so order not guaranteed.
        ['Union[str, DummyTestClass1]',
         'Union[DummyTestClass1, str]']
    )

  def test_union_hint_enforcement_composite_type_in_union(self):
    o = DummyTestClass1()
    hint = typehints.Union[int, DummyTestClass1]
    self.assertIsNone(hint.type_check(4))
    self.assertIsNone(hint.type_check(o))

  def test_union_hint_enforcement_part_of_union(self):
    hint = typehints.Union[int, str]
    self.assertIsNone(hint.type_check(5))
    self.assertIsNone(hint.type_check('test'))

  def test_union_hint_enforcement_not_part_of_union(self):
    hint = typehints.Union[int, float]
    with self.assertRaises(TypeError) as e:
      hint.type_check('test')
    self.assertEqual("Union[float, int] type-constraint violated. Expected an "
                     "instance of one of: ('float', 'int'), received str "
                     "instead.",
                     e.exception.args[0])
class OptionalHintTestCase(TypeHintTestCase):
  """Tests for Optional[X], which is sugar for Union[X, None]."""

  def test_getitem_sequence_not_allowed(self):
    with self.assertRaises(TypeError) as e:
      typehints.Optional[int, str]
    self.assertTrue(e.exception.args[0].startswith(
        'An Option type-hint only accepts a single type parameter.'))

  def test_getitem_proxy_to_union(self):
    # Optional has no constraint type of its own; it builds a Union.
    hint = typehints.Optional[int]
    self.assertTrue(isinstance(hint, typehints.UnionHint.UnionConstraint))
class TupleHintTestCase(TypeHintTestCase):
  """Unit tests for the Tuple type-hint.

  Covers parameterization errors, hint compatibility (including the
  arbitrary-length Tuple[T, ...] form), repr output, and run-time
  type_check behavior for valid and invalid tuple instances.
  """

  def test_getitem_invalid_ellipsis_type_param(self):
    # Ellipsis is only legal as Tuple[A, ...]: exactly one type before it.
    error_msg = ('Ellipsis can only be used to type-hint an arbitrary length '
                 'tuple of containing a single type: Tuple[A, ...].')

    with self.assertRaises(TypeError) as e:
      typehints.Tuple[int, int, ...]
    self.assertEqual(error_msg, e.exception.args[0])

    with self.assertRaises(TypeError) as e:
      typehints.Tuple[...]
    self.assertEqual(error_msg, e.exception.args[0])

  def test_getitem_params_must_be_type_or_constraint(self):
    # Plain values and raw container types are rejected as parameters.
    expected_error_prefix = 'All parameters to a Tuple hint must be'
    with self.assertRaises(TypeError) as e:
      typehints.Tuple[5, [1, 3]]
    self.assertTrue(e.exception.args[0].startswith(expected_error_prefix))

    with self.assertRaises(TypeError) as e:
      typehints.Tuple[list, dict]
    self.assertTrue(e.exception.args[0].startswith(expected_error_prefix))

  def test_compatibility_arbitrary_length(self):
    # A fixed-arity hint does not accept an arbitrary-length one, but the
    # reverse direction holds when element types agree.
    self.assertNotCompatible(
        typehints.Tuple[int, int], typehints.Tuple[int, ...])
    self.assertCompatible(
        typehints.Tuple[int, ...], typehints.Tuple[int, int])
    self.assertCompatible(
        typehints.Tuple[Any, ...], typehints.Tuple[int, float])
    self.assertCompatible(
        typehints.Tuple[SuperClass, ...], typehints.Tuple[SubClass, SuperClass])
    self.assertCompatible(typehints.Iterable[int], typehints.Tuple[int, ...])
    self.assertCompatible(typehints.Iterable[SuperClass],
                          typehints.Tuple[SubClass, ...])

  def test_compatibility(self):
    self.assertCompatible(typehints.Tuple[int, str], typehints.Tuple[int, str])
    self.assertCompatible(typehints.Tuple[int, Any], typehints.Tuple[int, str])
    self.assertCompatible(typehints.Tuple[int, str], typehints.Tuple[int, Any])
    self.assertCompatible(typehints.Tuple[typehints.Union[int, str], bool],
                          typehints.Tuple[typehints.Union[int, str], bool])
    # Union parameter order is irrelevant for compatibility.
    self.assertCompatible(typehints.Tuple[typehints.Union[str, int], int],
                          typehints.Tuple[typehints.Union[int, str], int])
    self.assertCompatible(typehints.Tuple[SuperClass, int],
                          typehints.Tuple[SubClass, int])
    # Differing arity is never compatible.
    self.assertNotCompatible(typehints.Tuple[int, int],
                             typehints.Tuple[int, int, int])

  def test_raw_tuple(self):
    # The bare builtin 'tuple' is compatible with any parameterized Tuple.
    self.assertCompatible(tuple, typehints.Tuple[int])
    self.assertCompatible(tuple, typehints.Tuple[int, float])
    self.assertCompatible(tuple, typehints.Tuple[int, ...])

  def test_repr(self):
    hint = typehints.Tuple[int, str, float]
    self.assertEqual('Tuple[int, str, float]', str(hint))

    hint = typehints.Tuple[DummyTestClass1, DummyTestClass2]
    self.assertEqual('Tuple[DummyTestClass1, DummyTestClass2]',
                     str(hint))

    hint = typehints.Tuple[float, ...]
    self.assertEqual('Tuple[float, ...]', str(hint))

  def test_type_check_must_be_tuple(self):
    # Any non-tuple instance fails the constraint with the same prefix.
    hint = typehints.Tuple[int, str]
    expected_error_prefix = 'Tuple type constraint violated. Valid object'
    invalid_instances = ([1, 2, 3], {4: 'f'}, 9, 'test', None)
    for t in invalid_instances:
      with self.assertRaises(TypeError) as e:
        hint.type_check(t)
      self.assertTrue(e.exception.args[0].startswith(expected_error_prefix))

  def test_type_check_must_have_same_arity(self):
    # A 2-tuple of ints.
    hint = typehints.Tuple[int, int]
    t = (1, 2, 3)
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual('Passed object instance is of the proper type, but '
                     'differs in length from the hinted type. Expected a '
                     'tuple of length 2, received a tuple of length 3.',
                     e.exception.args[0])

  def test_type_check_invalid_simple_types(self):
    hint = typehints.Tuple[str, bool]
    with self.assertRaises(TypeError) as e:
      hint.type_check((4, False))
    self.assertEqual('Tuple[str, bool] hint type-constraint violated. The '
                     'type of element #0 in the passed tuple is incorrect.'
                     ' Expected an instance of type str, instead received '
                     'an instance of type int.',
                     e.exception.args[0])

  def test_type_check_invalid_composite_type(self):
    # Element order matters: the swapped instances fail on element #0.
    hint = typehints.Tuple[DummyTestClass1, DummyTestClass2]
    t = (DummyTestClass2(), DummyTestClass1())
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual('Tuple[DummyTestClass1, DummyTestClass2] hint '
                     'type-constraint violated. The type of element #0 in the '
                     'passed tuple is incorrect. Expected an instance of type '
                     'DummyTestClass1, instead received an instance of type '
                     'DummyTestClass2.',
                     e.exception.args[0])

  def test_type_check_valid_simple_types(self):
    # type_check returns None on success.
    hint = typehints.Tuple[float, bool]
    self.assertIsNone(hint.type_check((4.3, True)))

    hint = typehints.Tuple[int]
    self.assertIsNone(hint.type_check((1,)))

  def test_type_check_valid_composite_types(self):
    hint = typehints.Tuple[typehints.Tuple[int, str],
                           typehints.Tuple[int, bool]]
    self.assertIsNone(hint.type_check(
        ((4, 'test'), (4, True))
    ))

  def test_type_check_valid_simple_type_arbitrary_length(self):
    hint = typehints.Tuple[int, ...]
    t = (1, 2, 3, 4)
    self.assertIsNone(hint.type_check(t))

  def test_type_check_valid_composite_type_arbitrary_length(self):
    hint = typehints.Tuple[typehints.List[str], ...]
    t = (['h', 'e'], ['l', 'l'], ['o'])
    self.assertIsNone(hint.type_check(t))

  def test_type_check_invalid_simple_type_arbitrary_length(self):
    hint = typehints.Tuple[str, ...]
    t = ('t', 'e', 5, 't')
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual('Tuple[str, ...] hint type-constraint violated. The type '
                     'of element #2 in the passed tuple is incorrect. Expected '
                     'an instance of type str, instead received an instance of '
                     'type int.',
                     e.exception.args[0])

  def test_type_check_invalid_composite_type_arbitrary_length(self):
    hint = typehints.Tuple[typehints.List[int], ...]
    t = ([1, 2], 'e', 's', 't')
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual("Tuple[List[int], ...] hint type-constraint violated. The "
                     "type of element #1 in the passed tuple is incorrect: "
                     "List type-constraint violated. Valid object instance "
                     "must be of type 'list'. Instead, an instance of 'str' "
                     "was received.",
                     e.exception.args[0])
class ListHintTestCase(TypeHintTestCase):
  """Unit tests for the List type-hint: parameterization, compatibility,
  repr, and run-time type checking."""

  def test_getitem_invalid_composite_type_param(self):
    # Plain values are rejected as List parameters.
    with self.assertRaises(TypeError):
      typehints.List[4]

  def test_list_constraint_compatibility(self):
    hint1 = typehints.List[typehints.Tuple[int, str]]
    hint2 = typehints.List[typehints.Tuple[float, bool]]
    self.assertCompatible(hint1, hint1)
    self.assertNotCompatible(hint1, hint2)
    # Covariant in the element type.
    self.assertCompatible(typehints.List[SuperClass], typehints.List[SubClass])

  def test_list_repr(self):
    hint = (
        typehints.List[typehints.Tuple[DummyTestClass1, DummyTestClass2]]
    )
    self.assertEqual('List[Tuple[DummyTestClass1, DummyTestClass2]]',
                     repr(hint))

  def test_enforce_list_type_constraint_valid_simple_type(self):
    # type_check returns None on success.
    hint = typehints.List[int]
    self.assertIsNone(hint.type_check([1, 2, 3]))

  def test_enforce_list_type_constraint_valid_composite_type(self):
    hint = typehints.List[DummyTestClass1]
    l = [DummyTestClass1(), DummyTestClass1()]
    self.assertIsNone(hint.type_check(l))

  def test_enforce_list_type_constraint_invalid_simple_type(self):
    hint = typehints.List[int]
    l = ['f', 'd', 'm']
    with self.assertRaises(TypeError) as e:
      hint.type_check(l)
    self.assertEqual('List[int] hint type-constraint violated. The type of '
                     'element #0 in the passed list is incorrect. Expected an '
                     'instance of type int, instead received an instance of '
                     'type str.',
                     e.exception.args[0])

  def test_enforce_list_type_constraint_invalid_composite_type(self):
    # Nested constraint failures are reported with the inner message chained.
    hint = typehints.List[typehints.Tuple[int, int]]
    l = [('f', 1), ('m', 5)]
    with self.assertRaises(TypeError) as e:
      hint.type_check(l)
    self.assertEqual('List[Tuple[int, int]] hint type-constraint violated.'
                     ' The type of element #0 in the passed list is '
                     'incorrect: Tuple[int, int] hint type-constraint '
                     'violated. The type of element #0 in the passed tuple'
                     ' is incorrect. Expected an instance of type int, '
                     'instead received an instance of type str.',
                     e.exception.args[0])
class KVHintTestCase(TypeHintTestCase):
  """Unit tests for the KV (key-value) type-hint, which proxies to a
  two-element Tuple constraint."""

  def test_getitem_param_must_be_tuple(self):
    with self.assertRaises(TypeError) as e:
      typehints.KV[4]
    self.assertEqual('Parameter to KV type-hint must be a tuple of types: '
                     'KV[.., ..].',
                     e.exception.args[0])

  def test_getitem_param_must_have_length_2(self):
    # KV takes exactly a key type and a value type.
    with self.assertRaises(TypeError) as e:
      typehints.KV[int, str, bool]
    self.assertEqual("Length of parameters to a KV type-hint must be "
                     "exactly 2. Passed parameters: ({}, {}, {}), have a "
                     "length of 3.".format(int, str, bool),
                     e.exception.args[0])

  def test_getitem_proxy_to_tuple(self):
    # KV[k, v] is implemented as a two-element TupleConstraint.
    hint = typehints.KV[int, str]
    self.assertTrue(isinstance(hint, typehints.Tuple.TupleConstraint))

  def test_enforce_kv_type_constraint(self):
    # type_check returns None on success.
    hint = typehints.KV[str, typehints.Tuple[int, int, int]]
    t = ('test', (1, 2, 3))
    self.assertIsNone(hint.type_check(t))
class DictHintTestCase(TypeHintTestCase):
  """Unit tests for the Dict type-hint: parameterization, compatibility,
  repr, run-time type checking, and type-variable matching."""

  def test_getitem_param_must_be_tuple(self):
    with self.assertRaises(TypeError) as e:
      typehints.Dict[4]
    self.assertEqual('Parameter to Dict type-hint must be a tuple of '
                     'types: Dict[.., ..].',
                     e.exception.args[0])

  def test_getitem_param_must_have_length_2(self):
    # Dict takes exactly a key type and a value type.
    with self.assertRaises(TypeError) as e:
      typehints.Dict[float, int, bool]
    self.assertEqual("Length of parameters to a Dict type-hint must be "
                     "exactly 2. Passed parameters: ({}, {}, {}), have a "
                     "length of 3.".format(float, int, bool),
                     e.exception.args[0])

  def test_key_type_must_be_valid_composite_param(self):
    with self.assertRaises(TypeError):
      typehints.Dict[list, int]

  def test_value_type_must_be_valid_composite_param(self):
    with self.assertRaises(TypeError):
      typehints.Dict[str, 5]

  def test_compatibility(self):
    hint1 = typehints.Dict[int, str]
    hint2 = typehints.Dict[bool, int]
    hint3 = typehints.Dict[int, typehints.List[typehints.Tuple[str, str, str]]]
    self.assertCompatible(hint1, hint1)
    self.assertCompatible(hint3, hint3)
    self.assertNotCompatible(hint3, 4)
    self.assertNotCompatible(hint2, hint1)

  def test_repr(self):
    hint3 = typehints.Dict[int, typehints.List[typehints.Tuple[str, str, str]]]
    self.assertEqual('Dict[int, List[Tuple[str, str, str]]]', repr(hint3))

  def test_type_checks_not_dict(self):
    hint = typehints.Dict[int, str]
    l = [1, 2]
    with self.assertRaises(TypeError) as e:
      hint.type_check(l)
    self.assertEqual('Dict type-constraint violated. All passed instances '
                     'must be of type dict. [1, 2] is of type list.',
                     e.exception.args[0])

  def test_type_check_invalid_key_type(self):
    hint = typehints.Dict[typehints.Tuple[int, int, int],
                          typehints.List[str]]
    d = {(1, 2): ['m', '1', '2', '3']}
    # Fix: was assertRaises((TypeError, TypeError)) -- the duplicated
    # exception type in the tuple was redundant; a single TypeError is
    # equivalent.
    with self.assertRaises(TypeError) as e:
      hint.type_check(d)
    self.assertEqual('Dict[Tuple[int, int, int], List[str]] hint key-type '
                     'constraint violated. All keys should be of type '
                     'Tuple[int, int, int]. Instead: Passed object '
                     'instance is of the proper type, but differs in '
                     'length from the hinted type. Expected a tuple of '
                     'length 3, received a tuple of length 2.',
                     e.exception.args[0])

  def test_type_check_invalid_value_type(self):
    hint = typehints.Dict[str, typehints.Dict[int, str]]
    d = {'f': [1, 2, 3]}
    with self.assertRaises(TypeError) as e:
      hint.type_check(d)
    self.assertEqual('Dict[str, Dict[int, str]] hint value-type constraint'
                     ' violated. All values should be of type '
                     'Dict[int, str]. Instead: Dict type-constraint '
                     'violated. All passed instances must be of type dict.'
                     ' [1, 2, 3] is of type list.',
                     e.exception.args[0])

  def test_type_check_valid_simple_type(self):
    # type_check returns None on success.
    hint = typehints.Dict[int, str]
    d = {4: 'f', 9: 'k'}
    self.assertIsNone(hint.type_check(d))

  def test_type_check_valid_composite_type(self):
    hint = typehints.Dict[typehints.Tuple[str, str], typehints.List[int]]
    d = {('f', 'k'): [1, 2, 3], ('m', 'r'): [4, 6, 9]}
    self.assertIsNone(hint.type_check(d))

  def test_match_type_variables(self):
    S = typehints.TypeVariable('S')  # pylint: disable=invalid-name
    T = typehints.TypeVariable('T')  # pylint: disable=invalid-name
    hint = typehints.Dict[S, T]
    self.assertEqual({S: int, T: str},
                     hint.match_type_variables(typehints.Dict[int, str]))
class SetHintTestCase(TypeHintTestCase):
  """Unit tests for the Set type-hint."""

  def test_getitem_invalid_composite_type_param(self):
    # Raw container types (e.g. list) are not valid Set parameters.
    with self.assertRaises(TypeError) as e:
      typehints.Set[list]
    self.assertEqual("Parameter to a Set hint must be a non-sequence, a "
                     "type, or a TypeConstraint. {} is an instance of "
                     "type.".format(list),
                     e.exception.args[0])

  def test_compatibility(self):
    hint1 = typehints.Set[typehints.List[str]]
    hint2 = typehints.Set[typehints.Tuple[int, int]]
    self.assertCompatible(hint1, hint1)
    self.assertNotCompatible(hint2, hint1)

  def test_repr(self):
    hint = typehints.Set[typehints.List[bool]]
    self.assertEqual('Set[List[bool]]', repr(hint))

  def test_type_check_must_be_set(self):
    hint = typehints.Set[str]
    with self.assertRaises(TypeError) as e:
      hint.type_check(4)
    self.assertEqual("Set type-constraint violated. Valid object instance "
                     "must be of type 'set'. Instead, an instance of 'int'"
                     " was received.",
                     e.exception.args[0])

  def test_type_check_invalid_elem_type(self):
    hint = typehints.Set[float]
    with self.assertRaises(TypeError):
      hint.type_check('f')

  def test_type_check_valid_elem_simple_type(self):
    # type_check returns None on success.
    hint = typehints.Set[str]
    s = set(['f', 'm', 'k'])
    self.assertIsNone(hint.type_check(s))

  def test_type_check_valid_elem_composite_type(self):
    hint = typehints.Set[typehints.Union[int, str]]
    s = set([9, 'm', 'k'])
    self.assertIsNone(hint.type_check(s))
class IterableHintTestCase(TypeHintTestCase):
  """Unit tests for the Iterable type-hint, including compatibility with
  List, Set, Tuple, and other Iterable hints."""

  def test_getitem_invalid_composite_type_param(self):
    with self.assertRaises(TypeError) as e:
      typehints.Iterable[5]
    self.assertEqual('Parameter to an Iterable hint must be a '
                     'non-sequence, a type, or a TypeConstraint. 5 is '
                     'an instance of int.',
                     e.exception.args[0])

  def test_compatibility(self):
    # Fix: the original repeated the first three assertions verbatim at
    # the end of this method; the duplicates were removed.
    self.assertCompatible(typehints.Iterable[int], typehints.List[int])
    self.assertCompatible(typehints.Iterable[int], typehints.Set[int])
    self.assertCompatible(typehints.Iterable[typehints.Any],
                          typehints.List[typehints.Tuple[int, bool]])
    self.assertCompatible(typehints.Iterable[int], typehints.Iterable[int])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, str]],
                          typehints.Iterable[typehints.Union[int, str]])
    self.assertNotCompatible(typehints.Iterable[str], typehints.Iterable[bool])

  def test_tuple_compatibility(self):
    # Both arbitrary-length and fixed-arity tuples satisfy an Iterable hint
    # when every element type is accepted.
    self.assertCompatible(typehints.Iterable[int], typehints.Tuple[int, ...])
    self.assertCompatible(typehints.Iterable[SuperClass],
                          typehints.Tuple[SubClass, ...])
    self.assertCompatible(typehints.Iterable[int], typehints.Tuple[int, int])
    self.assertCompatible(typehints.Iterable[Any], typehints.Tuple[int, float])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, float]],
                          typehints.Tuple[int, ...])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, float]],
                          typehints.Tuple[int, float])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, float]],
                          typehints.Tuple[int, float, int])

  def test_repr(self):
    hint = typehints.Iterable[typehints.Set[str]]
    self.assertEqual('Iterable[Set[str]]', repr(hint))

  def test_type_check_must_be_iterable(self):
    with self.assertRaises(TypeError) as e:
      hint = typehints.Iterable[int]
      hint.type_check(5)
    self.assertEqual("Iterable type-constraint violated. Valid object "
                     "instance must be of type 'iterable'. Instead, an "
                     "instance of 'int' was received.",
                     e.exception.args[0])

  def test_type_check_violation_invalid_simple_type(self):
    hint = typehints.Iterable[float]
    l = set([1, 2, 3, 4])
    with self.assertRaises(TypeError):
      hint.type_check(l)

  def test_type_check_violation_valid_simple_type(self):
    # type_check returns None on success.
    hint = typehints.Iterable[str]
    l = ('t', 'e', 's', 't')
    self.assertIsNone(hint.type_check(l))

  def test_type_check_violation_invalid_composite_type(self):
    hint = typehints.Iterable[typehints.List[int]]
    l = ([['t', 'e'], ['s', 't']])
    with self.assertRaises(TypeError):
      hint.type_check(l)

  def test_type_check_violation_valid_composite_type(self):
    hint = typehints.Iterable[typehints.List[int]]
    l = ([[1, 2], [3, 4, 5]])
    self.assertIsNone(hint.type_check(l))
class TestGeneratorWrapper(TypeHintTestCase):
  """Tests for GeneratorWrapper, which wraps a generator and invokes a
  callback for each value it yields."""

  def test_functions_as_regular_generator(self):
    def count(n):
      for i in range(n):
        yield i

    seen = []
    # Fix: the bound method is passed directly instead of the previous
    # `lambda x: l.append(x)` wrapper (PEP 8 / E731: do not assign a
    # lambda to a name; the lambda added nothing over `seen.append`).
    wrapped_gen = GeneratorWrapper(count(4), seen.append)

    # Should function as a normal generator.
    self.assertEqual(0, next(wrapped_gen))
    self.assertEqual((1, 2, 3), tuple(wrapped_gen))

    # Interleave function should have been called each time.
    self.assertEqual([0, 1, 2, 3], seen)
class GeneratorHintTestCase(TypeHintTestCase):
  """Unit tests for the Iterator type-hint, including run-time yield-type
  enforcement on decorated generator functions."""

  def test_repr(self):
    hint = typehints.Iterator[typehints.Set[str]]
    self.assertEqual('Iterator[Set[str]]', repr(hint))

  def test_compatibility(self):
    self.assertCompatible(typehints.Iterator[int], typehints.Iterator[int])
    self.assertNotCompatible(typehints.Iterator[str], typehints.Iterator[float])

  def test_generator_return_hint_invalid_yield_type(self):
    # Yield types are checked lazily, when next() produces a value.
    @check_type_hints
    @with_output_types(typehints.Iterator[int])
    def all_upper(s):
      for e in s:
        yield e.upper()

    with self.assertRaises(TypeCheckError) as e:
      next(all_upper('hello'))
    self.assertEqual('Type-hint for return type violated: Iterator[int] '
                     'hint type-constraint violated. Expected a iterator '
                     'of type int. Instead received a iterator of type '
                     'str.',
                     e.exception.args[0])

  def test_generator_argument_hint_invalid_yield_type(self):
    # A generator passed as an argument is checked as its values are
    # consumed inside the decorated function.
    def wrong_yield_gen():
      for e in ['a', 'b']:
        yield e

    @check_type_hints
    @with_input_types(a=typehints.Iterator[int])
    def increment(a):
      return [e + 1 for e in a]

    with self.assertRaises(TypeCheckError) as e:
      increment(wrong_yield_gen())
    self.assertEqual("Type-hint for argument: 'a' violated: Iterator[int] "
                     "hint type-constraint violated. Expected a iterator "
                     "of type int. Instead received a iterator of type "
                     "str.",
                     e.exception.args[0])
class TakesDecoratorTestCase(TypeHintTestCase):
  """Unit tests for the with_input_types decorator (argument hints)."""

  def test_must_be_primitive_type_or_constraint(self):
    # Sequences and plain values are rejected at decoration time.
    with self.assertRaises(TypeError) as e:
      t = [1, 2]

      @with_input_types(a=t)
      def unused_foo(a):
        pass
    self.assertEqual('All type hint arguments must be a non-sequence, a '
                     'type, or a TypeConstraint. [1, 2] is an instance of '
                     'list.',
                     e.exception.args[0])

    with self.assertRaises(TypeError) as e:
      t = 5

      @check_type_hints
      @with_input_types(a=t)
      def unused_foo(a):
        pass
    self.assertEqual('All type hint arguments must be a non-sequence, a type, '
                     'or a TypeConstraint. 5 is an instance of int.',
                     e.exception.args[0])

  def test_basic_type_assertion(self):
    @check_type_hints
    @with_input_types(a=int)
    def foo(a):
      return a + 1

    with self.assertRaises(TypeCheckError) as e:
      m = 'a'
      foo(m)
    self.assertEqual("Type-hint for argument: 'a' violated. Expected an "
                     "instance of {}, instead found an instance of "
                     "{}.".format(int, type(m)),
                     e.exception.args[0])

  def test_composite_type_assertion(self):
    @check_type_hints
    @with_input_types(a=typehints.List[int])
    def foo(a):
      a.append(1)
      return a

    with self.assertRaises(TypeCheckError) as e:
      m = ['f', 'f']
      foo(m)
    self.assertEqual("Type-hint for argument: 'a' violated: List[int] hint "
                     "type-constraint violated. The type of element #0 in "
                     "the passed list is incorrect. Expected an instance of "
                     "type int, instead received an instance of type str.",
                     e.exception.args[0])

  def test_valid_simple_type_arguments(self):
    @with_input_types(a=str)
    def upper(a):
      return a.upper()

    # Type constraints should pass, and function will be evaluated as normal.
    self.assertEqual('M', upper('m'))

  def test_any_argument_type_hint(self):
    # Any matches every argument type.
    @check_type_hints
    @with_input_types(a=typehints.Any)
    def foo(a):
      return 4

    self.assertEqual(4, foo('m'))

  def test_valid_mix_positional_and_keyword_arguments(self):
    @check_type_hints
    @with_input_types(typehints.List[int], elem=typehints.List[int])
    def combine(container, elem):
      return container + elem

    self.assertEqual([1, 2, 3], combine([1, 2], [3]))

  def test_invalid_only_positional_arguments(self):
    @check_type_hints
    @with_input_types(int, int)
    def sub(a, b):
      return a - b

    with self.assertRaises(TypeCheckError) as e:
      m = 'two'
      sub(1, m)
    self.assertEqual("Type-hint for argument: 'b' violated. Expected an "
                     "instance of {}, instead found an instance of "
                     "{}.".format(int, type(m)),
                     e.exception.args[0])

  def test_valid_only_positional_arguments(self):
    @with_input_types(int, int)
    def add(a, b):
      return a + b

    self.assertEqual(3, add(1, 2))
class ReturnsDecoratorTestCase(TypeHintTestCase):
  """Unit tests for the with_output_types decorator (return hints)."""

  def test_no_kwargs_accepted(self):
    # with_output_types takes a single positional hint, never keywords.
    with self.assertRaises(ValueError):
      @with_output_types(m=int)
      def unused_foo():
        return 5

  def test_must_be_primitive_type_or_type_constraint(self):
    with self.assertRaises(TypeError):
      @with_output_types(5)
      def unused_foo():
        pass

    with self.assertRaises(TypeError):
      @with_output_types([1, 2])
      def unused_foo():
        pass

  def test_must_be_single_return_type(self):
    # Multiple return hints are rejected; use a Tuple hint instead.
    with self.assertRaises(ValueError):
      @with_output_types(int, str)
      def unused_foo():
        return 4, 'f'

  def test_type_check_violation(self):
    @check_type_hints
    @with_output_types(int)
    def foo(a):
      return 'test'

    with self.assertRaises(TypeCheckError) as e:
      m = 4
      foo(m)
    self.assertEqual("Type-hint for return type violated. Expected an "
                     "instance of {}, instead found an instance of "
                     "{}.".format(int, type('test')),
                     e.exception.args[0])

  def test_type_check_simple_type(self):
    @with_output_types(str)
    def upper(a):
      return a.upper()

    self.assertEqual('TEST', upper('test'))

  def test_type_check_composite_type(self):
    @with_output_types(typehints.List[typehints.Tuple[int, int]])
    def bar():
      return [(i, i+1) for i in range(5)]

    self.assertEqual([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], bar())

  def test_any_return_type_hint(self):
    # Any matches every return type.
    @with_output_types(typehints.Any)
    def bar():
      return 'foo'

    self.assertEqual('foo', bar())
class CombinedReturnsAndTakesTestCase(TypeHintTestCase):
  """Tests combining with_input_types and with_output_types, with and
  without run-time checking enabled via check_type_hints."""

  def test_enable_and_disable_type_checking_takes(self):
    @with_input_types(a=int)
    def int_to_str(a):
      return str(a)

    # The function call below violates the argument type-hint above, but won't
    # result in an exception since run-time type-checking was disabled above.
    self.assertEqual('a', int_to_str('a'))

    # Must re-define since the conditional is in the (maybe)wrapper.
    @check_type_hints
    @with_input_types(a=int)
    def int_to_str(a):
      return str(a)

    # With run-time type checking enabled once again the same call attempt
    # should result in a TypeCheckError.
    with self.assertRaises(TypeCheckError):
      int_to_str('a')

  def test_enable_and_disable_type_checking_returns(self):
    @with_output_types(str)
    def int_to_str(a):
      return a

    # The return value of the function above violates the return-type
    # type-hint above, but won't result in an exception since run-time
    # type-checking was disabled above.
    self.assertEqual(9, int_to_str(9))

    # Must re-define since the conditional is in the (maybe)wrapper.
    @check_type_hints
    @with_output_types(str)
    def int_to_str(a):
      return a

    # With type-checking enabled once again we should get a TypeCheckError here.
    with self.assertRaises(TypeCheckError):
      int_to_str(9)

  def test_valid_mix_pos_and_keyword_with_both_orders(self):
    @with_input_types(str, start=int)
    @with_output_types(str)
    def to_upper_with_slice(string, start):
      return string.upper()[start:]

    self.assertEqual('ELLO', to_upper_with_slice('hello', 1))

  def test_simple_takes_and_returns_hints(self):
    @check_type_hints
    @with_output_types(str)
    @with_input_types(a=str)
    def to_lower(a):
      return a.lower()

    # Return type and argument type satisfied, should work as normal.
    self.assertEqual('m', to_lower('M'))

    # Invalid argument type should raise a TypeCheckError
    with self.assertRaises(TypeCheckError):
      to_lower(5)

    @check_type_hints
    @with_output_types(str)
    @with_input_types(a=str)
    def to_lower(a):
      return 9

    # Modified function now has an invalid return type.
    with self.assertRaises(TypeCheckError):
      to_lower('a')

  def test_composite_takes_and_returns_hints(self):
    # Decorator order should not matter; both orders are exercised below.
    @check_type_hints
    @with_input_types(it=typehints.List[int])
    @with_output_types(typehints.List[typehints.Tuple[int, int]])
    def expand_ints(it):
      return [(i, i + 1) for i in it]

    # Return type and argument type satisfied, should work as normal.
    self.assertEqual([(0, 1), (1, 2), (2, 3)], expand_ints(list(range(3))))

    # Invalid argument, list of str instead of int.
    with self.assertRaises(TypeCheckError):
      expand_ints('t e s t'.split())

    @check_type_hints
    @with_output_types(typehints.List[typehints.Tuple[int, int]])
    @with_input_types(it=typehints.List[int])
    def expand_ints(it):
      return [str(i) for i in it]

    # Modified function now has invalid return type.
    with self.assertRaises(TypeCheckError):
      expand_ints(list(range(2)))
class DecoratorHelpers(TypeHintTestCase):
  """Unit tests for module-level helpers used by the type-hint
  decorators: is_consistent_with, _positional_arg_hints, and
  getcallargs_forhints."""

  def test_hint_helper(self):
    self.assertTrue(is_consistent_with(Any, int))
    self.assertTrue(is_consistent_with(int, Any))
    self.assertTrue(is_consistent_with(str, object))
    self.assertFalse(is_consistent_with(object, str))
    self.assertTrue(is_consistent_with(str, Union[str, int]))
    self.assertFalse(is_consistent_with(Union[str, int], str))

  def test_positional_arg_hints(self):
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in
    # Python 3.12); the canonical name is used throughout.
    self.assertEqual(typehints.Any, _positional_arg_hints('x', {}))
    self.assertEqual(int, _positional_arg_hints('x', {'x': int}))
    # A nested argument list is hinted as a Tuple of the member hints.
    self.assertEqual(typehints.Tuple[int, typehints.Any],
                     _positional_arg_hints(['x', 'y'], {'x': int}))

  def test_getcallargs_forhints(self):
    def func(a, b_c, *d):
      b, c = b_c  # pylint: disable=unused-variable
      return None

    self.assertEqual(
        {'a': Any, 'b_c': Any, 'd': Tuple[Any, ...]},
        getcallargs_forhints(func, *[Any, Any]))
    self.assertEqual(
        {'a': Any, 'b_c': Any, 'd': Tuple[Any, ...]},
        getcallargs_forhints(func, *[Any, Any, Any, int]))
    self.assertEqual(
        {'a': int, 'b_c': Tuple[str, Any], 'd': Tuple[Any, ...]},
        getcallargs_forhints(func, *[int, Tuple[str, Any]]))
# Script entry point: discover and run all test cases in this module.
if __name__ == '__main__':
  unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mmc/fsl-imx-mmc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Secure Digital Host Controller for i.MX2/3 series
maintainers:
- Markus Pargmann <mpa@pengutronix.de>
allOf:
- $ref: mmc-controller.yaml
properties:
compatible:
oneOf:
- const: fsl,imx21-mmc
- const: fsl,imx31-mmc
- items:
- const: fsl,imx27-mmc
- const: fsl,imx21-mmc
reg:
maxItems: 1
clocks:
maxItems: 2
clock-names:
items:
- const: ipg
- const: per
interrupts:
maxItems: 1
dmas:
maxItems: 1
dma-names:
const: rx-tx
required:
- clocks
- clock-names
- compatible
- reg
- interrupts
unevaluatedProperties: false
examples:
- |
mmc@10014000 {
compatible = "fsl,imx27-mmc", "fsl,imx21-mmc";
reg = <0x10014000 0x1000>;
interrupts = <11>;
clocks = <&clks 29>, <&clks 60>;
clock-names = "ipg", "per";
dmas = <&dma 7>;
dma-names = "rx-tx";
bus-width = <4>;
cd-gpios = <&gpio3 29>;
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/mmc/fsl-imx-mmc.yaml
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the Mac OS X user plist plugin."""
# TODO: Only plists from Mac OS X 10.8 and 10.9 were tested. Look at other
# versions as well.
import binascii
from binplist import binplist
from dfvfs.file_io import fake_file_io
from dfvfs.path import fake_path_spec
from dfvfs.resolver import context
from xml.etree import ElementTree
from plaso.events import plist_event
from plaso.lib import timelib
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'
class MacUserPlugin(interface.PlistPlugin):
  """Basic plugin to extract timestamp Mac user information."""

  NAME = 'plist_macuser'
  DESCRIPTION = u'Parser for Mac OS X user plist files.'

  # The PLIST_PATH is dynamic, "user".plist is the name of the
  # Mac OS X user.
  PLIST_KEYS = frozenset([
      'name', 'uid', 'home',
      'passwordpolicyoptions', 'ShadowHashData'])

  # Root key used when producing plist events.
  _ROOT = u'/'

  def Process(self, parser_context, plist_name=None, top_level=None, **kwargs):
    """Check if it is a valid Mac OS X system account plist file name.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      plist_name: name of the plist file.
      top_level: dictionary with the plist file parsed.
    """
    # NOTE(review): self.PLIST_PATH is not defined anywhere in this class;
    # presumably it is set dynamically elsewhere (the comment above
    # PLIST_KEYS suggests so) -- confirm before relying on this override.
    super(MacUserPlugin, self).Process(
        parser_context, plist_name=self.PLIST_PATH, top_level=top_level,
        **kwargs)

  # Generated events:
  #   name: string with the system user.
  #   uid: user ID.
  #   passwordpolicyoptions: XML Plist structures with the timestamp.
  #     passwordLastSetTime: last time the password was changed.
  #     lastLoginTimestamp: last time the user was authenticated (*).
  #     failedLoginTimestamp: last time the user passwd was incorrectly(*).
  #     failedLoginCount: times of incorrect passwords.
  #   (*): depending on the situation, these timestamps are reset (0 value).
  #        It is translated by the library as a 2001-01-01 00:00:00 (Cocoa
  #        zero time representation). If this happens, the event is not
  #        yielded.
  def GetEntries(self, parser_context, match=None, **unused_kwargs):
    """Extracts relevant user timestamp entries.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      match: Optional dictionary containing keys extracted from PLIST_KEYS.
             The default is None.
    """
    account = match['name'][0]
    uid = match['uid'][0]
    # Cocoa epoch (2001-01-01) expressed in microseconds since POSIX epoch;
    # timestamps at or below this value are treated as "reset" and skipped.
    cocoa_zero = (
        timelib.Timestamp.COCOA_TIME_TO_POSIX_BASE *
        timelib.Timestamp.MICRO_SECONDS_PER_SECOND)
    # INFO: binplist returns a string with the Plist XML.
    for policy in match['passwordpolicyoptions']:
      xml_policy = ElementTree.fromstring(policy)
      for dict_elements in xml_policy.iterfind('dict'):
        key_values = [value.text for value in dict_elements.getchildren()]
        # Pair up alternating <key>/<value> text nodes into a dict.
        policy_dict = dict(zip(key_values[0::2], key_values[1::2]))

      if policy_dict.get('passwordLastSetTime', 0):
        timestamp = timelib.Timestamp.FromTimeString(
            policy_dict.get('passwordLastSetTime', '0'))
        if timestamp > cocoa_zero:
          # Extract the password hash information.
          # It is stored in the attribute ShadowHashData which is
          # a binary plist data; however binplist only extracts one
          # level of binary plist, so it returns this information
          # as a string.
          # TODO: change this into a DataRange instead. For this we
          # need the file offset and size of the ShadowHashData value data.
          resolver_context = context.Context()
          fake_file = fake_file_io.FakeFile(
              resolver_context, match['ShadowHashData'][0])
          fake_file.open(path_spec=fake_path_spec.FakePathSpec(
              location=u'ShadowHashData'))
          try:
            plist_file = binplist.BinaryPlist(file_obj=fake_file)
            top_level = plist_file.Parse()
          except binplist.FormatError:
            # Unparseable nested plist: fall back to an empty dict so the
            # password hash is reported as N/A below.
            top_level = dict()
          salted_hash = top_level.get('SALTED-SHA512-PBKDF2', None)
          if salted_hash:
            # Format compatible with password-cracking tools ($ml$ scheme).
            password_hash = u'$ml${0:d}${1:s}${2:s}'.format(
                salted_hash['iterations'],
                binascii.hexlify(salted_hash['salt']),
                binascii.hexlify(salted_hash['entropy']))
          else:
            password_hash = u'N/A'
          description = (
              u'Last time {0:s} ({1!s}) changed the password: {2!s}').format(
                  account, uid, password_hash)
          event_object = plist_event.PlistTimeEvent(
              self._ROOT, u'passwordLastSetTime', timestamp, description)
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

      if policy_dict.get('lastLoginTimestamp', 0):
        timestamp = timelib.Timestamp.FromTimeString(
            policy_dict.get('lastLoginTimestamp', '0'))
        description = u'Last login from {0:s} ({1!s})'.format(account, uid)
        if timestamp > cocoa_zero:
          event_object = plist_event.PlistTimeEvent(
              self._ROOT, u'lastLoginTimestamp', timestamp, description)
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

      if policy_dict.get('failedLoginTimestamp', 0):
        timestamp = timelib.Timestamp.FromTimeString(
            policy_dict.get('failedLoginTimestamp', '0'))
        description = (
            u'Last failed login from {0:s} ({1!s}) ({2!s} times)').format(
                account, uid, policy_dict['failedLoginCount'])
        if timestamp > cocoa_zero:
          event_object = plist_event.PlistTimeEvent(
              self._ROOT, u'failedLoginTimestamp', timestamp, description)
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)


# Register this plugin with the plist parser so it is picked up at runtime.
plist.PlistParser.RegisterPlugin(MacUserPlugin)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'client_fva/ui/ui_elements/contactAddDialog.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AddContactDialog(object):
    """UI definition for the "add contact" dialog.

    Builds a fixed-geometry dialog holding a three-row form (first name,
    last name, identification) plus an Ok/Cancel button box.
    """

    def setupUi(self, AddContactDialog):
        """Create and lay out all widgets on *AddContactDialog*."""
        AddContactDialog.setObjectName("AddContactDialog")
        AddContactDialog.resize(495, 265)
        window_icon = QtGui.QIcon()
        window_icon.addPixmap(QtGui.QPixmap(":/images/icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        AddContactDialog.setWindowIcon(window_icon)
        self.dialogbox = QtWidgets.QDialogButtonBox(AddContactDialog)
        self.dialogbox.setGeometry(QtCore.QRect(280, 230, 166, 23))
        self.dialogbox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.dialogbox.setObjectName("dialogbox")
        self.formLayoutWidget = QtWidgets.QWidget(AddContactDialog)
        self.formLayoutWidget.setGeometry(QtCore.QRect(20, 30, 451, 181))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.formLayout.setContentsMargins(11, 11, 11, 11)
        self.formLayout.setHorizontalSpacing(50)
        self.formLayout.setVerticalSpacing(25)
        self.formLayout.setObjectName("formLayout")
        # One (label, line-edit) pair per form row; attribute names match the
        # original generated code so external callers keep working.
        rows = [
            ("label_firstname", "firstname"),
            ("label_lastname", "lastname"),
            ("label_identification", "identification"),
        ]
        for row, (label_name, field_name) in enumerate(rows):
            self._add_form_row(row, label_name, field_name)
        self.retranslateUi(AddContactDialog)
        QtCore.QMetaObject.connectSlotsByName(AddContactDialog)

    def _add_form_row(self, row, label_name, field_name):
        """Create one QLabel/QLineEdit pair and place it in form *row*."""
        label = QtWidgets.QLabel(self.formLayoutWidget)
        label.setObjectName(label_name)
        setattr(self, label_name, label)
        self.formLayout.setWidget(row, QtWidgets.QFormLayout.LabelRole, label)
        field = QtWidgets.QLineEdit(self.formLayoutWidget)
        field.setObjectName(field_name)
        setattr(self, field_name, field)
        self.formLayout.setWidget(row, QtWidgets.QFormLayout.FieldRole, field)

    def retranslateUi(self, AddContactDialog):
        """Install the (Spanish) user-visible strings."""
        _tr = QtCore.QCoreApplication.translate
        AddContactDialog.setWindowTitle(_tr("AddContactDialog", "Agregar Contacto"))
        self.label_firstname.setText(_tr("AddContactDialog", "Nombre"))
        self.label_lastname.setText(_tr("AddContactDialog", "Apellidos"))
        self.label_identification.setText(_tr("AddContactDialog", "Identificación"))
if __name__ == "__main__":
    # Manual smoke test: show the dialog standalone.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_AddContactDialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())
|
unknown
|
codeparrot/codeparrot-clean
| ||
#pragma once
#include <c10/core/SymBool.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <cstdint>
#include <limits>
#include <ostream>
#include <utility>
namespace c10 {
// NB: this is actually double precision; we're using the Python naming here
class C10_API SymFloat {
 public:
  // Wrap a concrete double value.
  /*implicit*/ SymFloat(double d) : data_(d) {}
  // Wrap a symbolic node, which must represent a float. data_ is poisoned
  // with NaN so accidental reads of the concrete slot are noticeable.
  SymFloat(SymNode ptr)
      : data_(std::numeric_limits<double>::quiet_NaN()), ptr_(std::move(ptr)) {
    TORCH_CHECK(ptr_->is_float());
  }
  // Default: concrete 0.0.
  SymFloat() : data_(0.0) {}
  // Borrow the underlying SymNode without transferring ownership; nullptr
  // when the value is concrete.
  SymNodeImpl* toSymNodeImplUnowned() const {
    return ptr_.get();
  }
  // Steal the underlying SymNode out of this (rvalue-only) SymFloat.
  SymNodeImpl* release() && {
    return std::move(ptr_).release();
  }
  // Only valid if is_symbolic()
  SymNode toSymNodeImpl() const;
  // Guaranteed to return a SymNode, wrapping using base if necessary
  SymNode wrap_node(const SymNode& base) const;
  // Assert the value is concrete and return it.
  double expect_float() const {
    TORCH_CHECK(!is_symbolic());
    return data_;
  }
  // Arithmetic; may produce symbolic results when either operand is symbolic.
  SymFloat operator+(const SymFloat& /*sci*/) const;
  SymFloat operator-(const SymFloat& /*sci*/) const;
  SymFloat operator*(const SymFloat& /*sci*/) const;
  SymFloat operator/(const SymFloat& /*sci*/) const;
  // Symbolic comparisons: return a SymBool without forcing a guard.
  SymBool sym_eq(const SymFloat& /*sci*/) const;
  SymBool sym_ne(const SymFloat& /*sci*/) const;
  SymBool sym_lt(const SymFloat& /*sci*/) const;
  SymBool sym_le(const SymFloat& /*sci*/) const;
  SymBool sym_gt(const SymFloat& /*sci*/) const;
  SymBool sym_ge(const SymFloat& /*sci*/) const;
  // Boolean comparisons guard on the symbolic result (may specialize traces).
  bool operator==(const SymFloat& o) const {
    return sym_eq(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator!=(const SymFloat& o) const {
    return sym_ne(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator<(const SymFloat& o) const {
    return sym_lt(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator<=(const SymFloat& o) const {
    return sym_le(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator>(const SymFloat& o) const {
    return sym_gt(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator>=(const SymFloat& o) const {
    return sym_ge(o).guard_bool(__FILE__, __LINE__);
  }
  SymFloat min(const SymFloat& sci) const;
  SymFloat max(const SymFloat& sci) const;
  // Need guidance on where to put this code
  SymFloat sqrt() const;
  // Insert a guard for the float to be its concrete value, and then return
  // that value. This operation always works, even if the float is symbolic,
  // so long as we know what the underlying value is. Don't blindly put this
  // everywhere; you can cause overspecialization of PyTorch programs with
  // this method.
  //
  // It should be called as guard_float(__FILE__, __LINE__). The file and line
  // number can be used to diagnose overspecialization.
  double guard_float(const char* file, int64_t line) const;
  // Whether a concrete value is available (even for a symbolic float).
  bool has_hint() const;
  // N.B. It's important to keep this definition in the header
  // as we expect if checks to be folded for mobile builds
  // where `is_symbolic` is always false
  C10_ALWAYS_INLINE bool is_symbolic() const {
    return ptr_;
  }
  // UNSAFELY coerce this SymFloat into a double. You MUST have
  // established that this is a non-symbolic by some other means,
  // typically by having tested is_symbolic(). You will get garbage
  // from this function if is_symbolic() is true.
  double as_float_unchecked() const {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_symbolic());
    return data_;
  }

 private:
  // TODO: optimize to union
  double data_; // Concrete value; NaN-poisoned when ptr_ is set.
  SymNode ptr_; // Symbolic node; null when the value is concrete.
};
C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s);
} // namespace c10
|
c
|
github
|
https://github.com/pytorch/pytorch
|
c10/core/SymFloat.h
|
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package teststorage
import (
"context"
"errors"
"fmt"
"math"
"slices"
"strings"
"sync"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/testutil"
)
// Sample represents test, combined sample for mocking storage.AppenderV2.
type Sample struct {
	MF string // Metric family name.
	L labels.Labels // Series labels.
	M metadata.Metadata // Series metadata (help/type/unit).
	ST, T int64 // Start timestamp and sample timestamp.
	V float64 // Float value; used when H and FH are nil.
	H *histogram.Histogram // Integer histogram value, if any.
	FH *histogram.FloatHistogram // Float histogram value, if any.
	ES []exemplar.Exemplar // Exemplars attached to this sample.
}
// String renders the sample in a human-readable, OpenMetrics-inspired form,
// used by Appendable.String for debug output.
func (s Sample) String() string {
	// Attempting to format similar to ~ OpenMetrics 2.0 for readability.
	b := strings.Builder{}
	if s.M.Help != "" {
		b.WriteString("HELP ")
		b.WriteString(s.M.Help)
		b.WriteString("\n")
	}
	if s.M.Type != model.MetricTypeUnknown && s.M.Type != "" {
		b.WriteString("type@")
		b.WriteString(string(s.M.Type))
		b.WriteString(" ")
	}
	if s.M.Unit != "" {
		b.WriteString("unit@")
		b.WriteString(s.M.Unit)
		b.WriteString(" ")
	}
	// Print all value types on purpose, to catch bugs for appending multiple sample types at once.
	h := ""
	if s.H != nil {
		h = " " + s.H.String()
	}
	fh := ""
	if s.FH != nil {
		fh = " " + s.FH.String()
	}
	b.WriteString(fmt.Sprintf("%s %v%v%v st@%v t@%v", s.L.String(), s.V, h, fh, s.ST, s.T))
	if len(s.ES) > 0 {
		b.WriteString(fmt.Sprintf(" %v", s.ES))
	}
	b.WriteString("\n")
	return b.String()
}

// Equals reports whether two samples are identical in every field.
func (s Sample) Equals(other Sample) bool {
	return strings.Compare(s.MF, other.MF) == 0 &&
		labels.Equal(s.L, other.L) &&
		s.M.Equals(other.M) &&
		s.ST == other.ST &&
		s.T == other.T &&
		math.Float64bits(s.V) == math.Float64bits(other.V) && // Compare Float64bits so NaN values which are exactly the same will compare equal.
		s.H.Equals(other.H) &&
		s.FH.Equals(other.FH) &&
		slices.EqualFunc(s.ES, other.ES, exemplar.Exemplar.Equals)
}
// IsStale returns whether the sample represents a stale sample, according to
// https://prometheus.io/docs/specs/native_histograms/#staleness-markers.
// A float histogram takes precedence over an integer histogram, which in turn
// takes precedence over the plain float value.
func (s Sample) IsStale() bool {
	if s.FH != nil {
		return value.IsStaleNaN(s.FH.Sum)
	}
	if s.H != nil {
		return value.IsStaleNaN(s.H.Sum)
	}
	return value.IsStaleNaN(s.V)
}
// sampleComparer lets go-cmp compare Sample values via their Equals method.
var sampleComparer = cmp.Comparer(func(a, b Sample) bool {
	return a.Equals(b)
})
// RequireEqual is a special require equal that correctly compare Prometheus structures.
//
// In comparison to testutil.RequireEqual, this function adds special logic for comparing []Samples.
//
// It also ignores ordering between consecutive stale samples to avoid false
// negatives due to map iteration order in staleness tracking.
func RequireEqual(t testing.TB, expected, got []Sample, msgAndArgs ...any) {
	// Report failures at the caller's line, consistent with RequireNotEqual.
	t.Helper()
	opts := []cmp.Option{sampleComparer}
	expected = reorderExpectedForStaleness(expected, got)
	testutil.RequireEqualWithOptions(t, expected, got, opts, msgAndArgs...)
}
// RequireNotEqual is the negation of RequireEqual.
func RequireNotEqual(t testing.TB, expected, got []Sample, msgAndArgs ...any) {
	t.Helper()
	// NOTE(review): unlike RequireEqual this also registers a labels.Equal
	// comparer; confirm whether the two option sets should match.
	opts := []cmp.Option{cmp.Comparer(labels.Equal), sampleComparer}
	expected = reorderExpectedForStaleness(expected, got)
	if !cmp.Equal(expected, got, opts...) {
		return
	}
	require.Fail(t, fmt.Sprintf("Equal, but expected not: \n"+
		"a: %s\n"+
		"b: %s", expected, got), msgAndArgs...)
}
// reorderExpectedForStaleness permutes runs of consecutive stale samples in
// expected so they line up with got, compensating for non-deterministic map
// iteration order in staleness tracking. expected is returned unchanged when
// lengths differ or when it contains no stale samples.
func reorderExpectedForStaleness(expected, got []Sample) []Sample {
	if len(expected) != len(got) || !includeStaleNaNs(expected) {
		return expected
	}
	// Work on a copy; the caller's slice must not be mutated.
	result := make([]Sample, len(expected))
	copy(result, expected)
	// Try to reorder only consecutive stale samples to avoid false negatives
	// due to map iteration order in staleness tracking.
	for i := range result {
		if !result[i].IsStale() {
			continue
		}
		if result[i].Equals(got[i]) {
			continue
		}
		// Look ahead only within the current run of stale samples.
		for j := i + 1; j < len(result); j++ {
			if !result[j].IsStale() {
				break
			}
			if result[j].Equals(got[i]) {
				// Swap.
				result[i], result[j] = result[j], result[i]
				break
			}
		}
	}
	return result
}
// includeStaleNaNs reports whether any sample in s is a staleness marker.
func includeStaleNaNs(s []Sample) bool {
	return slices.ContainsFunc(s, Sample.IsStale)
}
// Appendable is a storage.Appendable mock.
// It allows recording all samples that were added through the appender and injecting errors.
// Appendable will panic if more than one Appender is open.
type Appendable struct {
	appendErrFn func(ls labels.Labels) error // If non-nil, inject appender error on every Append, AppendHistogram and ST zero calls.
	appendExemplarsError error // If non-nil, inject exemplar error.
	commitErr error // If non-nil, inject commit error.
	skipRecording bool // If true, Appendable won't record samples, useful for benchmarks.
	mtx sync.Mutex // Protects the recorded-result slices and appender errors.
	openAppenders atomic.Int32 // Guard against multi-appender use.
	// Recorded results.
	pendingSamples []Sample
	resultSamples []Sample
	rolledbackSamples []Sample
	// Optional chain (Appender will collect samples, then run next).
	next compatAppendable
}

// NewAppendable returns mock Appendable.
func NewAppendable() *Appendable {
	return &Appendable{}
}

// compatAppendable is satisfied by storages implementing both appender APIs.
type compatAppendable interface {
	storage.Appendable
	storage.AppendableV2
}

// Then chains another appender from the provided Appendable for the Appender calls.
func (a *Appendable) Then(appendable compatAppendable) *Appendable {
	a.next = appendable
	return a
}

// WithErrs allows injecting errors to the appender.
func (a *Appendable) WithErrs(appendErrFn func(ls labels.Labels) error, appendExemplarsError, commitErr error) *Appendable {
	a.appendErrFn = appendErrFn
	a.appendExemplarsError = appendExemplarsError
	a.commitErr = commitErr
	return a
}

// SkipRecording enables or disables recording appended samples.
// If skipped, Appendable allocs less, but Result*() methods will give always empty results. This is useful for benchmarking.
func (a *Appendable) SkipRecording(skipRecording bool) *Appendable {
	a.skipRecording = skipRecording
	return a
}
// PendingSamples returns a copy of the samples appended but not yet
// committed; nil when nothing is pending.
func (a *Appendable) PendingSamples() []Sample {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	if len(a.pendingSamples) == 0 {
		return nil
	}
	return append([]Sample(nil), a.pendingSamples...)
}
// ResultSamples returns a copy of the committed samples; nil when none.
func (a *Appendable) ResultSamples() []Sample {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	if len(a.resultSamples) == 0 {
		return nil
	}
	return append([]Sample(nil), a.resultSamples...)
}
// RolledbackSamples returns a copy of the rolled-back samples; nil when none.
func (a *Appendable) RolledbackSamples() []Sample {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	if len(a.rolledbackSamples) == 0 {
		return nil
	}
	return append([]Sample(nil), a.rolledbackSamples...)
}
// ResultReset drops all recorded pending, committed and rolled-back samples.
func (a *Appendable) ResultReset() {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	a.pendingSamples = a.pendingSamples[:0]
	a.resultSamples = a.resultSamples[:0]
	a.rolledbackSamples = a.rolledbackSamples[:0]
}

// ResultMetadata returns resultSamples with samples only containing L and M.
// This is for compatibility with tests that only focus on metadata.
//
// TODO: Rewrite tests to test metadata on resultSamples instead.
func (a *Appendable) ResultMetadata() []Sample {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	var ret []Sample
	for _, s := range a.resultSamples {
		if s.M.IsEmpty() {
			continue // Samples without metadata are not reported here.
		}
		ret = append(ret, Sample{L: s.L, M: s.M})
	}
	return ret
}
// String renders committed, pending and rolled-back samples for debugging.
func (a *Appendable) String() string {
	var b strings.Builder
	sections := []struct {
		title   string
		samples []Sample
	}{
		{"committed:\n", a.resultSamples},
		{"pending:\n", a.pendingSamples},
		{"rolledback:\n", a.rolledbackSamples},
	}
	for _, sec := range sections {
		b.WriteString(sec.title)
		for _, s := range sec.samples {
			b.WriteString("\n")
			b.WriteString(s.String())
		}
	}
	return b.String()
}
// errClosedAppender is returned on any use after Commit/Rollback.
var errClosedAppender = errors.New("appender was already committed/rolledback")

// baseAppender holds the transaction state shared by the V1 and V2 mock appenders.
type baseAppender struct {
	err error // Sticky error; set when the appender is closed or misused.
	nextTr storage.AppenderTransaction // Chained transaction, if any.
	a *Appendable
}

// checkErr returns the sticky error under the Appendable lock.
func (a *baseAppender) checkErr() error {
	a.a.mtx.Lock()
	defer a.a.mtx.Unlock()
	return a.err
}

// Commit moves pending samples into the committed results and closes the
// appender. An injected commitErr is returned before any recording happens.
func (a *baseAppender) Commit() error {
	if err := a.checkErr(); err != nil {
		return err
	}
	defer a.a.openAppenders.Dec()
	if a.a.commitErr != nil {
		return a.a.commitErr
	}
	a.a.mtx.Lock()
	if !a.a.skipRecording {
		a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...)
		a.a.pendingSamples = a.a.pendingSamples[:0]
	}
	a.err = errClosedAppender // Any further use of this appender fails.
	a.a.mtx.Unlock()
	if a.nextTr != nil {
		return a.nextTr.Commit()
	}
	return nil
}

// Rollback moves pending samples into the rolled-back results and closes the
// appender.
func (a *baseAppender) Rollback() error {
	if err := a.checkErr(); err != nil {
		return err
	}
	defer a.a.openAppenders.Dec()
	a.a.mtx.Lock()
	if !a.a.skipRecording {
		a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...)
		a.a.pendingSamples = a.a.pendingSamples[:0]
	}
	a.err = errClosedAppender // Any further use of this appender fails.
	a.a.mtx.Unlock()
	if a.nextTr != nil {
		return a.nextTr.Rollback()
	}
	return nil
}
// appender is the V1 mock appender.
type appender struct {
	baseAppender
	next storage.Appender // Chained V1 appender, if any.
}

// Appender implements storage.Appendable. Only one open appender at a time is
// supported; opening a second yields an appender that always errors.
func (a *Appendable) Appender(ctx context.Context) storage.Appender {
	ret := &appender{baseAppender: baseAppender{a: a}}
	if a.openAppenders.Inc() > 1 {
		ret.err = errors.New("teststorage.Appendable.Appender() concurrent use is not supported; attempted opening new Appender() without Commit/Rollback of the previous one. Extend the implementation if concurrent mock is needed")
		return ret
	}
	if a.next != nil {
		app := a.next.Appender(ctx)
		ret.next, ret.nextTr = app, app
	}
	return ret
}

// SetOptions is a no-op for the mock appender.
func (*appender) SetOptions(*storage.AppendOptions) {}

// Append records a float sample as pending and forwards to the chained
// appender when one is configured.
func (a *appender) Append(ref storage.SeriesRef, ls labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if err := a.checkErr(); err != nil {
		return 0, err
	}
	if a.a.appendErrFn != nil {
		if err := a.a.appendErrFn(ls); err != nil {
			return 0, err
		}
	}
	if !a.a.skipRecording {
		a.a.mtx.Lock()
		a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v})
		a.a.mtx.Unlock()
	}
	if a.next != nil {
		return a.next.Append(ref, ls, t, v)
	}
	return computeOrCheckRef(ref, ls)
}
// computeOrCheckRef derives a stable series reference from the labels hash,
// or validates a caller-provided reference against it.
func computeOrCheckRef(ref storage.SeriesRef, ls labels.Labels) (storage.SeriesRef, error) {
	// Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
	hashRef := storage.SeriesRef(ls.Hash())
	switch ref {
	case 0:
		return hashRef, nil
	case hashRef:
		return ref, nil
	default:
		// Check for buggy ref while we are at it. This only makes sense for cases without .Then*, because further appendable
		// might have a different ref computation logic e.g. TSDB uses atomic increments.
		return 0, errors.New("teststorage.appender: found input ref not matching labels; potential bug in Appendable usage")
	}
}
// AppendHistogram records a histogram sample as pending and forwards to the
// chained appender when one is configured.
func (a *appender) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if err := a.checkErr(); err != nil {
		return 0, err
	}
	if a.a.appendErrFn != nil {
		if err := a.a.appendErrFn(ls); err != nil {
			return 0, err
		}
	}
	if !a.a.skipRecording {
		a.a.mtx.Lock()
		a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh})
		a.a.mtx.Unlock()
	}
	if a.next != nil {
		return a.next.AppendHistogram(ref, ls, t, h, fh)
	}
	return computeOrCheckRef(ref, ls)
}

// AppendExemplar attaches an exemplar to the most recent pending sample with
// matching labels; it errors when no such sample exists.
func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	if err := a.checkErr(); err != nil {
		return 0, err
	}
	if a.a.appendExemplarsError != nil {
		return 0, a.a.appendExemplarsError
	}
	if !a.a.skipRecording {
		var appended bool
		a.a.mtx.Lock()
		// NOTE(bwplotka): Eventually exemplar has to be attached to a series and soon
		// the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
		// with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
		i := len(a.a.pendingSamples) - 1
		for ; i >= 0; i-- { // Attach exemplars to the last matching sample.
			if labels.Equal(l, a.a.pendingSamples[i].L) {
				a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e)
				appended = true
				break
			}
		}
		a.a.mtx.Unlock()
		if !appended {
			return 0, fmt.Errorf("teststorage.appender: exemplar appender without series; ref %v; l %v; exemplar: %v", ref, l, e)
		}
	}
	if a.next != nil {
		return a.next.AppendExemplar(ref, l, e)
	}
	return computeOrCheckRef(ref, l)
}

// AppendSTZeroSample records the start timestamp as a zero-value float sample.
func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64) (storage.SeriesRef, error) {
	return a.Append(ref, l, st, 0.0) // This will change soon with AppenderV2, but we already report ST as 0 samples.
}

// AppendHistogramSTZeroSample records the start timestamp as an empty
// histogram sample, matching the kind (integer vs float) of the input.
func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if h != nil {
		return a.AppendHistogram(ref, l, st, &histogram.Histogram{}, nil)
	}
	return a.AppendHistogram(ref, l, st, nil, &histogram.FloatHistogram{}) // This will change soon with AppenderV2, but we already report ST as 0 histograms.
}

// UpdateMetadata attaches metadata to the most recent pending sample with
// matching labels; it errors when no such sample exists.
func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
	if err := a.checkErr(); err != nil {
		return 0, err
	}
	if !a.a.skipRecording {
		var updated bool
		a.a.mtx.Lock()
		// NOTE(bwplotka): Eventually metadata has to be attached to a series and soon
		// the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
		// with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
		i := len(a.a.pendingSamples) - 1
		for ; i >= 0; i-- { // Attach metadata to the last matching sample.
			if labels.Equal(l, a.a.pendingSamples[i].L) {
				a.a.pendingSamples[i].M = m
				updated = true
				break
			}
		}
		a.a.mtx.Unlock()
		if !updated {
			return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m)
		}
	}
	if a.next != nil {
		return a.next.UpdateMetadata(ref, l, m)
	}
	return computeOrCheckRef(ref, l)
}
// appenderV2 is the V2 mock appender.
type appenderV2 struct {
	baseAppender
	next storage.AppenderV2 // Chained V2 appender, if any.
}

// AppenderV2 implements storage.AppendableV2. Only one open appender at a
// time is supported; opening a second yields an appender that always errors.
func (a *Appendable) AppenderV2(ctx context.Context) storage.AppenderV2 {
	ret := &appenderV2{baseAppender: baseAppender{a: a}}
	if a.openAppenders.Inc() > 1 {
		ret.err = errors.New("teststorage.Appendable.AppenderV2() concurrent use is not supported; attempted opening new AppenderV2() without Commit/Rollback of the previous one. Extend the implementation if concurrent mock is needed")
		return ret
	}
	if a.next != nil {
		app := a.next.AppenderV2(ctx)
		ret.next, ret.nextTr = app, app
	}
	return ret
}

// Append records one combined sample (value/histogram, metadata, exemplars)
// as pending. Injected exemplar errors are reported via a partial error while
// the sample itself is still recorded.
func (a *appenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
	if err := a.checkErr(); err != nil {
		return 0, err
	}
	if a.a.appendErrFn != nil {
		if err := a.a.appendErrFn(ls); err != nil {
			return 0, err
		}
	}
	var partialErr error
	if !a.a.skipRecording {
		var es []exemplar.Exemplar
		if len(opts.Exemplars) > 0 {
			if a.a.appendExemplarsError != nil {
				// One injected error per provided exemplar.
				var exErrs []error
				for range opts.Exemplars {
					exErrs = append(exErrs, a.a.appendExemplarsError)
				}
				if len(exErrs) > 0 {
					partialErr = &storage.AppendPartialError{ExemplarErrors: exErrs}
				}
			} else {
				// As per AppenderV2 interface, opts.Exemplar slice is unsafe for reuse.
				es = make([]exemplar.Exemplar, len(opts.Exemplars))
				copy(es, opts.Exemplars)
			}
		}
		a.a.mtx.Lock()
		a.a.pendingSamples = append(a.a.pendingSamples, Sample{
			MF: opts.MetricFamilyName,
			M: opts.Metadata,
			L: ls,
			ST: st, T: t,
			V: v, H: h, FH: fh,
			ES: es,
		})
		a.a.mtx.Unlock()
	}
	if a.next != nil {
		ref, err = a.next.Append(ref, ls, st, t, v, h, fh, opts)
		if err != nil {
			return 0, err
		}
	} else {
		ref, err = computeOrCheckRef(ref, ls)
		if err != nil {
			return ref, err
		}
	}
	return ref, partialErr
}
|
go
|
github
|
https://github.com/prometheus/prometheus
|
util/teststorage/appender.go
|
#!/usr/bin/env python
import unittest
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    # Two executables writing to the same --benchmark_out file would clobber
    # each other's results.
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
              "benchmarks causing it to be overwritten") % output_file)
    # JSON inputs are never re-run, so extra benchmark flags are ignored.
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    # Only JSON output can be parsed by the comparison logic.
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
              " is not supported.") % output_type)
        sys.exit(1)
def create_parser():
    """Build the argument parser with three sub-commands.

    Modes:
      benchmarks         - compare two benchmark runs as a whole.
      filters            - compare two filters applied to one benchmark.
      benchmarksfiltered - compare a filter of one run against a filter of another.
    """
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')
    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
             " actual runs, and the aggregates computed. Sometimes, it is "
             "desirable to only view the aggregates. E.g. when there are a lot "
             "of repetitions. Do note that only the display is affected. "
             "Internally, all the actual runs are still used, e.g. for U test.")
    # Options controlling the Mann-Whitney U test.
    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
        alpha_default)
    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')
    # Mode 1: compare two whole benchmark runs.
    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    # Mode 2: compare two filters over the same benchmark.
    parser_b = subparsers.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    # Mode 3: compare a filter of one benchmark against a filter of another.
    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    return parser
def main():
    """Entry point: parse flags, run/load both benchmarks, print the diff report."""
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options
    # Resolve the (test, filter) pair for each side depending on the mode.
    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''
        # NOTE: if test_baseline == test_contender, you are analyzing the stdev
        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]
        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev
        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]
        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev
        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)
    check_inputs(test_baseline, test_contender, benchmark_options)
    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']
    options_baseline = []
    options_contender = []
    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]
    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)
    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        # Rename both filtered sides to a common name so the diff lines up.
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)
    # Diff and output
    output_lines = gbench.report.generate_difference_report(
        json1, json2, args.display_aggregates_only,
        args.utest, args.utest_alpha)
    print(description)
    for ln in output_lines:
        print(ln)
class TestParser(unittest.TestCase):
    """Self-tests for the command-line parser built by create_parser()."""

    def setUp(self):
        # A fresh parser per test, plus paths to the bundled JSON fixtures.
        self.parser = create_parser()
        inputs_dir = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(inputs_dir, 'test1_run1.json')
        self.testInput1 = os.path.join(inputs_dir, 'test1_run2.json')

    def test_benchmarks_basic(self):
        """'benchmarks' mode with just the two test files."""
        args = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertFalse(args.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        """--no-utest disables the U test and leaves the default alpha."""
        args = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(args.display_aggregates_only)
        self.assertFalse(args.utest)
        self.assertEqual(args.utest_alpha, 0.05)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertFalse(args.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        """-a turns on aggregates-only display."""
        args = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertFalse(args.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        """--alpha overrides the U test significance level."""
        args = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.utest_alpha, 0.314)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertFalse(args.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        """--alpha is still parsed even when --no-utest is given."""
        args = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(args.display_aggregates_only)
        self.assertFalse(args.utest)
        self.assertEqual(args.utest_alpha, 0.314)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertFalse(args.benchmark_options)

    def test_benchmarks_with_remainder(self):
        """Trailing positionals become benchmark_options."""
        args = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertEqual(args.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        """Arguments after '--' are also collected as benchmark_options."""
        args = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarks')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertEqual(args.benchmark_options, ['e'])

    def test_filters_basic(self):
        """'filters' mode: one test file plus two filter expressions."""
        args = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'filters')
        self.assertEqual(args.test[0].name, self.testInput0)
        self.assertEqual(args.filter_baseline[0], 'c')
        self.assertEqual(args.filter_contender[0], 'd')
        self.assertFalse(args.benchmark_options)

    def test_filters_with_remainder(self):
        """'filters' mode with trailing benchmark options."""
        args = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'filters')
        self.assertEqual(args.test[0].name, self.testInput0)
        self.assertEqual(args.filter_baseline[0], 'c')
        self.assertEqual(args.filter_contender[0], 'd')
        self.assertEqual(args.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        """'filters' mode with options after '--'."""
        args = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'filters')
        self.assertEqual(args.test[0].name, self.testInput0)
        self.assertEqual(args.filter_baseline[0], 'c')
        self.assertEqual(args.filter_contender[0], 'd')
        self.assertEqual(args.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        """'benchmarksfiltered' mode: file/filter pair for each side."""
        args = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarksfiltered')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.filter_baseline[0], 'c')
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertEqual(args.filter_contender[0], 'e')
        self.assertFalse(args.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        """'benchmarksfiltered' mode with a trailing benchmark option."""
        args = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarksfiltered')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.filter_baseline[0], 'c')
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertEqual(args.filter_contender[0], 'e')
        self.assertEqual(args.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        """'benchmarksfiltered' mode with an option after '--'."""
        args = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertFalse(args.display_aggregates_only)
        self.assertTrue(args.utest)
        self.assertEqual(args.mode, 'benchmarksfiltered')
        self.assertEqual(args.test_baseline[0].name, self.testInput0)
        self.assertEqual(args.filter_baseline[0], 'c')
        self.assertEqual(args.test_contender[0].name, self.testInput1)
        self.assertEqual(args.filter_contender[0], 'e')
        self.assertEqual(args.benchmark_options[0], 'g')
if __name__ == '__main__':
    # Script entry point: run the benchmark comparison tool.
    # Swap the two lines below to run the TestParser self-tests instead.
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
This module is a thin wrapper around librbd.
It currently provides all the synchronous methods of librbd that do
not use callbacks.
Error codes from librbd are turned into exceptions that subclass
:class:`Error`. Almost all methods may raise :class:`Error`
(the base class of all rbd exceptions), :class:`PermissionError`
and :class:`IOError`, in addition to those documented for the
method.
A number of methods have string arguments, which must not be unicode
to interact correctly with librbd. If unicode is passed to these
methods, a :class:`TypeError` will be raised.
"""
# Copyright 2011 Josh Durgin
from ctypes import CDLL, c_char, c_char_p, c_size_t, c_void_p, c_int, \
create_string_buffer, byref, Structure, c_uint64, c_int64, c_uint8, \
CFUNCTYPE
from ctypes.util import find_library
import ctypes
import errno
# Auid constants (2**64 - 1 presumably marks the "anonymous" user; mirrors
# the librados definitions -- TODO confirm against the C headers).
ANONYMOUS_AUID = 0xffffffffffffffff
ADMIN_AUID = 0

# Image feature bit flags, combined into the `features` mask passed to
# RBD.create()/clone() and reported by Image.features().
RBD_FEATURE_LAYERING = 1
RBD_FEATURE_STRIPINGV2 = 2
RBD_FEATURE_EXCLUSIVE_LOCK = 4

# Image flag bits as reported by Image.flags().
RBD_FLAG_OBJECT_MAP_INVALID = 1
class Error(Exception):
    """Base class for all rbd exceptions."""
    pass

class PermissionError(Error):
    """Operation not permitted (EPERM)."""
    pass

class ImageNotFound(Error):
    """Image or snapshot does not exist (ENOENT)."""
    pass

class ImageExists(Error):
    """Image or snapshot already exists (EEXIST)."""
    pass

class IOError(Error):
    """Low-level I/O failure (EIO)."""
    pass

class NoSpace(Error):
    """Out of space (ENOSPC)."""
    pass

class IncompleteWriteError(Error):
    """Fewer bytes were written than requested (raised by Image.write)."""
    pass

class InvalidArgument(Error):
    """Invalid argument passed to librbd (EINVAL)."""
    pass

class LogicError(Error):
    """Internal inconsistency, e.g. librbd reported writing more bytes
    than were requested (raised by Image.write)."""
    pass

class ReadOnlyImage(Error):
    """Write attempted on a read-only image (EROFS)."""
    pass

class ImageBusy(Error):
    """Image is in use by another client or watcher (EBUSY)."""
    pass

class ImageHasSnapshots(Error):
    """Image cannot be removed while snapshots remain (ENOTEMPTY)."""
    pass

class FunctionNotSupported(Error):
    """Installed librbd lacks the requested function (ENOSYS)."""
    pass

class ArgumentOutOfRange(Error):
    """Argument outside the accepted range (EDOM)."""
    pass

class ConnectionShutdown(Error):
    """Connection to the cluster was shut down (ESHUTDOWN)."""
    pass

class Timeout(Error):
    """Operation timed out (ETIMEDOUT)."""
    pass
def make_ex(ret, msg):
    """
    Translate a librbd return code into an exception.

    :param ret: the return code
    :type ret: int
    :param msg: the error message to use
    :type msg: str
    :returns: a subclass of :class:`Error`
    """
    errno_to_cls = {
        errno.EPERM: PermissionError,
        errno.ENOENT: ImageNotFound,
        errno.EIO: IOError,
        errno.ENOSPC: NoSpace,
        errno.EEXIST: ImageExists,
        errno.EINVAL: InvalidArgument,
        errno.EROFS: ReadOnlyImage,
        errno.EBUSY: ImageBusy,
        errno.ENOTEMPTY: ImageHasSnapshots,
        errno.ENOSYS: FunctionNotSupported,
        errno.EDOM: ArgumentOutOfRange,
        errno.ESHUTDOWN: ConnectionShutdown,
        errno.ETIMEDOUT: Timeout,
    }
    # librbd reports failures as negative errno values; normalise first.
    code = abs(ret)
    cls = errno_to_cls.get(code)
    if cls is not None:
        return cls(msg)
    return Error(msg + (": error code %d" % code))
class rbd_image_info_t(Structure):
    # ctypes mirror of the C struct filled in by rbd_stat(); field order,
    # types, and buffer sizes must match librbd's ABI exactly -- do not
    # reorder or resize.
    _fields_ = [("size", c_uint64),
                ("obj_size", c_uint64),
                ("num_objs", c_uint64),
                ("order", c_int),
                ("block_name_prefix", c_char * 24),
                ("parent_pool", c_int64),
                ("parent_name", c_char * 96)]
class rbd_snap_info_t(Structure):
    # ctypes mirror of the C snapshot-info struct used by rbd_snap_list();
    # field order and types must match librbd's ABI exactly.
    _fields_ = [("id", c_uint64),
                ("size", c_uint64),
                ("name", c_char_p)]
def load_librbd():
    """
    Load the librbd shared library and return its CDLL handle.

    :raises: :class:`EnvironmentError` if the library cannot be loaded
    """
    path = find_library('rbd')
    if path:
        return CDLL(path)
    # find_library() doesn't search LD_LIBRARY_PATH and doesn't seem to
    # work on centos 6.4 (see e46d2ca067b5), so fall back to loading the
    # soname directly.
    try:
        return CDLL('librbd.so.1')
    except OSError as e:
        raise EnvironmentError("Unable to load librbd: %s" % e)
class RBD(object):
    """
    This class wraps librbd CRUD functions.

    Each instance holds its own handle to the librbd shared library.
    """
    def __init__(self):
        self.librbd = load_librbd()

    def version(self):
        """
        Get the version number of the ``librbd`` C library.

        :returns: a tuple of ``(major, minor, extra)`` components of the
                  librbd version
        """
        major = c_int(0)
        minor = c_int(0)
        extra = c_int(0)
        self.librbd.rbd_version(byref(major), byref(minor), byref(extra))
        return (major.value, minor.value, extra.value)

    def create(self, ioctx, name, size, order=None, old_format=True,
               features=0, stripe_unit=0, stripe_count=0):
        """
        Create an rbd image.

        :param ioctx: the context in which to create the image
        :type ioctx: :class:`rados.Ioctx`
        :param name: what the image is called
        :type name: str
        :param size: how big the image is in bytes
        :type size: int
        :param order: the image is split into (2**order) byte objects
        :type order: int
        :param old_format: whether to create an old-style image that
                           is accessible by old clients, but can't
                           use more advanced features like layering.
        :type old_format: bool
        :param features: bitmask of features to enable
        :type features: int
        :param stripe_unit: stripe unit in bytes (default 0 for object size)
        :type stripe_unit: int
        :param stripe_count: objects to stripe over before looping
        :type stripe_count: int
        :raises: :class:`ImageExists`
        :raises: :class:`TypeError`
        :raises: :class:`InvalidArgument`
        :raises: :class:`FunctionNotSupported`
        """
        # order=0 tells librbd to pick its default object size.
        if order is None:
            order = 0
        if not isinstance(name, str):
            raise TypeError('name must be a string')
        if old_format:
            # Format-1 images cannot carry feature bits or custom striping.
            if features != 0 or stripe_unit != 0 or stripe_count != 0:
                raise InvalidArgument('format 1 images do not support feature'
                                      ' masks or non-default striping')
            # order is passed byref; any value librbd writes back is
            # discarded here.
            ret = self.librbd.rbd_create(ioctx.io, c_char_p(name),
                                         c_uint64(size),
                                         byref(c_int(order)))
        else:
            if not hasattr(self.librbd, 'rbd_create2'):
                raise FunctionNotSupported('installed version of librbd does'
                                           ' not support format 2 images')
            # rbd_create3 (striping support) may be missing in older librbd;
            # fall back to rbd_create2 when striping was not requested.
            has_create3 = hasattr(self.librbd, 'rbd_create3')
            if (stripe_unit != 0 or stripe_count != 0) and not has_create3:
                raise FunctionNotSupported('installed version of librbd does'
                                           ' not support stripe unit or count')
            if has_create3:
                ret = self.librbd.rbd_create3(ioctx.io, c_char_p(name),
                                              c_uint64(size),
                                              c_uint64(features),
                                              byref(c_int(order)),
                                              c_uint64(stripe_unit),
                                              c_uint64(stripe_count))
            else:
                ret = self.librbd.rbd_create2(ioctx.io, c_char_p(name),
                                              c_uint64(size),
                                              c_uint64(features),
                                              byref(c_int(order)))
        if ret < 0:
            raise make_ex(ret, 'error creating image')

    def clone(self, p_ioctx, p_name, p_snapname, c_ioctx, c_name,
              features=0, order=None):
        """
        Clone a parent rbd snapshot into a COW sparse child.

        :param p_ioctx: the parent context that represents the parent snap
        :type p_ioctx: :class:`rados.Ioctx`
        :param p_name: the parent image name
        :type p_name: str
        :param p_snapname: the parent image snapshot name
        :type p_snapname: str
        :param c_ioctx: the child context that represents the new clone
        :type c_ioctx: :class:`rados.Ioctx`
        :param c_name: the clone (child) name
        :type c_name: str
        :param features: bitmask of features to enable; if set, must include layering
        :type features: int
        :param order: the image is split into (2**order) byte objects
        :type order: int
        :raises: :class:`TypeError`
        :raises: :class:`InvalidArgument`
        :raises: :class:`ImageExists`
        :raises: :class:`FunctionNotSupported`
        :raises: :class:`ArgumentOutOfRange`
        """
        if order is None:
            order = 0
        if not isinstance(p_snapname, str) or not isinstance(p_name, str):
            raise TypeError('parent name and snapname must be strings')
        if not isinstance(c_name, str):
            raise TypeError('child name must be a string')
        ret = self.librbd.rbd_clone(p_ioctx.io, c_char_p(p_name),
                                    c_char_p(p_snapname),
                                    c_ioctx.io, c_char_p(c_name),
                                    c_uint64(features),
                                    byref(c_int(order)))
        if ret < 0:
            raise make_ex(ret, 'error creating clone')

    def list(self, ioctx):
        """
        List image names.

        :param ioctx: determines which RADOS pool is read
        :type ioctx: :class:`rados.Ioctx`
        :returns: list -- a list of image names
        """
        # Retry with a doubled buffer (librbd updates `size` byref) until
        # the NUL-separated name list fits.
        size = c_size_t(512)
        while True:
            c_names = create_string_buffer(size.value)
            ret = self.librbd.rbd_list(ioctx.io, byref(c_names), byref(size))
            if ret >= 0:
                break
            elif ret != -errno.ERANGE:
                raise make_ex(ret, 'error listing images')
        # NOTE(review): this module targets Python 2 -- on Python 3,
        # filter() would return an iterator rather than a list.
        return filter(lambda name: name != '', c_names.raw.split('\0'))

    def remove(self, ioctx, name):
        """
        Delete an RBD image. This may take a long time, since it does
        not return until every object that comprises the image has
        been deleted. Note that all snapshots must be deleted before
        the image can be removed. If there are snapshots left,
        :class:`ImageHasSnapshots` is raised. If the image is still
        open, or the watch from a crashed client has not expired,
        :class:`ImageBusy` is raised.

        :param ioctx: determines which RADOS pool the image is in
        :type ioctx: :class:`rados.Ioctx`
        :param name: the name of the image to remove
        :type name: str
        :raises: :class:`ImageNotFound`, :class:`ImageBusy`,
                 :class:`ImageHasSnapshots`
        """
        if not isinstance(name, str):
            raise TypeError('name must be a string')
        ret = self.librbd.rbd_remove(ioctx.io, c_char_p(name))
        if ret != 0:
            raise make_ex(ret, 'error removing image')

    def rename(self, ioctx, src, dest):
        """
        Rename an RBD image.

        :param ioctx: determines which RADOS pool the image is in
        :type ioctx: :class:`rados.Ioctx`
        :param src: the current name of the image
        :type src: str
        :param dest: the new name of the image
        :type dest: str
        :raises: :class:`ImageNotFound`, :class:`ImageExists`
        """
        if not isinstance(src, str) or not isinstance(dest, str):
            raise TypeError('src and dest must be strings')
        ret = self.librbd.rbd_rename(ioctx.io, c_char_p(src), c_char_p(dest))
        if ret != 0:
            raise make_ex(ret, 'error renaming image')
class Image(object):
"""
This class represents an RBD image. It is used to perform I/O on
the image and interact with snapshots.
**Note**: Any method of this class may raise :class:`ImageNotFound`
if the image has been deleted.
"""
def __init__(self, ioctx, name, snapshot=None, read_only=False):
"""
Open the image at the given snapshot.
If a snapshot is specified, the image will be read-only, unless
:func:`Image.set_snap` is called later.
If read-only mode is used, metadata for the :class:`Image`
object (such as which snapshots exist) may become obsolete. See
the C api for more details.
To clean up from opening the image, :func:`Image.close` should
be called. For ease of use, this is done automatically when
an :class:`Image` is used as a context manager (see :pep:`343`).
:param ioctx: determines which RADOS pool the image is in
:type ioctx: :class:`rados.Ioctx`
:param name: the name of the image
:type name: str
:param snapshot: which snapshot to read from
:type snaphshot: str
:param read_only: whether to open the image in read-only mode
:type read_only: bool
"""
self.closed = True
self.librbd = load_librbd()
self.image = c_void_p()
self.name = name
if not isinstance(name, str):
raise TypeError('name must be a string')
if snapshot is not None and not isinstance(snapshot, str):
raise TypeError('snapshot must be a string or None')
if read_only:
if not hasattr(self.librbd, 'rbd_open_read_only'):
raise FunctionNotSupported('installed version of librbd does '
'not support open in read-only mode')
ret = self.librbd.rbd_open_read_only(ioctx.io, c_char_p(name),
byref(self.image),
c_char_p(snapshot))
else:
ret = self.librbd.rbd_open(ioctx.io, c_char_p(name),
byref(self.image), c_char_p(snapshot))
if ret != 0:
raise make_ex(ret, 'error opening image %s at snapshot %s' % (name, snapshot))
self.closed = False
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
"""
Closes the image. See :func:`close`
"""
self.close()
return False
def close(self):
"""
Release the resources used by this image object.
After this is called, this object should not be used.
"""
if not self.closed:
self.closed = True
self.librbd.rbd_close(self.image)
def __del__(self):
self.close()
def __str__(self):
s = "rbd.Image(" + dict.__repr__(self.__dict__) + ")"
return s
def resize(self, size):
"""
Change the size of the image.
:param size: the new size of the image
:type size: int
"""
ret = self.librbd.rbd_resize(self.image, c_uint64(size))
if ret < 0:
raise make_ex(ret, 'error resizing image %s' % (self.name,))
def stat(self):
"""
Get information about the image. Currently parent pool and
parent name are always -1 and ''.
:returns: dict - contains the following keys:
* ``size`` (int) - the size of the image in bytes
* ``obj_size`` (int) - the size of each object that comprises the
image
* ``num_objs`` (int) - the number of objects in the image
* ``order`` (int) - log_2(object_size)
* ``block_name_prefix`` (str) - the prefix of the RADOS objects used
to store the image
* ``parent_pool`` (int) - deprecated
* ``parent_name`` (str) - deprecated
See also :meth:`format` and :meth:`features`.
"""
info = rbd_image_info_t()
ret = self.librbd.rbd_stat(self.image, byref(info), ctypes.sizeof(info))
if ret != 0:
raise make_ex(ret, 'error getting info for image %s' % (self.name,))
return {
'size' : info.size,
'obj_size' : info.obj_size,
'num_objs' : info.num_objs,
'order' : info.order,
'block_name_prefix' : info.block_name_prefix,
'parent_pool' : info.parent_pool,
'parent_name' : info.parent_name
}
def parent_info(self):
"""
Get information about a cloned image's parent (if any)
:returns: tuple - ``(pool name, image name, snapshot name)`` components
of the parent image
:raises: :class:`ImageNotFound` if the image doesn't have a parent
"""
ret = -errno.ERANGE
size = 8
while ret == -errno.ERANGE and size <= 4096:
pool = create_string_buffer(size)
name = create_string_buffer(size)
snapname = create_string_buffer(size)
ret = self.librbd.rbd_get_parent_info(self.image, byref(pool),
c_size_t(size),
byref(name),
c_size_t(size),
byref(snapname),
c_size_t(size))
if ret == -errno.ERANGE:
size *= 2
if (ret != 0):
raise make_ex(ret, 'error getting parent info for image %s' % (self.name,))
return (pool.value, name.value, snapname.value)
def old_format(self):
"""
Find out whether the image uses the old RBD format.
:returns: bool - whether the image uses the old RBD format
"""
old = c_uint8()
ret = self.librbd.rbd_get_old_format(self.image, byref(old))
if (ret != 0):
raise make_ex(ret, 'error getting old_format for image' % (self.name))
return old.value != 0
def size(self):
"""
Get the size of the image. If open to a snapshot, returns the
size of that snapshot.
:returns: the size of the image in bytes
"""
image_size = c_uint64()
ret = self.librbd.rbd_get_size(self.image, byref(image_size))
if (ret != 0):
raise make_ex(ret, 'error getting size for image' % (self.name))
return image_size.value
def features(self):
"""
Gets the features bitmask of the image.
:returns: int - the features bitmask of the image
"""
features = c_uint64()
ret = self.librbd.rbd_get_features(self.image, byref(features))
if (ret != 0):
raise make_ex(ret, 'error getting features for image' % (self.name))
return features.value
def overlap(self):
"""
Gets the number of overlapping bytes between the image and its parent
image. If open to a snapshot, returns the overlap between the snapshot
and the parent image.
:returns: int - the overlap in bytes
:raises: :class:`ImageNotFound` if the image doesn't have a parent
"""
overlap = c_uint64()
ret = self.librbd.rbd_get_overlap(self.image, byref(overlap))
if (ret != 0):
raise make_ex(ret, 'error getting overlap for image' % (self.name))
return overlap.value
def flags(self):
"""
Gets the flags bitmask of the image.
:returns: int - the flags bitmask of the image
"""
flags = c_uint64()
ret = self.librbd.rbd_get_flags(self.image, byref(flags))
if (ret != 0):
raise make_ex(ret, 'error getting flags for image' % (self.name))
return flags.value
def is_exclusive_lock_owner(self):
"""
Gets the status of the image exclusive lock.
:returns: bool - true if the image is exclusively locked
"""
owner = c_int()
ret = self.librbd.rbd_is_exclusive_lock_owner(self.image, byref(owner))
if (ret != 0):
raise make_ex(ret, 'error getting lock status for image' % (self.name))
return owner.value == 1
def copy(self, dest_ioctx, dest_name):
"""
Copy the image to another location.
:param dest_ioctx: determines which pool to copy into
:type dest_ioctx: :class:`rados.Ioctx`
:param dest_name: the name of the copy
:type dest_name: str
:raises: :class:`ImageExists`
"""
if not isinstance(dest_name, str):
raise TypeError('dest_name must be a string')
ret = self.librbd.rbd_copy(self.image, dest_ioctx.io, c_char_p(dest_name))
if ret < 0:
raise make_ex(ret, 'error copying image %s to %s' % (self.name, dest_name))
def list_snaps(self):
"""
Iterate over the snapshots of an image.
:returns: :class:`SnapIterator`
"""
return SnapIterator(self)
def create_snap(self, name):
"""
Create a snapshot of the image.
:param name: the name of the snapshot
:type name: str
:raises: :class:`ImageExists`
"""
if not isinstance(name, str):
raise TypeError('name must be a string')
ret = self.librbd.rbd_snap_create(self.image, c_char_p(name))
if ret != 0:
raise make_ex(ret, 'error creating snapshot %s from %s' % (name, self.name))
def remove_snap(self, name):
"""
Delete a snapshot of the image.
:param name: the name of the snapshot
:type name: str
:raises: :class:`IOError`, :class:`ImageBusy`
"""
if not isinstance(name, str):
raise TypeError('name must be a string')
ret = self.librbd.rbd_snap_remove(self.image, c_char_p(name))
if ret != 0:
raise make_ex(ret, 'error removing snapshot %s from %s' % (name, self.name))
def rollback_to_snap(self, name):
"""
Revert the image to its contents at a snapshot. This is a
potentially expensive operation, since it rolls back each
object individually.
:param name: the snapshot to rollback to
:type name: str
:raises: :class:`IOError`
"""
if not isinstance(name, str):
raise TypeError('name must be a string')
ret = self.librbd.rbd_snap_rollback(self.image, c_char_p(name))
if ret != 0:
raise make_ex(ret, 'error rolling back image %s to snapshot %s' % (self.name, name))
def protect_snap(self, name):
"""
Mark a snapshot as protected. This means it can't be deleted
until it is unprotected.
:param name: the snapshot to protect
:type name: str
:raises: :class:`IOError`, :class:`ImageNotFound`
"""
if not isinstance(name, str):
raise TypeError('name must be a string')
ret = self.librbd.rbd_snap_protect(self.image, c_char_p(name))
if ret != 0:
raise make_ex(ret, 'error protecting snapshot %s@%s' % (self.name, name))
def unprotect_snap(self, name):
"""
Mark a snapshot unprotected. This allows it to be deleted if
it was protected.
:param name: the snapshot to unprotect
:type name: str
:raises: :class:`IOError`, :class:`ImageNotFound`
"""
if not isinstance(name, str):
raise TypeError('name must be a string')
ret = self.librbd.rbd_snap_unprotect(self.image, c_char_p(name))
if ret != 0:
raise make_ex(ret, 'error unprotecting snapshot %s@%s' % (self.name, name))
def is_protected_snap(self, name):
"""
Find out whether a snapshot is protected from deletion.
:param name: the snapshot to check
:type name: str
:returns: bool - whether the snapshot is protected
:raises: :class:`IOError`, :class:`ImageNotFound`
"""
if not isinstance(name, str):
raise TypeError('name must be a string')
is_protected = c_int()
ret = self.librbd.rbd_snap_is_protected(self.image, c_char_p(name),
byref(is_protected))
if ret != 0:
raise make_ex(ret, 'error checking if snapshot %s@%s is protected' % (self.name, name))
return is_protected.value == 1
def set_snap(self, name):
"""
Set the snapshot to read from. Writes will raise ReadOnlyImage
while a snapshot is set. Pass None to unset the snapshot
(reads come from the current image) , and allow writing again.
:param name: the snapshot to read from, or None to unset the snapshot
:type name: str or None
"""
if name is not None and not isinstance(name, str):
raise TypeError('name must be a string')
ret = self.librbd.rbd_snap_set(self.image, c_char_p(name))
if ret != 0:
raise make_ex(ret, 'error setting image %s to snapshot %s' % (self.name, name))
def read(self, offset, length, fadvise_flags=0):
"""
Read data from the image. Raises :class:`InvalidArgument` if
part of the range specified is outside the image.
:param offset: the offset to start reading at
:type offset: int
:param length: how many bytes to read
:type length: int
:param fadvise_flags: fadvise flags for this read
:type fadvise_flags: int
:returns: str - the data read
:raises: :class:`InvalidArgument`, :class:`IOError`
"""
ret_buf = create_string_buffer(length)
if fadvise_flags == 0:
ret = self.librbd.rbd_read(self.image, c_uint64(offset),
c_size_t(length), byref(ret_buf))
else:
ret = self.librbd.rbd_read2(self.image, c_uint64(offset),
c_size_t(length), byref(ret_buf), c_int(fadvise_flags))
if ret < 0:
raise make_ex(ret, 'error reading %s %ld~%ld' % (self.image, offset, length))
return ctypes.string_at(ret_buf, ret)
def diff_iterate(self, offset, length, from_snapshot, iterate_cb):
"""
Iterate over the changed extents of an image.
This will call iterate_cb with three arguments:
(offset, length, exists)
where the changed extent starts at offset bytes, continues for
length bytes, and is full of data (if exists is True) or zeroes
(if exists is False).
If from_snapshot is None, it is interpreted as the beginning
of time and this generates all allocated extents.
The end version is whatever is currently selected (via set_snap)
for the image.
Raises :class:`InvalidArgument` if from_snapshot is after
the currently set snapshot.
Raises :class:`ImageNotFound` if from_snapshot is not the name
of a snapshot of the image.
:param offset: start offset in bytes
:type offset: int
:param length: size of region to report on, in bytes
:type length: int
:param from_snapshot: starting snapshot name, or None
:type from_snapshot: str or None
:param iterate_cb: function to call for each extent
:type iterate_cb: function acception arguments for offset,
length, and exists
:raises: :class:`InvalidArgument`, :class:`IOError`,
:class:`ImageNotFound`
"""
if from_snapshot is not None and not isinstance(from_snapshot, str):
raise TypeError('client must be a string')
RBD_DIFF_CB = CFUNCTYPE(c_int, c_uint64, c_size_t, c_int, c_void_p)
cb_holder = DiffIterateCB(iterate_cb)
cb = RBD_DIFF_CB(cb_holder.callback)
ret = self.librbd.rbd_diff_iterate(self.image,
c_char_p(from_snapshot),
c_uint64(offset),
c_uint64(length),
cb,
c_void_p(None))
if ret < 0:
msg = 'error generating diff from snapshot %s' % from_snapshot
raise make_ex(ret, msg)
def write(self, data, offset, fadvise_flags=0):
"""
Write data to the image. Raises :class:`InvalidArgument` if
part of the write would fall outside the image.
:param data: the data to be written
:type data: str
:param offset: where to start writing data
:type offset: int
:param fadvise_flags: fadvise flags for this write
:type fadvise_flags: int
:returns: int - the number of bytes written
:raises: :class:`IncompleteWriteError`, :class:`LogicError`,
:class:`InvalidArgument`, :class:`IOError`
"""
if not isinstance(data, str):
raise TypeError('data must be a string')
length = len(data)
if fadvise_flags == 0:
ret = self.librbd.rbd_write(self.image, c_uint64(offset),
c_size_t(length), c_char_p(data))
else:
ret = self.librbd.rbd_write2(self.image, c_uint64(offset),
c_size_t(length), c_char_p(data), c_int(fadvise_flags))
if ret == length:
return ret
elif ret < 0:
raise make_ex(ret, "error writing to %s" % (self.name,))
elif ret < length:
raise IncompleteWriteError("Wrote only %ld out of %ld bytes" % (ret, length))
else:
raise LogicError("logic error: rbd_write(%s) \
returned %d, but %d was the maximum number of bytes it could have \
written." % (self.name, ret, length))
def discard(self, offset, length):
"""
Trim the range from the image. It will be logically filled
with zeroes.
"""
ret = self.librbd.rbd_discard(self.image,
c_uint64(offset),
c_uint64(length))
if ret < 0:
msg = 'error discarding region %d~%d' % (offset, length)
raise make_ex(ret, msg)
def flush(self):
"""
Block until all writes are fully flushed if caching is enabled.
"""
ret = self.librbd.rbd_flush(self.image)
if ret < 0:
raise make_ex(ret, 'error flushing image')
def invalidate_cache(self):
"""
Drop any cached data for the image.
"""
ret = self.librbd.rbd_invalidate_cache(self.image)
if ret < 0:
raise make_ex(ret, 'error invalidating cache')
def stripe_unit(self):
"""
Returns the stripe unit used for the image.
"""
stripe_unit = c_uint64()
ret = self.librbd.rbd_get_stripe_unit(self.image, byref(stripe_unit))
if ret != 0:
raise make_ex(ret, 'error getting stripe unit for image' % (self.name))
return stripe_unit.value
def stripe_count(self):
"""
Returns the stripe count used for the image.
"""
stripe_count = c_uint64()
ret = self.librbd.rbd_get_stripe_count(self.image, byref(stripe_count))
if ret != 0:
raise make_ex(ret, 'error getting stripe count for image' % (self.name))
return stripe_count.value
def flatten(self):
"""
Flatten clone image (copy all blocks from parent to child)
"""
ret = self.librbd.rbd_flatten(self.image)
if (ret < 0):
raise make_ex(ret, "error flattening %s" % self.name)
def list_children(self):
"""
List children of the currently set snapshot (set via set_snap()).
:returns: list - a list of (pool name, image name) tuples
"""
pools_size = c_size_t(512)
images_size = c_size_t(512)
while True:
c_pools = create_string_buffer(pools_size.value)
c_images = create_string_buffer(images_size.value)
ret = self.librbd.rbd_list_children(self.image,
byref(c_pools),
byref(pools_size),
byref(c_images),
byref(images_size))
if ret >= 0:
break
elif ret != -errno.ERANGE:
raise make_ex(ret, 'error listing images')
if ret == 0:
return []
pools = c_pools.raw[:pools_size.value - 1].split('\0')
images = c_images.raw[:images_size.value - 1].split('\0')
return zip(pools, images)
def list_lockers(self):
"""
List clients that have locked the image and information
about the lock.
:returns: dict - contains the following keys:
* ``tag`` - the tag associated with the lock (every
additional locker must use the same tag)
* ``exclusive`` - boolean indicating whether the
lock is exclusive or shared
* ``lockers`` - a list of (client, cookie, address)
tuples
"""
clients_size = c_size_t(512)
cookies_size = c_size_t(512)
addrs_size = c_size_t(512)
tag_size = c_size_t(512)
exclusive = c_int(0)
while True:
c_clients = create_string_buffer(clients_size.value)
c_cookies = create_string_buffer(cookies_size.value)
c_addrs = create_string_buffer(addrs_size.value)
c_tag = create_string_buffer(tag_size.value)
ret = self.librbd.rbd_list_lockers(self.image,
byref(exclusive),
byref(c_tag),
byref(tag_size),
byref(c_clients),
byref(clients_size),
byref(c_cookies),
byref(cookies_size),
byref(c_addrs),
byref(addrs_size))
if ret >= 0:
break
elif ret != -errno.ERANGE:
raise make_ex(ret, 'error listing images')
if ret == 0:
return []
clients = c_clients.raw[:clients_size.value - 1].split('\0')
cookies = c_cookies.raw[:cookies_size.value - 1].split('\0')
addrs = c_addrs.raw[:addrs_size.value - 1].split('\0')
return {
'tag' : c_tag.value,
'exclusive' : exclusive.value == 1,
'lockers' : zip(clients, cookies, addrs),
}
def lock_exclusive(self, cookie):
"""
Take an exclusive lock on the image.
:raises: :class:`ImageBusy` if a different client or cookie locked it
:class:`ImageExists` if the same client and cookie locked it
"""
if not isinstance(cookie, str):
raise TypeError('cookie must be a string')
ret = self.librbd.rbd_lock_exclusive(self.image, c_char_p(cookie))
if ret < 0:
raise make_ex(ret, 'error acquiring exclusive lock on image')
def lock_shared(self, cookie, tag):
    """
    Take a shared lock on the image. The tag must match
    that of the existing lockers, if any.

    :raises: :class:`ImageBusy` if a different client or cookie locked it
             :class:`ImageExists` if the same client and cookie locked it
    """
    # Validate both arguments up front (cookie first, matching the
    # original check order); librbd expects C strings.
    for label, value in (('cookie', cookie), ('tag', tag)):
        if not isinstance(value, str):
            raise TypeError('%s must be a string' % label)
    rc = self.librbd.rbd_lock_shared(self.image, c_char_p(cookie),
                                     c_char_p(tag))
    if rc < 0:
        raise make_ex(rc, 'error acquiring shared lock on image')
def unlock(self, cookie):
    """
    Release a lock on the image that was locked by this rados client.
    """
    # Same validation as the lock methods: librbd needs a C string.
    if not isinstance(cookie, str):
        raise TypeError('cookie must be a string')
    rc = self.librbd.rbd_unlock(self.image, c_char_p(cookie))
    if rc < 0:
        raise make_ex(rc, 'error unlocking image')
def break_lock(self, client, cookie):
    """
    Release a lock held by another rados client.

    :param client: id of the client holding the lock (as reported by
                   ``list_lockers``)
    :param cookie: the lock's cookie
    """
    if not isinstance(client, str):
        raise TypeError('client must be a string')
    if not isinstance(cookie, str):
        raise TypeError('cookie must be a string')
    ret = self.librbd.rbd_break_lock(self.image, c_char_p(client),
                                     c_char_p(cookie))
    if ret < 0:
        # BUG FIX: message previously said 'error unlocking image'
        # (copy/paste from unlock()); this path breaks another
        # client's lock, so say so.
        raise make_ex(ret, 'error breaking lock on image')
class DiffIterateCB(object):
    """Adapter delivering C-style diff-extent callbacks to a Python callable.

    The wrapped callable receives (offset, length, exists) where the
    integer ``exists`` flag has been converted to a bool; ``callback``
    always reports success (0) back to the caller.
    """

    def __init__(self, cb):
        self.cb = cb

    def callback(self, offset, length, exists, unused):
        present = (exists == 1)
        self.cb(offset, length, present)
        return 0
class SnapIterator(object):
    """
    Iterator over snapshot info for an image.

    Yields a dictionary containing information about a snapshot.

    Keys are:

    * ``id`` (int) - numeric identifier of the snapshot
    * ``size`` (int) - size of the image at the time of snapshot (in bytes)
    * ``name`` (str) - name of the snapshot
    """
    def __init__(self, image):
        # Keep our own reference to the librbd handle so __del__ can free
        # the snapshot list even if the owning image object is gone first.
        self.librbd = image.librbd
        num_snaps = c_int(10)
        while True:
            # Allocate an array of rbd_snap_info_t structs. If it turns out
            # to be too small, librbd returns -ERANGE (presumably updating
            # num_snaps with the required count -- confirm against librbd
            # docs) and we retry with a bigger array.
            self.snaps = (rbd_snap_info_t * num_snaps.value)()
            ret = self.librbd.rbd_snap_list(image.image, byref(self.snaps),
                                            byref(num_snaps))
            if ret >= 0:
                # Non-negative return value is the number of snapshots filled.
                self.num_snaps = ret
                break
            elif ret != -errno.ERANGE:
                raise make_ex(ret, 'error listing snapshots for image %s' % (image.name,))

    def __iter__(self):
        # NOTE(review): xrange is Python 2 only; this module predates py3.
        for i in xrange(self.num_snaps):
            yield {
                'id' : self.snaps[i].id,
                'size' : self.snaps[i].size,
                'name' : self.snaps[i].name,
                }

    def __del__(self):
        # Release the C-side resources associated with the snapshot list.
        self.librbd.rbd_snap_list_end(self.snaps)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
User facing error messages during course import and export
"""
from django.utils.translation import ugettext as _
COURSE_ALREADY_EXIST = _('Aborting import because a course with this id: {} already exists.')
COURSE_PERMISSION_DENIED = _('Permission denied. You do not have write access to this course.')
FILE_MISSING = _('Could not find the {0} file in the package.')
FILE_NOT_FOUND = _('Uploaded Tar file not found. Try again.')
INVALID_FILE_TYPE = _('We only support uploading a .tar.gz file.')
LIBRARY_ALREADY_EXIST = _('Aborting import since a library with this id already exists.')
OLX_VALIDATION_FAILED = _('Course olx validation failed. Please check your email.')
PERMISSION_DENIED = _('Permission denied')
UNKNOWN_ERROR_IN_IMPORT = _('Unknown error while importing course.')
UNKNOWN_ERROR_IN_UNPACKING = _('An Unknown error occurred during the unpacking step.')
UNKNOWN_USER_ID = _('Unknown User ID: {0}')
UNSAFE_TAR_FILE = _('Unsafe tar file. Aborting import.')
USER_PERMISSION_DENIED = _('User permission denied.')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, David Stygstra <david.stygstra@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: modprobe
short_description: Load or unload kernel modules
version_added: 1.4
author:
- David Stygstra (@stygstra)
- Julien Dauphant
- Matt Jeffery
description:
- Load or unload kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
description:
- Whether the module should be present or absent.
choices: [ absent, present ]
default: present
params:
description:
- Modules parameters.
default: ''
version_added: "1.6"
'''
EXAMPLES = '''
- name: Add the 802.1q module
modprobe:
name: 8021q
state: present
- name: Add the dummy module
modprobe:
name: dummy
state: present
params: 'numdummies=2'
'''
import shlex
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Ansible entry point: ensure the named kernel module is loaded
    (state=present) or unloaded (state=absent), honoring check mode."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            params=dict(type='str', default=''),
        ),
        supports_check_mode=True,
    )

    name = module.params['name']
    params = module.params['params']
    state = module.params['state']

    # FIXME: Adding all parameters as result values is useless
    result = dict(
        changed=False,
        name=name,
        params=params,
        state=state,
    )

    # Check whether the module is already loaded. /proc/modules lists one
    # module per line; dashes in module names appear as underscores there.
    present = False
    try:
        # BUG FIX: use a context manager so the file handle is closed even
        # when iteration raises (the original leaked it in that case).
        with open('/proc/modules') as modules:
            module_name = name.replace('-', '_') + ' '
            present = any(line.startswith(module_name) for line in modules)
    except IOError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)

    # Add/remove module as needed
    if state == 'present':
        if not present:
            if not module.check_mode:
                command = [module.get_bin_path('modprobe', True), name]
                command.extend(shlex.split(params))
                rc, out, err = module.run_command(command)
                if rc != 0:
                    module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
            result['changed'] = True
    elif state == 'absent':
        if present:
            if not module.check_mode:
                rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
                if rc != 0:
                    module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
            result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_dag_tags
short_description: Create tags for DAG's on PAN-OS devices.
description:
- Create the ip address to tag associations. Tags will in turn be used to create DAG's
author: "Vinay Venkataraghavan (@vinayvenkat)"
version_added: "2.5"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
notes:
- Checkmode is not supported.
- Panorama is not supported.
options:
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
description:
description:
- The purpose / objective of the static Address Group
commit:
description:
- commit if changed
default: true
type: bool
devicegroup:
description: >
- Device groups are used for the Panorama interaction with Firewall(s). The group must exists on Panorama.
If device group is not define we assume that we are contacting Firewall.
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
tag_names:
description:
- The list of the tags that will be added or removed from the IP address.
ip_to_register:
description:
- IP that will be registered with the given tag names.
extends_documentation_fragment: panos
'''
EXAMPLES = '''
- name: Create the tags to map IP addresses
panos_dag_tags:
ip_address: "{{ ip_address }}"
password: "{{ password }}"
ip_to_register: "{{ ip_to_register }}"
tag_names: "{{ tag_names }}"
description: "Tags to allow certain IP's to access various SaaS Applications"
operation: 'add'
tags: "adddagip"
- name: List the IP address to tag mapping
panos_dag_tags:
ip_address: "{{ ip_address }}"
password: "{{ password }}"
tag_names: "{{ tag_names }}"
description: "List the IP address to tag mapping"
operation: 'list'
tags: "listdagip"
- name: Unregister an IP address from a tag mapping
panos_dag_tags:
ip_address: "{{ ip_address }}"
password: "{{ password }}"
ip_to_register: "{{ ip_to_register }}"
tag_names: "{{ tag_names }}"
description: "Unregister IP address from tag mappings"
operation: 'delete'
tags: "deletedagip"
'''
RETURN = '''
# Default return values
'''
try:
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def get_devicegroup(device, devicegroup):
    """Return the panorama.DeviceGroup named *devicegroup*, or False when
    no device group with that name is known to *device*."""
    for candidate in device.refresh_devices():
        if isinstance(candidate, panorama.DeviceGroup) and candidate.name == devicegroup:
            return candidate
    return False
def register_ip_to_tag_map(device, ip_addresses, tag):
    """Register *ip_addresses* under *tag* via the device's user-id API.

    Returns (True, None) on success, or (False, exception) when the PAN
    XAPI call fails.
    """
    try:
        device.userid.register(ip_addresses, tag)
    except PanXapiError as err:
        return False, err
    return True, None
def get_all_address_group_mapping(device):
    """Fetch all registered ip/tag mappings from the device.

    Returns (mapping, None) on success, or (False, exception) when the
    PAN XAPI call fails.
    """
    try:
        return device.userid.get_registered_ip(), None
    except PanXapiError as err:
        return False, err
def delete_address_from_mapping(device, ip_address, tags):
    """Unregister *ip_address* from *tags* via the device's user-id API.

    Returns (True, None) on success, or (False, exception) when the PAN
    XAPI call fails.
    """
    try:
        # The XAPI return value was previously bound to an unused local;
        # success is signalled by the absence of an exception.
        device.userid.unregister(ip_address, tags)
    except PanXapiError as err:
        return False, err
    return True, None
def main():
    """Module entry point: add, list or delete DAG ip-to-tag mappings on a
    PAN-OS device (or Panorama device group), then optionally commit."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        api_key=dict(no_log=True),
        devicegroup=dict(default=None),
        description=dict(default=None),
        ip_to_register=dict(type='str', required=False),
        tag_names=dict(type='list', required=True),
        commit=dict(type='bool', default=True),
        operation=dict(type='str', required=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    commit = module.params['commit']
    devicegroup = module.params['devicegroup']
    operation = module.params['operation']

    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)

    # If Panorama, validate the devicegroup
    dev_group = None
    if devicegroup and isinstance(device, panorama.Panorama):
        dev_group = get_devicegroup(device, devicegroup)
        if dev_group:
            device.add(dev_group)
        else:
            module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)

    # Each helper returns (result, exc); result is falsy on failure.
    result = None
    exc = None
    if operation == 'add':
        result, exc = register_ip_to_tag_map(device,
                                             ip_addresses=module.params.get('ip_to_register', None),
                                             tag=module.params.get('tag_names', None)
                                             )
    elif operation == 'list':
        result, exc = get_all_address_group_mapping(device)
    elif operation == 'delete':
        result, exc = delete_address_from_mapping(device,
                                                  ip_address=module.params.get('ip_to_register', None),
                                                  tags=module.params.get('tag_names', [])
                                                  )
    else:
        module.fail_json(msg="Unsupported option")

    if not result:
        # BUG FIX: exc.message is Python-2-only (AttributeError on py3);
        # use to_native(exc), matching the commit error handler below.
        module.fail_json(msg=to_native(exc))

    if commit:
        try:
            device.commit(sync=True)
        except PanXapiError as exc:
            module.fail_json(msg=to_native(exc))

    module.exit_json(changed=True, msg=result)


if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
//// [tests/cases/conformance/classes/propertyMemberDeclarations/autoAccessor9.ts] ////
//// [autoAccessor9.ts]
// Auto-accessors do not use Set semantics themselves, so do not need to be transformed if there are no other
// initializers that need to be transformed:
class C1 {
accessor x = 1;
}
// If there are other field initializers to transform, we must transform auto-accessors so that we can preserve
// initialization order:
class C2 {
x = 1;
accessor y = 2;
z = 3;
}
// Private field initializers also do not use Set semantics, so they do not force an auto-accessor transformation:
class C3 {
#x = 1;
accessor y = 2;
}
// However, we still need to hoist private field initializers to the constructor if we need to preserve initialization
// order:
class C4 {
x = 1;
#y = 2;
z = 3;
}
class C5 {
#x = 1;
accessor y = 2;
z = 3;
}
// Static accessors aren't affected:
class C6 {
static accessor x = 1;
}
// Static accessors aren't affected:
class C7 {
static x = 1;
static accessor y = 2;
static z = 3;
}
//// [autoAccessor9.js]
"use strict";
// Auto-accessors do not use Set semantics themselves, so do not need to be transformed if there are no other
// initializers that need to be transformed:
class C1 {
accessor x = 1;
}
// If there are other field initializers to transform, we must transform auto-accessors so that we can preserve
// initialization order:
class C2 {
constructor() {
this.x = 1;
this.#y_accessor_storage = 2;
this.z = 3;
}
#y_accessor_storage;
get y() { return this.#y_accessor_storage; }
set y(value) { this.#y_accessor_storage = value; }
}
// Private field initializers also do not use Set semantics, so they do not force an auto-accessor transformation:
class C3 {
#x = 1;
accessor y = 2;
}
// However, we still need to hoist private field initializers to the constructor if we need to preserve initialization
// order:
class C4 {
constructor() {
this.x = 1;
this.#y = 2;
this.z = 3;
}
#y;
}
class C5 {
constructor() {
this.#x = 1;
this.#y_accessor_storage = 2;
this.z = 3;
}
#x;
#y_accessor_storage;
get y() { return this.#y_accessor_storage; }
set y(value) { this.#y_accessor_storage = value; }
}
// Static accessors aren't affected:
class C6 {
static accessor x = 1;
}
// Static accessors aren't affected:
class C7 {
static { this.x = 1; }
static accessor y = 2;
static { this.z = 3; }
}
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/autoAccessor9.js
|
import falcon
from graceful.serializers import BaseSerializer
from graceful.fields import IntField, StringField
from graceful.parameters import StringParam
from graceful.errors import ValidationError
from graceful.resources.generic import (
RetrieveUpdateDeleteAPI,
PaginatedListCreateAPI,
)
from jinja2 import Environment, FileSystemLoader
# The Jinja2 environment loads template files; 'templates' is the
# directory where they are stored.
env = Environment(loader=FileSystemLoader('templates'))
# Let's pretend that this is our backend storage.
CATS_STORAGE = [
{"id": 0, "name": "kitty", "breed": "saimese"},
{"id": 1, "name": "lucie", "breed": "maine coon"},
{"id": 2, "name": "molly", "breed": "sphynx"},
]
def new_id():
    """Return the next unused cat id (one past the current maximum)."""
    return max(cat['id'] for cat in CATS_STORAGE) + 1
def is_lower_case(value):
    """Field validator: raise ValidationError unless *value* is all lowercase."""
    if value != value.lower():
        raise ValidationError("{} is not lowercase".format(value))
# this is how we represent cats in our API
class CatSerializer(BaseSerializer):
    """Serializer describing the representation of a single cat."""
    # read_only: ids are assigned server-side (see CatList.create)
    id = IntField("cat identification number", read_only=True)
    name = StringField("cat name", validators=[is_lower_case])
    breed = StringField("official breed name")
class V1:
    class Cat(RetrieveUpdateDeleteAPI):
        """
        Single cat identified by its id
        """
        serializer = CatSerializer()

        def get_cat(self, cat_id):
            # Linear scan of the backend list; 404 when no cat matches.
            wanted = int(cat_id)
            for cat in CATS_STORAGE:
                if cat['id'] == wanted:
                    return cat
            raise falcon.HTTPNotFound

        def update(self, params, meta, validated, **kwargs):
            target = self.get_cat(cat_id=kwargs['cat_id'])
            target.update(validated)
            return validated

        def on_get(self, req, resp, **kwargs):
            super().on_get(req, resp, additional=None, **kwargs)

        def retrieve(self, params, meta, **kwargs):
            return self.get_cat(kwargs['cat_id'])

        def delete(self, params, meta, **kwargs):
            CATS_STORAGE.remove(self.get_cat(cat_id=kwargs['cat_id']))

    class CatList(PaginatedListCreateAPI):
        """
        List of all cats in our API
        """
        serializer = CatSerializer()

        breed = StringParam("set this param to filter cats by breed")

        def list(self, params, meta, **kwargs):
            # Without the filter param, return the whole collection.
            if 'breed' not in params:
                return CATS_STORAGE
            wanted = params['breed']
            return [cat for cat in CATS_STORAGE if cat['breed'] == wanted]

        def create(self, params, meta, validated, **kwargs):
            validated['id'] = new_id()
            CATS_STORAGE.append(validated)
            return validated
class Templated(object):
    """Falcon resource that renders a jinja2 template on GET requests."""

    # May be overridden as a class attribute in subclasses.
    template_name = None

    def __init__(self, template_name=None, context=None):
        # A falsy argument falls back to the class-level template_name;
        # the result is always bound on the instance.
        self.template_name = template_name or self.template_name
        self.context = context or {}

    def render(self, req, resp):
        template = env.get_template(self.template_name)
        return template.render(**self.context)

    def on_get(self, req, resp):
        resp.content_type = 'text/html'
        resp.body = self.render(req, resp)
# WSGI application object served by falcon ('application' alias for WSGI
# servers that look for that name).
api = application = falcon.API()

# Route table: URI templates mapped to resource instances.
endpoints = {
    "/v1/cats/{cat_id}": V1.Cat(),
    "/v1/cats/": V1.CatList(),
}

for uri, endpoint in endpoints.items():
    # add resource endpoints
    api.add_route(uri, endpoint)

# create documentation resource from API endpoints
# and add it to the router
api.add_route("/", Templated('index.html', {
    'endpoints': {
        uri: endpoint.describe()
        for uri, endpoint
        in endpoints.items()
    }
}))
|
unknown
|
codeparrot/codeparrot-clean
| ||
//===--- SimplifyStructExtract.swift --------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2023 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import SIL
extension StructExtractInst : OnoneSimplifiable {
  /// Simplifies `struct_extract(struct(a, b, ...), #field)` to the
  /// corresponding field operand of the `struct` instruction, making the
  /// construct/extract pair redundant.
  func simplify(_ context: SimplifyContext) {
    // Only handle the case where the extracted value is directly produced
    // by a visible `struct` instruction; otherwise nothing to do.
    guard let structInst = self.struct as? StructInst else {
      return
    }
    // fieldIndex selects the matching operand of the struct construction.
    context.tryReplaceRedundantInstructionPair(first: structInst, second: self,
                                               with: structInst.operands[fieldIndex].value)
  }
}
|
swift
|
github
|
https://github.com/apple/swift
|
SwiftCompilerSources/Sources/Optimizer/InstructionSimplification/SimplifyStructExtract.swift
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: replace Page's M2M keyword relation with a
    denormalised ``keywords_string`` char field, and relax ``description``
    from an HtmlField to a plain TextField."""

    def forwards(self, orm):
        # Deleting field 'Page._keywords'
        db.delete_column('pages_page', '_keywords')

        # Adding field 'Page.keywords_string'
        db.add_column('pages_page', 'keywords_string', self.gf('django.db.models.fields.CharField')(default='', max_length=500, blank=True), keep_default=False)

        # Removing M2M table for field keywords on 'Page'
        db.delete_table('pages_page_keywords')

        # Changing field 'Page.description'
        db.alter_column('pages_page', 'description', self.gf('django.db.models.fields.TextField')(blank=True))

    def backwards(self, orm):
        # Adding field 'Page._keywords'
        db.add_column('pages_page', '_keywords', self.gf('django.db.models.fields.CharField')(default='', max_length=500), keep_default=False)

        # Deleting field 'Page.keywords_string'
        db.delete_column('pages_page', 'keywords_string')

        # Adding M2M table for field keywords on 'Page'
        # NOTE(review): this references orm['core.keyword'] while the frozen
        # state below defines 'generic.keyword' -- confirm the app label.
        db.create_table('pages_page_keywords', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('page', models.ForeignKey(orm['pages.page'], null=False)),
            ('keyword', models.ForeignKey(orm['core.keyword'], null=False))
        ))
        db.create_unique('pages_page_keywords', ['page_id', 'keyword_id'])

        # Changing field 'Page.description'
        db.alter_column('pages_page', 'description', self.gf('mezzanine.core.fields.HtmlField')(blank=True))

    # Frozen ORM state used by South to build the `orm` object passed to
    # forwards()/backwards(); describes the models as of this migration.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.assignedkeyword': {
            'Meta': {'object_name': 'AssignedKeyword'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.TextField', [], {})
        },
        'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'pages.contentpage': {
            'Meta': {'object_name': 'ContentPage', '_ormbases': ['pages.Page']},
            'content': ('mezzanine.core.fields.HtmlField', [], {}),
            'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
        },
        'pages.page': {
            'Meta': {'object_name': 'Page'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
        }
    }

    complete_apps = ['pages']
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	qualified := SchemeGroupVersion.WithResource(resource)
	return qualified.GroupResource()
}
var (
	// localSchemeBuilder aliases the generated SchemeBuilder for
	// autoscaling/v1 so functions registered here end up in the same
	// builder as the generated ones.
	localSchemeBuilder = &autoscalingv1.SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addDefaultingFuncs)
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/apis/autoscaling/v1/register.go
|
/*
posixshmem - A Python extension that provides shm_open() and shm_unlink()
*/
// Need limited C API version 3.13 for Py_mod_gil
#include "pyconfig.h" // Py_GIL_DISABLED
#ifndef Py_GIL_DISABLED
# define Py_LIMITED_API 0x030d0000
#endif
#include <Python.h>
#include <string.h> // strlen()
#include <errno.h> // EINTR
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h> // shm_open(), shm_unlink()
#endif
/*[clinic input]
module _posixshmem
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/
/*
*
* Module-level functions & meta stuff
*
*/
#ifdef HAVE_SHM_OPEN
/*[clinic input]
_posixshmem.shm_open -> int
path: unicode
flags: int
mode: int = 0o777
# "shm_open(path, flags, mode=0o777)\n\n\
Open a shared memory object. Returns a file descriptor (integer).
[clinic start generated code]*/
static int
_posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags,
                          int mode)
/*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/
{
    int fd;
    int async_err = 0;
    Py_ssize_t name_size;

    /* UTF-8 view of the path; the buffer is owned by the unicode object. */
    const char *name = PyUnicode_AsUTF8AndSize(path, &name_size);
    if (name == NULL) {
        return -1;
    }
    /* shm_open() takes a NUL-terminated string, so a path containing an
       embedded NUL would be silently truncated -- reject it instead. */
    if (strlen(name) != (size_t)name_size) {
        PyErr_SetString(PyExc_ValueError, "embedded null character");
        return -1;
    }

    /* Release the GIL around the syscall; retry on EINTR unless a Python
       signal handler raised an exception (async_err). */
    do {
        Py_BEGIN_ALLOW_THREADS
        fd = shm_open(name, flags, mode);
        Py_END_ALLOW_THREADS
    } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals()));

    if (fd < 0) {
        /* When async_err is set, PyErr_CheckSignals() already raised. */
        if (!async_err)
            PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path);
        return -1;
    }

    return fd;
}
#endif /* HAVE_SHM_OPEN */
#ifdef HAVE_SHM_UNLINK
/*[clinic input]
_posixshmem.shm_unlink
path: unicode
/
Remove a shared memory object (similar to unlink()).
Remove a shared memory object name, and, once all processes have unmapped
the object, de-allocates and destroys the contents of the associated memory
region.
[clinic start generated code]*/
static PyObject *
_posixshmem_shm_unlink_impl(PyObject *module, PyObject *path)
/*[clinic end generated code: output=42f8b23d134b9ff5 input=298369d013dcad63]*/
{
    int rv;
    int async_err = 0;
    Py_ssize_t name_size;

    /* UTF-8 view of the path; the buffer is owned by the unicode object. */
    const char *name = PyUnicode_AsUTF8AndSize(path, &name_size);
    if (name == NULL) {
        return NULL;
    }
    /* Reject paths with embedded NULs, which shm_unlink() would truncate. */
    if (strlen(name) != (size_t)name_size) {
        PyErr_SetString(PyExc_ValueError, "embedded null character");
        return NULL;
    }

    /* Release the GIL around the syscall; retry on EINTR unless a Python
       signal handler raised an exception (async_err). */
    do {
        Py_BEGIN_ALLOW_THREADS
        rv = shm_unlink(name);
        Py_END_ALLOW_THREADS
    } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals()));

    if (rv < 0) {
        /* When async_err is set, PyErr_CheckSignals() already raised. */
        if (!async_err)
            PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path);
        return NULL;
    }

    Py_RETURN_NONE;
}
#endif /* HAVE_SHM_UNLINK */
#include "clinic/posixshmem.c.h"
/* The METHODDEF macros expand to nothing when the corresponding shm_*
   function is unavailable on this platform. */
static PyMethodDef module_methods[ ] = {
    _POSIXSHMEM_SHM_OPEN_METHODDEF
    _POSIXSHMEM_SHM_UNLINK_METHODDEF
    {NULL} /* Sentinel */
};

static PyModuleDef_Slot module_slots[] = {
    /* Declare support for per-interpreter GILs and the free-threaded
       (no-GIL) build. */
    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
    {0, NULL}
};

static struct PyModuleDef _posixshmemmodule = {
    PyModuleDef_HEAD_INIT,
    .m_name = "_posixshmem",
    .m_doc = "POSIX shared memory module",
    .m_size = 0,  /* no per-module state */
    .m_methods = module_methods,
    .m_slots = module_slots,
};

/* Module init function */
PyMODINIT_FUNC
PyInit__posixshmem(void)
{
    /* Multi-phase init: the interpreter drives creation via module_slots. */
    return PyModuleDef_Init(&_posixshmemmodule);
}
|
c
|
github
|
https://github.com/python/cpython
|
Modules/_multiprocessing/posixshmem.c
|
class Node:
    """A single cell of a singly linked list."""

    def __init__(self, value):
        self.value = value
        self.next = None

    def append(self, node):
        """Link *node* as this node's successor."""
        self.next = node


class Stack:
    """
    Represents a stack.

    This implementation of the stack is based on a linked list.
    Unit tests in the file: stack_tests.py

    Complexity analysis of the stack operations:

    Time:
        * Push: O(1), add an element at the top of the stack
        * Pop: O(1), remove an element from the top of the stack
        * Search: O(n), traverse the stack until you find the required element.
          In the worst case scenario you need to traverse the whole stack,
          therefore the time complexity is O(n)

    Space:
        The space required to hold the stack in memory is O(n).
    """

    def __init__(self):
        self.size = 0
        self.current = None

    def push(self, value):
        """Place *value* on top of the stack."""
        node = Node(value)
        # Link the new node in front of the previous top (which may be
        # None for an empty stack -- Node.next defaults to None anyway).
        node.append(self.current)
        self.current = node
        self.size += 1

    def pop(self):
        """Remove and return the top value, or None when the stack is empty."""
        if self.size == 0:
            return None
        top = self.current
        self.current = top.next
        self.size -= 1
        return top.value
|
unknown
|
codeparrot/codeparrot-clean
| ||
from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
class HPUXFCompiler(FCompiler):
    """numpy.distutils wrapper for the HP-UX HP Fortran 90 compiler (f90)."""

    compiler_type = 'hpux'
    description = 'HP Fortran 90 Compiler'
    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'

    executables = {
        'version_cmd'  : ["<F90>", "+version"],
        'compiler_f77' : ["f90"],
        'compiler_fix' : ["f90"],
        'compiler_f90' : ["f90"],
        'linker_so'    : None,
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    module_dir_switch = None #XXX: fix me
    module_include_switch = None #XXX: fix me
    pic_flags = ['+pic=long']

    def get_flags(self):
        return self.pic_flags + ['+ppu', '+DD64']

    def get_flags_opt(self):
        return ['-O3']

    def get_libraries(self):
        return ['m']

    def get_library_dirs(self):
        opt = ['/usr/lib/hpux64']
        return opt

    def get_version(self, force=0, ok_status=None):
        # XXX status==256 may indicate 'unrecognized option' or
        # 'no input file'. So, version_cmd needs more work.
        # BUG FIX: the default was a shared mutable list ([256, 0, 1]);
        # use a None sentinel instead (behaviour unchanged).
        if ok_status is None:
            ok_status = [256, 0, 1]
        return FCompiler.get_version(self, force, ok_status)
if __name__ == '__main__':
    # Manual smoke test: customize the hpux compiler wrapper and print the
    # version it detects (requires an HP-UX f90 on PATH).
    from distutils import log
    log.set_verbosity(10)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='hpux')
    compiler.customize()
    print(compiler.get_version())
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
from __future__ import print_function
from logging import handlers
from os.path import dirname
import logging
import os
import select
import signal
import socket
import subprocess
import sys
import traceback
# Root path
base_path = dirname(os.path.abspath(__file__))
# Insert local directories into path
sys.path.insert(0, os.path.join(base_path, 'libs'))
from couchpotato.environment import Env
from couchpotato.core.helpers.variable import getDataDir, removePyc
# Remove pyc files before dynamic load (sees .pyc files regular .py modules)
removePyc(base_path)
class Loader(object):
    """Bootstraps CouchPotato: parses CLI options, prepares the data and
    log directories, wires logging and signal handlers, and runs (and,
    when requested, restarts) the application."""

    # Set via the 'app.do_shutdown' event when the app wants a restart
    # instead of a plain exit.
    do_restart = False

    def __init__(self):
        # Get options via arg
        from couchpotato.runner import getOptions
        self.options = getOptions(sys.argv[1:])

        # Load settings
        settings = Env.get('settings')
        settings.setFile(self.options.config_file)

        # Create data dir if needed; explicit --data-dir wins over the
        # configured setting, which in turn falls back to the default.
        if self.options.data_dir:
            self.data_dir = self.options.data_dir
        else:
            self.data_dir = os.path.expanduser(Env.setting('data_dir'))
        if self.data_dir == '':
            self.data_dir = getDataDir()
        if not os.path.isdir(self.data_dir):
            os.makedirs(self.data_dir)

        # Create logging dir
        self.log_dir = os.path.join(self.data_dir, 'logs');
        if not os.path.isdir(self.log_dir):
            os.makedirs(self.log_dir)

        # Logging: rotate error.log at ~500 kB keeping 10 backups; only
        # CRITICAL messages go to this handler.
        from couchpotato.core.logger import CPLog
        self.log = CPLog(__name__)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
        hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10)
        hdlr.setLevel(logging.CRITICAL)
        hdlr.setFormatter(formatter)
        self.log.logger.addHandler(hdlr)

    def addSignals(self):
        # SIGINT triggers a clean app shutdown; SIGTERM exits immediately.
        signal.signal(signal.SIGINT, self.onExit)
        signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))

        from couchpotato.core.event import addEvent
        addEvent('app.do_shutdown', self.setRestart)

    def setRestart(self, restart):
        # Event callback: remember whether run() should restart afterwards.
        self.do_restart = restart
        return True

    def onExit(self, signal, frame):
        from couchpotato.core.event import fireEvent
        fireEvent('app.shutdown', single = True)

    def run(self):
        # Run the app; when it returns with do_restart set, re-exec this
        # script as a fresh process.
        self.addSignals()

        from couchpotato.runner import runCouchPotato
        runCouchPotato(self.options, base_path, sys.argv[1:], data_dir = self.data_dir, log_dir = self.log_dir, Env = Env)

        if self.do_restart:
            self.restart()

    def restart(self):
        try:
            # remove old pidfile first
            try:
                if self.runAsDaemon():
                    try: self.daemon.stop()
                    except: pass
            except:
                self.log.critical(traceback.format_exc())

            # Release log files and shutdown logger
            logging.shutdown()

            # Re-launch this same script with the original arguments.
            args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
            subprocess.Popen(args)
        except:
            self.log.critical(traceback.format_exc())

    def daemonize(self):
        if self.runAsDaemon():
            try:
                from daemon import Daemon
                self.daemon = Daemon(self.options.pid_file)
                self.daemon.daemonize()
            except SystemExit:
                raise
            except:
                self.log.critical(traceback.format_exc())

    def runAsDaemon(self):
        # Daemon mode requires both the --daemon flag and a pid file path.
        return self.options.daemon and self.options.pid_file
if __name__ == '__main__':
    l = None
    try:
        l = Loader()
        l.daemonize()
        l.run()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly.
        pass
    except select.error:
        # Interrupted select() during shutdown: exit quietly.
        pass
    except SystemExit:
        raise
    except socket.error as e:
        # log when socket receives SIGINT, but continue.
        # previous code would have skipped over other types of IO errors too.
        # NOTE(review): Python-2 style unpacking of the exception into
        # (errno, message); 4 is EINTR (interrupted system call).
        nr, msg = e
        if nr != 4:
            try:
                l.log.critical(traceback.format_exc())
            except:
                print(traceback.format_exc())
            raise
    except:
        try:
            # if this fails we will have two tracebacks
            # one for failing to log, and one for the exception that got us here.
            if l:
                l.log.critical(traceback.format_exc())
            else:
                print(traceback.format_exc())
        except:
            print(traceback.format_exc())
        raise
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import messages_pb2
import struct
import h2
import h2.connection
import twisted
import twisted.internet
import twisted.internet.protocol
_READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5
_MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
    """Twisted Protocol implementing a minimal HTTP/2 gRPC test server.

    Event handling is table-driven: ``self._handlers`` maps h2 event names to
    callables, so individual test cases can swap behaviors via
    ``set_handlers``.  Python 2 code (uses ``dict.has_key`` and str buffers).
    """

    def __init__(self):
        self._conn = h2.connection.H2Connection(client_side=False)
        self._recv_buffer = {}       # stream_id -> accumulated request bytes
        self._handlers = {}
        self._handlers['ConnectionMade'] = self.on_connection_made_default
        self._handlers['DataReceived'] = self.on_data_received_default
        self._handlers['WindowUpdated'] = self.on_window_update_default
        self._handlers['RequestReceived'] = self.on_request_received_default
        self._handlers['SendDone'] = self.on_send_done_default
        self._handlers['ConnectionLost'] = self.on_connection_lost
        self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default
        self._stream_status = {}     # stream_id -> True while stream is open
        self._send_remaining = {}    # stream_id -> bytes left to send
        self._outstanding_pings = 0

    def set_handlers(self, handlers):
        # Replace the whole handler table (used by individual test cases).
        self._handlers = handlers

    def connectionMade(self):
        self._handlers['ConnectionMade']()

    def connectionLost(self, reason):
        self._handlers['ConnectionLost'](reason)

    def on_connection_made_default(self):
        """Start the HTTP/2 session and flush the connection preface."""
        logging.info('Connection Made')
        self._conn.initiate_connection()
        self.transport.setTcpNoDelay(True)
        self.transport.write(self._conn.data_to_send())

    def on_connection_lost(self, reason):
        logging.info('Disconnected %s' % reason)

    def dataReceived(self, data):
        """Feed raw bytes to h2 and dispatch resulting events to handlers."""
        try:
            events = self._conn.receive_data(data)
        except h2.exceptions.ProtocolError:
            # this try/except block catches exceptions due to race between sending
            # GOAWAY and processing a response in flight.
            return
        # NOTE(review): data_to_send is a method; this truth-tests the bound
        # method (always true) instead of calling it -- presumably meant
        # self._conn.data_to_send().  Harmless: writing b'' is a no-op.
        if self._conn.data_to_send:
            self.transport.write(self._conn.data_to_send())
        for event in events:
            # Dispatch each h2 event type to its registered handler, if any.
            if isinstance(event, h2.events.RequestReceived) and self._handlers.has_key('RequestReceived'):
                logging.info('RequestReceived Event for stream: %d' % event.stream_id)
                self._handlers['RequestReceived'](event)
            elif isinstance(event, h2.events.DataReceived) and self._handlers.has_key('DataReceived'):
                logging.info('DataReceived Event for stream: %d' % event.stream_id)
                self._handlers['DataReceived'](event)
            elif isinstance(event, h2.events.WindowUpdated) and self._handlers.has_key('WindowUpdated'):
                logging.info('WindowUpdated Event for stream: %d' % event.stream_id)
                self._handlers['WindowUpdated'](event)
            elif isinstance(event, h2.events.PingAcknowledged) and self._handlers.has_key('PingAcknowledged'):
                logging.info('PingAcknowledged Event')
                self._handlers['PingAcknowledged'](event)
        self.transport.write(self._conn.data_to_send())

    def on_ping_acknowledged_default(self, event):
        logging.info('ping acknowledged')
        self._outstanding_pings -= 1

    def on_data_received_default(self, event):
        """Accumulate stream payload and credit flow control for it."""
        self._conn.acknowledge_received_data(len(event.data), event.stream_id)
        self._recv_buffer[event.stream_id] += event.data

    def on_request_received_default(self, event):
        """Open the stream bookkeeping and answer with gRPC response headers."""
        self._recv_buffer[event.stream_id] = ''
        self._stream_id = event.stream_id
        self._stream_status[event.stream_id] = True
        self._conn.send_headers(
            stream_id=event.stream_id,
            headers=[
                (':status', '200'),
                ('content-type', 'application/grpc'),
                ('grpc-encoding', 'identity'),
                ('grpc-accept-encoding', 'identity,deflate,gzip'),
            ],
        )
        self.transport.write(self._conn.data_to_send())

    def on_window_update_default(self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
        # try to resume sending on all active streams (update might be for connection)
        for stream_id in self._send_remaining:
            self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)

    def send_reset_stream(self):
        """RST_STREAM the most recently received stream."""
        self._conn.reset_stream(self._stream_id)
        self.transport.write(self._conn.data_to_send())

    def setup_send(self, data_to_send, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
        """Register a response payload for a stream and start sending it."""
        logging.info('Setting up data to send for stream_id: %d' % stream_id)
        self._send_remaining[stream_id] = len(data_to_send)
        self._send_offset = 0
        self._data_to_send = data_to_send
        self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)

    def default_send(self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
        """Send as much pending payload as flow control currently allows."""
        if not self._send_remaining.has_key(stream_id):
            # not setup to send data yet
            return

        while self._send_remaining[stream_id] > 0:
            lfcw = self._conn.local_flow_control_window(stream_id)
            # Padding costs pad_length bytes plus the 1-byte pad-length field.
            padding_bytes = pad_length + 1 if pad_length is not None else 0
            if lfcw - padding_bytes <= 0:
                logging.info('Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet' % (stream_id, lfcw, padding_bytes))
                break
            chunk_size = min(lfcw - padding_bytes, read_chunk_size)
            bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
            logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes' %
                         (lfcw, self._send_offset, self._send_offset + bytes_to_send + padding_bytes,
                          stream_id, padding_bytes))
            # The receiver might allow sending frames larger than the http2 minimum
            # max frame size (16384), but this test should never send more than 16384
            # for simplicity (which is always legal).
            if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
                raise ValueError("overload: sending %d" % (bytes_to_send + padding_bytes))
            data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send]
            try:
                self._conn.send_data(stream_id, data, end_stream=False, pad_length=pad_length)
            except h2.exceptions.ProtocolError:
                logging.info('Stream %d is closed' % stream_id)
                break
            self._send_remaining[stream_id] -= bytes_to_send
            self._send_offset += bytes_to_send
            if self._send_remaining[stream_id] == 0:
                self._handlers['SendDone'](stream_id)

    def default_ping(self):
        logging.info('sending ping')
        self._outstanding_pings += 1
        self._conn.ping(b'\x00'*8)
        self.transport.write(self._conn.data_to_send())

    def on_send_done_default(self, stream_id):
        """Close the stream with trailers exactly once."""
        if self._stream_status[stream_id]:
            self._stream_status[stream_id] = False
            self.default_send_trailer(stream_id)
        else:
            logging.error('Stream %d is already closed' % stream_id)

    def default_send_trailer(self, stream_id):
        """Send the gRPC trailing metadata (grpc-status: 0) and end the stream."""
        logging.info('Sending trailer for stream id %d' % stream_id)
        self._conn.send_headers(stream_id,
                                headers=[ ('grpc-status', '0') ],
                                end_stream=True
                                )
        self.transport.write(self._conn.data_to_send())

    @staticmethod
    def default_response_data(response_size):
        """Build a gRPC-framed SimpleResponse with a zero-filled payload.

        Frame layout: 1 compression byte (0) + 4-byte big-endian length
        (native pack reversed) + serialized proto.
        """
        sresp = messages_pb2.SimpleResponse()
        sresp.payload.body = b'\x00'*response_size
        serialized_resp_proto = sresp.SerializeToString()
        response_data = b'\x00' + struct.pack('i', len(serialized_resp_proto))[::-1] + serialized_resp_proto
        return response_data

    def parse_received_data(self, stream_id):
        """ returns a grpc framed string of bytes containing response proto of the size
        asked in request """
        recv_buffer = self._recv_buffer[stream_id]
        # Bytes 1-4 hold the message length (big-endian; reversed native unpack).
        grpc_msg_size = struct.unpack('i',recv_buffer[1:5][::-1])[0]
        # Return None until the full gRPC frame has arrived.
        if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size:
            return None
        req_proto_str = recv_buffer[5:5+grpc_msg_size]
        sr = messages_pb2.SimpleRequest()
        sr.ParseFromString(req_proto_str)
        logging.info('Parsed simple request for stream %d' % stream_id)
        return sr
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper to get a list of all projects that are nested within another project."""
from website.project.model import Node
from modularodm import Q
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
def find_nested_projects():
    """Return all non-deleted 'project' nodes that are nested inside another
    project (i.e. nodes referenced from some parent's ``nodes`` list, found
    via the ``__backrefs.parent.node.nodes.0`` backref)."""
    return Node.find(
        Q('__backrefs.parent.node.nodes.0', 'exists', True) &
        Q('category', 'eq', 'project') &
        Q('is_deleted', 'eq', False)
    )
    # Equivalent in-Python filtering, kept for reference:
    #return [node for node in Node.find()
    #if node.category == 'project'
    #and node.parent_node is not None]
class TestFindNestedProjects(OsfTestCase):
    """Integration tests for find_nested_projects()."""

    def test_find_nested(self):
        # A child project appended to another project's nodes is "nested".
        parent = ProjectFactory.build()
        child = ProjectFactory()
        parent.nodes.append(child)
        parent.save()
        found = find_nested_projects()
        assert child in found
        assert parent not in found

    def test_unnested_project(self):
        # A stand-alone project must not be reported as nested.
        standalone = ProjectFactory()
        assert standalone not in find_nested_projects()

    def test_deleted_projects_excluded(self):
        # Deleted children are filtered out even when nested.
        parent = ProjectFactory.build()
        removed = ProjectFactory(is_deleted=True)
        parent.nodes.append(removed)
        parent.save()
        assert removed not in find_nested_projects()
def main():
    """Print how many nested projects exist."""
    result = find_nested_projects()
    # Bug fix: previously referenced the undefined name 'results',
    # raising NameError at runtime.
    print('Number of nested projects: {0}'.format(len(result)))


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import pdb
import numpy as np
# ----- Dice Score -----
def computeDice(autoSeg, groundTruth):
    """Compute the per-class Dice coefficient of a segmentation.

    Parameters
    ----------
    autoSeg : ndarray of integer class labels (automatic segmentation)
    groundTruth : ndarray of integer class labels, same size as autoSeg

    Returns
    -------
    DiceArray : list of floats in [0, 1], one per foreground class
        (classes 1 .. max(groundTruth)); 1 = perfect overlap, 0 = none.
    """
    n_classes = int(np.max(groundTruth) + 1)
    DiceArray = []

    # Class 0 (background) is skipped on purpose.
    # Fixes: py2-only xrange -> range; index-scatter into np.bool zero
    # arrays (np.bool was removed in NumPy 1.24) replaced by direct
    # boolean mask comparison, which yields identical masks.
    for c_i in range(1, n_classes):
        autoArray = (autoSeg.flatten() == c_i)
        gtArray = (groundTruth.flatten() == c_i)

        dsc = dice(autoArray, gtArray)
        #dice = np.sum(autoSeg[groundTruth==c_i])*2.0 / (np.sum(autoSeg) + np.sum(groundTruth))
        DiceArray.append(dsc)

    return DiceArray
def dice(im1, im2):
    """Compute the Dice coefficient between two binary masks.

    Parameters
    ----------
    im1, im2 : array-like
        Converted to boolean arrays; must have the same number of elements.

    Returns
    -------
    float in [0, 1]: 1 = perfect overlap, 0 = no overlap.
    Two empty masks are defined as perfectly overlapping (returns 1.0).

    Raises
    ------
    ValueError
        If the inputs differ in size.
    """
    # Fix: np.bool alias was removed in NumPy 1.24; builtin bool is the
    # documented replacement and behaves identically here.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)

    if im1.size != im2.size:
        raise ValueError("Size mismatch between input arrays!!!")

    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        # Both masks empty: treat as perfect agreement.
        return 1.0

    # Compute Dice
    intersection = np.logical_and(im1, im2)
    return 2. * intersection.sum() / im_sum
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
"""
Data Models for books.service
All classes can be instantiated from an xml string using their FromString
class method.
Notes:
* Book.title displays the first dc:title because the returned XML
repeats that datum as atom:title.
There is an undocumented gbs:openAccess element that is not parsed.
"""
__author__ = "James Sams <sams.james@gmail.com>"
__copyright__ = "Apache License v2.0"
import atom
import gdata
BOOK_SEARCH_NAMESPACE = 'http://schemas.google.com/books/2008'
DC_NAMESPACE = 'http://purl.org/dc/terms'
ANNOTATION_REL = "http://schemas.google.com/books/2008/annotation"
INFO_REL = "http://schemas.google.com/books/2008/info"
LABEL_SCHEME = "http://schemas.google.com/books/2008/labels"
PREVIEW_REL = "http://schemas.google.com/books/2008/preview"
THUMBNAIL_REL = "http://schemas.google.com/books/2008/thumbnail"
FULL_VIEW = "http://schemas.google.com/books/2008#view_all_pages"
PARTIAL_VIEW = "http://schemas.google.com/books/2008#view_partial"
NO_VIEW = "http://schemas.google.com/books/2008#view_no_pages"
UNKNOWN_VIEW = "http://schemas.google.com/books/2008#view_unknown"
EMBEDDABLE = "http://schemas.google.com/books/2008#embeddable"
NOT_EMBEDDABLE = "http://schemas.google.com/books/2008#not_embeddable"
class _AtomFromString(atom.AtomBase):
    """Mixin adding a FromString(s) classmethod that parses an XML string
    into an instance of the subclass."""

    #@classmethod
    def FromString(cls, s):
        return atom.CreateClassFromXMLString(cls, s)

    # Pre-decorator-era classmethod declaration (decorator kept commented above).
    FromString = classmethod(FromString)
class Creator(_AtomFromString):
    """
    The <dc:creator> element identifies an author-or more generally, an entity
    responsible for creating the volume in question. Examples of a creator
    include a person, an organization, or a service. In the case of
    anthologies, proceedings, or other edited works, this field may be used to
    indicate editors or other entities responsible for collecting the volume's
    contents.

    This element appears as a child of <entry>. If there are multiple authors or
    contributors to the book, there may be multiple <dc:creator> elements in the
    volume entry (one for each creator or contributor).
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'creator'
    _namespace = DC_NAMESPACE
class Date(_AtomFromString): #iso 8601 / W3CDTF profile
    """
    The <dc:date> element indicates the publication date of the specific volume
    in question. If the book is a reprint, this is the reprint date, not the
    original publication date. The date is encoded according to the ISO-8601
    standard (and more specifically, the W3CDTF profile).

    The <dc:date> element can appear only as a child of <entry>.

    Usually only the year or the year and the month are given.

    YYYY-MM-DDThh:mm:ssTZD  TZD = -hh:mm or +hh:mm
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'date'
    _namespace = DC_NAMESPACE
class Description(_AtomFromString):
    """
    The <dc:description> element includes text that describes a book or book
    result. In a search result feed, this may be a search result "snippet" that
    contains the words around the user's search term. For a single volume feed,
    this element may contain a synopsis of the book.

    The <dc:description> element can appear only as a child of <entry>
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'description'
    _namespace = DC_NAMESPACE
class Format(_AtomFromString):
    """
    The <dc:format> element describes the physical properties of the volume.
    Currently, it indicates the number of pages in the book, but more
    information may be added to this field in the future.

    This element can appear only as a child of <entry>.
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'format'
    _namespace = DC_NAMESPACE
class Identifier(_AtomFromString):
    """
    The <dc:identifier> element provides an unambiguous reference to a
    particular book.
    * Every <entry> contains at least one <dc:identifier> child.
    * The first identifier is always the unique string Book Search has assigned
      to the volume (such as s1gVAAAAYAAJ). This is the ID that appears in the
      book's URL in the Book Search GUI, as well as in the URL of that book's
      single item feed.
    * Many books contain additional <dc:identifier> elements. These provide
      alternate, external identifiers to the volume. Such identifiers may
      include the ISBNs, ISSNs, Library of Congress Control Numbers (LCCNs),
      and OCLC numbers; they are prepended with a corresponding namespace
      prefix (such as "ISBN:").
    * Any <dc:identifier> can be passed to the Dynamic Links, used to
      instantiate an Embedded Viewer, or even used to construct static links to
      Book Search.

    The <dc:identifier> element can appear only as a child of <entry>.
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'identifier'
    _namespace = DC_NAMESPACE
class Publisher(_AtomFromString):
    """
    The <dc:publisher> element contains the name of the entity responsible for
    producing and distributing the volume (usually the specific edition of this
    book). Examples of a publisher include a person, an organization, or a
    service.

    This element can appear only as a child of <entry>. If there is more than
    one publisher, multiple <dc:publisher> elements may appear.
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'publisher'
    _namespace = DC_NAMESPACE
class Subject(_AtomFromString):
    """
    The <dc:subject> element identifies the topic of the book. Usually this is
    a Library of Congress Subject Heading (LCSH) or Book Industry Standards
    and Communications Subject Heading (BISAC).

    The <dc:subject> element can appear only as a child of <entry>. There may
    be multiple <dc:subject> elements per entry.
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'subject'
    _namespace = DC_NAMESPACE
class Title(_AtomFromString):
    """
    The <dc:title> element contains the title of a book as it was published. If
    a book has a subtitle, it appears as a second <dc:title> element in the book
    result's <entry>.
    """

    # XML tag and namespace used when (de)serializing this element.
    _tag = 'title'
    _namespace = DC_NAMESPACE
class Viewability(_AtomFromString):
    """
    Google Book Search respects the user's local copyright restrictions. As a
    result, previews or full views of some books are not available in all
    locations. The <gbs:viewability> element indicates whether a book is fully
    viewable, can be previewed, or only has "about the book" information. These
    three "viewability modes" are the same ones returned by the Dynamic Links
    API.

    The <gbs:viewability> element can appear only as a child of <entry>.

    The value attribute will take the form of the following URIs to represent
    the relevant viewing capability:

    Full View: http://schemas.google.com/books/2008#view_all_pages
    Limited Preview: http://schemas.google.com/books/2008#view_partial
    Snippet View/No Preview: http://schemas.google.com/books/2008#view_no_pages
    Unknown view: http://schemas.google.com/books/2008#view_unknown
    """

    _tag = 'viewability'
    _namespace = BOOK_SEARCH_NAMESPACE
    # Copy inherited attribute map, then register the 'value' XML attribute.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        # value: one of the viewability URIs listed in the class docstring.
        self.value = value

        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes, text=text)
class Embeddability(_AtomFromString):
    """
    Many of the books found on Google Book Search can be embedded on third-party
    sites using the Embedded Viewer. The <gbs:embeddability> element indicates
    whether a particular book result is available for embedding. By definition,
    a book that cannot be previewed on Book Search cannot be embedded on third-
    party sites.

    The <gbs:embeddability> element can appear only as a child of <entry>.

    The value attribute will take on one of the following URIs:
    embeddable: http://schemas.google.com/books/2008#embeddable
    not embeddable: http://schemas.google.com/books/2008#not_embeddable
    """

    _tag = 'embeddability'
    _namespace = BOOK_SEARCH_NAMESPACE
    # Copy inherited attribute map, then register the 'value' XML attribute.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, text=None, extension_elements=None,
                 extension_attributes=None):
        # value: one of the embeddability URIs listed in the class docstring.
        self.value = value

        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes, text=text)
class Review(_AtomFromString):
    """
    When present, the <gbs:review> element contains a user-generated review for
    a given book. This element currently appears only in the user library and
    user annotation feeds, as a child of <entry>.

    type: text, html, xhtml
    xml:lang: id of the language, a guess, (always two letters?)
    """

    _tag = 'review'
    _namespace = BOOK_SEARCH_NAMESPACE
    # Copy inherited attribute map, then register 'type' and xml:lang.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['type'] = 'type'
    _attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang'

    def __init__(self, type=None, lang=None, text=None,
                 extension_elements=None, extension_attributes=None):
        # type: content type of the review text ('text', 'html', or 'xhtml').
        self.type = type
        # lang: xml:lang language code of the review.
        self.lang = lang

        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes, text=text)
class Rating(_AtomFromString):
    """All attributes must take an integral string between 1 and 5.

    The min, max, and average attributes represent 'community' ratings. The
    value attribute is the user's (of the feed from which the item is fetched,
    not necessarily the authenticated user) rating of the book.
    """

    _tag = 'rating'
    _namespace = gdata.GDATA_NAMESPACE
    # Copy inherited attribute map, then register the rating attributes.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['min'] = 'min'
    _attributes['max'] = 'max'
    _attributes['average'] = 'average'
    _attributes['value'] = 'value'

    def __init__(self, min=None, max=None, average=None, value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        # min/max/average: community rating bounds and mean; value: user rating.
        self.min = min
        self.max = max
        self.average = average
        self.value = value

        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes, text=text)
class Book(_AtomFromString, gdata.GDataEntry):
    """
    Represents an <entry> from either a search, annotation, library, or single
    item feed. Note that dc_title attribute is the proper title of the volume,
    title is an atom element and may not represent the full title.
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    # Repeated (list-valued) dc:* children.
    for i in (Creator, Identifier, Publisher, Subject,):
        _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i])
    # Single-valued children.
    for i in (Date, Description, Format, Viewability, Embeddability,
              Review, Rating):  # Review, Rating maybe only in anno/lib entrys
        _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i)
        # there is an atom title as well, should we clobber that?
    del(i)
    # dc:title is stored under the separate attribute name 'dc_title' so it
    # does not collide with the inherited atom:title.
    _children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title])

    def to_dict(self):
        """Returns a dictionary of the book's available metadata. If the data
        cannot be discovered, it is not included as a key in the returned dict.
        The possible keys are: authors, embeddability, date, description,
        format, identifiers, publishers, rating, review, subjects, title, and
        viewability.

        Notes:
          * Plural keys will be lists
          * Singular keys will be strings
          * Title, despite usually being a list, joins the title and subtitle
            with a space as a single string.
          * embeddability and viewability only return the portion of the URI
            after #
          * identifiers is a list of tuples, where the first item of each tuple
            is the type of identifier and the second item is the identifying
            string. Note that while doing dict() on this tuple may be possible,
            some items may have multiple of the same identifier and converting
            to a dict may resulted in collisions/dropped data.
          * Rating returns only the user's rating. See Rating class for precise
            definition.
        """
        d = {}
        if self.GetAnnotationLink():
            d['annotation'] = self.GetAnnotationLink().href
        if self.creator:
            d['authors'] = [x.text for x in self.creator]
        if self.embeddability:
            # Keep only the fragment after '#' (e.g. 'embeddable').
            d['embeddability'] = self.embeddability.value.split('#')[-1]
        if self.date:
            d['date'] = self.date.text
        if self.description:
            d['description'] = self.description.text
        if self.format:
            d['format'] = self.format.text
        if self.identifier:
            # First identifier is always Google's own volume id.
            d['identifiers'] = [('google_id', self.identifier[0].text)]
            for x in self.identifier[1:]:
                l = x.text.split(':')  # should we lower the case of the ids?
                d['identifiers'].append((l[0], ':'.join(l[1:])))
        if self.GetInfoLink():
            d['info'] = self.GetInfoLink().href
        if self.GetPreviewLink():
            d['preview'] = self.GetPreviewLink().href
        if self.publisher:
            d['publishers'] = [x.text for x in self.publisher]
        if self.rating:
            d['rating'] = self.rating.value
        if self.review:
            d['review'] = self.review.text
        if self.subject:
            d['subjects'] = [x.text for x in self.subject]
        if self.GetThumbnailLink():
            d['thumbnail'] = self.GetThumbnailLink().href
        if self.dc_title:
            # Title and subtitle come as separate dc:title elements.
            d['title'] = ' '.join([x.text for x in self.dc_title])
        if self.viewability:
            d['viewability'] = self.viewability.value.split('#')[-1]
        return d

    def __init__(self, creator=None, date=None,
                 description=None, format=None, author=None, identifier=None,
                 publisher=None, subject=None, dc_title=None, viewability=None,
                 embeddability=None, review=None, rating=None, category=None,
                 content=None, contributor=None, atom_id=None, link=None,
                 published=None, rights=None, source=None, summary=None,
                 title=None, control=None, updated=None, text=None,
                 extension_elements=None, extension_attributes=None):
        # dc:* metadata elements (see the element classes for semantics).
        self.creator = creator
        self.date = date
        self.description = description
        self.format = format
        self.identifier = identifier
        self.publisher = publisher
        self.subject = subject
        self.dc_title = dc_title or []
        self.viewability = viewability
        self.embeddability = embeddability
        self.review = review
        self.rating = rating

        # Remaining arguments are standard atom/gdata entry fields.
        gdata.GDataEntry.__init__(self, author=author, category=category,
                                  content=content, contributor=contributor, atom_id=atom_id,
                                  link=link, published=published, rights=rights, source=source,
                                  summary=summary, title=title, control=control, updated=updated,
                                  text=text, extension_elements=extension_elements,
                                  extension_attributes=extension_attributes)

    def GetThumbnailLink(self):
        """Returns the atom.Link object representing the thumbnail URI."""
        for i in self.link:
            if i.rel == THUMBNAIL_REL:
                return i

    def GetInfoLink(self):
        """
        Returns the atom.Link object representing the human-readable info URI.
        """
        for i in self.link:
            if i.rel == INFO_REL:
                return i

    def GetPreviewLink(self):
        """Returns the atom.Link object representing the preview URI."""
        for i in self.link:
            if i.rel == PREVIEW_REL:
                return i

    def GetAnnotationLink(self):
        """
        Returns the atom.Link object representing the Annotation URI.
        Note that the use of www.books in the href of this link seems to make
        this information useless. Using books.service.ANNOTATION_FEED and
        BOOK_SERVER to construct your URI seems to work better.
        """
        for i in self.link:
            if i.rel == ANNOTATION_REL:
                return i

    def set_rating(self, value):
        """Set user's rating. Must be an integral string between 1 nad 5"""
        assert (value in ('1','2','3','4','5'))
        if not isinstance(self.rating, Rating):
            self.rating = Rating()
        self.rating.value = value

    def set_review(self, text, type='text', lang='en'):
        """Set user's review text"""
        self.review = Review(text=text, type=type, lang=lang)

    def get_label(self):
        """Get users label for the item as a string"""
        for i in self.category:
            if i.scheme == LABEL_SCHEME:
                return i.term

    def set_label(self, term):
        """Clear pre-existing label for the item and set term as the label."""
        self.remove_label()
        self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME))

    def remove_label(self):
        """Clear the user's label for the item"""
        # Iterate a reversed copy so deleting by index stays valid.
        ln = len(self.category)
        for i, j in enumerate(self.category[::-1]):
            if j.scheme == LABEL_SCHEME:
                del(self.category[ln-1-i])

    def clean_annotations(self):
        """Clear all annotations from an item. Useful for taking an item from
        another user's library/annotation feed and adding it to the
        authenticated user's library without adopting annotations."""
        self.remove_label()
        self.review = None
        self.rating = None

    def get_google_id(self):
        """Get Google's ID of the item."""
        return self.id.text.split('/')[-1]
class BookFeed(_AtomFromString, gdata.GDataFeed):
    """Represents a feed of entries from a search."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    # Register Book as the (repeated) entry child of the feed.
    _children = gdata.GDataFeed._children.copy()
    _children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book])
if __name__ == '__main__':
    # Run the module's doctest examples from the companion text file.
    import doctest
    doctest.testfile('datamodels.txt')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,qdu1000-gcc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Global Clock & Reset Controller for QDU1000 and QRU1000
maintainers:
- Taniya Das <quic_tdas@quicinc.com>
- Imran Shaik <quic_imrashai@quicinc.com>
description: |
Qualcomm global clock control module which supports the clocks, resets and
power domains on QDU1000 and QRU1000
See also: include/dt-bindings/clock/qcom,qdu1000-gcc.h
properties:
compatible:
const: qcom,qdu1000-gcc
clocks:
items:
- description: Board XO source
- description: Sleep clock source
- description: PCIE 0 Pipe clock source
- description: PCIE 0 Phy Auxiliary clock source
- description: USB3 Phy wrapper pipe clock source
required:
- compatible
- clocks
- '#power-domain-cells'
allOf:
- $ref: qcom,gcc.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@100000 {
compatible = "qcom,qdu1000-gcc";
reg = <0x00100000 0x001f4200>;
clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>,
<&pcie_0_pipe_clk>, <&pcie_0_phy_aux_clk>,
<&usb3_phy_wrapper_pipe_clk>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/clock/qcom,qdu1000-gcc.yaml
|
from typing import Any, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
    """An answer to the question, with sources."""

    # Free-text answer produced by the model.
    answer: str = Field(..., description="Answer to the question that was asked")
    # Identifiers of the sources the answer was drawn from.
    sources: list[str] = Field(
        ...,
        description="List of sources used to answer the question",
    )
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This function is deprecated. Refer to this guide on retrieval and question "
        "answering with structured responses: "
        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
    ),
)
def create_qa_with_structure_chain(
    llm: BaseLanguageModel,
    schema: dict | type[BaseModel],
    output_parser: str = "base",
    prompt: PromptTemplate | ChatPromptTemplate | None = None,
    verbose: bool = False,  # noqa: FBT001,FBT002
) -> LLMChain:
    """Build an LLMChain whose answers conform to a caller-supplied schema.

    The schema is exposed to the model as an OpenAI function definition, so
    the model's reply is constrained to the schema's shape.

    Args:
        llm: Language model to use for the chain.
        schema: Pydantic model class or JSON-schema dict describing the output.
        output_parser: `'pydantic'` to parse into the schema class (requires a
            pydantic class for `schema`), `'base'` for the raw function output.
        prompt: Optional prompt overriding the built-in QA prompt.
        verbose: Whether to run the chain in verbose mode.

    Returns:
        The question answering chain.

    Raises:
        ValueError: If `output_parser` is unrecognized, or is `'pydantic'`
            while `schema` is not a pydantic model class.
    """
    is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)

    # Resolve the output parser first so bad arguments fail before any work.
    if output_parser == "pydantic":
        if not is_pydantic_schema:
            msg = (
                "Must provide a pydantic class for schema when output_parser is "
                "'pydantic'."
            )
            raise ValueError(msg)
        parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
            pydantic_schema=schema,
        )
    elif output_parser == "base":
        parser = OutputFunctionsParser()
    else:
        msg = (
            f"Got unexpected output_parser: {output_parser}. "
            f"Should be one of `pydantic` or `base`."
        )
        raise ValueError(msg)

    # Normalize the schema to a plain JSON-schema dict for the function spec.
    if is_pydantic_schema:
        schema_dict = cast("dict", schema.model_json_schema())
    else:
        schema_dict = cast("dict", schema)
    llm_kwargs = get_llm_kwargs(
        {
            "name": schema_dict["title"],
            "description": schema_dict["description"],
            "parameters": schema_dict,
        },
    )

    # Default chat prompt: system instruction, retrieved context, the
    # question itself, and a formatting reminder.
    default_messages = [
        SystemMessage(
            content=(
                "You are a world class algorithm to answer "
                "questions in a specific format."
            ),
        ),
        HumanMessage(content="Answer question using the following context"),
        HumanMessagePromptTemplate.from_template("{context}"),
        HumanMessagePromptTemplate.from_template("Question: {question}"),
        HumanMessage(content="Tips: Make sure to answer in the correct format"),
    ]
    prompt = prompt or ChatPromptTemplate(messages=default_messages)  # type: ignore[arg-type]

    return LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs=llm_kwargs,
        output_parser=parser,
        verbose=verbose,
    )
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This function is deprecated. Refer to this guide on retrieval and question "
        "answering with sources: "
        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
    ),
)
def create_qa_with_sources_chain(
    llm: BaseLanguageModel,
    verbose: bool = False,  # noqa: FBT001,FBT002
    **kwargs: Any,
) -> LLMChain:
    """Build a QA chain whose answers cite their sources.

    Thin wrapper around `create_qa_with_structure_chain` with the
    `AnswerWithSources` schema pre-selected.

    Args:
        llm: Language model to use for the chain.
        verbose: Whether to print the details of the chain.
        **kwargs: Forwarded to `create_qa_with_structure_chain`.

    Returns:
        Chain (LLMChain) that can be used to answer questions with citations.
    """
    return create_qa_with_structure_chain(
        llm,
        schema=AnswerWithSources,
        verbose=verbose,
        **kwargs,
    )
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/chains/openai_functions/qa_with_structure.py
|
import numpy as np
import scipy.integrate as scin
import scipy.optimize as scop
import warnings
def self_affine_psd(q, pref, hurst, onedim=False):
    """Ideal self-affine power spectrum ``C(q) = pref * q**exponent``.

    The exponent is ``-2 * (hurst + 1)`` for the 2D (isotropic) spectrum and
    ``-1 - 2 * hurst`` for a 1D line spectrum.

    Parameters
    ----------
    q : array_like
        Roughness frequency (L^-1); scalar or array.
    pref : float
        Spectrum prefactor.
    hurst : float
        Hurst exponent.
    onedim : bool
        Use the 1D line-scan exponent instead of the 2D one.
    """
    exponent = (-1 - 2 * hurst) if onedim else (-2 * (hurst + 1))
    return pref * q**exponent
def self_affine_psd_fit(q, C, onedim=False):
    """Return the (prefactor, hurst) least-squares fit of a self-affine PSD.

    The fit is performed on log10 of the spectrum; a direct least-squares fit
    would be dominated by the (much larger) low-frequency components.

    Parameters
    ----------
    q, C : array_like
        Roughness frequencies and measured spectrum values.
    onedim : bool
        Fit the 1D line-spectrum form instead of the 2D one.
    """
    def log_model(qv, pref, hurst):
        return np.log10(self_affine_psd(qv, pref, hurst, onedim))

    with warnings.catch_warnings():
        # the optimizer may probe unphysical (negative) prefactors, which
        # would otherwise emit log-of-negative warnings
        warnings.simplefilter("ignore")
        best, _cov = scop.curve_fit(log_model, q, np.log10(C))
    return best
def radially_integrated_psd(q, C):
    """Integrate a radially averaged PSD over the full plane.

    Computes ``2*pi * integral(q * C(q) dq)`` via Simpson's rule, i.e. the
    mean-square roughness implied by an isotropic spectrum. Assumes no
    roll-off/cut-off in the PSD.

    Parameters
    ----------
    q, C : array_like
        Radial frequencies and PSD values; accepted in either ascending or
        descending q order.
    """
    if q[0] > q[1]:
        # Simpson integration expects ascending abscissae
        q = q[::-1]
        C = C[::-1]
    # bug fix: scipy.integrate.simps was deprecated and removed in SciPy 1.14;
    # prefer the renamed simpson(), fall back to simps on old installations.
    simpson = getattr(scin, "simpson", None) or scin.simps
    return 2 * np.pi * simpson(np.multiply(q, C), x=q)
def radially_averaged_psd(surface, window=True):
    """
    Computes the power spectral density/roughness spectrum from roughness readings using angular averaging.
    This radial averaging makes this algorithm appropriate for isotropic surfaces.
    Parameters
    ----------
    surface: brown.surface.Surface (L)
        A square 2D array containing the surface height profile, equidistant measurements assumed in both
        directions; must expose the grid spacing as a ``dxy`` attribute.
    window: bool
        Hanning window applied to data before Discrete Fourier Transform to adhere to inherent periodicity. Defaults
        to True, not needed for periodic surfaces.
    Returns
    -------
    frequency, power: array_like
        Two 1D numpy arrays containing the roughness frequency (L^-1) vs. power (L^4).
    Notes
    -----
    The routine uses numpy's FFT package and follows the algorithm in Persson (2005), 10.1088/0953-8984/17/1/R01,
    and power normalization as in Elson & Bennet (1995), 10.1364/AO.34.000201.
    Todo
    ----
    Normalize.
    """
    # work on a plain array copy so the input Surface object is never mutated
    h = np.copy(surface)
    N = h.shape[0]            # samples per side (square input assumed)
    N_center = int(N / 2)     # highest resolvable frequency multiple (Nyquist)
    dxy = surface.dxy         # grid spacing (L); attribute of the Surface type
    L = N * dxy               # physical side length
    q_L = 2 * np.pi / L       # fundamental angular frequency
    # windowing
    if window:
        # separable 2D Hanning taper suppresses edge discontinuities
        h = h * np.outer(np.hanning(N), np.hanning(N))
    h_a_q = (dxy**2 / (2 * np.pi)**2) * np.fft.fft2(h)  # Equ. D.1
    C_q = ((2 * np.pi)**2 / L**2) * np.abs(h_a_q)**2  # Equ. C.6
    # first quadrant of FFT, rest follows in periodic averaging
    m = np.array(range(1, N_center + 1))  # frequency multiplier
    mxy = np.array(range(0, N_center))  # grid indices
    q = np.array(m) * q_L  # frequencies
    xv, yv = np.meshgrid(mxy, mxy)  # grid index meshes
    mxyabs = np.sqrt(xv**2 + yv**2)  # grid index vector lengths
    # averaging
    # N - 1 is the last valid index; used to mirror first-quadrant indices
    # into the other three quadrants of the periodic FFT grid
    N = N - 1
    C = np.zeros_like(q)
    for i, mi in enumerate(m):
        # annulus of grid points whose radial distance rounds to multiple mi
        mind, maxd = mi - 0.5, mi + 0.5
        idcs = np.where((mind < mxyabs) & (mxyabs < maxd))
        C_sum = 0.
        for j in range(len(idcs[0])):
            mx, my = idcs[0][j], idcs[1][j]
            # accumulate the four symmetric quadrant contributions at once
            C_sum += C_q[mx, my] + C_q[N - mx, my] + C_q[mx, N - my] + C_q[N - mx, N - my]
        # normalize by the annulus circumference 2*pi*mi (radial average)
        C[i] = C_sum / (2 * np.pi * mi)
    # Parseval's theorem based power normalization
    # NOTE(review): the variance is taken of the *windowed* h when
    # window=True -- presumably intentional so the target matches the
    # transformed data; confirm against the Todo ("Normalize") above.
    meansqu_target = np.var(h)
    meansqu_is = radially_integrated_psd(q, C)
    C *= meansqu_target / meansqu_is
    return q, C
def axis_averaged_psd(h, dx, window=True, axis=0):
    """
    Average over 1D power spectra in x-direction (rows) if axis == 0, y-direction (columns) otherwise.

    Parameters
    ----------
    h : array_like
        2D array of surface heights, equidistant sampling assumed.
    dx : float
        Sampling distance (L) along the averaged lines.
    window : bool
        Apply a Hanning window to each line before the DFT; recommended for
        non-periodic data.
    axis : int
        0 to average the spectra of rows, any other value for columns.

    Returns
    -------
    q, C : ndarray
        Angular frequencies (L^-1) and averaged power; the zero-frequency and
        Nyquist bins are excluded.

    Notes
    -----
    The routine uses numpy's FFT package and follows the algorithm in Elson & Bennet (1995), 10.1364/AO.34.000201.
    """
    h = np.copy(h)
    if axis == 1:
        h = h.T
    # bug fix: line length must be taken AFTER the transpose and from the row
    # axis (shape[1]); the original used h.shape[0] unconditionally, which was
    # only correct for square inputs. (The unused local L was also dropped.)
    n_line = h.shape[1]
    taper = np.hanning(n_line)  # hoisted: identical for every line

    def power(line):
        """1D periodogram of a single line, Equ. 8a."""
        if window:
            line = line * taper
        return dx * np.abs(np.fft.fft(line))**2 / n_line

    powers = np.array([power(line) for line in h])
    # positive frequencies, excluding DC (index 0) and the Nyquist bin
    q = np.fft.fftfreq(n_line, dx) * 2 * np.pi
    q = q[1:n_line // 2]
    C = np.average(powers, axis=0)[1:n_line // 2]
    return q, C
|
unknown
|
codeparrot/codeparrot-clean
| ||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from nova.virt.baremetal import tilera
from nova.virt.baremetal import fake
from nova.openstack.common import cfg
from nova import flags
from nova import exception
# Global nova flags registry shared across modules.
FLAGS = flags.FLAGS
# Configuration option selecting which bare-metal back-end
# get_baremetal_nodes() dispatches to: 'tilera' (default) or 'fake'.
baremetal_opts = [
    cfg.StrOpt('baremetal_driver',
               default='tilera',
               help='Bare-metal driver runs on')
]
FLAGS.register_opts(baremetal_opts)
def get_baremetal_nodes():
    """Return the node manager for the configured bare-metal driver.

    Dispatches on the ``baremetal_driver`` flag ('tilera' or 'fake').

    Raises:
        exception.NovaException: if the flag names an unknown driver.
    """
    d = FLAGS.baremetal_driver
    if d == 'tilera':
        return tilera.get_baremetal_nodes()
    elif d == 'fake':
        return fake.get_baremetal_nodes()
    else:
        # bug fix: the original never interpolated %(d)s, so the error always
        # showed the literal placeholder instead of the offending driver name
        raise exception.NovaException(
            _("Unknown baremetal driver %(d)s") % {'d': d})
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright (c) 2016, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Brian Torres-Gil <btorres-gil@paloaltonetworks.com>
"""Generate API reference page for each module"""
import os
import sys
import pkgutil
import errno
# Modules that have a configuration-tree diagram
# (_diagrams/pandevice.<module>.dot) to embed in their reference page.
tree_exists = [
    'device',
    'firewall',
    'ha',
    'network',
    'panorama',
    'policies',
]
# Modules documented without a tree diagram (kept for reference; the code
# below only consults tree_exists).
tree_not_exists = [
    'base',
    'errors',
    'objects',
    'updater',
    'userid',
]
# reST page template: {0} = module name, {1} = '=' padding so the title
# underline matches its length, {2} = optional tree-diagram section
# (template_tree, or "" when the module has no diagram).
template_main = """Module: {0}
========{1}
Inheritance diagram
-------------------
.. inheritance-diagram:: pandevice.{0}
:parts: 1{2}
Class Reference
---------------
.. automodule:: pandevice.{0}
"""
# Section spliced into template_main for modules listed in tree_exists.
template_tree = """
Configuration tree diagram
--------------------------
.. graphviz:: _diagrams/pandevice.{0}.dot """
def mkdir_p(path):
    """Create *path* and any missing parent directories (``mkdir -p``).

    An already-existing directory is not an error; every other failure
    (permissions, a regular file in the way, ...) propagates.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def create_module_references(directory=None):
    """Generate one reST reference page ("module-<name>.rst") per pandevice module.

    Args:
        directory: Output directory, created if missing; defaults to the
            current working directory.

    NOTE(review): this imports every pandevice module (executing its
    top-level code) and uses dict.iteritems(), so the script targets
    Python 2.
    """
    # Set paths to package and modules
    curdir = os.path.dirname(os.path.abspath(__file__))
    rootpath = [os.path.join(curdir, os.pardir)]
    libpath = [os.path.join(curdir, os.pardir, 'pandevice')]
    # prepend so the in-tree checkout wins over any installed pandevice
    sys.path[:0] = rootpath
    sys.path[:0] = libpath
    #print "Looking for pandevice in path: %s" % libpath
    # Import all modules in package
    modules = []
    for importer, modname, ispkg in pkgutil.iter_modules(path=libpath,
                                                         prefix="pandevice."):
        modules.append(__import__(modname, fromlist="dummy"))
    output = {}
    # Create output for each module
    for module in modules:
        module_name = module.__name__.split(".")[-1]
        # '=' underline must be at least as long as the "Module: <name>" title
        header_pad = "="*len(module_name)
        if module_name in tree_exists:
            config_tree = template_tree.format(module_name)
        else:
            config_tree = ""
        module_string = template_main.format(module_name, header_pad, config_tree)
        output[module_name] = module_string
    # Write output to file or stdout
    path = ""
    if directory is not None:
        mkdir_p(directory)
        path = directory + "/"
    for module, lines in output.iteritems():
        # 'interface' is deliberately excluded from the generated docs
        if module == "interface":
            continue
        if not lines:
            continue
        with open("{0}module-{1}.rst".format(path, module), 'w') as file:
            file.write(lines)
# Script entry point: write the reference pages into the current directory.
if __name__ == "__main__":
    create_module_references()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp
import stock
import product
import wizard
import report
import company
import procurement
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.